
toolboxv2 API Reference

This section provides an API reference for key components directly available from the toolboxv2 package.

Core Application & Tooling

toolboxv2.AppType

Source code in toolboxv2/utils/system/types.py
class AppType:
    prefix: str
    id: str
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    def __init__(self, prefix: None | str= None, args: AppArgs | None = None):
        self.args_sto = args
        self.prefix = prefix
        """proxi attr"""

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxi attr"""

    def set_logger(self, debug=False):
        """proxi attr"""

    @property
    def debug(self):
        """proxi attr"""
        return self._debug

    def debug_rains(self, e):
        """proxi attr"""

    def set_flows(self, r):
        """proxi attr"""

    def run_flows(self, name, **kwargs):
        """proxi attr"""

    def rrun_flows(self, name, **kwargs):
        """proxi attr"""

    def idle(self):
        import time
        self.print("idle")
        try:
            while self.alive:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        self.print("a idle")
        try:
            if hasattr(self, 'daemon_app'):
                self.print("serving daemon")
                await self.daemon_app.connect(self)
            else:
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxi attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxi attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxi attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxi attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxi attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxi attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxi attr"""

    def mod_online(self, mod_name, installed=False):
        """proxi attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxi attr"""

    def save_exit(self):
        """proxi attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxi attr"""

    async def init_module(self, modular):
        return await self.load_mod(modular)

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxi attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxi attr"""

    def remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    def print_ok(self):
        """proxi attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxi attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxi attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    def exit(self):
        """proxi attr"""

    def web_context(self) -> str:
        """returns the build index ( toolbox web component )"""

    async def a_exit(self):
        """proxi attr"""

    def save_load(self, modname, spec='app'):
        """proxi attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """

    def run_a_from_sync(self, function, *args):
        """
        run an async function
        """

    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxi attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxi attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remote via http / https"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxi attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxi attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxi attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxi attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxi attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300):
        """proxi attr"""

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when the function is called via the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result type; the default False wraps the result.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): HTTP methods for the API route: "AUTO" (GET if no params, POST if params), GET, POST, PUT or DELETE; defaults to ["AUTO"].

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)

    def print_functions(self, name=None):
        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                func_level = 'r' if data['level'] == -1 else data['level']
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is not None:
                print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
                helper(functions)
                return
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """proxi attr"""

    def get_autocompletion_dict(self):
        """proxi attr"""

    def get_username(self, get_input=False, default="loot") -> str:
        """proxi attr"""

    def save_registry_as_enums(self, directory: str, filename: str):
        """proxi attr"""

    async def execute_all_functions_(self, m_query='', f_query=''):
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},
                "coverage": [0, 0],
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test: list = function_data.get('do_test')
                    # print(test, module_name, function_name, function_data)
                    infos["coverage"][0] += 1
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfuction {function_name}..."):
                        params: list = function_data.get('params')
                        sig: signature = function_data.get('signature')
                        state: bool = function_data.get('state')
                        samples: bool = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                            # print(test_kwargs)
                            # print(test_kwargs[0])
                            # test_kwargs = test_kwargs_list[0]
                        # print(module_name, function_name, test_kwargs_list)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                # print(f"test Running {state=} |{module_name}.{function_name}")
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        total_coverage = sum([float(t.split(":")[-1]) for t in all_data["coverage"]]) / len(all_data["coverage"])
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    @staticmethod
    def calculate_complexity(filename_or_code):
        from radon.complexity import cc_rank, cc_visit
        if os.path.exists(filename_or_code):
            with open(filename_or_code) as file:
                code = file.read()
        else:
            code = filename_or_code

        # Calculate and print Cyclomatic Complexity
        complexity_results = cc_visit(code)
        i = -1
        avg_complexity = 0
        for block in complexity_results:
            complexity = block.complexity
            i += 1
            print(f"block: {block.name} {i} Class/Fuction/Methode : {block.letter}")
            print(f"    fullname: {block.fullname}")
            print(f"    Cyclomatic Complexity: {complexity}")
            # Optional: Get complexity rank
            avg_complexity += complexity
            rank = cc_rank(complexity)
            print(f"    Complexity Rank: {rank}")
            # print(f"    lineno: {block.lineno}")
            print(f"    endline: {block.endline}")
            print(f"    col_offset: {block.col_offset}\n")
        if i <= 0:
            i += 2
        avg_complexity = avg_complexity / i
        print(f"\nAVG Complexity: {avg_complexity:.2f}")
        print(f"Total Rank: {cc_rank(int(avg_complexity + i // 10))}")

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        start_time = time.time()
        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            try:
                result = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )

                if not isinstance(result, Result):
                    result = Result.ok(result)

                success = result.info.exec_code == 0
                execution_time = time.time() - start_time
                return success, str(result), test_kwargs, execution_time
            except Exception as e:
                execution_time = time.time() - start_time
                return False, str(e), test_kwargs, execution_time

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_function(module_name, fname, fdata, profiler)
                    for fname, fdata in functions.items()
                    if fname.startswith(f_query)
                ]

                if not tasks:
                    return module_name, ModuleInfo()

                results = await asyncio.gather(*tasks)

                # Combine results from all functions in the module
                combined_info = ModuleInfo()
                total_execution_time = 0

                for _, info in results:
                    combined_info.functions_run += info.functions_run
                    combined_info.functions_fatal_error += info.functions_fatal_error
                    combined_info.error += info.error
                    combined_info.functions_sug += info.functions_sug
                    combined_info.calls.update(info.calls)
                    combined_info.callse.update(info.callse)
                    combined_info.coverage[0] += info.coverage[0]
                    combined_info.coverage[1] += info.coverage[1]
                    total_execution_time += info.execution_time

                combined_info.execution_time = time.time() - start_time
                return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name query filter
            f_query (str): Function name query filter
            enable_profiling (bool): Enable detailed profiling information
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        items = list(self.functions.items()).copy()

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # Process all modules concurrently
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_module(mname, mfuncs, f_query, profiler)
                    for mname, mfuncs in filtered_modules
                ]

                results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Calculate coverage
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Store module info
                stats.__dict__[module_name] = info

            # Calculate total coverage
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                ps.print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)
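
Every method body above is a stub whose docstring reads "proxi attr": AppType (defined in types.py) is a typed interface that the concrete App class implements at runtime. A minimal sketch of using it as a type annotation; the module name is hypothetical:

from toolboxv2 import AppType

def list_module_functions(app: AppType) -> None:
    # app is expected to be the concrete App instance that implements the
    # AppType interface documented above; "MyModule" is a hypothetical name.
    app.print_functions("MyModule")
    if app.debug:
        app.print("debug mode is on")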

debug property writable

Proxy attribute.

prefix = prefix instance-attribute

Proxy attribute.

a_exit() async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_exit(self):
    """proxi attr"""

a_fuction_runner(function, function_data, args, kwargs) async

Runs the given function using its registered metadata: it reads 'params', 'module_name' and 'func_name' from function_data and derives mod_function_name as "{modular_name}.{function_name}".

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """

a_remove_mod(mod_name, spec='app', delete=True) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""

a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """proxi attr"""

a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """proxi attr"""

debug_rains(e)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def debug_rains(self, e):
    """proxi attr"""

disconnect(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def disconnect(*args, **kwargs):
    """proxi attr"""

execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

- m_query (str, default ''): Module name query filter.
- f_query (str, default ''): Function name query filter.
- enable_profiling (bool, default True): Enable detailed profiling information.
Source code in toolboxv2/utils/system/types.py
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name query filter
        f_query (str): Function name query filter
        enable_profiling (bool): Enable detailed profiling information
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    items = list(self.functions.items()).copy()

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # Process all modules concurrently
        async with asyncio.Semaphore(mp.cpu_count()):
            tasks = [
                self.process_module(mname, mfuncs, f_query, profiler)
                for mname, mfuncs in filtered_modules
            ]

            results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Calculate coverage
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Store module info
            stats.__dict__[module_name] = info

        # Calculate total coverage
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            ps.print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)
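
A minimal sketch of driving this runner from synchronous code, assuming app is a concrete App instance; the "My" module filter is hypothetical:

import asyncio

# Test every registered function in modules whose names start with "My",
# with profiling enabled; the returned Result wraps the per-module statistics.
result = asyncio.run(app.execute_all_functions(m_query="My", enable_profiling=True))
print(result)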

exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def exit(self):
    """proxi attr"""

exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""

fuction_runner(function, function_data, args, kwargs, t0=0.0)

Runs the given function using its registered metadata: it reads 'params', 'module_name' and 'func_name' from function_data and derives mod_function_name as "{modular_name}.{function_name}".

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """

get_all_mods(working_dir='mods', path_to='./runtime')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """proxi attr"""

get_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_autocompletion_dict(self):
    """proxi attr"""

get_function(name, **kwargs)

Kwargs for _get_function:

- metadata: return the registered function dictionary; stateless functions yield (function_data, None), 0 and stateful functions yield (function_data, higher_order_function), 0.
- state (bool)
- specification (str): defaults to 'app'.

Source code in toolboxv2/utils/system/types.py
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """

get_mod(name, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """proxi attr"""

get_username(get_input=False, default='loot')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_username(self, get_input=False, default="loot") -> str:
    """proxi attr"""

hide_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def hide_console(*args, **kwargs):
    """proxi attr"""

inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """proxi attr"""

load_all_mods_in_file(working_dir='mods') async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def load_all_mods_in_file(self, working_dir="mods"):
    """proxi attr"""

load_mod(mod_name, mlm='I', **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """proxi attr"""

mod_online(mod_name, installed=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def mod_online(self, mod_name, installed=False):
    """proxi attr"""

print(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def print(text, *args, **kwargs):
    """proxi attr"""

print_ok()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def print_ok(self):
    """proxi attr"""
    self.logger.info("OK")

reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """proxi attr"""

remove_mod(mod_name, spec='app', delete=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""

rrun_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def rrun_flows(self, name, **kwargs):
    """proxi attr"""

run_a_from_sync(function, *args)

Run an async function from synchronous code.

Source code in toolboxv2/utils/system/types.py
def run_a_from_sync(self, function, *args):
    """
    run an async function
    """

run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """proxi attr"""

run_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_flows(self, name, **kwargs):
    """proxi attr"""

run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """proxi attr"""

run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

Runs a function remotely via HTTP/HTTPS.

Source code in toolboxv2/utils/system/types.py
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """run a function remote via http / https"""

save_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_autocompletion_dict(self):
    """proxi attr"""

save_exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_exit(self):
    """proxi attr"""

save_initialized_module(tools_class, spec)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_initialized_module(self, tools_class, spec):
    """proxi attr"""

save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """proxi attr"""

save_load(modname, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_load(self, modname, spec='app'):
    """proxi attr"""

save_registry_as_enums(directory, filename)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_registry_as_enums(self, directory: str, filename: str):
    """proxi attr"""

set_flows(r)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_flows(self, r):
    """proxi attr"""

set_logger(debug=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_logger(self, debug=False):
    """proxi attr"""

show_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def show_console(*args, **kwargs):
    """proxi attr"""

sprint(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""

tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

- name (str, default None): The name to register the function under. Defaults to the function's own name.
- mod_name (str, default ''): The name of the module the function belongs to.
- helper (str, default ''): A helper string providing additional information about the function.
- version (str or None, default None): The version of the function or module.
- test (bool, default True): Flag to indicate if the function is for testing purposes.
- restrict_in_virtual_mode (bool, default False): Flag to restrict the function in virtual mode.
- api (bool, default False): Flag to indicate if the function is part of an API.
- initial (bool, default False): Flag to indicate if the function should be executed at initialization.
- exit_f (bool, default False): Flag to indicate if the function should be executed at exit.
- test_only (bool, default False): Flag to indicate if the function should only be used for testing.
- memory_cache (bool, default False): Flag to enable memory caching for the function.
- request_as_kwarg (bool, default False): Flag to pass the request object as a keyword argument when the function is called via the API.
- file_cache (bool, default False): Flag to enable file caching for the function.
- row (bool, default False): Whether to return the raw result instead of auto-wrapping it in a Result type; the default False wraps the result.
- state (bool or None, default None): Flag to indicate if the function maintains state.
- level (int, default 0): The level of the function, used for prioritization or categorization.
- memory_cache_max_size (int, default 100): Maximum size of the memory cache.
- memory_cache_ttl (int, default 300): Time-to-live for the memory cache entries.
- samples (list or dict or None, default None): Samples or examples of function usage.
- interface (str, default None): The interface type for the function.
- pre_compute (callable, default None): A function to be called before the main function.
- post_compute (callable, default None): A function to be called after the main function.
- api_methods (list[str], default None): HTTP methods for the API route: "AUTO" (GET if no params, POST if params), GET, POST, PUT or DELETE; defaults to ["AUTO"].

Returns:

- function: The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when the function is called via the API.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result type; the default False wraps the result.
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): HTTP methods for the API route: "AUTO" (GET if no params, POST if params), GET, POST, PUT or DELETE; defaults to ["AUTO"].

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
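
A minimal usage sketch for this decorator (the module name, function body, and cache settings are illustrative assumptions, not part of the reference):

from toolboxv2 import get_app

app = get_app(from_="docs-example")

@app.tb(mod_name="my_mod", api=True, memory_cache=True, memory_cache_ttl=60)
def greet(name: str = "world"):
    # Registered as my_mod.greet; exposed over the API (api=True) and
    # memory-cached for 60 seconds.
    return f"Hello, {name}!"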

watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

Proxy attribute

Source code in toolboxv2/utils/system/types.py
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """proxi attr"""

web_context()

Returns the build index (the toolbox web component)

Source code in toolboxv2/utils/system/types.py
def web_context(self) -> str:
    """returns the build index ( toolbox web component )"""

toolboxv2.MainTool

Source code in toolboxv2/utils/system/main_tool.py
class MainTool:
    toolID: str = ""
    # app = None
    interface = None
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.
        Do not override. Use __ainit__ instead.
        """
        self.__storedargs = args, kwargs
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", None))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):

        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    def get_version(self) -> str:
        """"Returns the version"""
        return self.version

    async def get_user(self, username: str) -> Result:
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that were passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()
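
Because __init__ only stores its arguments, a MainTool instance must be awaited so that __ainit__ runs. A minimal subclass sketch (the module name, version, and on_start hook are illustrative assumptions):

from toolboxv2 import MainTool

class Tools(MainTool):
    def __init__(self, app=None):
        self.name = "MyMod"
        self.version = "0.1.0"
        # __ainit__ reads "v" and "name" from kwargs and awaits the async
        # on_start hook passed here.
        super().__init__(v=self.version, name=self.name, on_start=self.on_start)

    async def on_start(self):
        self.print("MyMod is starting")

# In an async context:
#     tool = await Tools(app=app)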

__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/system/main_tool.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override. Use __ainit__ instead.
    """
    self.__storedargs = args, kwargs
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")

__initobj() async

Crutch used for __await__ after spawning

Source code in toolboxv2/utils/system/main_tool.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that were passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self

get_version()

"Returns the version

Source code in toolboxv2/utils/system/main_tool.py
def get_version(self) -> str:
    """"Returns the version"""
    return self.version

webInstall(user_instance, construct_render)

"Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/main_tool.py
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""

toolboxv2.get_app(from_=None, name=None, args=AppArgs().default(), app_con=None, sync=False)

Source code in toolboxv2/utils/system/getting_and_closing_app.py
def get_app(from_=None, name=None, args=AppArgs().default(), app_con=None, sync=False) -> AppType:
    global registered_apps
    # name = None
    # print(f"get app requested from: {from_} withe name: {name}")
    logger = get_logger()
    logger.info(Style.GREYBG(f"get app requested from: {from_}"))
    if registered_apps[0] is not None:
        return registered_apps[0]

    if app_con is None:
        from ... import App
        app_con = App
    app = app_con(name, args=args) if name else app_con()
    logger.info(Style.Bold(f"App instance, returned ID: {app.id}"))

    registered_apps[0] = app
    return app
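
get_app returns the process-wide App singleton, creating it on first use. A small usage sketch (the from_ tag is free-form and only used for logging):

from toolboxv2 import get_app

app = get_app(from_="docs-example")   # first call constructs the App
same = get_app(from_="elsewhere")     # later calls return the cached instance
assert app is same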

System Utilities & Configuration

toolboxv2.FileHandler

Bases: Code

Source code in toolboxv2/utils/system/file_handler.py
class FileHandler(Code):

    def __init__(self, filename, name='mainTool', keys=None, defaults=None):
        if defaults is None:
            defaults = {}
        if keys is None:
            keys = {}
        assert filename.endswith(".config") or filename.endswith(".data"), \
            f"filename must end with .config or .data {filename=}"
        self.file_handler_save = {}
        self.file_handler_load = {}
        self.file_handler_key_mapper = {}
        self.file_handler_filename = filename
        self.file_handler_storage = None
        self.file_handler_max_loaded_index_ = 0
        self.file_handler_file_prefix = (f".{filename.split('.')[1]}/"
                                         f"{name.replace('.', '-')}/")
        # self.load_file_handler()
        self.set_defaults_keys_file_handler(keys, defaults)

    def _open_file_handler(self, mode: str, rdu):
        logger = get_logger()
        logger.info(Style.Bold(Style.YELLOW(f"Opening file in mode : {mode}")))
        if self.file_handler_storage:
            self.file_handler_storage.close()
            self.file_handler_storage = None
        try:
            self.file_handler_storage = open(self.file_handler_file_prefix + self.file_handler_filename, mode)
            self.file_handler_max_loaded_index_ += 1
        except FileNotFoundError:
            if self.file_handler_max_loaded_index_ == 2:
                os.makedirs(self.file_handler_file_prefix, exist_ok=True)
            if self.file_handler_max_loaded_index_ == 3:
                os.makedirs(".config/mainTool", exist_ok=True)
            if self.file_handler_max_loaded_index_ >= 5:
                print(Style.RED(f"pleas create this file to prosed : {self.file_handler_file_prefix}"
                                f"{self.file_handler_filename}"))
                logger.critical(f"{self.file_handler_file_prefix} {self.file_handler_filename} FileNotFoundError cannot"
                                f" be Created")
                exit(0)
            self.file_handler_max_loaded_index_ += 1
            logger.info(Style.YELLOW(f"Try Creating File: {self.file_handler_file_prefix}{self.file_handler_filename}"))

            if not os.path.exists(f"{self.file_handler_file_prefix}"):
                os.makedirs(f"{self.file_handler_file_prefix}")

            with open(self.file_handler_file_prefix + self.file_handler_filename, 'a'):
                logger.info(Style.GREEN("File created successfully"))
                self.file_handler_max_loaded_index_ = -1
            rdu()
        except (OSError, PermissionError) as e:
            raise e

    def open_s_file_handler(self):
        self._open_file_handler('w+', self.open_s_file_handler)
        return self

    def open_l_file_handler(self):
        self._open_file_handler('r+', self.open_l_file_handler)
        return self

    def save_file_handler(self):
        get_logger().info(
            Style.BLUE(
                f"init Saving (S) {self.file_handler_filename} "
            )
        )
        if self.file_handler_storage:
            get_logger().warning(
                f"WARNING file is already open (S): {self.file_handler_filename} {self.file_handler_storage}")

        self.open_s_file_handler()

        get_logger().info(
            Style.BLUE(
                f"Elements to save : ({len(self.file_handler_save.keys())})"
            )
        )

        self.file_handler_storage.write(json.dumps(self.file_handler_save))

        self.file_handler_storage.close()
        self.file_handler_storage = None

        get_logger().info(
            Style.BLUE(
                f"closing file : {self.file_handler_filename} "
            )
        )

        return self

    def add_to_save_file_handler(self, key: str, value: str):
        if len(key) != 10:
            get_logger(). \
                warning(
                Style.YELLOW(
                    'WARNING: key length is not 10 characters'
                )
            )
            return False
        if key not in self.file_handler_load:
            if key in self.file_handler_key_mapper:
                key = self.file_handler_key_mapper[key]

        self.file_handler_load[key] = value
        self.file_handler_save[key] = self.encode_code(value)
        return True

    def remove_key_file_handler(self, key: str):
        if key == 'Pka7237327':
            print("Cant remove Root Key")
            return
        if key in self.file_handler_load:
            del self.file_handler_load[key]
        if key in self.file_handler_save:
            del self.file_handler_save[key]

    def load_file_handler(self):
        get_logger().info(
            Style.BLUE(
                f"loading {self.file_handler_filename} "
            )
        )
        if self.file_handler_storage:
            get_logger().warning(
                Style.YELLOW(
                    f"WARNING file is already open (L) {self.file_handler_filename}"
                )
            )
        self.open_l_file_handler()

        try:

            self.file_handler_save = json.load(self.file_handler_storage)
            for key, line in self.file_handler_save.items():
                self.file_handler_load[key] = self.decode_code(line)

        except (json.decoder.JSONDecodeError, Exception):

            for line in self.file_handler_storage:
                line = line[:-1]
                heda = line[:10]
                self.file_handler_save[heda] = line[10:]
                enc = self.decode_code(line[10:])
                self.file_handler_load[heda] = enc

            self.file_handler_save = {}

        self.file_handler_storage.close()
        self.file_handler_storage = None

        return self

    def get_file_handler(self, obj: str, default=None) -> str or None:
        logger = get_logger()
        if obj not in self.file_handler_load:
            if obj in self.file_handler_key_mapper:
                obj = self.file_handler_key_mapper[obj]
        logger.info(Style.ITALIC(Style.GREY(f"Collecting data from storage key : {obj}")))
        self.file_handler_max_loaded_index_ = -1
        for objects in self.file_handler_load.items():
            self.file_handler_max_loaded_index_ += 1
            if obj == objects[0]:

                try:
                    if len(objects[1]) > 0:
                        return ast.literal_eval(objects[1]) if isinstance(objects[1], str) else objects[1]
                    logger.warning(
                        Style.YELLOW(
                            f"No data  {obj}  ; {self.file_handler_filename}"
                        )
                    )
                except ValueError:
                    logger.error(f"ValueError Loading {obj} ; {self.file_handler_filename}")
                except SyntaxError:
                    if isinstance(objects[1], str):
                        return objects[1]
                    logger.warning(
                        Style.YELLOW(
                            f"Possible SyntaxError Loading {obj} ; {self.file_handler_filename}"
                            f" {len(objects[1])} {type(objects[1])}"
                        )
                    )
                    return objects[1]
                except NameError:
                    return str(objects[1])

        if obj in list(self.file_handler_save.keys()):
            r = self.decode_code(self.file_handler_save[obj])
            logger.info(f"returning Default for {obj}")
            return r

        if default is None:
            default = self.file_handler_load.get(obj)

        logger.info("no data found")
        return default

    def set_defaults_keys_file_handler(self, keys: dict, defaults: dict):
        list_keys = iter(list(keys.keys()))
        df_keys = defaults.keys()
        for key in list_keys:
            self.file_handler_key_mapper[key] = keys[key]
            self.file_handler_key_mapper[keys[key]] = key
            if key in df_keys:
                self.file_handler_load[keys[key]] = str(defaults[key])
                self.file_handler_save[keys[key]] = self.encode_code(defaults[key])
            else:
                self.file_handler_load[keys[key]] = "None"

    def delete_file(self):
        os.remove(self.file_handler_file_prefix + self.file_handler_filename)
        get_logger().warning(Style.GREEN(f"File deleted {self.file_handler_file_prefix + self.file_handler_filename}"))
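
A minimal FileHandler sketch (filename and keys are illustrative assumptions). Storage keys must be exactly 10 characters, and values are round-tripped through ast.literal_eval, so plain strings should be stored in Python-literal form:

from toolboxv2 import FileHandler

fh = FileHandler("demo.config", name="docs-example",
                 keys={"token": "api-token~"},   # logical name -> 10-char storage key
                 defaults={"token": "unset"})
fh.load_file_handler()                            # read existing values, if any
fh.add_to_save_file_handler("api-token~", "'secret-123'")
fh.save_file_handler()                            # encode and write to disk
value = fh.get_file_handler("api-token~")         # -> 'secret-123'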

toolboxv2.utils

App

Source code in toolboxv2/utils/toolbox.py
class App(AppType, metaclass=Singleton):

    def __init__(self, prefix: str = "", args=AppArgs().default()):
        super().__init__(prefix, args)
        self._web_context = None
        t0 = time.perf_counter()
        abspath = os.path.abspath(__file__)
        self.system_flag = system()  # Linux: Linux Mac: Darwin Windows: Windows

        self.appdata = os.getenv('APPDATA') if os.name == 'nt' else os.getenv('XDG_CONFIG_HOME') or os.path.expanduser(
                '~/.config') if os.name == 'posix' else None

        if self.system_flag == "Darwin" or self.system_flag == "Linux":
            dir_name = os.path.dirname(abspath).replace("/utils", "")
        else:
            dir_name = os.path.dirname(abspath).replace("\\utils", "")

        self.start_dir = str(dir_name)

        self.bg_tasks = []

        lapp = dir_name + '\\.data\\'

        if not prefix:
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt") as prefix_file:
                cont = prefix_file.read()
                if cont:
                    prefix = cont.rstrip()
        else:
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt", "w") as prefix_file:
                prefix_file.write(prefix)

        self.prefix = prefix

        node_ = node()

        if 'localhost' in node_ and (host := os.getenv('HOSTNAME', 'localhost')) != 'localhost':
            node_ = node_.replace('localhost', host)
        self.id = prefix + '-' + node_
        self.globals = {
            "root": {**globals()},
        }
        self.locals = {
            "user": {'app': self, **locals()},
        }

        identification = self.id

        if "test" in prefix:
            if self.system_flag == "Darwin" or self.system_flag == "Linux":
                start_dir = self.start_dir.replace("ToolBoxV2/toolboxv2", "toolboxv2")
            else:
                start_dir = self.start_dir.replace("ToolBoxV2\\toolboxv2", "toolboxv2")
            self.data_dir = start_dir + '\\.data\\' + "test"
            self.config_dir = start_dir + '\\.config\\' + "test"
            self.info_dir = start_dir + '\\.info\\' + "test"
        elif identification.startswith('collective-'):
            collective_identification = identification.split('-')[1]
            self.data_dir = self.start_dir + '\\.data\\' + collective_identification
            self.config_dir = self.start_dir + '\\.config\\' + collective_identification
            self.info_dir = self.start_dir + '\\.info\\' + collective_identification
        else:
            self.data_dir = self.start_dir + '\\.data\\' + identification
            self.config_dir = self.start_dir + '\\.config\\' + identification
            self.info_dir = self.start_dir + '\\.info\\' + identification

        if self.appdata is None:
            self.appdata = self.data_dir
        else:
            self.appdata += "/ToolBoxV2"

        if not os.path.exists(self.appdata):
            os.makedirs(self.appdata, exist_ok=True)
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir, exist_ok=True)
        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir, exist_ok=True)
        if not os.path.exists(self.info_dir):
            os.makedirs(self.info_dir, exist_ok=True)

        print(f"Starting ToolBox as {prefix} from :", Style.Bold(Style.CYAN(f"{os.getcwd()}")))

        logger_info_str, self.logger, self.logging_filename = self.set_logger(args.debug)

        print("Logger " + logger_info_str)
        print("================================")
        self.logger.info("Logger initialized")
        get_logger().info(Style.GREEN("Starting Application instance"))
        if args.init and args.init is not None and self.start_dir not in sys.path:
            sys.path.append(self.start_dir)


        __version__ = get_version_from_pyproject()

        self.version = __version__

        self.keys = {
            "MACRO": "macro~~~~:",
            "MACRO_C": "m_color~~:",
            "HELPER": "helper~~~:",
            "debug": "debug~~~~:",
            "id": "name-spa~:",
            "st-load": "mute~load:",
            "comm-his": "comm-his~:",
            "develop-mode": "dev~mode~:",
            "provider::": "provider::",
        }

        defaults = {
            "MACRO": ['Exit'],
            "MACRO_C": {},
            "HELPER": {},
            "debug": args.debug,
            "id": self.id,
            "st-load": False,
            "comm-his": [[]],
            "develop-mode": False,
        }
        self.config_fh = FileHandler(self.id + ".config", keys=self.keys, defaults=defaults)
        self.config_fh.load_file_handler()
        self._debug = args.debug
        self.flows = {}
        self.dev_modi = self.config_fh.get_file_handler(self.keys["develop-mode"])
        if self.config_fh.get_file_handler("provider::") is None:
            self.config_fh.add_to_save_file_handler("provider::", "http://localhost:" + str(
                self.args_sto.port) if os.environ.get("HOSTNAME",
                                                                     "localhost") == "localhost" else "https://simplecore.app")
        self.functions = {}
        self.modules = {}

        self.interface_type = ToolBoxInterfaces.native
        self.PREFIX = Style.CYAN(f"~{node()}@>")
        self.alive = True
        self.called_exit = False, time.time()

        self.print(f"Infos:\n  {'Name':<8} -> {node()}\n  {'ID':<8} -> {self.id}\n  {'Version':<8} -> {self.version}\n")

        self.logger.info(
            Style.GREEN(
                f"Finish init up in {time.perf_counter() - t0:.2f}s"
            )
        )

        self.args_sto = args
        self.loop = None

        from .system.session import Session
        self.session: Session = Session(self.get_username())

    def get_username(self, get_input=False, default="loot") -> str:
        user_name = self.config_fh.get_file_handler("ac_user:::")
        if get_input and user_name is None:
            user_name = input("Input your username: ")
            self.config_fh.add_to_save_file_handler("ac_user:::", user_name)
        if user_name is None:
            user_name = default
            self.config_fh.add_to_save_file_handler("ac_user:::", user_name)
        return user_name

    def set_username(self, username):
        return self.config_fh.add_to_save_file_handler("ac_user:::", username)

    @staticmethod
    def exit_main(*args, **kwargs):
        """Proxy attribute"""

    @staticmethod
    def hide_console(*args, **kwargs):
        """Proxy attribute"""

    @staticmethod
    def show_console(*args, **kwargs):
        """Proxy attribute"""

    @staticmethod
    def disconnect(*args, **kwargs):
        """Proxy attribute"""

    def set_logger(self, debug=False):
        if "test" in self.prefix and not debug:
            logger, logging_filename = setup_logging(logging.NOTSET, name="toolbox-test", interminal=True,
                                                     file_level=logging.NOTSET, app_name=self.id)
            logger_info_str = "in Test Mode"
        elif "live" in self.prefix and not debug:
            logger, logging_filename = setup_logging(logging.DEBUG, name="toolbox-live", interminal=False,
                                                     file_level=logging.WARNING, app_name=self.id)
            logger_info_str = "in Live Mode"
            # setup_logging(logging.WARNING, name="toolbox-live", is_online=True
            #              , online_level=logging.WARNING).info("Logger initialized")
        elif "debug" in self.prefix or self.prefix.endswith("D"):
            self.prefix = self.prefix.replace("-debug", '').replace("debug", '')
            logger, logging_filename = setup_logging(logging.DEBUG, name="toolbox-debug", interminal=True,
                                                     file_level=logging.WARNING, app_name=self.id)
            logger_info_str = "in debug Mode"
            self.debug = True
        elif debug:
            logger, logging_filename = setup_logging(logging.DEBUG, name=f"toolbox-{self.prefix}-debug",
                                                     interminal=True,
                                                     file_level=logging.DEBUG, app_name=self.id)
            logger_info_str = "in args debug Mode"
        else:
            logger, logging_filename = setup_logging(logging.ERROR, name=f"toolbox-{self.prefix}", app_name=self.id)
            logger_info_str = "in Default"

        return logger_info_str, logger, logging_filename

    @property
    def debug(self):
        return self._debug

    @debug.setter
    def debug(self, value):
        if not isinstance(value, bool):
            self.logger.debug(f"Value must be an boolean. is : {value} type of {type(value)}")
            raise ValueError("Value must be an boolean.")

        # self.logger.info(f"Setting debug {value}")
        self._debug = value

    def debug_rains(self, e):
        if self.debug:
            import traceback
            x = "="*5
            x += " DEBUG "
            x += "="*5
            self.print(x)
            self.print(traceback.format_exc())
            self.print(x)
            raise e
        else:
            self.logger.error(f"Error: {e}")
            import traceback
            x = "="*5
            x += " DEBUG "
            x += "="*5
            self.print(x)
            self.print(traceback.format_exc())
            self.print(x)

    def set_flows(self, r):
        self.flows = r

    async def run_flows(self, name, **kwargs):
        from ..flows import flows_dict as flows_dict_func
        if name not in self.flows:
            self.flows = {**self.flows, **flows_dict_func(s=name, remote=True)}
        if name in self.flows:
            if asyncio.iscoroutinefunction(self.flows[name]):
                return await self.flows[name](get_app(from_="runner"), self.args_sto, **kwargs)
            else:
                return self.flows[name](get_app(from_="runner"), self.args_sto, **kwargs)
        else:
            print("Flow not found, active flows:", len(self.flows.keys()))

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):

        mode = 'xb'
        self.logger.info(f" coppy mod {mod_name} to {new_mod_dir} size : {sys.getsizeof(content) / 8388608:.3f} mb")

        if not os.path.exists(new_mod_dir):
            os.makedirs(new_mod_dir)
            with open(f"{new_mod_dir}/__init__.py", "w") as nmd:
                nmd.write(f"__version__ = '{self.version}'")

        if os.path.exists(f"{new_mod_dir}/{mod_name}.{file_type}"):
            mode = False

            with open(f"{new_mod_dir}/{mod_name}.{file_type}", 'rb') as d:
                runtime_mod = d.read()  # Testing version but not efficient

            if len(content) != len(runtime_mod):
                mode = 'wb'

        if mode:
            with open(f"{new_mod_dir}/{mod_name}.{file_type}", mode) as f:
                f.write(content)

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        working_dir = self.id.replace(".", "_")
        lib_mod_dir = f"toolboxv2.runtime.{working_dir}.mod_lib."

        self.logger.info(f"pre_lib_mod {mod_name} from {lib_mod_dir}")

        postfix = "_dev" if self.dev_modi else ""
        mod_file_dir = f"./mods{postfix}/{mod_name}.{file_type}"
        new_mod_dir = f"{path_to}/{working_dir}/mod_lib"
        with open(mod_file_dir, "rb") as c:
            content = c.read()
        self._coppy_mod(content, new_mod_dir, mod_name, file_type=file_type)
        return lib_mod_dir

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        loc = self._pre_lib_mod(mod_name, file_type)
        return self.inplace_load_instance(mod_name, loc=loc, **kwargs)

    def helper_install_pip_module(self, module_name):
        if 'main' in self.id:
            return
        self.print(f"Installing {module_name} GREEDY")
        os.system(f"{sys.executable} -m pip install {module_name}")

    def python_module_import_classifier(self, mod_name, error_message):

        if error_message.startswith("No module named 'toolboxv2.utils"):
            return Result.default_internal_error(f"404 {error_message.split('utils')[1]} not found")
        if error_message.startswith("No module named 'toolboxv2.mods"):
            if mod_name.startswith('.'):
                return
            return self.run_a_from_sync(self.a_run_any, ("CloudM", "install"), module_name=mod_name)
        if error_message.startswith("No module named '"):
            pip_requ = error_message.split("'")[1].replace("'", "").strip()
            # if 'y' in input(f"\t\t\tAuto install {pip_requ} Y/n").lower:
            return self.helper_install_pip_module(pip_requ)
            # return Result.default_internal_error(f"404 {pip_requ} not found")

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True, mfo=None):
        if self.dev_modi and loc == "toolboxv2.mods.":
            loc = "toolboxv2.mods_dev."
        if self.mod_online(mod_name):
            self.logger.info(f"Reloading mod from : {loc + mod_name}")
            self.remove_mod(mod_name, spec=spec, delete=False)

        if (os.path.exists(self.start_dir + '/mods/' + mod_name) or os.path.exists(
            self.start_dir + '/mods/' + mod_name + '.py')) and (
            os.path.isdir(self.start_dir + '/mods/' + mod_name) or os.path.isfile(
            self.start_dir + '/mods/' + mod_name + '.py')):
            try:
                if mfo is None:
                    modular_file_object = import_module(loc + mod_name)
                else:
                    modular_file_object = mfo
                self.modules[mod_name] = modular_file_object
            except ModuleNotFoundError as e:
                self.logger.error(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                self.print(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                if self.debug or self.args_sto.sysPrint:
                    self.python_module_import_classifier(mod_name, str(e))
                self.debug_rains(e)
                return None
        else:
            self.print(f"module {loc + mod_name} is not valid")
            return None
        if hasattr(modular_file_object, "Tools"):
            tools_class = modular_file_object.Tools
        else:
            if hasattr(modular_file_object, "name"):
                tools_class = modular_file_object
                modular_file_object = import_module(loc + mod_name)
            else:
                tools_class = None

        modular_id = None
        instance = modular_file_object
        app_instance_type = "file/application"

        if tools_class is None:
            modular_id = modular_file_object.Name if hasattr(modular_file_object, "Name") else mod_name

        if tools_class is None and modular_id is None:
            modular_id = str(modular_file_object.__name__)
            self.logger.warning(f"Unknown instance loaded {mod_name}")
            return modular_file_object

        if tools_class is not None:
            tools_class = self.save_initialized_module(tools_class, spec)
            modular_id = tools_class.name
            app_instance_type = "functions/class"
        else:
            instance.spec = spec
        # if private:
        #     self.functions[modular_id][f"{spec}_private"] = private

        if not save:
            return instance if tools_class is None else tools_class

        return self.save_instance(instance, modular_id, spec, app_instance_type, tools_class=tools_class)

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):

        if modular_id in self.functions and tools_class is None:
            if self.functions[modular_id].get(f"{spec}_instance", None) is None:
                self.functions[modular_id][f"{spec}_instance"] = instance
                self.functions[modular_id][f"{spec}_instance_type"] = instance_type
            else:
                self.print("ERROR OVERRIDE")
                raise ImportError(f"Module already known {modular_id}")

        elif tools_class is not None:
            if modular_id not in self.functions:
                self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = tools_class
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

            try:
                if not hasattr(tools_class, 'tools'):
                    tools_class.tools = {"Version": tools_class.get_version, 'name': tools_class.name}
                for function_name in list(tools_class.tools.keys()):
                    t_function_name = function_name.lower()
                    if t_function_name != "all" and t_function_name != "name":
                        self.tb(function_name, mod_name=modular_id)(tools_class.tools.get(function_name))
                self.functions[modular_id][f"{spec}_instance_type"] += "/BC"
            except Exception as e:
                self.logger.error(f"Starting Module {modular_id} compatibility failed with : {e}")
                pass
        elif modular_id not in self.functions and tools_class is None:
            self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = instance
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

        else:
            raise ImportError(f"Modular {modular_id} is not a valid mod")
        on_start = self.functions[modular_id].get("on_start")
        if on_start is not None:
            i = 1
            for f in on_start:
                try:
                    f_, e = self.get_function((modular_id, f), state=True, specification=spec)
                    if e == 0:
                        self.logger.info(Style.GREY(f"Running On start {f} {i}/{len(on_start)}"))
                        if asyncio.iscoroutinefunction(f_):
                            self.print(f"Async on start is only in Tool claas supported for {modular_id}.{f}" if tools_class is None else f"initialization starting soon for {modular_id}.{f}")
                        else:
                            o = f_()
                            if o is not None:
                                self.print(f"Function {modular_id} On start result: {o}")
                    else:
                        self.logger.warning(f"starting function not found {e}")
                except Exception as e:
                    self.logger.debug(Style.YELLOW(
                        Style.Bold(f"modular:{modular_id}.{f} on_start error {i}/{len(on_start)} -> {e}")))
                    self.debug_rains(e)
                finally:
                    i += 1
        return instance if tools_class is None else tools_class

    def save_initialized_module(self, tools_class, spec):
        tools_class.spec = spec
        live_tools_class = tools_class(app=self)
        return live_tools_class

    def mod_online(self, mod_name, installed=False):
        if installed and mod_name not in self.functions:
            self.save_load(mod_name)
        return mod_name in self.functions

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):

        if as_str is None and isinstance(name, Enum):
            modular_id = str(name.NAME.value)
            function_id = str(name.value)
        elif as_str is None and isinstance(name, list):
            modular_id, function_id = name[0], name[1]
        else:
            modular_id, function_id = as_str

        self.logger.info(f"getting function : {specification}.{modular_id}.{function_id}")

        if modular_id not in self.functions:
            if r == 0:
                self.save_load(modular_id, spec=specification)
                return self.get_function(name=(modular_id, function_id),
                                         state=state,
                                         specification=specification,
                                         metadata=metadata,
                                         r=1)
            self.logger.warning(f"function modular not found {modular_id} 404")
            return "404", 404

        if function_id not in self.functions[modular_id]:
            self.logger.warning(f"function data not found {modular_id}.{function_id} 404")
            return "404", 404

        function_data = self.functions[modular_id][function_id]

        if isinstance(function_data, list):
            print(f"functions {function_id} : {function_data}")
            function_data = self.functions[modular_id][function_data[-1]]

        function = function_data.get("func")
        params = function_data.get("params")

        state_ = function_data.get("state")
        if state_ is not None and state != state_:
            state = state_

        if function is None:
            self.logger.warning("No function found")
            return "404", 404

        if params is None:
            self.logger.warning("No function (params) found")
            return "404", 301

        if metadata and not state:
            self.logger.info("returning metadata stateless")
            return (function_data, function), 0

        if not state:  # means a stateless function
            self.logger.info("returning stateless function")
            return function, 0

        instance = self.functions[modular_id].get(f"{specification}_instance")

        # instance_type = self.functions[modular_id].get(f"{specification}_instance_type", "functions/class")

        if params[0] == 'app':
            instance = get_app(from_=f"fuction {specification}.{modular_id}.{function_id}")

        if instance is None and self.alive:
            self.inplace_load_instance(modular_id, spec=specification)
            instance = self.functions[modular_id].get(f"{specification}_instance")

        if instance is None:
            self.logger.warning("No live Instance found")
            return "404", 400

        # if instance_type.endswith("/BC"):  # for backwards compatibility  functions/class/BC old modules
        #     # returning as stateless
        #     # return "422", -1
        #     self.logger.info(
        #         f"returning stateless function, cant find tools class for state handling found {instance_type}")
        #     if metadata:
        #         self.logger.info(f"returning metadata stateless")
        #         return (function_data, function), 0
        #     return function, 0

        self.logger.info("wrapping in higher_order_function")

        self.logger.info(f"returned fuction {specification}.{modular_id}.{function_id}")
        higher_order_function = partial(function, instance)

        if metadata:
            self.logger.info("returning metadata stateful")
            return (function_data, higher_order_function), 0

        self.logger.info("returning stateful function")
        return higher_order_function, 0

    def save_exit(self):
        self.logger.info(f"save exiting saving data to {self.config_fh.file_handler_filename} states of {self.debug=}")
        self.config_fh.add_to_save_file_handler(self.keys["debug"], str(self.debug))

    def init_mod(self, mod_name, spec='app'):
        if '.' in mod_name:
            mod_name = mod_name.split('.')[0]
        return self.loop_gard().run_until_complete(self.a_init_mod(mod_name, spec))

    def run_bg_task(self, task):
        """
        Run a task in the background that will properly handle nested asyncio operations.
        This implementation ensures that asyncio.create_task() and asyncio.gather() work
        correctly within the background task.

        Args:
            task: A callable function that can be synchronous or asynchronous
        """
        if not callable(task):
            self.logger.warning("Task is not callable!")
            return None

        # Function that will run in a separate thread with its own event loop
        def thread_target(task_):
            # Create a new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Determine how to run the task based on its type
                if asyncio.iscoroutinefunction(task_):
                    # If it's an async function, run it directly
                    loop.run_until_complete(task_())
                elif asyncio.iscoroutine(task_):
                    # If it's already a coroutine object
                    loop.run_until_complete(task_)
                else:
                    # If it's a synchronous function that might create async tasks internally
                    async def wrapper():
                        # Run potentially blocking synchronous code in an executor
                        return await loop.run_in_executor(None, task_)

                    loop.run_until_complete(wrapper())

                self.logger.debug("Background task completed successfully")
            except Exception as e:
                self.logger.error(f"Background task failed with error: {str(e)}")
            finally:
                # Clean up any pending tasks
                pending = asyncio.all_tasks(loop)
                if pending:
                    # Cancel any remaining tasks
                    for task_ in pending:
                        task_.cancel()

                    # Allow tasks to finish cancellation
                    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

                loop.close()

        # Create and start a non-daemon thread that will run to completion
        # Using non-daemon thread ensures the task completes even if main thread exits
        t = threading.Thread(target=thread_target, args=(task,))
        t.daemon = False  # Non-daemon thread will keep program alive until it completes
        self.bg_tasks.append(t)
        t.start()
        return t
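
    # Usage sketch (illustrative; the task body is an assumption):
    #
    #     async def refresh_cache():
    #         await asyncio.sleep(1)
    #
    #     thread = app.run_bg_task(refresh_cache)  # runs in its own thread and event loop
    #     app.wait_for_bg_tasks(timeout=5.0)       # optionally block until completion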

    # Alternative implementation that may be needed if your function creates many nested tasks
    def run_bg_task_advanced(self, task, *args, **kwargs):
        """
        Alternative implementation for complex async scenarios where the task creates
        nested asyncio tasks using create_task() and gather().

        This version ensures proper execution of nested tasks by maintaining the thread
        and its event loop throughout the lifetime of all child tasks.

        Args:
            task: A callable function that can be synchronous or asynchronous
            *args, **kwargs: Arguments to pass to the task
        """
        if not callable(task):
            self.logger.warning("Task is not callable!")
            return None

        # Create a dedicated thread with its own event loop
        async def async_wrapper():
            try:
                if asyncio.iscoroutinefunction(task):
                    return await task(*args, **kwargs)
                elif asyncio.iscoroutine(task):
                    return await task
                else:
                    # Run in executor to avoid blocking
                    loop = asyncio.get_event_loop()
                    return await loop.run_in_executor(None, lambda: task(*args, **kwargs))
            except Exception as e:
                self.logger.error(f"Background task error: {str(e)}")
                raise

        def thread_target():
            # Create new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Run the task to completion with all its nested tasks
                loop.run_until_complete(async_wrapper())
            except Exception as e:
                self.logger.error(f"Background task thread failed: {str(e)}")
            finally:
                # Clean up any pending tasks that might still be running
                try:
                    pending = asyncio.all_tasks(loop)
                    if pending:
                        # Allow tasks time to clean up
                        loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
                except Exception:
                    pass

                loop.close()

        # Use a non-daemon thread so it will run to completion
        t = threading.Thread(target=thread_target, daemon=False)
        self.bg_tasks.append(t)
        t.start()
        return t
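
    # Usage sketch (illustrative): unlike run_bg_task, this variant forwards
    # *args/**kwargs to the task. `sync_cleanup` is a hypothetical blocking
    # function executed via run_in_executor.
    #
    #     app.run_bg_task_advanced(sync_cleanup, "tmp", force=True)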

    # Helper method to wait for background tasks to complete (optional)
    def wait_for_bg_tasks(self, timeout=None):
        """
        Wait for all background tasks to complete.

        Args:
            timeout: Maximum time to wait (in seconds) for each task to complete.
                     None means wait indefinitely.

        Returns:
            bool: True if all tasks completed, False if timeout occurred
        """
        active_tasks = [t for t in self.bg_tasks if t.is_alive()]

        for task in active_tasks:
            task.join(timeout=timeout)
            if task.is_alive():
                return False

        return True
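
    # Usage sketch (illustrative): join every background thread, waiting at
    # most 5 seconds per task.
    #
    #     if not app.wait_for_bg_tasks(timeout=5):
    #         app.logger.warning("some background tasks are still running")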

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, *args, request=None, running_function_coro=None, **kwargs):
        """
        Run a function with support for SSE streaming in both
        threaded and non-threaded contexts.
        """
        if running_function_coro is None:
            mn, fn = args[0]
            if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
                kwargs["request"] = RequestData.from_dict(request)
                if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                    kwargs["request"].data = kwargs["request"].body = kwargs['data']
                    del kwargs['data']
                if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                           []):
                    kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                    del kwargs['form_data']

        # Create the coroutine
        coro = running_function_coro or self.a_run_any(*args, **kwargs)

        # Get or create an event loop
        try:
            loop = asyncio.get_event_loop()
            is_running = loop.is_running()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            is_running = False

        # If the loop is already running, run in a separate thread
        if is_running:
            # Create thread pool executor as needed
            if not hasattr(self.__class__, '_executor'):
                self.__class__._executor = ThreadPoolExecutor(max_workers=4)

            def run_in_new_thread():
                # Set up a new loop in this thread
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)

                try:
                    # Run the coroutine
                    return new_loop.run_until_complete(coro)
                finally:
                    new_loop.close()

            # Run in thread and get result
            thread_result = self.__class__._executor.submit(run_in_new_thread).result()

            # Handle streaming results from thread
            if isinstance(thread_result, dict) and thread_result.get("is_stream"):
                # Create a new SSE stream in the main thread
                async def stream_from_function():
                    # Re-run the function with direct async access
                    stream_result = await self.a_run_any(*args, **kwargs)

                    if (isinstance(stream_result, Result) and
                        getattr(stream_result.result, 'data_type', None) == "stream"):
                        # Get and forward data from the original generator
                        original_gen = stream_result.result.data.get("generator")
                        if inspect.isasyncgen(original_gen):
                            async for item in original_gen:
                                yield item

                # Return a new streaming Result
                return Result.stream(
                    stream_generator=stream_from_function(),
                    headers=thread_result.get("headers", {})
                )

            result = thread_result
        else:
            # Direct execution when loop is not running
            result = loop.run_until_complete(coro)

        # Process the final result
        if isinstance(result, Result):
            if 'debug' in self.id:
                result.print()
            if getattr(result.result, 'data_type', None) == "stream":
                return result
            return result.to_api_result().model_dump(mode='json')

        return result
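
    # Usage sketch (illustrative): `run` is the synchronous entry point and
    # delegates to a_run_any; the (module, function) names below are stand-ins
    # for any registered pair.
    #
    #     payload = app.run(("CloudM", "Version"))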

    def loop_gard(self):
        if self.loop is None:
            self.loop = asyncio.get_event_loop()
        if self.loop.is_closed():
            self.loop = asyncio.get_event_loop()
        return self.loop

    async def a_init_mod(self, mod_name, spec='app'):
        mod = self.save_load(mod_name, spec=spec)
        if hasattr(mod, "__initobj") and not mod.async_initialized:
            await mod
        return mod
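
    # Usage sketch (illustrative): load a module and await its async
    # initialisation before first use ("welcome" is a stand-in module name).
    #
    #     mod = await app.a_init_mod("welcome")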


    def load_mod(self, mod_name: str, mlm='I', **kwargs):

        action_list_helper = ['I (inplace load dill on error python)',
                              # 'C (coppy py file to runtime dir)',
                              # 'S (save py file to dill)',
                              # 'CS (coppy and save py file)',
                              # 'D (development mode, inplace load py file)'
                              ]
        action_list = {"I": lambda: self.inplace_load_instance(mod_name, **kwargs),
                       "C": lambda: self._copy_load(mod_name, **kwargs)
                       }

        try:
            if mlm in action_list:

                return action_list.get(mlm)()
            else:
                self.logger.critical(
                    f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
                raise ValueError(f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
        except ValueError as e:
            self.logger.warning(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except ImportError as e:
            self.logger.error(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except Exception as e:
            self.logger.critical(Style.RED(f"Error Loading Module '{mod_name}', with critical error :{e}"))
            print(Style.RED(f"Error Loading Module '{mod_name}'"))
            self.debug_rains(e)

        return Result.default_internal_error(info="details are in the logs.")
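
    # Usage sketch (illustrative): the default mode 'I' loads the module in
    # place; loading errors are logged and returned as an internal-error
    # Result rather than raised ("welcome" is a stand-in module name).
    #
    #     mod = app.load_mod("welcome")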

    async def load_all_mods_in_file(self, working_dir="mods"):
        print(f"LOADING ALL MODS FROM FOLDER : {working_dir}")
        t0 = time.perf_counter()
        # Get the list of all modules
        module_list = self.get_all_mods(working_dir)
        open_modules = self.functions.keys()
        start_len = len(open_modules)

        for om in open_modules:
            if om in module_list:
                module_list.remove(om)

        tasks: set[Task] = {
            asyncio.create_task(asyncio.to_thread(self.save_load, mod, 'app'))
            for mod in module_list
        }
        for t in asyncio.as_completed(tasks):
            try:
                result = await t
                if hasattr(result, 'Name'):
                    print('Opened :', result.Name)
                elif hasattr(result, 'name'):
                    if hasattr(result, 'async_initialized'):
                        if not result.async_initialized:
                            async def _():
                                try:
                                    if asyncio.iscoroutine(result):
                                        await result
                                    if hasattr(result, 'Name'):
                                        print('Opened :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Opened :', result.name)
                                except Exception as e:
                                    self.debug_rains(e)
                                    if hasattr(result, 'Name'):
                                        print('Error opening :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Error opening :', result.name)
                            asyncio.create_task(_())
                        else:
                            print('Opened :', result.name)
                else:
                    print('Opened :', result)
            except Exception as e:
                self.logger.error(Style.RED(f"An Error occurred while opening all modules error: {str(e)}"))
                self.debug_rains(e)
        opened = len(self.functions.keys()) - start_len

        self.logger.info(f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s")
        return f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s"

    def get_all_mods(self, working_dir="mods", path_to="./runtime", use_wd=True):
        self.logger.info(f"collating all mods in working directory {working_dir}")

        pr = "_dev" if self.dev_modi else ""
        if working_dir == "mods" and use_wd:
            working_dir = f"{self.start_dir}/mods{pr}"
        elif use_wd:
            pass
        else:
            w_dir = self.id.replace(".", "_")
            working_dir = f"{path_to}/{w_dir}/mod_lib{pr}/"
        res = os.listdir(working_dir)

        self.logger.info(f"found : {len(res)} files")

        def do_helper(_mod):
            if "mainTool" in _mod:
                return False
            # if not _mod.endswith(".py"):
            #     return False
            if _mod.startswith("__"):
                return False
            if _mod.startswith("."):
                return False
            return not _mod.startswith("test_")

        def r_endings(word: str):
            if word.endswith(".py"):
                return word[:-3]
            return word

        mods_list = list(map(r_endings, filter(do_helper, res)))

        self.logger.info(f"found : {len(mods_list)} Modules")
        return mods_list

    def remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    def remove_mod(self, mod_name, spec='app', delete=True):
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return
        on_exit = self.functions[mod_name].get("on_exit")

        def helper():
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    self.exit_tasks.append(instance.on_exit)
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1
        for f in on_exit:
            try:
                f_, e = self.get_function((mod_name, f), state=True, specification=spec)
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        self.exit_tasks.append(f_)
                        o = None
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
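
    # Usage sketch (illustrative): unload a single module, running its
    # registered on_exit hooks first; async hooks are deferred to exit_tasks
    # ("welcome" is a stand-in module name).
    #
    #     app.remove_mod("welcome", delete=True)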

    async def a_remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return
        on_exit = self.functions[mod_name].get("on_exit")

        def helper():
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    await instance.on_exit()
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1
        for f in on_exit:
            try:
                f_, e = self.get_function((mod_name, f), state=True, specification=spec)
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        o = await f_()
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    def exit(self, remove_all=True):
        if not self.alive:
            return
        if self.args_sto.debug:
            self.hide_console()
        self.disconnect()
        if remove_all:
            self.remove_all_modules()
        self.logger.info("Exiting ToolBox interface")
        self.alive = False
        self.called_exit = True, time.time()
        self.save_exit()
        try:
            self.config_fh.save_file_handler()
        except SystemExit:
            print("If u ar testing this is fine else ...")

        if hasattr(self, 'daemon_app'):
            import threading

            for thread in threading.enumerate()[::-1]:
                if thread.name == "MainThread":
                    continue
                try:
                    with Spinner(f"closing Thread {thread.name:^50}|", symbols="s", count_down=True,
                                 time_in_s=0.751 if not self.debug else 0.6):
                        thread.join(timeout=0.751 if not self.debug else 0.6)
                except TimeoutError as e:
                    self.logger.error(f"Timeout error on exit {thread.name} {str(e)}")
                    print(str(e), f"Timeout {thread.name}")
                except KeyboardInterrupt:
                    print("Unsave Exit")
                    break
        if hasattr(self, 'loop') and self.loop is not None:
            with Spinner("closing Event loop:", symbols="+"):
                self.loop.stop()

    async def a_exit(self):
        await self.a_remove_all_modules()
        results = await asyncio.gather(
            *[asyncio.create_task(f()) for f in self.exit_tasks if asyncio.iscoroutinefunction(f)])
        for result in results:
            self.print(f"Function On Exit result: {result}")
        self.exit(remove_all=False)

    def save_load(self, modname, spec='app'):
        self.logger.debug(f"Save load module {modname}")
        if not modname:
            self.logger.warning("no filename specified")
            return False
        try:
            return self.load_mod(modname, spec=spec)
        except ModuleNotFoundError as e:
            self.logger.error(Style.RED(f"Module {modname} not found"))
            self.debug_rains(e)

        return False

    def get_function(self, name: Enum | tuple, **kwargs):
        """
        Resolve a registered function by name.

        Kwargs forwarded to _get_function:
            metadata (bool): if True, also return the registered function dictionary.
                stateless: ((function_data, None), 0)
                stateful:  ((function_data, higher_order_function), 0)
            state (bool): whether to resolve the stateful variant.
            specification (str): instance specification, defaults to 'app'.
        """
        if isinstance(name, tuple):
            return self._get_function(None, as_str=name, **kwargs)
        else:
            return self._get_function(name, **kwargs)
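
    # Usage sketch (illustrative): resolve a registered function together with
    # its metadata; an error code of 0 means the lookup succeeded (names are
    # stand-ins).
    #
    #     (function_data, fn), error_code = app.get_function(
    #         ("welcome", "version"), state=True, metadata=True)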

    async def a_run_function(self, mod_function_name: Enum | tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 404:
            mod = self.get_mod(modular_name)
            if hasattr(mod, "async_initialized") and not mod.async_initialized:
                await mod
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 404:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == 300:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            return await self.a_fuction_runner(function, function_data, args, kwargs, t0)
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_function(self, mod_function_name: Enum | tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 1 or error_code == 3 or error_code == 400:
            self.get_mod(modular_name)
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 2:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == -1:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            raise ValueError(f"Fuction {function_name} is Async use a_run_any")
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_a_from_sync(self, function, *args, **kwargs):
        # Initialize self.loop if not already set.
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                self.loop = asyncio.new_event_loop()

        # If the loop is running, offload the coroutine to a new thread.
        if self.loop.is_running():
            result_future = Future()

            def run_in_new_loop():
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result = new_loop.run_until_complete(function(*args, **kwargs))
                    result_future.set_result(result)
                except Exception as e:
                    result_future.set_exception(e)
                finally:
                    new_loop.close()

            thread = threading.Thread(target=run_in_new_loop)
            thread.start()
            thread.join()  # Block until the thread completes.
            return result_future.result()
        else:
            # If the loop is not running, schedule and run the coroutine directly.
            future = self.loop.create_task(function(*args, **kwargs))
            return self.loop.run_until_complete(future)
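
    # Usage sketch (illustrative): call a coroutine function from synchronous
    # code; if a loop is already running, the coroutine is executed on a fresh
    # loop in a helper thread and this call blocks until it finishes.
    #
    #     result = app.run_a_from_sync(app.a_run_any, ("CloudM", "Version"))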

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        if_self_state = 1 if 'self' in parameters else 0

        try:
            if len(parameters) == 0:
                res = function()
            elif len(parameters) == len(args) + if_self_state:
                res = function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = function(**kwargs)
            else:
                res = function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed with an error {str(e)}, {type(e)}")
            self.debug_rains(e)
            self.print(f"! Function ERROR: in {modular_name}.{function_name} ")

        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        if_self_state = 1 if 'self' in parameters else 0

        try:
            if len(parameters) == 0:
                res = await function()
            elif len(parameters) == len(args) + if_self_state:
                res = await function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = await function(**kwargs)
            else:
                res = await function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed with an error {str(e)}, {type(e)}")
            self.debug_rains(e)

        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def run_http(self, mod_function_name: Enum | str | tuple, function_name=None,
                       args_=None,
                       kwargs_=None, method="GET",
                       *args, **kwargs):
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        modular_name = mod_function_name

        if isinstance(mod_function_name, str) and isinstance(function_name, str):
            mod_function_name = (mod_function_name, function_name)

        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value

        r = await self.session.fetch(f"/api/{modular_name}/{function_name}{'?' + args_ if args_ is not None else ''}",
                                     data=kwargs, method=method)
        try:
            if not r:
                print("§ Session server Offline!", self.session.base)
                return Result.default_internal_error(info="Session fetch failed").as_dict()

            content_type = r.headers.get('Content-Type', '').lower()
            raw = await r.read()
            encoding = r.get_encoding() or 'utf-8'
            text = raw.decode(encoding, errors='ignore')

            # Attempt JSON
            if 'application/json' in content_type:
                try:
                    return await r.json()
                except Exception as e:
                    print("⚠ JSON decode error:", e)

            # Attempt YAML
            if 'yaml' in content_type or text.strip().startswith('---'):
                try:
                    import yaml
                    return yaml.safe_load(text)
                except Exception as e:
                    print("⚠ YAML decode error:", e)

            # Attempt XML
            if 'xml' in content_type or text.strip().startswith('<?xml'):
                try:
                    import xmltodict
                    return xmltodict.parse(text)
                except Exception as e:
                    print("⚠ XML decode error:", e)

            # Fallback: wrap the undecoded text in an internal-error Result
            return Result.default_internal_error(data={'raw_text': text, 'content_type': content_type}).as_dict()

        except Exception as e:
            print("❌ Fatal error during API call:", e)
            return Result.default_internal_error(str(e)).as_dict()
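
    # Usage sketch (illustrative): call a remote toolbox function over the
    # session's HTTP API; the response is decoded by Content-Type (JSON,
    # then YAML, then XML, then a plain-text fallback).
    #
    #     data = await app.run_http("CloudM", "Version", method="GET")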

    def run_local(self, *args, **kwargs):
        return self.run_any(*args, **kwargs)

    async def a_run_local(self, *args, **kwargs):
        return await self.a_run_any(*args, **kwargs)

    def run_any(self, mod_function_name: Enum | str | tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = self.run_function(mod_function_name,
                                        tb_run_function_with_state=tb_run_function_with_state,
                                        tb_run_with_specification=tb_run_with_specification,
                                        args_=args, kwargs_=kwargs).as_result()
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.log(show_data=False)

        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res
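
    # Usage sketch (illustrative): the dotted-string form is split into a
    # (module, function) tuple for backwards compatibility, so both calls
    # below are equivalent.
    #
    #     app.run_any(("CloudM", "Version"))
    #     app.run_any("CloudM.Version")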

    async def a_run_any(self, mod_function_name: Enum | str | tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = await self.a_run_function(mod_function_name,
                                                tb_run_function_with_state=tb_run_function_with_state,
                                                tb_run_with_specification=tb_run_with_specification,
                                                args_=args, kwargs_=kwargs)
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.print()
            res.log(show_data=False) if isinstance(res, Result) else self.logger.debug(res)
        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res


    def web_context(self):
        if self._web_context is None:
            try:
                with open("./dist/helper.html", encoding="utf-8") as f:
                    self._web_context = f.read()
            except Exception as e:
                self.logger.error(f"Could not load web context: {e}")
                self._web_context = "<div><h1>Web Context not found</h1></div>"
        return self._web_context

    def get_mod(self, name, spec='app') -> ModuleType | MainToolType:
        if spec != "app":
            self.print(f"Getting Module {name} spec: {spec}")
        if name not in self.functions:
            mod = self.save_load(name, spec=spec)
            if mod is False or (isinstance(mod, Result) and mod.is_error()):
                self.logger.warning(f"Could not find {name} in {list(self.functions.keys())}")
                raise ValueError(f"Could not find {name} in {list(self.functions.keys())} pleas install the module, or its posibly broken use --debug for infos")
        # private = self.functions[name].get(f"{spec}_private")
        # if private is not None:
        #     if private and spec != 'app':
        #         raise ValueError("Module is private")
        if name not in self.functions:
            self.logger.warning(f"Module '{name}' is not found")
            return None
        instance = self.functions[name].get(f"{spec}_instance")
        if instance is None:
            return self.load_mod(name, spec=spec)
        return self.functions[name].get(f"{spec}_instance")
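
    # Usage sketch (illustrative): fetch a live module instance, loading it on
    # demand; raises ValueError if the module cannot be found or loaded
    # ("welcome" is a stand-in module name).
    #
    #     mod = app.get_mod("welcome")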

    def print(self, text, *args, **kwargs):
        # self.logger.info(f"Output : {text}")
        if 'live' in self.id:
            return
        if self.sprint(None):
            print(Style.CYAN(f"System${self.id}:"), end=" ")
        print(text, *args, **kwargs)

    def sprint(self, text, *args, **kwargs):
        if text is None:
            return True
        if 'live' in self.id:
            return
        # self.logger.info(f"Output : {text}")
        print(Style.CYAN(f"System${self.id}:"), end=" ")
        if isinstance(text, str) and kwargs == {} and text:
            stram_print(text + ' '.join(args))
            print()
        else:
            print(text, *args, **kwargs)

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        self.remove_mod(mod_name, delete=True)
        if mod_name not in self.modules:
            self.logger.warning(f"Module '{mod_name}' is not found")
            return
        if hasattr(self.modules[mod_name], 'reload_save') and self.modules[mod_name].reload_save:
            def reexecute_module_code(x):
                return x
        else:
            def reexecute_module_code(module_name):
                if isinstance(module_name, str):
                    module = import_module(module_name)
                else:
                    module = module_name
                # Get the source code of the module
                try:
                    source = inspect.getsource(module)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    return module
                # Compile the source code
                try:
                    code = compile(source, module.__file__, 'exec')
                    # Execute the code in the module's namespace
                    exec(code, module.__dict__)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    pass
                return module

        if not is_file:
            mods = self.get_all_mods("./mods/" + mod_name)
            def recursive_reload(package_name):
                package = import_module(package_name)

                # First, reload all submodules
                if hasattr(package, '__path__'):
                    for _finder, name, _ispkg in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
                        try:
                            mod = import_module(name)
                            reexecute_module_code(mod)
                            reload(mod)
                        except Exception as e:
                            print(f"Error reloading module {name}: {e}")
                            break

                # Finally, reload the package itself
                reexecute_module_code(package)
                reload(package)

            for mod in mods:
                if mod.endswith(".txt") or mod.endswith(".yaml"):
                    continue
                try:
                    recursive_reload(loc + mod_name + '.' + mod)
                    self.print(f"Reloaded {mod_name}.{mod}")
                except ImportError:
                    self.print(f"Could not load {mod_name}.{mod}")
        reexecute_module_code(self.modules[mod_name])
        if mod_name in self.functions:
            if "on_exit" in self.functions[mod_name]:
                self.functions[mod_name]["on_exit"] = []
            if "on_start" in self.functions[mod_name]:
                self.functions[mod_name]["on_start"] = []
        self.inplace_load_instance(mod_name, spec=spec, mfo=reload(self.modules[mod_name]) if mod_name in self.modules else None)

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None, on_reload=None):
        if path_name is None:
            path_name = mod_name
        is_file = os.path.isfile(self.start_dir + '/mods/' + path_name + '.py')
        import watchfiles
        def helper():
            paths = f'mods/{path_name}' + ('.py' if is_file else '')
            self.print(f'Watching Path: {paths}')
            for changes in watchfiles.watch(paths):
                if not changes:
                    continue
                self.reload_mod(mod_name, spec, is_file, loc)
                if on_reload:
                    on_reload()

        if not use_thread:
            helper()
        else:
            threading.Thread(target=helper, daemon=True).start()
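
    # Usage sketch (illustrative): hot-reload a module whenever its source
    # changes; requires the `watchfiles` package.
    #
    #     app.watch_mod("welcome", on_reload=lambda: print("reloaded"))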

    def _register_function(self, module_name, func_name, data):
        if module_name not in self.functions:
            self.functions[module_name] = {}
        if func_name in self.functions[module_name]:
            self.print(f"Overriding function {func_name} from {module_name}", end="\r")
        self.functions[module_name][func_name] = data

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str | None = None,
                          initial: bool=False,
                          exit_f: bool=False,
                          test: bool=True,
                          samples:list[dict[str, Any]] | None=None,
                          state:bool | None=None,
                          pre_compute:Callable | None=None,
                          post_compute:Callable[[], Result] | None=None,
                          api_methods:list[str] | None=None,
                          memory_cache: bool=False,
                          file_cache: bool=False,
                          request_as_kwarg: bool=False,
                          row: bool=False,
                          memory_cache_max_size:int=100,
                          memory_cache_ttl:int=300):

        if isinstance(type_, Enum):
            type_ = type_.value

        if memory_cache and file_cache:
            raise ValueError("Don't use both cash at the same time for the same fuction")

        use_cache = memory_cache or file_cache
        cache = {}
        if file_cache:
            cache = FileCache(folder=self.data_dir + f'\\cache\\{mod_name}\\',
                              filename=self.data_dir + f'\\cache\\{mod_name}\\{name}cache.db')
        if memory_cache:
            cache = MemoryCache(maxsize=memory_cache_max_size, ttl=memory_cache_ttl)

        version = self.version if version is None else self.version + ':' + version

        def a_additional_process(func):

            async def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = await pre_compute(*args, **kwargs)
                if asyncio.iscoroutinefunction(func):
                    result = await func(*args, **kwargs)
                else:
                    result = func(*args, **kwargs)
                if post_compute is not None:
                    result = await post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            async def wrapper(*args, **kwargs):

                if not use_cache:
                    return await executor(*args, **kwargs)

                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = await executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def additional_process(func):

            def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = pre_compute(*args, **kwargs)
                # The decorator routes coroutine functions to a_additional_process,
                # so func is synchronous here and can be called directly.
                result = func(*args, **kwargs)
                if post_compute is not None:
                    result = post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            def wrapper(*args, **kwargs):

                if not use_cache:
                    return executor(*args, **kwargs)

                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def decorator(func):
            sig = signature(func)
            params = list(sig.parameters)
            module_name = mod_name if mod_name else func.__module__.split('.')[-1]
            func_name = name if name else func.__name__
            if func_name == 'on_start':
                func_name = 'on_startup'
            if func_name == 'on_exit':
                func_name = 'on_close'
            if api or pre_compute is not None or post_compute is not None or memory_cache or file_cache:
                if asyncio.iscoroutinefunction(func):
                    func = a_additional_process(func)
                else:
                    func = additional_process(func)
            if api and str(sig.return_annotation) == 'Result':
                raise ValueError(f"Fuction {module_name}.{func_name} registered as "
                                 f"Api fuction but uses {str(sig.return_annotation)}\n"
                                 f"Please change the sig from ..)-> Result to ..)-> ApiResult")
            data = {
                "type": type_,
                "module_name": module_name,
                "func_name": func_name,
                "level": level,
                "restrict_in_virtual_mode": restrict_in_virtual_mode,
                "func": func,
                "api": api,
                "helper": helper,
                "version": version,
                "initial": initial,
                "exit_f": exit_f,
                "api_methods": api_methods if api_methods is not None else ["AUTO"],
                "__module__": func.__module__,
                "signature": sig,
                "params": params,
                "row": row,
                "state": (
                    False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
                "do_test": test,
                "samples": samples,
                "request_as_kwarg": request_as_kwarg,

            }
            self._register_function(module_name, func_name, data)
            if exit_f:
                if "on_exit" not in self.functions[module_name]:
                    self.functions[module_name]["on_exit"] = []
                self.functions[module_name]["on_exit"].append(func_name)
            if initial:
                if "on_start" not in self.functions[module_name]:
                    self.functions[module_name]["on_start"] = []
                self.functions[module_name]["on_start"].append(func_name)

            return func

        decorator.tb_init = True

        return decorator

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str | None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           request_as_kwarg: bool = False,
           row: bool = False,
           state: bool | None = None,
           level: int = -1,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list | dict | None = None,
           interface: ToolBoxInterfaces | str | None = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the incoming request object to the function as the 'request' keyword argument when it is called via the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result type. Defaults to False, meaning the result is wrapped.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): HTTP methods to expose. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise); explicit options are GET, POST, PUT and DELETE.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      request_as_kwarg=request_as_kwarg,
                                      row=row,
                                      api_methods=api_methods,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)

    def save_autocompletion_dict(self):
        autocompletion_dict = {}
        for module_name, _module in self.functions.items():
            data = {}
            for function_name, function_data in self.functions[module_name].items():
                if not isinstance(function_data, dict):
                    continue
                data[function_name] = {arg: None for arg in
                                       function_data.get("params", [])}
                if len(data[function_name].keys()) == 0:
                    data[function_name] = None
            autocompletion_dict[module_name] = data if len(data.keys()) > 0 else None
        self.config_fh.add_to_save_file_handler("auto~~~~~~", str(autocompletion_dict))

    def get_autocompletion_dict(self):
        return self.config_fh.get_file_handler("auto~~~~~~")

    def save_registry_as_enums(self, directory: str, filename: str):
        # Create the directory if it does not exist
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Prepare the file path
        filepath = os.path.join(directory, filename)

        # Generate the enum classes as strings
        enum_classes = [f'"""Automatically generated by ToolBox v = {self.version}"""'
                        f'\nfrom enum import Enum\nfrom dataclasses import dataclass'
                        f'\n\n\n']
        for module, functions in self.functions.items():
            if module.startswith("APP_INSTANCE"):
                continue
            class_name = module
            enum_members = "\n    ".join(
                [
                    f"{func_name.upper().replace('-', '')}"
                    f" = '{func_name}' "
                    f"# Input: ({function_data['params'] if isinstance(function_data, dict) else ''}),"
                    f" Output: {function_data['signature'].return_annotation if isinstance(function_data, dict) else 'None'}"
                    for func_name, function_data in functions.items()])
            enum_class = (f'@dataclass\nclass {class_name.upper().replace(".", "_").replace("-", "")}(Enum):'
                          f"\n    NAME = '{class_name}'\n    {enum_members}")
            enum_classes.append(enum_class)

        # Write the enums to the file
        data = "\n\n\n".join(enum_classes)
        if len(data) < 12:
            raise ValueError(
                "Invalid enums: no content was generated. Please delete "
                "utils/system/all_functions_enums.py yourself or register more functions.")
        with open(filepath, 'w') as file:
            file.write(data)

        print(Style.Bold(Style.BLUE(f"Enums saved in {filepath}")))
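
For illustration, a registry containing a single module my_mod with one function hello would produce a file shaped roughly like this (module and function names are hypothetical):

"""Automatically generated by ToolBox v = 0.1.0"""
from enum import Enum
from dataclasses import dataclass


@dataclass
class MY_MOD(Enum):
    NAME = 'my_mod'
    HELLO = 'hello' # Input: (['name']), Output: <class 'dict'>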
disconnect(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def disconnect(*args, **kwargs):
    """Proxy attribute."""
exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def exit_main(*args, **kwargs):
    """Proxy attribute."""
get_function(name, **kwargs)

Keyword arguments forwarded to _get_function:
    metadata: return the registered function dictionary
        stateless: (function_data, None), 0
        stateful: (function_data, higher_order_function), 0
    state: boolean
    specification: str, default "app"

Source code in toolboxv2/utils/toolbox.py
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state: boolean
        specification: str, default "app"
    """
    if isinstance(name, tuple):
        return self._get_function(None, as_str=name, **kwargs)
    else:
        return self._get_function(name, **kwargs)
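
A minimal usage sketch, assuming a module my_mod with a registered function hello (both names hypothetical) and an existing app instance:

entry = app.get_function(("my_mod", "hello"), metadata=True)
# per the docstring above, metadata=True returns the registered
# function dictionary together with an exit code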
hide_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def hide_console(*args, **kwargs):
    """Proxy attribute."""
run(*args, request=None, running_function_coro=None, **kwargs)

Run a function with support for SSE streaming in both threaded and non-threaded contexts.

Source code in toolboxv2/utils/toolbox.py
def run(self, *args, request=None, running_function_coro=None, **kwargs):
    """
    Run a function with support for SSE streaming in both
    threaded and non-threaded contexts.
    """
    if running_function_coro is None:
        mn, fn = args[0]
        if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
            kwargs["request"] = RequestData.from_dict(request)
            if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                kwargs["request"].data = kwargs["request"].body = kwargs['data']
                del kwargs['data']
            if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                       []):
                kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                del kwargs['form_data']

    # Create the coroutine
    coro = running_function_coro or self.a_run_any(*args, **kwargs)

    # Get or create an event loop
    try:
        loop = asyncio.get_event_loop()
        is_running = loop.is_running()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        is_running = False

    # If the loop is already running, run in a separate thread
    if is_running:
        # Create thread pool executor as needed
        if not hasattr(self.__class__, '_executor'):
            self.__class__._executor = ThreadPoolExecutor(max_workers=4)

        def run_in_new_thread():
            # Set up a new loop in this thread
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)

            try:
                # Run the coroutine
                return new_loop.run_until_complete(coro)
            finally:
                new_loop.close()

        # Run in thread and get result
        thread_result = self.__class__._executor.submit(run_in_new_thread).result()

        # Handle streaming results from thread
        if isinstance(thread_result, dict) and thread_result.get("is_stream"):
            # Create a new SSE stream in the main thread
            async def stream_from_function():
                # Re-run the function with direct async access
                stream_result = await self.a_run_any(*args, **kwargs)

                if (isinstance(stream_result, Result) and
                    getattr(stream_result.result, 'data_type', None) == "stream"):
                    # Get and forward data from the original generator
                    original_gen = stream_result.result.data.get("generator")
                    if inspect.isasyncgen(original_gen):
                        async for item in original_gen:
                            yield item

            # Return a new streaming Result
            return Result.stream(
                stream_generator=stream_from_function(),
                headers=thread_result.get("headers", {})
            )

        result = thread_result
    else:
        # Direct execution when loop is not running
        result = loop.run_until_complete(coro)

    # Process the final result
    if isinstance(result, Result):
        if 'debug' in self.id:
            result.print()
        if getattr(result.result, 'data_type', None) == "stream":
            return result
        return result.to_api_result().model_dump(mode='json')

    return result
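
A minimal call sketch; my_mod and hello are hypothetical registered names and app is an existing App instance:

result = app.run(("my_mod", "hello"), name="World")
# Result instances are dumped to a JSON-serializable API dict,
# except for streams, which are returned as-is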
run_bg_task(task)

Run a task in the background that will properly handle nested asyncio operations. This implementation ensures that asyncio.create_task() and asyncio.gather() work correctly within the background task.

Parameters:
    task: A callable function that can be synchronous or asynchronous. (required)

Source code in toolboxv2/utils/toolbox.py
def run_bg_task(self, task):
    """
    Run a task in the background that will properly handle nested asyncio operations.
    This implementation ensures that asyncio.create_task() and asyncio.gather() work
    correctly within the background task.

    Args:
        task: A callable function that can be synchronous or asynchronous
    """
    if not callable(task):
        self.logger.warning("Task is not callable!")
        return None

    # Function that will run in a separate thread with its own event loop
    def thread_target(task_):
        # Create a new event loop for this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Determine how to run the task based on its type
            if asyncio.iscoroutinefunction(task_):
                # If it's an async function, run it directly
                loop.run_until_complete(task_())
            elif asyncio.iscoroutine(task_):
                # If it's already a coroutine object
                loop.run_until_complete(task_)
            else:
                # If it's a synchronous function that might create async tasks internally
                async def wrapper():
                    # Run potentially blocking synchronous code in an executor
                    return await loop.run_in_executor(None, task_)

                loop.run_until_complete(wrapper())

            self.logger.debug("Background task completed successfully")
        except Exception as e:
            self.logger.error(f"Background task failed with error: {str(e)}")
        finally:
            # Clean up any pending tasks
            pending = asyncio.all_tasks(loop)
            if pending:
                # Cancel any remaining tasks
                for task_ in pending:
                    task_.cancel()

                # Allow tasks to finish cancellation
                loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

            loop.close()

    # Create and start a non-daemon thread that will run to completion
    # Using non-daemon thread ensures the task completes even if main thread exits
    t = threading.Thread(target=thread_target, args=(task,))
    t.daemon = False  # Non-daemon thread will keep program alive until it completes
    self.bg_tasks.append(t)
    t.start()
    return t
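
A minimal sketch, assuming app is an existing App instance; the task itself is hypothetical:

import asyncio

async def refresh_cache():
    await asyncio.sleep(1)  # stand-in for real async work

thread = app.run_bg_task(refresh_cache)  # returns the started Thread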
run_bg_task_advanced(task, *args, **kwargs)

Alternative implementation for complex async scenarios where the task creates nested asyncio tasks using create_task() and gather().

This version ensures proper execution of nested tasks by maintaining the thread and its event loop throughout the lifetime of all child tasks.

Parameters:
    task: A callable function that can be synchronous or asynchronous. (required)
    *args, **kwargs: Arguments to pass to the task. (required)

Source code in toolboxv2/utils/toolbox.py
def run_bg_task_advanced(self, task, *args, **kwargs):
    """
    Alternative implementation for complex async scenarios where the task creates
    nested asyncio tasks using create_task() and gather().

    This version ensures proper execution of nested tasks by maintaining the thread
    and its event loop throughout the lifetime of all child tasks.

    Args:
        task: A callable function that can be synchronous or asynchronous
        *args, **kwargs: Arguments to pass to the task
    """
    if not callable(task):
        self.logger.warning("Task is not callable!")
        return None

    # Create a dedicated thread with its own event loop
    async def async_wrapper():
        try:
            if asyncio.iscoroutinefunction(task):
                return await task(*args, **kwargs)
            elif asyncio.iscoroutine(task):
                return await task
            else:
                # Run in executor to avoid blocking
                loop = asyncio.get_event_loop()
                return await loop.run_in_executor(None, lambda: task(*args, **kwargs))
        except Exception as e:
            self.logger.error(f"Background task error: {str(e)}")
            raise

    def thread_target():
        # Create new event loop for this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Run the task to completion with all its nested tasks
            loop.run_until_complete(async_wrapper())
        except Exception as e:
            self.logger.error(f"Background task thread failed: {str(e)}")
        finally:
            # Clean up any pending tasks that might still be running
            try:
                pending = asyncio.all_tasks(loop)
                if pending:
                    # Allow tasks time to clean up
                    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
            except Exception:
                pass

            loop.close()

    # Use a non-daemon thread so it will run to completion
    t = threading.Thread(target=thread_target, daemon=False)
    self.bg_tasks.append(t)
    t.start()
    return t
show_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def show_console(*args, **kwargs):
    """Proxy attribute."""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, request_as_kwarg=False, row=False, state=None, level=-1, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:
    name (str): The name to register the function under. Defaults to the function's own name. (default: None)
    mod_name (str): The name of the module the function belongs to. (default: '')
    helper (str): A helper string providing additional information about the function. (default: '')
    version (str or None): The version of the function or module. (default: None)
    test (bool): Flag to indicate if the function is for testing purposes. (default: True)
    restrict_in_virtual_mode (bool): Flag to restrict the function in virtual mode. (default: False)
    api (bool): Flag to indicate if the function is part of an API. (default: False)
    initial (bool): Flag to indicate if the function should be executed at initialization. (default: False)
    exit_f (bool): Flag to indicate if the function should be executed at exit. (default: False)
    test_only (bool): Flag to indicate if the function should only be used for testing. (default: False)
    memory_cache (bool): Flag to enable memory caching for the function. (default: False)
    request_as_kwarg (bool): Flag to pass the incoming request object to the function as the 'request' keyword argument when it is called via the API. (default: False)
    file_cache (bool): Flag to enable file caching for the function. (default: False)
    row (bool): Whether to return the raw result instead of auto-wrapping it in a Result type. (default: False)
    state (bool or None): Flag to indicate if the function maintains state. (default: None)
    level (int): The level of the function, used for prioritization or categorization. (default: -1)
    memory_cache_max_size (int): Maximum size of the memory cache. (default: 100)
    memory_cache_ttl (int): Time-to-live for the memory cache entries. (default: 300)
    samples (list or dict or None): Samples or examples of function usage. (default: None)
    interface (str): The interface type for the function. (default: None)
    pre_compute (callable): A function to be called before the main function. (default: None)
    post_compute (callable): A function to be called after the main function. (default: None)
    api_methods (list[str]): HTTP methods to expose. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise); explicit options are GET, POST, PUT and DELETE. (default: None)

Returns:
    function: The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/toolbox.py
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str | None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       request_as_kwarg: bool = False,
       row: bool = False,
       state: bool | None = None,
       level: int = -1,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to pass the incoming request object to the function as the 'request' keyword argument when it is called via the API.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result type. Defaults to False, meaning the result is wrapped.
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): HTTP methods to expose. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise); explicit options are GET, POST, PUT and DELETE.

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  request_as_kwarg=request_as_kwarg,
                                  row=row,
                                  api_methods=api_methods,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
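
A minimal registration sketch; the instance and module names are hypothetical, and get_app is used as in the MainTool source below:

from toolboxv2 import get_app

app = get_app(from_="docs-example")

@app.tb(mod_name="my_mod", api=True, request_as_kwarg=True)
async def hello(name: str, request=None):
    return {"greeting": f"Hello {name}"}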
wait_for_bg_tasks(timeout=None)

Wait for all background tasks to complete.

Parameters:
    timeout: Maximum time to wait (in seconds) for all tasks to complete. None means wait indefinitely. (default: None)

Returns:
    bool: True if all tasks completed, False if timeout occurred

Source code in toolboxv2/utils/toolbox.py
def wait_for_bg_tasks(self, timeout=None):
    """
    Wait for all background tasks to complete.

    Args:
        timeout: Maximum time to wait (in seconds) for all tasks to complete.
                 None means wait indefinitely.

    Returns:
        bool: True if all tasks completed, False if timeout occurred
    """
    active_tasks = [t for t in self.bg_tasks if t.is_alive()]

    for task in active_tasks:
        task.join(timeout=timeout)
        if task.is_alive():
            return False

    return True
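
A minimal sketch combining run_bg_task with a bounded wait (the task name is hypothetical, reusing the example above):

app.run_bg_task(refresh_cache)
if not app.wait_for_bg_tasks(timeout=5.0):
    print("some background tasks are still running")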

Code

Source code in toolboxv2/utils/security/cryp.py
class Code:

    @staticmethod
    def DK():
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):

        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):

        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Erzeugt eine zufällige Zahl als Seed.

        Returns:
            int: Eine zufällige Zahl.
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

        Args:
            text (str): Der zu hashende Text.
            salt (str): Der Salt-Wert.
            pepper (str): Der Pepper-Wert.
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            str: Der resultierende Hash-Wert.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key() -> str:
        """
        Generiert einen Schlüssel für die symmetrische Verschlüsselung.

        Returns:
            str: Der generierte Schlüssel.
        """
        return Fernet.generate_key().decode()

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.

        Returns:
            str: Der verschlüsselte Text.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            encrypted_text (str): Der zu entschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.
            to_str (bool): default true returns str if false returns bytes
        Returns:
            str: Der entschlüsselte Text.
        """

        if isinstance(key, str):
            key = key.encode()

        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

        Args:
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize the keys
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Speichert die generierten Schlüssel in separate Dateien.
        Der private Schlüssel wird mit dem Device Key verschlüsselt.

        Args:
            public_key (str): Der öffentliche Schlüssel im PEM-Format
            private_key (str): Der private Schlüssel im PEM-Format
            directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
        """
        # Erstelle das Verzeichnis, falls es nicht existiert
        os.makedirs(directory, exist_ok=True)

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Verschlüssele den privaten Schlüssel mit dem Device Key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Speichere den öffentlichen Schlüssel
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Speichere den verschlüsselten privaten Schlüssel
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Lädt die Schlüssel aus den Dateien.
        Der private Schlüssel wird mit dem Device Key entschlüsselt.

        Args:
            directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

        Raises:
            FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
        """
        # Pfade zu den Schlüsseldateien
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Prüfe ob die Dateien existieren
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Lade den öffentlichen Schlüssel
        with open(public_key_path) as f:
            public_key = f.read()

        # Lade und entschlüssele den privaten Schlüssel
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

        Returns:
            str: Der verschlüsselte Text.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

        Args:
            encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
            private_key_str (str): Der private Schlüssel als String.

        Returns:
            str: Der entschlüsselte Text.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                # padding=padding.PSS(
                #    mgf=padding.MGF1(hashes.SHA512()),
                #    salt_length=padding.PSS.MAX_LENGTH
                # ),
                signature_algorithm=signature_algorithm
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

        Args:
            pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

        Returns:
            PublicKey: Das PublicKey-Objekt.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

        Args:
            public_key (PublicKey): Das PublicKey-Objekt.

        Returns:
            str: Der PEM-kodierte öffentliche Schlüssel.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
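
A minimal round-trip sketch for the symmetric, asymmetric, and signature helpers above, assuming Code is importable from toolboxv2.utils.security.cryp:

from toolboxv2.utils.security.cryp import Code

# Symmetric: generate a Fernet key, encrypt, decrypt
key = Code.generate_symmetric_key()
token = Code.encrypt_symmetric("hello", key)
assert Code.decrypt_symmetric(token, key) == "hello"

# Asymmetric: RSA-OAEP round trip
pub, priv = Code.generate_asymmetric_keys()
ct = Code.encrypt_asymmetric("secret", pub)
assert Code.decrypt_asymmetric(ct, priv) == "secret"

# Signatures: row=True yields raw bytes, which verify_signature accepts directly
sig = Code.create_signature("message", priv, row=True)
assert Code.verify_signature(sig, "message", pub)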
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Decrypts a text with a given private key.

Parameters:
    encrypted_text_hex (str): The encrypted text as a hex string. (required)
    private_key_str (str): The private key as a string. (required)

Returns:
    str: The decrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Decrypts a text with a given private key.

    Args:
        encrypted_text_hex (str): The encrypted text as a hex string.
        private_key_str (str): The private key as a string.

    Returns:
        str: The decrypted text.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Decrypts a text with a given symmetric key.

Parameters:
    encrypted_text (str): The text to decrypt. (required)
    key (str): The symmetric key. (required)
    to_str (bool): Defaults to True; return str if True, bytes otherwise.

Returns:
    str: The decrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Decrypts a text with a given symmetric key.

    Args:
        encrypted_text (str): The text to decrypt.
        key (str): The symmetric key.
        to_str (bool): Defaults to True; return str if True, bytes otherwise.

    Returns:
        str: The decrypted text.
    """

    if isinstance(key, str):
        key = key.encode()

    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()
encrypt_asymmetric(text, public_key_str) staticmethod

Encrypts a text with a given public key.

Parameters:
    text (str): The text to encrypt. (required)
    public_key_str (str): The public key as a string in PEM format. (required)

Returns:
    str: The encrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Encrypts a text with a given public key.

    Args:
        text (str): The text to encrypt.
        public_key_str (str): The public key as a string in PEM format.

    Returns:
        str: The encrypted text.
    """
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Encrypts a text with a given symmetric key.

Parameters:
    text (str): The text to encrypt. (required)
    key (str): The symmetric key. (required)

Returns:
    str: The encrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Encrypts a text with a given symmetric key.

    Args:
        text (str): The text to encrypt.
        key (str): The symmetric key.

    Returns:
        str: The encrypted text.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generates a pair of public and private keys for asymmetric encryption.

Returns:
    (str, str): A tuple of public and private key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generates a pair of public and private keys for asymmetric encryption.

    Returns:
        (str, str): A tuple of public and private key.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialize the keys
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key
generate_seed() staticmethod

Generates a random number to use as a seed.

Returns:
    int: A random number.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_seed() -> int:
    """
    Generates a random number to use as a seed.

    Returns:
        int: A random number.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key() staticmethod

Generates a key for symmetric encryption.

Returns:
    str: The generated key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_symmetric_key() -> str:
    """
    Generates a key for symmetric encryption.

    Returns:
        str: The generated key.
    """
    return Fernet.generate_key().decode()
load_keys_from_files(directory='keys') staticmethod

Loads the keys from their files. The private key is decrypted with the device key.

Parameters:
    directory (str): The directory to load the keys from. (default: 'keys')

Returns:
    (str, str): A tuple of public and private key

Raises:
    FileNotFoundError: If the key files cannot be found

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Loads the keys from their files.
    The private key is decrypted with the device key.

    Args:
        directory (str): The directory to load the keys from

    Returns:
        (str, str): A tuple of public and private key

    Raises:
        FileNotFoundError: If the key files cannot be found
    """
    # Paths to the key files
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Check whether the files exist
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Get the device key
    device_key = DEVICE_KEY()

    # Load the public key
    with open(public_key_path) as f:
        public_key = f.read()

    # Load and decrypt the private key
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key
one_way_hash(text, salt='', pepper='') staticmethod

Generates a hash of a given text using salt and pepper.

Parameters:
    text (str): The text to hash. (required)
    salt (str): The salt value. (default: '')
    pepper (str): The pepper value. (default: '')

Returns:
    str: The resulting hash value.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Generates a hash of a given text using salt and pepper.

    Args:
        text (str): The text to hash.
        salt (str): The salt value.
        pepper (str): The pepper value.

    Returns:
        str: The resulting hash value.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
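
For example, hashing a password with a per-user salt and an application-wide pepper (all values hypothetical):

digest = Code.one_way_hash("s3cret", salt="user-42", pepper="app-pepper")
# digest is a 64-character hex SHA-256 string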
pem_to_public_key(pem_key) staticmethod

Converts a PEM-encoded public key into a PublicKey object.

Parameters:
    pem_key (str): The PEM-encoded public key. (required)

Returns:
    PublicKey: The PublicKey object.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Converts a PEM-encoded public key into a PublicKey object.

    Args:
        pem_key (str): The PEM-encoded public key.

    Returns:
        PublicKey: The PublicKey object.
    """
    public_key = serialization.load_pem_public_key(pem_key.encode())
    return public_key
public_key_to_pem(public_key) staticmethod

Converts a PublicKey object into a PEM-encoded string.

Parameters:
    public_key (PublicKey): The PublicKey object. (required)

Returns:
    str: The PEM-encoded public key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Converts a PublicKey object into a PEM-encoded string.

    Args:
        public_key (PublicKey): The PublicKey object.

    Returns:
        str: The PEM-encoded public key.
    """
    pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Saves the generated keys to separate files. The private key is encrypted with the device key.

Parameters:
    public_key (str): The public key in PEM format. (required)
    private_key (str): The private key in PEM format. (required)
    directory (str): The directory in which to store the keys. (default: 'keys')

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Saves the generated keys to separate files.
    The private key is encrypted with the device key.

    Args:
        public_key (str): The public key in PEM format
        private_key (str): The private key in PEM format
        directory (str): The directory in which to store the keys
    """
    # Create the directory if it does not exist
    os.makedirs(directory, exist_ok=True)

    # Get the device key
    device_key = DEVICE_KEY()

    # Encrypt the private key with the device key
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    # Save the public key
    public_key_path = os.path.join(directory, "public_key.pem")
    with open(public_key_path, "w") as f:
        f.write(public_key)

    # Save the encrypted private key
    private_key_path = os.path.join(directory, "private_key.pem")
    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)

MainTool

Source code in toolboxv2/utils/system/main_tool.py
class MainTool:
    toolID: str = ""
    # app = None
    interface = None
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        self.__storedargs = args, kwargs
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", None))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):

        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    def get_version(self) -> str:
        """"Returns the version"""
        return self.version

    async def get_user(self, username: str) -> Result:
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()
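
Because __init__ only stores its arguments and __await__ forwards them to __ainit__, a MainTool instance is constructed synchronously and then awaited once. A minimal sketch, assuming it runs inside an active toolboxv2 app context (the on_start coroutine is hypothetical):

import asyncio

async def on_start():
    print("module resources ready")

async def main():
    # Awaiting the instance runs __ainit__ with the stored arguments;
    # "v" and "name" are required keyword arguments.
    tool = await MainTool(v="0.1.0", name="demo", on_start=on_start)
    print(tool.get_version())  # -> "0.1.0"

asyncio.run(main())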
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/system/main_tool.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for arguments pass
    Do not override. Use __ainit__ instead
    """
    self.__storedargs = args, kwargs
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for __await__ after spawning

Source code in toolboxv2/utils/system/main_tool.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

"Returns the version

Source code in toolboxv2/utils/system/main_tool.py
def get_version(self) -> str:
    """"Returns the version"""
    return self.version
webInstall(user_instance, construct_render)

"Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/main_tool.py
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""

Result

Source code in toolboxv2/utils/system/types.py
class Result:
    _task = None
    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        return self

    def as_dict(self):
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # convert exec_code into HTTP response codes
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        if self.info.exec_code == 200:
            return False
        return True

    def is_data(self):
        return self.result.data is not None

    def to_api_result(self):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # convert exec_code into HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list or None or str):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: Union[dict, None] = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Union[
                   Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Union[
                Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type."""
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and not get_app().web_context() in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        data = '\n' + f"{((prifix + 'Data: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{data if not data.endswith('NO Data') else ''}")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "\n------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        return self._task
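
A short sketch of typical Result handling inside a module function (the payload values are illustrative):

def fetch_settings() -> Result:
    return Result.ok(data={"theme": "dark"}, data_info="user settings")

res = fetch_settings()
if res.is_error():
    res.lazy_return('raise')  # raises ValueError with the formatted result text
theme = res.get("theme", default="light")  # -> "dark"
res.log()  # debug-log the formatted result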
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

data: File data as bytes or base64 string. (required)
filename: Name of the file for download. (required)
content_type: MIME type of the file (auto-detected if None). (default: None)
info: Response info text. (default: 'OK')
interface: Target interface. (default: remote)

Returns:

Result object configured for file download.

Source code in toolboxv2/utils/system/types.py
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
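
For example, returning a generated report as a download; raw bytes are base64-encoded automatically and the MIME type is guessed from the filename (the CSV content is illustrative):

def download_report() -> Result:
    report_bytes = b"id,name\n1,alice\n"
    # mimetypes resolves "report.csv" to "text/csv".
    return Result.file(report_bytes, filename="report.csv")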
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

stream_generator (Any): A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event. (required)
info (str): Optional help text for the Result. (default: 'OK')
interface (ToolBoxInterfaces): Optional ToolBoxInterface to target. (default: remote)
cleanup_func: Optional cleanup function to run when the stream ends or is cancelled. (default: None)
#http_headers: Optional dictionary of custom HTTP headers for the SSE response. (commented out in the signature)

Returns:

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Union[
            Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
        #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
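
A minimal SSE endpoint sketch built on Result.sse; the event payloads, their dict shape, and the cleanup callback are illustrative assumptions (each yielded item is formatted into an SSE event by SSEGenerator.create_sse_stream):

import asyncio

async def tick_events():
    for i in range(3):
        yield {"event": "tick", "data": {"count": i}}
        await asyncio.sleep(1)

def on_stream_closed():
    print("SSE stream closed")

def open_ticker() -> Result:
    return Result.sse(stream_generator=tick_events(), cleanup_func=on_stream_closed)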
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

stream_generator (Any): Any stream source (async generator, sync generator, iterable, or single item). (required)
content_type (str): Content-Type header. (default: 'text/event-stream' for SSE)
headers (dict | None): Additional HTTP headers for the response. (default: None)
info (str): Help text for the result. (default: 'OK')
interface (ToolBoxInterfaces): Interface to send data to. (default: remote)
cleanup_func: Optional function for cleanup. (default: None)

Returns:

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: Union[dict, None] = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Union[
               Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
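
For a plain chunked stream rather than SSE, passing a different content_type skips the SSE wrapping; a sketch with an illustrative sync generator (wrapped into an async one internally):

def csv_rows():
    yield "id,name\n"
    yield "1,alice\n"
    yield "2,bob\n"

def export_csv() -> Result:
    return Result.stream(
        csv_rows(),
        content_type="text/csv",
        headers={"Content-Disposition": "attachment; filename=export.csv"},
    )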
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
@classmethod
def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with specific content type."""
    if headers is not None:
        return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )

    return cls(error=error, info=info_obj, result=result)

Singleton

Singleton metaclass for ensuring only one instance of a class.

Source code in toolboxv2/utils/singelton_class.py
class Singleton(type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}
    _kwargs = {}
    _args = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
            cls._args[cls] = args
            cls._kwargs[cls] = kwargs
        return cls._instances[cls]

Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Initialize spinner with flexible configuration."""
        # Resolve symbol set.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Default symbols if not provided.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Test mode symbol set.
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering management.
        self._is_primary = False
        self._start_time = 0

        # Central manager.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Generate the primary render line."""
        current_time = time.time()
        if self.contd:
            remaining = max(0, self.max_t - (current_time - self._start_time))
            time_display = f"{remaining:.2f}"
        else:
            time_display = f"{current_time - self._start_time:.2f}"

        symbol = next(self.spinner)
        return f"{symbol} {self.message} | {time_display}"

    def _generate_secondary_info(self):
        """Generate secondary spinner info for additional spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Start the spinner."""
        self.running = True
        self._start_time = time.time()
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        # Clear the spinner's line if it was the primary spinner.
        if self._is_primary:
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()
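
The spinner is used as a context manager; a short sketch (time.sleep stands in for real work):

import time

with Spinner("Fetching data...", symbols="d"):
    time.sleep(2)  # the spinner animates while this block runs

# Count-down variant: shows the time remaining out of time_in_s.
with Spinner("Cooling down", count_down=True, time_in_s=5):
    time.sleep(5)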
__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __enter__(self):
    """Start the spinner."""
    self.running = True
    self._start_time = time.time()
    self.manager.register_spinner(self)
    return self
__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()
__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration."""
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set.
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False
    self._start_time = 0

    # Central manager.
    self.manager = SpinnerManager()

TBEF

Automatically generated by ToolBox v = 0.1.21

daemon

DaemonUtil
Source code in toolboxv2/utils/daemon/daemon_util.py
class DaemonUtil:

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        self.server = None
        self.alive = False
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, t=False,
                        app: (App or AppType) | None = None,
                        peer=False, name='daemonApp-server', on_register=None, on_client_exit=None, on_server_exit=None,
                        unix_socket=False, test_override=False):
        from toolboxv2.mods.SocketManager import SocketType
        self.class_instance = class_instance
        self.server = None
        self.port = port
        self.host = host
        self.alive = False
        self.test_override = test_override
        self._name = name
        if on_register is None:
            def on_register(*args):
                return None
        self._on_register = on_register
        if on_client_exit is None:
            def on_client_exit(*args):
                return None
        self.on_client_exit = on_client_exit
        if on_server_exit is None:
            def on_server_exit():
                return None
        self.on_server_exit = on_server_exit
        self.unix_socket = unix_socket
        self.online = None
        connection_type = SocketType.server
        if peer:
            connection_type = SocketType.peer

        await self.start_server(connection_type)
        app = app if app is not None else get_app(from_=f"DaemonUtil.{self._name}")
        self.online = await asyncio.to_thread(self.connect, app)
        if t:
            await self.online

    async def start_server(self, connection_type=None):
        """Start the server using app and the socket manager"""
        from toolboxv2.mods.SocketManager import SocketType
        if connection_type is None:
            connection_type = SocketType.server
        app = get_app(from_="Starting.Daemon")
        print(app.mod_online("SocketManager"), "SocketManager")
        if not app.mod_online("SocketManager"):
            await app.load_mod("SocketManager")
        server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                            get_results=True,
                                            name=self._name,
                                            host=self.host,
                                            port=self.port,
                                            type_id=connection_type,
                                            max_connections=-1,
                                            return_full_object=True,
                                            test_override=self.test_override,
                                            unix_file=self.unix_socket)
        if server_result.is_error():
            raise Exception(f"Server error: {server_result.print(False)}")
        if not server_result.is_data():
            raise Exception(f"Server error: {server_result.print(False)}")
        self.alive = True
        self.server = server_result
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,

    async def send(self, data: dict or bytes or str, identifier: tuple[str, int] or str = "main"):
        result = await self.server.aget()
        sender = result.get('sender')
        await sender(data, identifier)
        return "Data Transmitted"

    @staticmethod
    async def runner_co(fuction, *args, **kwargs):
        if asyncio.iscoroutinefunction(fuction):
            return await fuction(*args, **kwargs)
        return fuction(*args, **kwargs)

    async def connect(self, app):
        result = await self.server.aget()
        if not isinstance(result, dict) or result.get('connection_error') != 0:
            raise Exception(f"Server error: {result}")
        self.server = Result.ok(result)
        receiver_queue: queue.Queue = self.server.get('receiver_queue')
        client_to_receiver_thread = self.server.get('client_to_receiver_thread')
        running_dict = self.server.get('running_dict')
        sender = self.server.get('sender')
        known_clients = {}
        valid_clients = {}
        app.print(f"Starting Demon {self._name}")

        while self.alive:

            if not receiver_queue.empty():
                data = receiver_queue.get()
                if not data:
                    continue
                if 'identifier' not in data:
                    continue

                identifier = data.get('identifier', 'unknown')
                try:
                    if identifier == "new_con":
                        client, address = data.get('data')
                        get_logger().info(f"New connection: {address}")
                        known_clients[str(address)] = client
                        await client_to_receiver_thread(client, str(address))

                        await self.runner_co(self._on_register, identifier, address)
                        identifier = str(address)
                        # await sender({'ok': 0}, identifier)

                    print("Receiver queue", identifier, identifier in known_clients, identifier in valid_clients)
                    # validation
                    if identifier in known_clients:
                        get_logger().info(identifier)
                        if identifier.startswith("('127.0.0.1'"):
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        elif data.get("claim", False):
                            do = app.run_any(("CloudM.UserInstances", "validate_ws_id"),
                                             ws_id=data.get("claim"))[0]
                            get_logger().info(do)
                            if do:
                                valid_clients[identifier] = known_clients[identifier]
                                await self.runner_co(self._on_register, identifier, data)
                        elif data.get("key", False) == os.getenv("TB_R_KEY"):
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        else:
                            get_logger().warning(f"Validating Failed: {identifier}")
                            # sender({'Validating Failed': -1}, eval(identifier))
                        get_logger().info(f"Validating New: {identifier}")
                        del known_clients[identifier]

                    elif identifier in valid_clients:
                        get_logger().info(f"New valid Request: {identifier}")
                        name = data.get('name')
                        args = data.get('args')
                        kwargs = data.get('kwargs')

                        get_logger().info(f"Request data: {name=}{args=}{kwargs=}{identifier=}")

                        if name == 'exit_main':
                            self.alive = False
                            break

                        if name == 'show_console':
                            show_console(True)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'hide_console':
                            show_console(False)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'rrun_flow':
                            show_console(True)
                            runnner = self.class_instance.run_flow
                            threading.Thread(target=runnner, args=args, kwargs=kwargs, daemon=True).start()
                            await sender({'ok': 0}, identifier)
                            show_console(False)
                            continue

                        async def _helper_runner():
                            try:
                                attr_f = getattr(self.class_instance, name)

                                if asyncio.iscoroutinefunction(attr_f):
                                    res = await attr_f(*args, **kwargs)
                                else:
                                    res = attr_f(*args, **kwargs)

                                if res is None:
                                    res = {'data': res}
                                elif isinstance(res, Result):
                                    if asyncio.iscoroutine(res.get()) or isinstance(res.get(), asyncio.Task):
                                        res_ = await res.aget()
                                        res.result.data = res_
                                    res = json.loads(res.to_api_result().json())
                                elif isinstance(res, bytes | dict):
                                    pass
                                else:
                                    res = {'data': 'unsupported type', 'type': str(type(res))}

                                get_logger().info(f"sending response {res} {type(res)}")

                                await sender(res, identifier)
                            except Exception as e:
                                await sender({"data": str(e)}, identifier)

                        await _helper_runner()
                    else:
                        print("Unknown connection data:", data)

                except Exception as e:
                    get_logger().warning(Style.RED(f"An error occurred on {identifier} {str(e)}"))
                    if identifier != "unknown":
                        running_dict["receive"][str(identifier)] = False
                        await self.runner_co(self.on_client_exit,  identifier)
            await asyncio.sleep(0.1)
        running_dict["server_receiver"] = False
        for x in running_dict["receive"]:
            running_dict["receive"][x] = False
        running_dict["keep_alive_var"] = False
        await self.runner_co(self.on_server_exit)
        app.print(f"Closing Demon {self._name}")
        return Result.ok()

    async def a_exit(self):
        result = await self.server.aget()
        await result.get("close")()
        self.alive = False
        if asyncio.iscoroutine(self.online):
            await self.online
        print("Connection result :", result.get("host"), result.get("port"),
              "total connections:", result.get("connections"))
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/daemon/daemon_util.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override. Use __ainit__ instead.
    """
    self.server = None
    self.alive = False
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/daemon/daemon_util.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that were passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
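Usage note: because construction is asynchronous, a DaemonUtil instance must be awaited once so __ainit__ can run and start the server. A minimal sketch, assuming a service object of your own (MyService, the host, and the port below are illustrative, not part of the source):

import asyncio
from toolboxv2.utils.daemon.daemon_util import DaemonUtil

class MyService:
    def ping(self):
        return {'pong': True}

async def main():
    # Awaiting the instance runs __initobj -> __ainit__, which starts the socket
    # server and begins serving MyService's methods to validated clients.
    daemon = await DaemonUtil(class_instance=MyService(), host='127.0.0.1',
                              port=6587, name='demo-daemon')
    await asyncio.sleep(5)   # ... daemon is serving ...
    await daemon.a_exit()    # close the socket and stop the receive loop

asyncio.run(main())
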
start_server(connection_type=None) async

Start the server using app and the socket manager

Source code in toolboxv2/utils/daemon/daemon_util.py
async def start_server(self, connection_type=None):
    """Start the server using app and the socket manager"""
    from toolboxv2.mods.SocketManager import SocketType
    if connection_type is None:
        connection_type = SocketType.server
    app = get_app(from_="Starting.Daemon")
    print(app.mod_online("SocketManager"), "SocketManager")
    if not app.mod_online("SocketManager"):
        await app.load_mod("SocketManager")
    server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                        get_results=True,
                                        name=self._name,
                                        host=self.host,
                                        port=self.port,
                                        type_id=connection_type,
                                        max_connections=-1,
                                        return_full_object=True,
                                        test_override=self.test_override,
                                        unix_file=self.unix_socket)
    if server_result.is_error():
        raise Exception(f"Server error: {server_result.print(False)}")
    if not server_result.is_data():
        raise Exception(f"Server error: {server_result.print(False)}")
    self.alive = True
    self.server = server_result

extras

BaseWidget
Source code in toolboxv2/utils/extras/base_widget.py
class BaseWidget:
    def __init__(self, name: str):
        self.name = name
        self.openWidgetsIDs = {}
        self.onReload = []
        self.iframes = {}

    def register(self, app, fuction, version=None, name="get_widget", level=1, **kwargs):
        if version is None:
            version = app.version
        app.tb(mod_name=self.name, version=version, request_as_kwarg=True, level=level, api=True, name=name, **kwargs)(
            fuction)

    def modify_iterator(self, iterator, replace):
        """
        ['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'},
        {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]
        """

        for item in iterator:
            modified_item = {replace[i]: (self.name if replace[i] == "name" else '') + item for i in
                             range(len(replace))}
            yield modified_item

    def register2reload(self, *functions):
        for fuction in functions:
            def x(r):
                return fuction(request=r)
            self.onReload.append(x)

    def reload_guard(self, function):
        c = None
        if len(self.onReload) == 0:
            c = function()
        return c

    async def oa_reload_guard(self, function):
        c = None
        if len(self.onReload) == 0:
            c = await function() if asyncio.iscoroutinefunction(function) else function()
        return c

    @staticmethod
    def get_a_group(asset_name, template=None, file_path=None, a_kwargs=None):
        if a_kwargs is None:
            raise ValueError("a_kwargs must be specified")
        return [{'name': asset_name,
                 'file_path': file_path,
                 'kwargs': a_kwargs
                 } if file_path is not None else {'name': asset_name,
                                                  'template': template,
                                                  'kwargs': a_kwargs
                                                  }]

    def group_generator(self, asset_name: str, iterator: iter, template=None, file_path=None, a_kwargs=None):
        groups = []
        work_kwargs = a_kwargs
        for _i, data in enumerate(iterator):
            if isinstance(data, dict):
                work_kwargs = {**a_kwargs, **data}
            groups.append(self.get_a_group(asset_name, template=template, file_path=file_path, a_kwargs=work_kwargs))
        return groups

    def asset_loder(self, app, name, asset_id, file_path=None, template=None, iterator=None, **kwargs):
        a_kwargs = {**{
            'root': f"/api/{self.name}",
            'WidgetID': asset_id},
                    **kwargs}
        asset_name = f"{name}-{asset_id}"
        if iterator is None:
            group = self.get_a_group(asset_name,
                                     template=template,
                                     file_path=file_path,
                                     a_kwargs=a_kwargs)
        else:
            group = self.group_generator(asset_name,
                                         iterator=iterator,
                                         template=template,
                                         file_path=file_path,
                                         a_kwargs=a_kwargs)

        asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                            group_name=self.name,
                            collection={'name': f"{asset_name}",
                                        'group': group},
                            get_results=True)
        if asset.is_error():
            app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
            asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                                group_name=self.name,
                                collection={'name': f"{self.name}-{asset_name}",
                                            'group': group},
                                get_results=True)
        return asset

    def generate_html(self, app, name="MainWidget", asset_id=str(uuid.uuid4())[:4]):
        return app.run_any(MINIMALHTML.GENERATE_HTML,
                           group_name=self.name,
                           collection_name=f"{name}-{asset_id}")

    def load_widget(self, app, request, name="MainWidget", asset_id=str(uuid.uuid4())[:4]):
        app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
        self.reload(request)
        html_widget = self.generate_html(app, name, asset_id)
        return html_widget[0]['html_element']

    @staticmethod
    async def get_user_from_request(app, request):
        from toolboxv2.mods.CloudM import User
        if request is None:
            return User()
        return await get_current_user_from_request(app, request)

    @staticmethod
    def get_s_id(request):
        from ..system.types import Result
        if request is None:
            return Result.default_internal_error("No request specified")
        return Result.ok(request.session.get('ID', ''))

    def reload(self, request):
        [_(request) for _ in self.onReload]

    async def oa_reload(self, request):
        [_(request) if not asyncio.iscoroutinefunction(_) else await _(request) for _ in self.onReload]

    async def get_widget(self, request, **kwargs):
        raise NotImplementedError

    def hash_wrapper(self, _id, _salt=''):
        from ..security.cryp import Code
        return Code.one_way_hash(text=_id, salt=_salt, pepper=self.name)

    def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
        """
        Registriert einen iframe mit gegebener ID und Quelle

        Args:
            iframe_id: Eindeutige ID für den iframe
            src: URL oder Pfad zur Quelle des iframes
            width: Breite des iframes (default: "100%")
            height: Höhe des iframes (default: "500px")
            **kwargs: Weitere iframe-Attribute
        """
        iframe_config = {
            'src': src,
            'width': width,
            'height': height,
            **kwargs
        }
        self.iframes[iframe_id] = iframe_config

    def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
        """
        Erstellt ein Asset für einen registrierten iframe

        Args:
            app: App-Instanz
            iframe_id: ID des registrierten iframes
            asset_id: Optional, spezifische Asset-ID
        """
        if iframe_id not in self.iframes:
            raise ValueError(f"iframe with ID {iframe_id} is not registered")

        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]

        iframe_config = self.iframes[iframe_id]
        iframe_template = """
        <iframe id="{iframe_id}"
                src="{src}"
                width="{width}"
                height="{height}"
                frameborder="0"
                {additional_attrs}></iframe>
        """.strip()

        # Filter out the known attributes and build a string for the additional attributes
        known_attrs = {'src', 'width', 'height'}
        additional_attrs = ' '.join(
            f'{k}="{v}"' for k, v in iframe_config.items()
            if k not in known_attrs
        )

        iframe_html = iframe_template.format(
            iframe_id=iframe_id,
            src=iframe_config['src'],
            width=iframe_config['width'],
            height=iframe_config['height'],
            additional_attrs=additional_attrs
        )

        return self.asset_loder(
            app=app,
            name=f"iframe-{iframe_id}",
            asset_id=asset_id,
            template=iframe_html
        )

    def load_iframe(self, app, iframe_id: str, asset_id: str = None):
        """
        Lädt einen registrierten iframe und gibt das HTML-Element zurück

        Args:
            app: App-Instanz
            iframe_id: ID des registrierten iframes
            asset_id: Optional, spezifische Asset-ID
        """
        self.create_iframe_asset(app, iframe_id, asset_id)
        return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
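
A minimal subclass sketch (hedged: HelloWidget and the wiring below are illustrative, not part of the source). A widget implements get_widget and is exposed through register(), which wraps the function with app.tb as an API endpoint under the widget's module name:

class HelloWidget(BaseWidget):
    def __init__(self):
        super().__init__(name="hello")

    async def get_widget(self, request, **kwargs):
        # Return the HTML fragment for this widget.
        return "<div>Hello from HelloWidget</div>"

# Assuming a running toolboxv2 App instance `app`:
# widget = HelloWidget()
# widget.register(app, widget.get_widget)
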
create_iframe_asset(app, iframe_id, asset_id=None)

Creates an asset for a registered iframe.

Parameters:

Name        Type    Description                    Default
app                 App instance                   required
iframe_id   str     ID of the registered iframe    required
asset_id    str     Optional specific asset ID     None
Source code in toolboxv2/utils/extras/base_widget.py
def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
    """
    Erstellt ein Asset für einen registrierten iframe

    Args:
        app: App-Instanz
        iframe_id: ID des registrierten iframes
        asset_id: Optional, spezifische Asset-ID
    """
    if iframe_id not in self.iframes:
        raise ValueError(f"iframe with ID {iframe_id} is not registered")

    if asset_id is None:
        asset_id = str(uuid.uuid4())[:4]

    iframe_config = self.iframes[iframe_id]
    iframe_template = """
    <iframe id="{iframe_id}"
            src="{src}"
            width="{width}"
            height="{height}"
            frameborder="0"
            {additional_attrs}></iframe>
    """.strip()

    # Filter out the known attributes and build a string for the additional attributes
    known_attrs = {'src', 'width', 'height'}
    additional_attrs = ' '.join(
        f'{k}="{v}"' for k, v in iframe_config.items()
        if k not in known_attrs
    )

    iframe_html = iframe_template.format(
        iframe_id=iframe_id,
        src=iframe_config['src'],
        width=iframe_config['width'],
        height=iframe_config['height'],
        additional_attrs=additional_attrs
    )

    return self.asset_loder(
        app=app,
        name=f"iframe-{iframe_id}",
        asset_id=asset_id,
        template=iframe_html
    )
load_iframe(app, iframe_id, asset_id=None)

Loads a registered iframe and returns the HTML element.

Parameters:

Name        Type    Description                    Default
app                 App instance                   required
iframe_id   str     ID of the registered iframe    required
asset_id    str     Optional specific asset ID     None
Source code in toolboxv2/utils/extras/base_widget.py
def load_iframe(self, app, iframe_id: str, asset_id: str = None):
    """
    Lädt einen registrierten iframe und gibt das HTML-Element zurück

    Args:
        app: App-Instanz
        iframe_id: ID des registrierten iframes
        asset_id: Optional, spezifische Asset-ID
    """
    self.create_iframe_asset(app, iframe_id, asset_id)
    return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
modify_iterator(iterator, replace)

['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'}, {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

Source code in toolboxv2/utils/extras/base_widget.py
def modify_iterator(self, iterator, replace):
    """
    ['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'},
    {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]
    """

    for item in iterator:
        modified_item = {replace[i]: (self.name if replace[i] == "name" else '') + item for i in
                         range(len(replace))}
        yield modified_item
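
A concrete illustration of the mapping (the widget name "demo" is arbitrary):

w = BaseWidget("demo")
list(w.modify_iterator(['a', 'b'], ['name', 'id']))
# -> [{'name': 'demoa', 'id': 'a'}, {'name': 'demob', 'id': 'b'}]
# Keys equal to "name" are prefixed with the widget's name; other keys get the raw item.
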
register_iframe(iframe_id, src, width='100%', height='500px', **kwargs)

Registers an iframe with the given ID and source.

Parameters:

Name        Type    Description                            Default
iframe_id   str     Unique ID for the iframe               required
src         str     URL or path to the iframe's source     required
width       str     Width of the iframe                    '100%'
height      str     Height of the iframe                   '500px'
**kwargs            Additional iframe attributes           {}
Source code in toolboxv2/utils/extras/base_widget.py
def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
    """
    Registriert einen iframe mit gegebener ID und Quelle

    Args:
        iframe_id: Eindeutige ID für den iframe
        src: URL oder Pfad zur Quelle des iframes
        width: Breite des iframes (default: "100%")
        height: Höhe des iframes (default: "500px")
        **kwargs: Weitere iframe-Attribute
    """
    iframe_config = {
        'src': src,
        'width': width,
        'height': height,
        **kwargs
    }
    self.iframes[iframe_id] = iframe_config
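
A short sketch of the iframe round trip (hedged: the widget name, URL, and extra attribute are illustrative, and load_iframe requires a running App with the MinimalHtml module available):

w = BaseWidget("demo")
w.register_iframe("docs", src="https://example.com/docs", height="300px", loading="lazy")
# html = w.load_iframe(app, "docs")  # builds the asset and returns the rendered <iframe> element
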
Style
Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Initialize spinner with flexible configuration."""
        # Resolve symbol set.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Default symbols if not provided.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Test mode symbol set.
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering management.
        self._is_primary = False
        self._start_time = 0

        # Central manager.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Generate the primary render line."""
        current_time = time.time()
        if self.contd:
            remaining = max(0, self.max_t - (current_time - self._start_time))
            time_display = f"{remaining:.2f}"
        else:
            time_display = f"{current_time - self._start_time:.2f}"

        symbol = next(self.spinner)
        return f"{symbol} {self.message} | {time_display}"

    def _generate_secondary_info(self):
        """Generate secondary spinner info for additional spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Start the spinner."""
        self.running = True
        self._start_time = time.time()
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        # Clear the spinner's line if it was the primary spinner.
        if self._is_primary:
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()
__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __enter__(self):
    """Start the spinner."""
    self.running = True
    self._start_time = time.time()
    self.manager.register_spinner(self)
    return self
__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()
__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration."""
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set.
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False
    self._start_time = 0

    # Central manager.
    self.manager = SpinnerManager()
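
A brief usage sketch (the message and the "d" symbol-set key below are arbitrary choices; "d" selects the braille frames from SYMBOL_SETS):

import time
from toolboxv2.utils.extras.Style import Spinner

with Spinner("Crunching numbers", symbols="d"):
    time.sleep(2)  # the SpinnerManager render thread animates the line while work runs
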
SpinnerManager

Manages multiple spinners to ensure tqdm-like line rendering. Automatically captures SIGINT (Ctrl+C) to stop all spinners.

Source code in toolboxv2/utils/extras/Style.py
class SpinnerManager(metaclass=Singleton):
    """
    Manages multiple spinners to ensure tqdm-like line rendering.
    Automatically captures SIGINT (Ctrl+C) to stop all spinners.
    """
    _instance = None

    def __new__(cls):
        if not cls._instance:
            cls._instance = super().__new__(cls)
            cls._instance._init_manager()
        return cls._instance

    def _init_manager(self):
        """Initialize spinner management resources and register SIGINT handler."""
        self._spinners = []
        self._lock = threading.Lock()
        self._render_thread = None
        self._should_run = False
        try:
            signal.signal(signal.SIGINT, self._signal_handler)
        except ValueError:
            print("Spinner Manager not in the min Thread no signal possible")
            pass

    def _signal_handler(self, signum, frame):
        """Handle SIGINT by stopping all spinners gracefully."""
        with self._lock:
            for spinner in self._spinners:
                spinner.running = False
            self._spinners.clear()
        self._should_run = False
        sys.stdout.write("\r\033[K")  # Clear the spinner's line.
        sys.stdout.flush()
        sys.exit(0)

    def register_spinner(self, spinner):
        """Register a new spinner."""
        with self._lock:
            # First spinner defines the rendering line.
            if not self._spinners:
                spinner._is_primary = True
            self._spinners.append(spinner)
            # Start rendering if not already running.
            if not self._should_run:
                self._should_run = True
                self._render_thread = threading.Thread(
                    target=self._render_loop,
                    daemon=True
                )
                self._render_thread.start()

    def unregister_spinner(self, spinner):
        """Unregister a completed spinner."""
        with self._lock:
            if spinner in self._spinners:
                self._spinners.remove(spinner)

    def _render_loop(self):
        """Continuous rendering loop for all active spinners."""
        while self._should_run:
            if not self._spinners:
                self._should_run = False
                break

            with self._lock:
                # Find primary spinner (first registered).
                primary_spinner = next((s for s in self._spinners if s._is_primary), None)

                if primary_spinner and primary_spinner.running:
                    # Render in the same line.
                    render_line = primary_spinner._generate_render_line()

                    # Append additional spinner info if multiple exist.
                    if len(self._spinners) > 1:
                        secondary_info = " | ".join(
                            s._generate_secondary_info()
                            for s in self._spinners
                            if s is not primary_spinner and s.running
                        )
                        render_line += f" [{secondary_info}]"

                    # Clear line and write.
                    try:
                        sys.stdout.write("\r" + render_line + "\033[K")
                        sys.stdout.flush()
                    except Exception:
                        self._should_run = False

            time.sleep(0.1)  # Render interval.
register_spinner(spinner)

Register a new spinner.

Source code in toolboxv2/utils/extras/Style.py
def register_spinner(self, spinner):
    """Register a new spinner."""
    with self._lock:
        # First spinner defines the rendering line.
        if not self._spinners:
            spinner._is_primary = True
        self._spinners.append(spinner)
        # Start rendering if not already running.
        if not self._should_run:
            self._should_run = True
            self._render_thread = threading.Thread(
                target=self._render_loop,
                daemon=True
            )
            self._render_thread.start()
unregister_spinner(spinner)

Unregister a completed spinner.

Source code in toolboxv2/utils/extras/Style.py
def unregister_spinner(self, spinner):
    """Unregister a completed spinner."""
    with self._lock:
        if spinner in self._spinners:
            self._spinners.remove(spinner)
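
When several spinners are active at once, the first registered spinner owns the line and the others are appended as secondary info. A small sketch:

import time
from toolboxv2.utils.extras.Style import Spinner

with Spinner("Outer task"):
    with Spinner("inner step"):  # rendered as "[inner step]" after the primary line
        time.sleep(1)
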
gist_control
GistLoader
Source code in toolboxv2/utils/extras/gist_control.py
class GistLoader:
    def __init__(self, gist_url):
        self.gist_url = gist_url
        self.module_code = None

    def load_module(self, module_name):
        """Lädt das Modul mit dem gegebenen Namen."""
        if self.module_code is None:
            self.module_code = self._fetch_gist_content()

        # Create a new module
        module = importlib.util.module_from_spec(self.get_spec(module_name))
        exec(self.module_code, module.__dict__)
        return module

    def get_spec(self, module_name):
        """Gibt die Modul-Specifikation zurück."""
        return ModuleSpec(module_name, self)

    def get_filename(self, module_name):
        return f"<gist:{self.gist_url}>"

    def _fetch_gist_content(self):
        """Lädt den Inhalt des Gists von der GitHub API herunter."""
        gist_id = self.gist_url.split('/')[-1]
        api_url = f"https://api.github.com/gists/{gist_id}"

        response = requests.get(api_url)

        if response.status_code == 200:
            gist_data = response.json()
            first_file = next(iter(gist_data['files'].values()))
            return first_file['content']
        else:
            raise Exception(f"Failed to fetch gist: {response.status_code}")
get_spec(module_name)

Returns the module specification.

Source code in toolboxv2/utils/extras/gist_control.py
def get_spec(self, module_name):
    """Gibt die Modul-Specifikation zurück."""
    return ModuleSpec(module_name, self)
load_module(module_name)

Loads the module with the given name.

Source code in toolboxv2/utils/extras/gist_control.py
def load_module(self, module_name):
    """Lädt das Modul mit dem gegebenen Namen."""
    if self.module_code is None:
        self.module_code = self._fetch_gist_content()

    # Create a new module
    module = importlib.util.module_from_spec(self.get_spec(module_name))
    exec(self.module_code, module.__dict__)
    return module
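
Example, loading a module straight from a public gist (a sketch; the gist URL is illustrative, and the call performs a network request to the GitHub API):

from toolboxv2.utils.extras.gist_control import GistLoader

loader = GistLoader("https://gist.github.com/user/abc123")
mod = loader.load_module("my_gist_module")
# mod now exposes whatever top-level names the gist's first file defines.
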
helper_test_functions
generate_edge_value(param_type)

Generates edge-case values based on the parameter type.

Source code in toolboxv2/utils/extras/helper_test_functions.py
def generate_edge_value(param_type: Any) -> Any:
    """
    Generiert Edge-Case-Werte basierend auf dem Parametertyp.
    """
    if param_type in [int, float]:
        return -999  # Beispiel für negative Zahlen
    elif param_type == str:
        return "test " * 100  # Lange zufällige Strings
    # Fügen Sie hier weitere Bedingungen für andere Datentypen hinzu
    return None
generate_normal_value(param_type)

Generates normal values based on the parameter type.

Source code in toolboxv2/utils/extras/helper_test_functions.py
def generate_normal_value(param_type: Any) -> Any:
    """
    Generiert normale Werte basierend auf dem Parametertyp.
    """
    if param_type in [int, float]:
        return random.randint(0, 100)  # Zufällige normale Zahlen
    elif param_type == str:
        return "test" # Zufälliges Wort
    # Fügen Sie hier weitere Bedingungen für andere Datentypen hinzu
    return None
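
Example, a sketch that drives both generators from a function signature via inspect to build quick test inputs (build_kwargs and greet are illustrative helpers, not part of the module):

import inspect

from toolboxv2.utils.extras.helper_test_functions import (
    generate_edge_value,
    generate_normal_value,
)

def build_kwargs(target, edge=False):
    """Map each annotated parameter of target to a generated value."""
    gen = generate_edge_value if edge else generate_normal_value
    return {name: gen(p.annotation)
            for name, p in inspect.signature(target).parameters.items()}

def greet(name: str, times: int) -> str:
    return name * times

print(build_kwargs(greet))        # e.g. {'name': 'test', 'times': 42}
print(build_kwargs(greet, True))  # {'name': 'test test ...', 'times': -999}
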
keword_matcher
calculate_keyword_score(text, keywords)

Computes the keyword score based on how often the keywords occur in the text. Case-insensitive and optimized for speed.

:param text: Input text as a string
:param keywords: Set of keywords
:return: Total score as an integer

Source code in toolboxv2/utils/extras/keword_matcher.py
def calculate_keyword_score(text: str, keywords: set[str]) -> int:
    """
    Berechnet den Keyword-Score basierend auf der Häufigkeit der Keywords im Text.
    Case-insensitive und optimiert für Geschwindigkeit.

    :param text: Eingabetext als String
    :param keywords: Set von Keywords
    :return: Gesamt-Score als Integer
    """
    # Vorverarbeitung der Keywords
    keyword_pattern = re.compile(
        r'\b(' + '|'.join(re.escape(k.lower()) for k in keywords) + r')\b',
        flags=re.IGNORECASE
    )

    # Erstelle Frequenz-Wörterbuch
    freq_dict = defaultdict(int)

    # Finde alle Übereinstimmungen
    matches = keyword_pattern.findall(text.lower())

    # Zähle die Treffer
    for match in matches:
        freq_dict[match.lower()] += 1

    # Berechne Gesamt-Score
    total_score = sum(freq_dict.values())

    return total_score
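
Example, a quick usage sketch:

from toolboxv2.utils.extras.keword_matcher import calculate_keyword_score

text = "Python is great. I use Python and Rust; python wins."
print(calculate_keyword_score(text, {"python", "rust"}))  # -> 4
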
calculate_weighted_score(text, keyword_weights)

Computes a weighted score with an individual weight per keyword.

:param text: Input text
:param keyword_weights: Dictionary of {keyword: weight} (a list of pairs is also accepted)
:return: Weighted total score

Source code in toolboxv2/utils/extras/keword_matcher.py
def calculate_weighted_score(text: str, keyword_weights: dict or list) -> float:
    """
    Berechnet gewichteten Score mit unterschiedlichen Gewichten pro Keyword

    :param text: Eingabetext
    :param keyword_weights: Dictionary mit {Keyword: Gewicht}
    :return: Gewichteter Gesamt-Score
    """
    total = 0.0
    text_lower = text.lower()

    if isinstance(keyword_weights, list):
        keyword_weights = {k:v for k, v in keyword_weights}

    for keyword, weight in keyword_weights.items():
        count = len(re.findall(r'\b' + re.escape(keyword.lower()) + r'\b', text_lower))
        total += count * weight

    return round(total, 2)
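
Example, weights let important terms dominate the score:

from toolboxv2.utils.extras.keword_matcher import calculate_weighted_score

text = "Error: disk full. Error persists after reboot."
print(calculate_weighted_score(text, {"error": 2.0, "reboot": 0.5}))  # -> 4.5
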
extract_keywords(text, max_len=-1, min_word_length=3, with_weights=False, remove_stopwords=True, stopwords=True)

Extracts keywords with optional frequency weighting.

:param text: Input text
:param max_len: Maximum number of keywords (-1 = all)
:param min_word_length: Minimum word length
:param with_weights: Return (word, frequency) pairs if True
:param remove_stopwords: Filter out stopwords
:param stopwords: Use the default German stopword list
:return: Keywords, or (keyword, frequency) pairs

Source code in toolboxv2/utils/extras/keword_matcher.py
def extract_keywords(
    text: str,
    max_len: int = -1,
    min_word_length: int = 3,
    with_weights: bool = False,
    remove_stopwords: bool = True,
    stopwords: bool = True
) -> list[str] | list[tuple[str, int]]:
    """
    Extrahiert Keywords mit optionaler Frequenzgewichtung

    :param text: Eingabetext
    :param max_len: Maximale Anzahl Keywords (-1 = alle)
    :param min_word_length: Minimale Wortlänge
    :param with_weights: Gibt Wort+Frequenz zurück wenn True
    :param remove_stopwords: Filtert deutsche Stopwörter
    :param german_stopwords: Verwendet deutsche Standard-Stopwörter
    :return: Keywords oder (Keyword, Häufigkeit) Paare
    """

    # Deutsche Basis-Stopwörter
    DEFAULT_STOPWORDS = STOPWORDS if stopwords else set()

    # Text vorverarbeiten
    words = re.findall(r'\b\w+\b', text.lower())

    # Worte filtern
    filtered_words = [
        word for word in words
        if len(word) > min_word_length
           and (not remove_stopwords or word not in DEFAULT_STOPWORDS)
    ]

    # Frequenzanalyse
    word_counts = defaultdict(int)
    for word in filtered_words:
        word_counts[word] += 1

    # Sortierung: Zuerst Häufigkeit, dann alphabetisch
    sorted_words = sorted(
        word_counts.items(),
        key=lambda x: (-x[1], x[0])
    )

    # Längenbegrenzung
    if max_len == -1:
        max_len = None
    result = sorted_words[:max_len]

    return result if with_weights else [word for word, _ in result]
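
Example, top keywords with their frequencies (note the default stopword list is German, so English text passes through largely unfiltered):

from toolboxv2.utils.extras.keword_matcher import extract_keywords

text = "Kubernetes deploys containers; containers restart, Kubernetes heals."
print(extract_keywords(text, max_len=2, with_weights=True))
# -> [('containers', 2), ('kubernetes', 2)]
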
reqbuilder
generate_requirements(folder, output_file)

Generates requirements.txt for the specified folder using pipreqs.

Source code in toolboxv2/utils/extras/reqbuilder.py
def generate_requirements(folder: str, output_file: str):
    """Generates requirements.txt for the specified folder using pipreqs."""
    print(folder, output_file, os.path.abspath(os.curdir))
    try:
        from pipreqs.pipreqs import get_all_imports
    except ImportError:
        subprocess.run([sys.executable, "-m", "pip", "install", "pipreqs"], check=True)
    from pipreqs.pipreqs import get_all_imports
    imports = set(get_all_imports(os.path.abspath(folder)))
    imports.discard('toolboxv2')  # the package itself is not a requirement
    with open(os.path.abspath(output_file), "w") as f:
        f.write("\n".join(imports))
run_pipeline(base_dir)

Runs the entire pipeline to generate requirements files.

Source code in toolboxv2/utils/extras/reqbuilder.py
def run_pipeline(base_dir: str):
    """Runs the entire pipeline to generate requirements files."""
    toolbox_path = os.path.join(base_dir, "toolboxv2")
    utils_path = os.path.join(toolbox_path, "utils")
    mini_req_file = os.path.join(base_dir, "requirements_mini.txt")
    extras_req_file = os.path.join(base_dir, "requirements_tests.txt")

    # Step 1: Generate minimal requirements
    print("Step 1/2: ")
    generate_requirements(utils_path, mini_req_file)

    # Step 2: Generate extended requirements
    print("Step 2/2: ")
    extras_path = os.path.join(toolbox_path, "tests")
    generate_requirements(extras_path, extras_req_file)
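
Example, regenerating both requirements files from a repository root (a sketch; pipreqs is installed on demand by generate_requirements):

from toolboxv2.utils.extras.reqbuilder import run_pipeline

run_pipeline(".")  # writes requirements_mini.txt and requirements_tests.txt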

proxy

ProxyUtil
Source code in toolboxv2/utils/proxy/prox_util.py
class ProxyUtil:
    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.
        Do not override. Use __ainit__ instead.
        """
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        # assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that were passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, timeout=6,
                        app: (App or AppType) | None = None,
                        remote_functions=None, peer=False, name='ProxyApp-client', do_connect=True, unix_socket=False,
                        test_override=False):
        self.class_instance = class_instance
        self.client = None
        self.test_override = test_override
        self.port = port
        self.host = host
        self.timeout = timeout
        if app is None:
            app = get_app("ProxyUtil")
        self.app = app
        self._name = name
        self.unix_socket = unix_socket
        if remote_functions is None:
            remote_functions = ["run_any", "a_run_any", "remove_mod", "save_load", "exit_main", "show_console", "hide_console",
                                "rrun_flow",
                                "get_autocompletion_dict",
                                "exit_main", "watch_mod"]
        self.remote_functions = remote_functions

        from toolboxv2.mods.SocketManager import SocketType
        self.connection_type = SocketType.client
        if peer:
            self.connection_type = SocketType.peer
        if do_connect:
            await self.connect()

    async def connect(self):
        client_result = await self.app.a_run_local(SOCKETMANAGER.CREATE_SOCKET,
                                           get_results=True,
                                           name=self._name,
                                           host=self.host,
                                           port=self.port,
                                           type_id=self.connection_type,
                                           max_connections=-1,
                                           return_full_object=True,
                                           test_override=self.test_override,
                                           unix_file=self.unix_socket)

        if client_result.is_error():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        if not client_result.is_data():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,
        result = await client_result.aget()
        if result is None or result.get('connection_error') != 0:
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        self.client = Result.ok(result)

    async def disconnect(self):
        time.sleep(1)
        close = self.client.get("close")
        await close()
        self.client = None

    async def reconnect(self):
        if self.client is not None:
            await self.disconnect()
        await self.connect()

    async def verify(self, message=b"verify"):
        await asyncio.sleep(1)
        # self.client.get('sender')({'keepalive': 0})
        await self.client.get('sender')(message)

    def __getattr__(self, name):

        # print(f"ProxyApp: {name}, {self.client is None}")
        if name == "on_exit":
            return self.disconnect
        if name == "rc":
            return self.reconnect

        if name == "r":
            try:
                return self.client.get('receiver_queue').get(timeout=self.timeout)
            except:
                return "No data"

        app_attr = getattr(self.class_instance, name)

        async def method(*args, **kwargs):
            # if name == 'run_any':
            #     print("method", name, kwargs.get('get_results', False), args[0])
            if self.client is None:
                await self.reconnect()
            if kwargs.get('spec', '-') == 'app':
                if asyncio.iscoroutinefunction(app_attr):
                    return await app_attr(*args, **kwargs)
                return app_attr(*args, **kwargs)
            try:
                if name in self.remote_functions:
                    if (name == 'run_any' or name == 'a_run_any') and not kwargs.get('get_results', False):
                        if asyncio.iscoroutinefunction(app_attr):
                            return await app_attr(*args, **kwargs)
                        return app_attr(*args, **kwargs)
                    if (name == 'run_any' or name == 'a_run_any') and kwargs.get('get_results', False):
                        if isinstance(args[0], Enum):
                            args = (args[0].__class__.NAME.value, args[0].value), args[1:]
                    self.app.sprint(f"Calling method {name}, {args=}, {kwargs=}")
                    await self.client.get('sender')({'name': name, 'args': args, 'kwargs': kwargs})
                    while Spinner("Waiting for result"):
                        try:
                            data = self.client.get('receiver_queue').get(timeout=self.timeout)
                            if isinstance(data, dict) and 'identifier' in data:
                                del data["identifier"]
                            if 'error' in data and 'origin' in data and 'result' in data and 'info' in data:
                                data = ApiResult(**data).as_result()
                            return data
                        except:
                            print("No data look later with class_instance.r")
                            return Result.default_internal_error("No data received from Daemon."
                                                                 " Use class_instance.r to get data later")
            except:
                if self.client.get('socket') is None:
                    self.client = None
            return app_attr(*args, **kwargs)

        if callable(app_attr) and name in self.remote_functions and self.client is not None:
            return method
        return app_attr
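
Example, a hedged usage sketch (must run inside an async context; my_app stands in for an existing App instance, and the module/function tuple is illustrative):

proxy = await ProxyUtil(
    class_instance=my_app,  # object whose remote-capable calls are forwarded
    host="127.0.0.1",
    port=6587,
    do_connect=True,        # open the socket during __ainit__
)
result = await proxy.run_any(("MODULE", "function"), get_results=True)
await proxy.disconnect()
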
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/proxy/prox_util.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override. Use __ainit__ instead.
    """
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/proxy/prox_util.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    # assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that were passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
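
The awaitable-constructor idiom above generalizes; a minimal self-contained sketch:

import asyncio

class AsyncInit:
    def __init__(self, *args, **kwargs):
        # synchronous part: only store the arguments
        self._stored = args, kwargs
        self.async_initialized = False

    async def __ainit__(self, greeting: str):
        await asyncio.sleep(0)  # stand-in for real async setup
        self.greeting = greeting

    def __await__(self):
        async def init():
            self.async_initialized = True
            await self.__ainit__(*self._stored[0], **self._stored[1])
            return self
        return init().__await__()

async def main():
    obj = await AsyncInit("hello")  # await completes construction
    print(obj.greeting)

asyncio.run(main())
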
prox_util
ProxyUtil is re-exported by this module; the class listing and the __init__/__initobj documentation are identical to the ProxyUtil section under proxy above (source: toolboxv2/utils/proxy/prox_util.py).

security

Code
Source code in toolboxv2/utils/security/cryp.py
class Code:

    @staticmethod
    def DK():
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):

        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):

        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Erzeugt eine zufällige Zahl als Seed.

        Returns:
            int: Eine zufällige Zahl.
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

        Args:
            text (str): Der zu hashende Text.
            salt (str): Der Salt-Wert.
            pepper (str): Der Pepper-Wert.
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            str: Der resultierende Hash-Wert.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key() -> str:
        """
        Generiert einen Schlüssel für die symmetrische Verschlüsselung.

        Returns:
            str: Der generierte Schlüssel.
        """
        return Fernet.generate_key().decode()

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.

        Returns:
            str: Der verschlüsselte Text.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            encrypted_text (str): Der zu entschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.
            to_str (bool): default true returns str if false returns bytes
        Returns:
            str: Der entschlüsselte Text.
        """

        if isinstance(key, str):
            key = key.encode()

        #try:
        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()
        # except Exception as e:
        #     get_logger().error(f"Error decrypt_symmetric {e}")
        #     if not mute:
        #         raise e
        #     if not to_str:
        #         return f"Error decoding".encode()
        #     return f"Error decoding"

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

        Args:
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize the keys
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Speichert die generierten Schlüssel in separate Dateien.
        Der private Schlüssel wird mit dem Device Key verschlüsselt.

        Args:
            public_key (str): Der öffentliche Schlüssel im PEM-Format
            private_key (str): Der private Schlüssel im PEM-Format
            directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
        """
        # Erstelle das Verzeichnis, falls es nicht existiert
        os.makedirs(directory, exist_ok=True)

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Verschlüssele den privaten Schlüssel mit dem Device Key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Speichere den öffentlichen Schlüssel
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Speichere den verschlüsselten privaten Schlüssel
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Lädt die Schlüssel aus den Dateien.
        Der private Schlüssel wird mit dem Device Key entschlüsselt.

        Args:
            directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

        Raises:
            FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
        """
        # Pfade zu den Schlüsseldateien
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Prüfe ob die Dateien existieren
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Lade den öffentlichen Schlüssel
        with open(public_key_path) as f:
            public_key = f.read()

        # Lade und entschlüssele den privaten Schlüssel
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

        Returns:
            str: Der verschlüsselte Text.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

        Args:
            encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
            private_key_str (str): Der private Schlüssel als String.

        Returns:
            str: Der entschlüsselte Text.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                # padding=padding.PSS(
                #    mgf=padding.MGF1(hashes.SHA512()),
                #    salt_length=padding.PSS.MAX_LENGTH
                # ),
                signature_algorithm=signature_algorithm
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

        Args:
            pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

        Returns:
            PublicKey: Das PublicKey-Objekt.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

        Args:
            public_key (PublicKey): Das PublicKey-Objekt.

        Returns:
            str: Der PEM-kodierte öffentliche Schlüssel.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
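
Example, a usage sketch of the helpers above (symmetric round-trip, asymmetric round-trip, and sign/verify; RSA key generation is slow at this key size):

from toolboxv2.utils.security.cryp import Code

# Symmetric (Fernet)
key = Code.generate_symmetric_key()
token = Code.encrypt_symmetric("secret", key)
assert Code.decrypt_symmetric(token, key) == "secret"

# Asymmetric (RSA-OAEP)
pub, priv = Code.generate_asymmetric_keys()
cipher_hex = Code.encrypt_asymmetric("hello", pub)
assert Code.decrypt_asymmetric(cipher_hex, priv) == "hello"

# Signature (RSA-PSS): create_signature returns base64 by default, while
# verify_signature expects raw bytes, so request row=True here.
sig = Code.create_signature("message", priv, row=True)
assert Code.verify_signature(sig, "message", pub)
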
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Decrypts text with the given private key.

Parameters:

- encrypted_text_hex (str): the encrypted text as a hex string (required)
- private_key_str (str): the private key as a string (required)

Returns:

- str: the decrypted text

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

    Args:
        encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
        private_key_str (str): Der private Schlüssel als String.

    Returns:
        str: Der entschlüsselte Text.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Decrypts text with the given symmetric key.

Parameters:

- encrypted_text (str): the text to decrypt (required)
- key (str): the symmetric key (required)
- to_str (bool): if True (default) return str, otherwise bytes (default: True)

Returns:

- str: the decrypted text

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        encrypted_text (str): Der zu entschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.
        to_str (bool): default true returns str if false returns bytes
    Returns:
        str: Der entschlüsselte Text.
    """

    if isinstance(key, str):
        key = key.encode()

    #try:
    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()
encrypt_asymmetric(text, public_key_str) staticmethod

Encrypts text with the given public key.

Parameters:

- text (str): the text to encrypt (required)
- public_key_str (str): the public key as a string in PEM format (required)

Returns:

- str: the encrypted text

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

    Returns:
        str: Der verschlüsselte Text.
    """
    # try:
    #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
    #  except Exception as e:
    #     get_logger().error(f"Error encrypt_asymmetric {e}")
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Encrypts text with the given symmetric key.

Parameters:

- text (str or bytes): the text to encrypt (required)
- key (str): the symmetric key (required)

Returns:

- str: the encrypted text

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.

    Returns:
        str: Der verschlüsselte Text.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generates a public/private key pair for asymmetric encryption.

Returns:

- (str, str): a tuple of public and private key

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

    Args:
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialisieren der Schlüssel
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key
generate_seed() staticmethod

Generates a random number to use as a seed.

Returns:

- int: a random number

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_seed() -> int:
    """
    Erzeugt eine zufällige Zahl als Seed.

    Returns:
        int: Eine zufällige Zahl.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key() staticmethod

Generates a key for symmetric encryption.

Returns:

- str: the generated key

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_symmetric_key() -> str:
    """
    Generiert einen Schlüssel für die symmetrische Verschlüsselung.

    Returns:
        str: Der generierte Schlüssel.
    """
    return Fernet.generate_key().decode()
load_keys_from_files(directory='keys') staticmethod

Loads the keys from files. The private key is decrypted with the device key.

Parameters:

- directory (str): the directory from which to load the keys (default: 'keys')

Returns:

- (str, str): a tuple of public and private key, or ("", "") if the key files cannot be found

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Lädt die Schlüssel aus den Dateien.
    Der private Schlüssel wird mit dem Device Key entschlüsselt.

    Args:
        directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

    Returns:
        (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

    Raises:
        FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
    """
    # Pfade zu den Schlüsseldateien
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Prüfe ob die Dateien existieren
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Hole den Device Key
    device_key = DEVICE_KEY()

    # Lade den öffentlichen Schlüssel
    with open(public_key_path) as f:
        public_key = f.read()

    # Lade und entschlüssele den privaten Schlüssel
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key
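
Example, persisting a fresh key pair and loading it back on the same device (the private key is transparently encrypted with the device key on disk):

from toolboxv2.utils.security.cryp import Code

pub, priv = Code.generate_asymmetric_keys()
Code.save_keys_to_files(pub, priv, directory="keys")
pub2, priv2 = Code.load_keys_from_files(directory="keys")
assert (pub2, priv2) == (pub, priv)
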
one_way_hash(text, salt='', pepper='') staticmethod

Hashes the given text with salt and pepper (SHA-256).

Parameters:

- text (str): the text to hash (required)
- salt (str): the salt value (default: '')
- pepper (str): the pepper value (default: '')

Returns:

- str: the resulting hash value

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

    Args:
        text (str): Der zu hashende Text.
        salt (str): Der Salt-Wert.
        pepper (str): Der Pepper-Wert.
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        str: Der resultierende Hash-Wert.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
pem_to_public_key(pem_key) staticmethod

Converts a PEM-encoded public key into a PublicKey object.

Parameters:

- pem_key (str): the PEM-encoded public key (required)

Returns:

- PublicKey: the PublicKey object

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

    Args:
        pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

    Returns:
        PublicKey: Das PublicKey-Objekt.
    """
    public_key = serialization.load_pem_public_key(pem_key.encode())
    return public_key
public_key_to_pem(public_key) staticmethod

Converts a PublicKey object into a PEM-encoded string.

Parameters:

- public_key (PublicKey): the PublicKey object (required)

Returns:

- str: the PEM-encoded public key

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

    Args:
        public_key (PublicKey): Das PublicKey-Objekt.

    Returns:
        str: Der PEM-kodierte öffentliche Schlüssel.
    """
    pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Saves the generated keys to separate files. The private key is encrypted with the device key.

Parameters:

- public_key (str): the public key in PEM format (required)
- private_key (str): the private key in PEM format (required)
- directory (str): the directory in which to store the keys (default: 'keys')
Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Speichert die generierten Schlüssel in separate Dateien.
    Der private Schlüssel wird mit dem Device Key verschlüsselt.

    Args:
        public_key (str): Der öffentliche Schlüssel im PEM-Format
        private_key (str): Der private Schlüssel im PEM-Format
        directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
    """
    # Erstelle das Verzeichnis, falls es nicht existiert
    os.makedirs(directory, exist_ok=True)

    # Hole den Device Key
    device_key = DEVICE_KEY()

    # Verschlüssele den privaten Schlüssel mit dem Device Key
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    # Speichere den öffentlichen Schlüssel
    public_key_path = os.path.join(directory, "public_key.pem")
    with open(public_key_path, "w") as f:
        f.write(public_key)

    # Speichere den verschlüsselten privaten Schlüssel
    private_key_path = os.path.join(directory, "private_key.pem")
    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)
cryp
Code
Source code in toolboxv2/utils/security/cryp.py
class Code:

    @staticmethod
    def DK():
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):

        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):

        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Erzeugt eine zufällige Zahl als Seed.

        Returns:
            int: Eine zufällige Zahl.
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

        Args:
            text (str): Der zu hashende Text.
            salt (str): Der Salt-Wert.
            pepper (str): Der Pepper-Wert.
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            str: Der resultierende Hash-Wert.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key() -> str:
        """
        Generiert einen Schlüssel für die symmetrische Verschlüsselung.

        Returns:
            str: Der generierte Schlüssel.
        """
        return Fernet.generate_key().decode()

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.

        Returns:
            str: Der verschlüsselte Text.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            encrypted_text (str): Der zu entschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.
            to_str (bool): default true returns str if false returns bytes
        Returns:
            str: Der entschlüsselte Text.
        """

        if isinstance(key, str):
            key = key.encode()

        #try:
        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()
        # except Exception as e:
        #     get_logger().error(f"Error decrypt_symmetric {e}")
        #     if not mute:
        #         raise e
        #     if not to_str:
        #         return f"Error decoding".encode()
        #     return f"Error decoding"

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

        Args:
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialisieren der Schlüssel
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Speichert die generierten Schlüssel in separate Dateien.
        Der private Schlüssel wird mit dem Device Key verschlüsselt.

        Args:
            public_key (str): Der öffentliche Schlüssel im PEM-Format
            private_key (str): Der private Schlüssel im PEM-Format
            directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
        """
        # Erstelle das Verzeichnis, falls es nicht existiert
        os.makedirs(directory, exist_ok=True)

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Verschlüssele den privaten Schlüssel mit dem Device Key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Speichere den öffentlichen Schlüssel
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Speichere den verschlüsselten privaten Schlüssel
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Lädt die Schlüssel aus den Dateien.
        Der private Schlüssel wird mit dem Device Key entschlüsselt.

        Args:
            directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

        Raises:
            FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
        """
        # Pfade zu den Schlüsseldateien
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Prüfe ob die Dateien existieren
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Lade den öffentlichen Schlüssel
        with open(public_key_path) as f:
            public_key = f.read()

        # Lade und entschlüssele den privaten Schlüssel
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

        Returns:
            str: Der verschlüsselte Text.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

        Args:
            encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
            private_key_str (str): Der private Schlüssel als String.

        Returns:
            str: Der entschlüsselte Text.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        """Verifies an RSA-PSS/SHA-512 signature; returns True on success."""
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        """Verifies an ECDSA signature (SHA-512 by default, SHA-256 otherwise)."""
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                signature_algorithm=signature_algorithm
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        """Signs a message with RSA-PSS/SHA-512; returns base64 text, or raw bytes if row=True."""
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

        Args:
            pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

        Returns:
            PublicKey: Das PublicKey-Objekt.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

        Args:
            public_key (PublicKey): Das PublicKey-Objekt.

        Returns:
            str: Der PEM-kodierte öffentliche Schlüssel.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
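
A quick signing round trip based on the source above (create_signature and verify_signature have no individual entries below). This is a minimal sketch, assuming the cryptography package is installed; row=True is used because verify_signature expects the raw signature bytes, while the default return value is base64 text. Key generation at the 6144-bit size used here can take a while.

from toolboxv2.utils.security.cryp import Code

public_pem, private_pem = Code.generate_asymmetric_keys()  # 6144-bit RSA

# row=True returns the raw signature bytes that verify_signature expects;
# the default (row=False) base64-encodes them for transport.
signature = Code.create_signature("hello toolbox", private_pem, row=True)

assert Code.verify_signature(signature, "hello toolbox", public_pem)
assert not Code.verify_signature(signature, "tampered message", public_pem)
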
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Decrypts a text with a given private key.

Parameters:

    encrypted_text_hex (str): The encrypted text as a hex string. Required.
    private_key_str (str): The private key as a string. Required.

Returns:

    str: The decrypted text, or "Invalid" on error.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

    Args:
        encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
        private_key_str (str): Der private Schlüssel als String.

    Returns:
        str: Der entschlüsselte Text.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Decrypts a text with a given symmetric key.

Parameters:

    encrypted_text (str): The text to decrypt. Required.
    key (str): The symmetric (Fernet) key. Required.
    to_str (bool): If True (default), return str; otherwise return bytes.
    mute (bool): Accepted for backward compatibility; currently unused. Default False.

Returns:

    str: The decrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        encrypted_text (str): Der zu entschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.
        to_str (bool): default true returns str if false returns bytes
    Returns:
        str: Der entschlüsselte Text.
    """

    if isinstance(key, str):
        key = key.encode()

    #try:
    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()
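
A symmetric round trip using the Fernet-based helpers, as a minimal sketch:

from toolboxv2.utils.security.cryp import Code

key = Code.generate_symmetric_key()            # base64 Fernet key as str
token = Code.encrypt_symmetric("hello", key)   # Fernet token as str

assert Code.decrypt_symmetric(token, key) == "hello"
assert Code.decrypt_symmetric(token, key, to_str=False) == b"hello"
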
encrypt_asymmetric(text, public_key_str) staticmethod

Encrypts a text with a given public key.

Parameters:

    text (str): The text to encrypt. Required.
    public_key_str (str): The public key as a PEM-format string. Required.

Returns:

    str: The encrypted text as a hex string, or "Invalid" on error.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

    Returns:
        str: Der verschlüsselte Text.
    """
    # try:
    #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
    #  except Exception as e:
    #     get_logger().error(f"Error encrypt_asymmetric {e}")
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Encrypts a text with a given symmetric key.

Parameters:

    text (str | bytes): The text to encrypt. Required.
    key (str): The symmetric (Fernet) key. Required.

Returns:

    str: The encrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.

    Returns:
        str: Der verschlüsselte Text.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generates a public/private key pair for asymmetric encryption. Takes no
parameters; the key size is fixed at 6144 bits (2048 * 3).

Returns:

    (str, str): A tuple of PEM-encoded public and private key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

    Args:
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialisieren der Schlüssel
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key
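
The returned strings are ordinary PEM documents and can be written to disk or passed to the other helpers directly; a quick sanity-check sketch:

from toolboxv2.utils.security.cryp import Code

public_pem, private_pem = Code.generate_asymmetric_keys()
print(public_pem.splitlines()[0])    # -----BEGIN PUBLIC KEY-----
print(private_pem.splitlines()[0])   # -----BEGIN PRIVATE KEY-----
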
generate_seed() staticmethod

Generates a random number to use as a seed.

Returns:

    int: A random integer in the range [2**32 - 1, 2**64 - 1].

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_seed() -> int:
    """
    Erzeugt eine zufällige Zahl als Seed.

    Returns:
        int: Eine zufällige Zahl.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key() staticmethod

Generates a key for symmetric (Fernet) encryption.

Returns:

    str: The generated key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_symmetric_key() -> str:
    """
    Generiert einen Schlüssel für die symmetrische Verschlüsselung.

    Returns:
        str: Der generierte Schlüssel.
    """
    return Fernet.generate_key().decode()
load_keys_from_files(directory='keys') staticmethod

Loads the keys from their files. The private key is decrypted with the device key.

Parameters:

    directory (str): The directory to load the keys from. Default 'keys'.

Returns:

    (str, str): A tuple of public and private key, or ("", "") if the key
    files cannot be found.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Lädt die Schlüssel aus den Dateien.
    Der private Schlüssel wird mit dem Device Key entschlüsselt.

    Args:
        directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

    Returns:
        (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

    Raises:
        FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
    """
    # Pfade zu den Schlüsseldateien
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Prüfe ob die Dateien existieren
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Hole den Device Key
    device_key = DEVICE_KEY()

    # Lade den öffentlichen Schlüssel
    with open(public_key_path) as f:
        public_key = f.read()

    # Lade und entschlüssele den privaten Schlüssel
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key
one_way_hash(text, salt='', pepper='') staticmethod

Computes a SHA-256 hash of a given text combined with a salt and a pepper.

Parameters:

    text (str): The text to hash. Required.
    salt (str): The salt value. Default ''.
    pepper (str): The pepper value. Default ''.

Returns:

    str: The resulting hex-encoded hash value.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

    Args:
        text (str): Der zu hashende Text.
        salt (str): Der Salt-Wert.
        pepper (str): Der Pepper-Wert.
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        str: Der resultierende Hash-Wert.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
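
The hash is deterministic for a fixed (salt, text, pepper) triple, which is what makes it usable for lookups; note that a single SHA-256 pass is not a password-stretching KDF. A short sketch:

from toolboxv2.utils.security.cryp import Code

h1 = Code.one_way_hash("input", salt="per-user-salt", pepper="app-pepper")
h2 = Code.one_way_hash("input", salt="per-user-salt", pepper="app-pepper")

assert h1 == h2          # deterministic for the same inputs
assert len(h1) == 64     # SHA-256 hex digest
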
pem_to_public_key(pem_key) staticmethod

Converts a PEM-encoded public key into a PublicKey object.

Parameters:

    pem_key (str): The PEM-encoded public key. Required.

Returns:

    PublicKey: The PublicKey object.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

    Args:
        pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

    Returns:
        PublicKey: Das PublicKey-Objekt.
    """
    public_key = serialization.load_pem_public_key(pem_key.encode())
    return public_key
public_key_to_pem(public_key) staticmethod

Converts a PublicKey object into a PEM-encoded string.

Parameters:

    public_key (RSAPublicKey): The PublicKey object. Required.

Returns:

    str: The PEM-encoded public key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

    Args:
        public_key (PublicKey): Das PublicKey-Objekt.

    Returns:
        str: Der PEM-kodierte öffentliche Schlüssel.
    """
    pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Saves the generated keys to separate files. The private key is encrypted with the device key.

Parameters:

    public_key (str): The public key in PEM format. Required.
    private_key (str): The private key in PEM format. Required.
    directory (str): The directory in which to store the keys. Default 'keys'.
Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Speichert die generierten Schlüssel in separate Dateien.
    Der private Schlüssel wird mit dem Device Key verschlüsselt.

    Args:
        public_key (str): Der öffentliche Schlüssel im PEM-Format
        private_key (str): Der private Schlüssel im PEM-Format
        directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
    """
    # Erstelle das Verzeichnis, falls es nicht existiert
    os.makedirs(directory, exist_ok=True)

    # Hole den Device Key
    device_key = DEVICE_KEY()

    # Verschlüssele den privaten Schlüssel mit dem Device Key
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    # Speichere den öffentlichen Schlüssel
    public_key_path = os.path.join(directory, "public_key.pem")
    with open(public_key_path, "w") as f:
        f.write(public_key)

    # Speichere den verschlüsselten privaten Schlüssel
    private_key_path = os.path.join(directory, "private_key.pem")
    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)

singelton_class

Singleton

Singleton metaclass for ensuring only one instance of a class.

Source code in toolboxv2/utils/singelton_class.py
class Singleton(type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}
    _kwargs = {}
    _args = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
            cls._args[cls] = args
            cls._kwargs[cls] = kwargs
        return cls._instances[cls]
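
Usage sketch for the metaclass; constructor arguments passed after the first
instantiation are ignored, because the cached first instance is returned as-is.

from toolboxv2.utils.singelton_class import Singleton

class Config(metaclass=Singleton):
    def __init__(self, env="prod"):
        self.env = env

a = Config(env="dev")
b = Config(env="test")      # ignored: the first instance is returned

assert a is b
assert b.env == "dev"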

system

AppType
Source code in toolboxv2/utils/system/types.py
class AppType:
    prefix: str
    id: str
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    def __init__(self, prefix: None | str = None, args: AppArgs | None = None):
        self.args_sto = args
        self.prefix = prefix
        """proxy attr"""

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxy attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxy attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxy attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxy attr"""

    def set_logger(self, debug=False):
        """proxy attr"""

    @property
    def debug(self):
        """proxy attr"""
        return self._debug

    def debug_rains(self, e):
        """proxy attr"""

    def set_flows(self, r):
        """proxy attr"""

    def run_flows(self, name, **kwargs):
        """proxy attr"""

    def rrun_flows(self, name, **kwargs):
        """proxy attr"""

    def idle(self):
        import time
        self.print("idle")
        try:
            while self.alive:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        self.print("a idle")
        try:
            if hasattr(self, 'daemon_app'):
                self.print("serving daemon")
                await self.daemon_app.connect(self)
            else:
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxy attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxy attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxy attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxy attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxy attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxy attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxy attr"""

    def mod_online(self, mod_name, installed=False):
        """proxy attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxy attr"""

    def save_exit(self):
        """proxy attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxy attr"""

    async def init_module(self, modular):
        return await self.load_mod(modular)

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxy attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxy attr"""

    def remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    def print_ok(self):
        """proxy attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxy attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxy attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxy attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxy attr"""

    def exit(self):
        """proxy attr"""

    def web_context(self) -> str:
        """returns the built index (toolbox web component)"""

    async def a_exit(self):
        """proxy attr"""

    def save_load(self, modname, spec='app'):
        """proxy attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs forwarded to _get_function:
            metadata (bool): return the registered function dictionary;
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state (bool)
            specification (str): defaults to "app"
        """

    def run_a_from_sync(self, function, *args):
        """
        Runs an async function from synchronous code.
        """

    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxy attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxy attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        proxy attr; reads from function_data:
            parameters = function_data.get('params')
            modular_name = function_data.get('module_name')
            function_name = function_data.get('func_name')
            mod_function_name = f"{modular_name}.{function_name}"
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        proxy attr; reads from function_data:
            parameters = function_data.get('params')
            modular_name = function_data.get('module_name')
            function_name = function_data.get('func_name')
            mod_function_name = f"{modular_name}.{function_name}"
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remotely via HTTP/HTTPS"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxy attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxy attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxy attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxy attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxy attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300):
        """proxi attr"""

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to get request if the fuction is calld from api.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)
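
    # Illustrative usage, not part of the source: registering a module
    # function through this decorator, assuming an `app` instance obtained
    # via toolboxv2.get_app() and a module named "my_mod":
    #
    #     @app.tb(mod_name="my_mod", api=True, version="1.0.0")
    #     def greet(name: str):
    #         return f"Hello {name}"
    #
    # With api=True the function is also exposed over HTTP; api_methods
    # defaults to ["AUTO"], i.e. GET when the function takes no params,
    # POST otherwise.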

    def print_functions(self, name=None):
        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                func_level = 'r' if data['level'] == -1 else data['level']
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is not None:
                print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
                helper(functions)
                return
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """proxy attr"""

    def get_autocompletion_dict(self):
        """proxy attr"""

    def get_username(self, get_input=False, default="loot") -> str:
        """proxy attr"""

    def save_registry_as_enums(self, directory: str, filename: str):
        """proxy attr"""

    async def execute_all_functions_(self, m_query='', f_query=''):
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},
                "coverage": [0, 0],
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test: list = function_data.get('do_test')
                    # print(test, module_name, function_name, function_data)
                    infos["coverage"][0] += 1
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfunction {function_name}..."):
                        params: list = function_data.get('params')
                        sig: signature = function_data.get('signature')
                        state: bool = function_data.get('state')
                        samples: bool = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                            # print(test_kwargs)
                            # print(test_kwargs[0])
                            # test_kwargs = test_kwargs_list[0]
                        # print(module_name, function_name, test_kwargs_list)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                # print(f"test Running {state=} |{module_name}.{function_name}")
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        total_coverage = sum([float(t.split(":")[-1]) for t in all_data["coverage"]]) / len(all_data["coverage"])
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    @staticmethod
    def calculate_complexity(filename_or_code):
        from radon.complexity import cc_rank, cc_visit
        if os.path.exists(filename_or_code):
            with open(filename_or_code) as file:
                code = file.read()
        else:
            code = filename_or_code

        # Calculate and print Cyclomatic Complexity
        complexity_results = cc_visit(code)
        i = -1
        avg_complexity = 0
        for block in complexity_results:
            complexity = block.complexity
            i += 1
            print(f"block: {block.name} {i} Class/Fuction/Methode : {block.letter}")
            print(f"    fullname: {block.fullname}")
            print(f"    Cyclomatic Complexity: {complexity}")
            # Optional: Get complexity rank
            avg_complexity += complexity
            rank = cc_rank(complexity)
            print(f"    Complexity Rank: {rank}")
            # print(f"    lineno: {block.lineno}")
            print(f"    endline: {block.endline}")
            print(f"    col_offset: {block.col_offset}\n")
        if i <= 0:
            i += 2
        avg_complexity = avg_complexity / i
        print(f"\nAVG Complexity: {avg_complexity:.2f}")
        print(f"Total Rank: {cc_rank(int(avg_complexity + i // 10))}")

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        start_time = time.time()
        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            try:
                result = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )

                if not isinstance(result, Result):
                    result = Result.ok(result)

                success = result.info.exec_code == 0
                execution_time = time.time() - start_time
                return success, str(result), test_kwargs, execution_time
            except Exception as e:
                execution_time = time.time() - start_time
                return False, str(e), test_kwargs, execution_time

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # Bound concurrency to the CPU count: each task must acquire the
            # semaphore individually for the limit to take effect.
            semaphore = asyncio.Semaphore(mp.cpu_count())

            async def bounded(coro):
                async with semaphore:
                    return await coro

            tasks = [
                bounded(self.process_function(module_name, fname, fdata, profiler))
                for fname, fdata in functions.items()
                if fname.startswith(f_query)
            ]

            if not tasks:
                return module_name, ModuleInfo()

            results = await asyncio.gather(*tasks)

            # Combine results from all functions in the module
            combined_info = ModuleInfo()
            total_execution_time = 0

            for _, info in results:
                combined_info.functions_run += info.functions_run
                combined_info.functions_fatal_error += info.functions_fatal_error
                combined_info.error += info.error
                combined_info.functions_sug += info.functions_sug
                combined_info.calls.update(info.calls)
                combined_info.callse.update(info.callse)
                combined_info.coverage[0] += info.coverage[0]
                combined_info.coverage[1] += info.coverage[1]
                total_execution_time += info.execution_time

            combined_info.execution_time = time.time() - start_time
            return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name query filter
            f_query (str): Function name query filter
            enable_profiling (bool): Enable detailed profiling information
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        items = list(self.functions.items()).copy()

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # Process all modules concurrently; bound parallelism with a
            # semaphore that each task acquires individually.
            semaphore = asyncio.Semaphore(mp.cpu_count())

            async def bounded(coro):
                async with semaphore:
                    return await coro

            tasks = [
                bounded(self.process_module(mname, mfuncs, f_query, profiler))
                for mname, mfuncs in filtered_modules
            ]

            results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Calculate coverage
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Store module info
                stats.__dict__[module_name] = info

            # Calculate total coverage
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                # `ps` writes into the StringIO buffer above; build a stats
                # object on stdout so the summary is actually visible.
                pstats.Stats(profiler).sort_stats('cumulative').print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)
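
Taken together, the entries below form a thin proxy over the live App instance. A short illustrative tour (a minimal sketch; every module and function name here is hypothetical):

from toolboxv2 import get_app

app = get_app(from_="docs.example")

# Load or fetch a module instance ("MyMod" is a placeholder name)
mod = app.get_mod("MyMod")

# Dispatch by (module_name, function_name) tuple, synchronously or via the
# sync-to-async bridge
res = app.run_any(("MyMod", "hello"), name="World")
res = app.run_a_from_sync(app.a_run_any, ("MyMod", "hello"))

# Metadata lookup, following the reading of the get_function docstring below
(function_data, fn), exec_code = app.get_function(("MyMod", "hello"), metadata=True, state=False)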
debug property writable

Proxy attribute.

prefix = prefix instance-attribute

Proxy attribute.

a_exit() async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_exit(self):
    """proxi attr"""
a_fuction_runner(function, function_data, args, kwargs) async

parameters = function_data.get('params')
modular_name = function_data.get('module_name')
function_name = function_data.get('func_name')
mod_function_name = f"{modular_name}.{function_name}"

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
a_remove_mod(mod_name, spec='app', delete=True) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """proxi attr"""
a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """proxi attr"""
debug_rains(e)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def debug_rains(self, e):
    """proxi attr"""
disconnect(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def disconnect(*args, **kwargs):
    """proxi attr"""
execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

Name              Type  Description                            Default
m_query           str   Module name query filter               ''
f_query           str   Function name query filter             ''
enable_profiling  bool  Enable detailed profiling information  True

Source code in toolboxv2/utils/system/types.py
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name query filter
        f_query (str): Function name query filter
        enable_profiling (bool): Enable detailed profiling information
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    items = list(self.functions.items()).copy()

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # Process all modules concurrently; bound parallelism with a
        # semaphore that each task acquires individually.
        semaphore = asyncio.Semaphore(mp.cpu_count())

        async def bounded(coro):
            async with semaphore:
                return await coro

        tasks = [
            bounded(self.process_module(mname, mfuncs, f_query, profiler))
            for mname, mfuncs in filtered_modules
        ]

        results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Calculate coverage
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Store module info
            stats.__dict__[module_name] = info

        # Calculate total coverage
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            # `ps` writes into the StringIO buffer above; build a stats
            # object on stdout so the summary is actually visible.
            pstats.Stats(profiler).sort_stats('cumulative').print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)
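
A usage sketch for the parallel test runner (the "Welcome" filter value is illustrative; the returned Result carries the stats dictionary as its data):

import asyncio
from toolboxv2 import get_app

async def main():
    app = get_app(from_="docs.example")
    # Only modules whose name starts with "Welcome" are executed
    result = await app.execute_all_functions(m_query="Welcome", enable_profiling=False)
    stats = result.get()
    print(stats["modular_run"], "modules processed,", stats["errors"], "errors")

asyncio.run(main())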
exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def exit(self):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
fuction_runner(function, function_data, args, kwargs, t0=0.0)

parameters = function_data.get('params')
modular_name = function_data.get('module_name')
function_name = function_data.get('func_name')
mod_function_name = f"{modular_name}.{function_name}"

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
get_all_mods(working_dir='mods', path_to='./runtime')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """proxi attr"""
get_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_autocompletion_dict(self):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function:

    metadata:: return the registered function dictionary
        stateless: (function_data, None), 0
        stateful: (function_data, higher_order_function), 0
    state::boolean
        specification::str default app

Source code in toolboxv2/utils/system/types.py
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
get_mod(name, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """proxi attr"""
get_username(get_input=False, default='loot')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_username(self, get_input=False, default="loot") -> str:
    """proxi attr"""
hide_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def hide_console(*args, **kwargs):
    """proxi attr"""
inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """proxi attr"""
load_all_mods_in_file(working_dir='mods') async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def load_all_mods_in_file(self, working_dir="mods"):
    """proxi attr"""
load_mod(mod_name, mlm='I', **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """proxi attr"""
mod_online(mod_name, installed=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def mod_online(self, mod_name, installed=False):
    """proxi attr"""
print(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def print(text, *args, **kwargs):
    """proxi attr"""
print_ok()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def print_ok(self):
    """proxi attr"""
    self.logger.info("OK")
reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """proxi attr"""
remove_mod(mod_name, spec='app', delete=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
rrun_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def rrun_flows(self, name, **kwargs):
    """proxi attr"""
run_a_from_sync(function, *args)

Run an async function from synchronous code.

Source code in toolboxv2/utils/system/types.py
def run_a_from_sync(self, function, *args):
    """
    run a async fuction
    """
run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """proxi attr"""
run_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_flows(self, name, **kwargs):
    """proxi attr"""
run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """proxi attr"""
run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

Run a function remotely via HTTP/HTTPS.

Source code in toolboxv2/utils/system/types.py
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """run a function remote via http / https"""
save_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_autocompletion_dict(self):
    """proxi attr"""
save_exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_exit(self):
    """proxi attr"""
save_initialized_module(tools_class, spec)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_initialized_module(self, tools_class, spec):
    """proxi attr"""
save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """proxi attr"""
save_load(modname, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_load(self, modname, spec='app'):
    """proxi attr"""
save_registry_as_enums(directory, filename)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_registry_as_enums(self, directory: str, filename: str):
    """proxi attr"""
set_flows(r)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_flows(self, r):
    """proxi attr"""
set_logger(debug=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_logger(self, debug=False):
    """proxi attr"""
show_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def show_console(*args, **kwargs):
    """proxi attr"""
sprint(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

name (str, default None)
    The name to register the function under. Defaults to the function's own name.
mod_name (str, default '')
    The name of the module the function belongs to.
helper (str, default '')
    A helper string providing additional information about the function.
version (str or None, default None)
    The version of the function or module.
test (bool, default True)
    Flag to indicate if the function is for testing purposes.
restrict_in_virtual_mode (bool, default False)
    Flag to restrict the function in virtual mode.
api (bool, default False)
    Flag to indicate if the function is part of an API.
initial (bool, default False)
    Flag to indicate if the function should be executed at initialization.
exit_f (bool, default False)
    Flag to indicate if the function should be executed at exit.
test_only (bool, default False)
    Flag to indicate if the function should only be used for testing.
memory_cache (bool, default False)
    Flag to enable memory caching for the function.
request_as_kwarg (bool, default False)
    Flag to pass the request object as a keyword argument when the function is called via the API.
file_cache (bool, default False)
    Flag to enable file caching for the function.
row (bool, default False)
    Whether to return the raw return value instead of auto-wrapping it in a Result; the default False wraps results in a Result.
state (bool or None, default None)
    Flag to indicate if the function maintains state.
level (int, default 0)
    The level of the function, used for prioritization or categorization.
memory_cache_max_size (int, default 100)
    Maximum size of the memory cache.
memory_cache_ttl (int, default 300)
    Time-to-live for the memory cache entries.
samples (list or dict or None, default None)
    Samples or examples of function usage.
interface (str, default None)
    The interface type for the function.
pre_compute (callable, default None)
    A function to be called before the main function.
post_compute (callable, default None)
    A function to be called after the main function.
api_methods (list[str], default None)
    Defaults to ["AUTO"] (GET if the function takes no parameters, POST if it does); may also list GET, POST, PUT, or DELETE.

Returns:

function
    The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to get request if the fuction is calld from api.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
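
A registration sketch (the module name, function body, and sample values are illustrative; samples feeds the test runner documented under execute_all_functions):

from toolboxv2 import Result, get_app

app = get_app(from_="docs.example")

@app.tb(mod_name="MyMod", api=True, version="0.1.0", samples=[{"name": "World"}])
def hello(name: str) -> Result:
    # Registered as MyMod.hello; exposed over HTTP because api=True
    return Result.ok(data=f"Hello {name}")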
watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """proxi attr"""
web_context()

Returns the build index (toolbox web component).

Source code in toolboxv2/utils/system/types.py
def web_context(self) -> str:
    """returns the build index ( toolbox web component )"""
MainTool
Source code in toolboxv2/utils/system/main_tool.py
class MainTool:
    toolID: str = ""
    # app = None
    interface = None
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        self.__storedargs = args, kwargs
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", None))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):

        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    def get_version(self) -> str:
        """"Returns the version"""
        return self.version

    async def get_user(self, username: str) -> Result:
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()
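
A minimal sketch of the awaitable-constructor pattern (the subclass is hypothetical and a configured toolboxv2 app is assumed; awaiting the instance runs __await__ -> __initobj -> __ainit__):

import asyncio
from toolboxv2.utils.system.main_tool import MainTool

class EchoTool(MainTool):
    async def __ainit__(self, *args, **kwargs):
        # Async setup belongs here, never in __init__
        await super().__ainit__(*args, **kwargs)

async def main():
    tool = await EchoTool(v="0.1.0", name="echo")
    print(tool.async_initialized)  # True once __ainit__ has run

asyncio.run(main())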
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/system/main_tool.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for arguments pass
    Do not override. Use __ainit__ instead
    """
    self.__storedargs = args, kwargs
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/system/main_tool.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

Returns the version.

Source code in toolboxv2/utils/system/main_tool.py
def get_version(self) -> str:
    """"Returns the version"""
    return self.version
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/main_tool.py
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""
MainToolType
Source code in toolboxv2/utils/system/types.py
class MainToolType:
    toolID: str
    app: A
    interface: ToolBoxInterfaces
    spec: str

    version: str
    tools: dict  # legacy
    name: str
    logger: logging
    color: str
    todo: Callable
    _on_exit: Callable
    stuf: bool
    config: dict
    user: U | None
    description: str

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None) -> Result:
        """proxi attr"""

    def load(self):
        """proxi attr"""

    def print(self, message, end="\n", **kwargs):
        """proxi attr"""

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    async def get_user(self, username: str) -> Result:
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)
load()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def load(self):
    """proxi attr"""
print(message, end='\n', **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def print(self, message, end="\n", **kwargs):
    """proxi attr"""
return_result(error=ToolBoxError.none, exec_code=0, help_text='', data_info=None, data=None, data_to=None) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def return_result(error: ToolBoxError = ToolBoxError.none,
                  exec_code: int = 0,
                  help_text: str = "",
                  data_info=None,
                  data=None,
                  data_to=None) -> Result:
    """proxi attr"""
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/types.py
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""
Result
Source code in toolboxv2/utils/system/types.py
class Result:
    _task = None
    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        return self

    def as_dict(self):
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # map exec_code to HTTP response codes
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        if self.info.exec_code == 200:
            return False
        return True

    def is_data(self):
        return self.result.data is not None

    def to_api_result(self):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # map exec_code to HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list or None or str):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: Union[dict, None] = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Union[
                   Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Union[
                Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create an Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type."""
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        # The probe string must match the wrapper emitted below, and the
        # wrapper div must actually be closed.
        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and get_app().web_context() not in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        data = '\n' + f"{((prifix + 'Data: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{data if not data.endswith('NO Data') else ''}")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "\n------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

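    # Flag semantics for lazy_return below (derived from the code):
    #   'raise'  -> raise ValueError with the printed result on non-zero exec_code
    #   'logg'   -> log the error, then fall through to the final return
    #   'user'   -> wrap data as a default_user_error
    #   'intern' -> wrap data as a default_internal_error
    # An integer _ indexes into ['raise', 'logg', 'user', 'intern'].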
    def lazy_return(self, _=0, data=None, **kwargs):
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        return self._task
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)
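A minimal usage sketch (assuming Result is importable from the toolboxv2 package, as elsewhere in this reference; the payload is illustrative):

from toolboxv2 import Result

payload = b"%PDF-1.4 ..."  # illustrative raw bytes
res = Result.binary(payload, content_type="application/pdf", download_name="report.pdf")
# res.result.data is {"data": payload, "content_type": "application/pdf", "filename": "report.pdf"}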
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

    data: File data as bytes or base64 string (required)
    filename: Name of the file for download (required)
    content_type: MIME type of the file (auto-detected if None) (default: None)
    info: Response info text (default: 'OK')
    interface: Target interface (default: remote)

Returns:

    Result object configured for file download

Source code in toolboxv2/utils/system/types.py
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
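A short usage sketch (same import assumption; the file name is illustrative). Passing bytes triggers base64 encoding and MIME auto-detection:

from toolboxv2 import Result

with open("export.csv", "rb") as f:
    res = Result.file(f.read(), filename="export.csv")  # content_type guessed as text/csv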
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)
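For example (a minimal sketch, same import assumption):

from toolboxv2 import Result

res = Result.json({"status": "ok", "items": [1, 2, 3]})
print(res.get("status"))  # "ok" -- Result.get() indexes into dict payloads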
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)
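Example (sketch; the target paths are illustrative):

from toolboxv2 import Result

res = Result.redirect("/web/login")              # temporary redirect (302)
res = Result.redirect("/web/", status_code=301)  # permanent redirect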
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

    stream_generator (Any): A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event. (required)
    info (str): Optional help text for the Result. (default: 'OK')
    interface (ToolBoxInterfaces): Optional ToolBoxInterface to target. (default: remote)
    cleanup_func (Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None]): Optional cleanup function to run when the stream ends or is cancelled. (default: None)
    http_headers: Optional dictionary of custom HTTP headers for the SSE response (commented out in the source and not currently accepted).

Returns:

    A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Union[
            Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
        #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
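A sketch of streaming from an async generator (the generator body and event payloads are illustrative):

import asyncio

from toolboxv2 import Result

async def ticker():
    for i in range(3):
        yield {"tick": i}        # each item becomes one SSE event
        await asyncio.sleep(1.0)

res = Result.sse(ticker(), cleanup_func=lambda: print("stream closed"))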
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

    stream_generator (Any): Any stream source (async generator, sync generator, iterable, or single item). (required)
    content_type (str): Content-Type header (default: 'text/event-stream' for SSE).
    headers (Union[dict, None]): Additional HTTP headers for the response. (default: None)
    info (str): Help text for the result. (default: 'OK')
    interface (ToolBoxInterfaces): Interface to send data to. (default: remote)
    cleanup_func (Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None]): Optional function for cleanup. (default: None)

Returns:

    A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: Union[dict, None] = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Union[
               Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
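For a non-SSE stream, a plain sync generator is accepted and wrapped into an async one (sketch; header values are illustrative):

from toolboxv2 import Result

def chunks():
    yield "part 1\n"
    yield "part 2\n"

res = Result.stream(chunks(), content_type="text/plain", headers={"X-Demo": "1"})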
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
@classmethod
def text(cls, text_data, content_type="text/plain", exec_code=None, status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with specific content type."""
    if headers is not None:
        return cls.html(text_data, status=exec_code or status, info=info, headers=headers)
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )

    return cls(error=error, info=info_obj, result=result)
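Example (sketch). Note that passing headers routes the call through Result.html, as the code above shows:

from toolboxv2 import Result

res = Result.text("<h1>Hi</h1>", content_type="text/html")
res = Result.text("<h1>Hi</h1>", headers={"X-Frame-Options": "DENY"})  # delegates to Result.html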
all_functions_enums

Automatically generated by ToolBox v0.1.21

api
build_cargo_project(debug=False)

Build the Cargo project, optionally in debug mode.

Source code in toolboxv2/utils/system/api.py
def build_cargo_project(debug=False):
    """Build the Cargo project, optionally in debug mode."""
    mode = "debug" if debug else "release"
    args = ["cargo", "build"]
    if not debug:
        args.append("--release")

    print(f"Building in {mode} mode...")
    try:
        subprocess.run(args, cwd=os.path.join(".", "src-core"), check=True)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Cargo build failed: {e}")
        return False
check_and_run_local_release(do_run=True)

Search for a pre-built release executable in the src-core folder and run it if found.

Source code in toolboxv2/utils/system/api.py
def check_and_run_local_release(do_run=True):
    """Search for a pre-built release executable in the src-core folder and run it if found."""
    src_core_path = os.path.join(".", "src-core")
    if os.path.isdir(src_core_path):
        # Define the path to the expected release executable, assuming a Cargo project structure
        expected_name = "simple-core-server.exe" if platform.system().lower() == "windows" else "simple-core-server"
        release_path = os.path.join(src_core_path, expected_name)
        if os.path.isfile(release_path):
            print("Found pre-built release executable.")
            return release_path if not do_run else run_executable(release_path)
        release_path = os.path.join(src_core_path, "target", "release", expected_name)
        if os.path.isfile(release_path):
            print("Found pre-built release executable.")
            # Move the executable from target/release to src_core_path for easier access next time
            dest_path = os.path.join(src_core_path, expected_name)
            try:
                import shutil
                shutil.copy2(release_path, dest_path)
                print(f"Copied executable to {dest_path} for easier access next time")
            except Exception as e:
                print(f"Failed to copy executable: {e}")
                return False
            if do_run:
                run_executable(dest_path)
            else:
                return dest_path
            return True
    return False
check_cargo_installed()

Check if Cargo (Rust package manager) is installed on the system.

Source code in toolboxv2/utils/system/api.py
def check_cargo_installed():
    """Check if Cargo (Rust package manager) is installed on the system."""
    try:
        subprocess.run(["cargo", "--version"], check=True, capture_output=True)
        return True
    except Exception:
        return False
cleanup_build_files()

Cleans up build files.

Source code in toolboxv2/utils/system/api.py
def cleanup_build_files():
    """Cleans up build files."""
    src_core_path = os.path.join(".", "src-core")
    target_path = os.path.join(src_core_path, "target")

    if os.path.exists(target_path):
        try:
            print(f"Cleaning up build files in {target_path}...")
            # First try using cargo clean
            try:
                subprocess.run(["cargo", "clean"], cwd=src_core_path, check=True)
                print("Successfully cleaned up build files with cargo clean")
            except subprocess.CalledProcessError:
                # If cargo clean fails, manually remove directories
                print("Cargo clean failed, manually removing build directories...")
                for item in os.listdir(target_path):
                    item_path = os.path.join(target_path, item)
                    if os.path.isdir(item_path) and item != ".rustc_info.json":
                        shutil.rmtree(item_path)
                        print(f"Removed {item_path}")
            return True
        except Exception as e:
            print(f"Failed to clean up build files: {e}")
            return False
    else:
        print(f"Build directory {target_path} not found")
        return True
create_dill_archive(site_packages, output_file='python312.dill')

Package dill and all dependencies into a single .dill archive.

Source code in toolboxv2/utils/system/api.py
def create_dill_archive(site_packages, output_file="python312.dill"):
    """Package dill and all dependencies into a single .dill archive."""
    try:
        temp_dir = "/tmp/dill_package"
        os.makedirs(temp_dir, exist_ok=True)

        # Copy only necessary packages
        packages = ["dill"]
        for package in packages:
            package_path = os.path.join(site_packages, package)
            if os.path.exists(package_path):
                shutil.copytree(package_path, os.path.join(temp_dir, package), dirs_exist_ok=True)
            else:
                print(f"Warning: {package} not found in site-packages.")

        # Create the .dill archive
        with tarfile.open(output_file, "w:gz") as tar:
            tar.add(temp_dir, arcname=".")

        print(f"Successfully created {output_file}")

        # Clean up
        shutil.rmtree(temp_dir)

    except Exception as e:
        print(f"Error creating .dill archive: {e}")
detect_os_and_arch()

Detect the current operating system and architecture.

Source code in toolboxv2/utils/system/api.py
def detect_os_and_arch():
    """Detect the current operating system and architecture."""
    current_os = platform.system().lower()  # e.g., 'windows', 'linux', 'darwin'
    machine = platform.machine().lower()  # e.g., 'x86_64', 'amd64'
    return current_os, machine
download_executable(url, file_name)

Attempt to download the executable from the provided URL.

Source code in toolboxv2/utils/system/api.py
def download_executable(url, file_name):
    """Attempt to download the executable from the provided URL."""
    try:
        import requests
    except ImportError:
        print("The 'requests' library is required. Please install it via pip install requests")
        sys.exit(1)

    print(f"Attempting to download executable from {url}...")
    try:
        response = requests.get(url, stream=True)
    except Exception as e:
        print(f"Download error: {e}")
        return None

    if response.status_code == 200:
        with open(file_name, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)
        # Make the file executable on non-Windows systems
        if platform.system().lower() != "windows":
            os.chmod(file_name, 0o755)
        return file_name
    else:
        print("Download failed. Status code:", response.status_code)
        return None
ensure_socket_and_fd_file_posix(host, port, backlog, fd_file_path)

POSIX: Ensures a listening socket exists and its FD is in the fd_file.

Source code in toolboxv2/utils/system/api.py
def ensure_socket_and_fd_file_posix(host, port, backlog, fd_file_path) -> tuple[socket.socket | None, int | None]:
    """POSIX: Ensures a listening socket exists and its FD is in the fd_file."""
    # ALWAYS remove the old FD file if it exists. This script invocation
    # will create its own socket. This means any previous server using that
    # address must be stopped, or we'll get AddrInUse.
    if os.path.exists(fd_file_path):
        print(f"[POSIX] Stale FD file {fd_file_path} found. Removing to create a new socket.")
        try:
            os.remove(fd_file_path)
        except OSError as e:
            # This is not ideal, as binding might fail if the port is still in use
            # by whatever process was associated with the old FD.
            print(f"[POSIX] Warning: Could not remove old FD file {fd_file_path}: {e}")

    # The rest of your socket creation logic
    try:
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        fd_num = server_socket.fileno()
        if hasattr(os, 'set_inheritable'): # Python 3.4+
            os.set_inheritable(fd_num, True)
        else: # POSIX, Python < 3.4 (fcntl not on Windows)
            import fcntl # Import fcntl here as it's POSIX specific
            flags = fcntl.fcntl(fd_num, fcntl.F_GETFD)
            fcntl.fcntl(fd_num, fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC) # Ensure inheritable

        server_socket.bind((host, port))
        server_socket.listen(backlog)
        # FD is now valid and owned by this Python process
        with open(fd_file_path, 'w') as f: f.write(str(fd_num))
        os.chmod(fd_file_path, 0o600) # Restrictive permissions
        print(f"[POSIX] Created new socket. FD {fd_num} saved to {fd_file_path}.")
        return server_socket, fd_num
    except Exception as e:
        print(f"[POSIX] Fatal: Could not create and save listening socket FD: {e}")
        if 'server_socket' in locals():
            server_socket.close()
        return None, None
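How this fits together with start_rust_server_posix (documented below); host, port, backlog, and paths are illustrative:

sock, fd = ensure_socket_and_fd_file_posix("0.0.0.0", 8080, 128, "./server.fd")
if sock is not None:
    # The Python process keeps the socket open; the Rust child inherits the FD
    # via pass_fds and reads PERSISTENT_LISTENER_FD from its environment.
    proc = start_rust_server_posix("./src-core/simple-core-server", fd)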
find_highest_zip_version(name_filter, app_version=None, root_dir='mods_sto', version_only=False)

Finds the highest available ZIP version in a directory based on a name filter.

Parameters:

    root_dir (str): Root directory for the search (default: 'mods_sto')
    name_filter (str): Name filter for the ZIP files (required)
    app_version (str): Current app version for the compatibility check (default: None)

Returns:

    str: Path to the ZIP file with the highest version, or None if none was found

Source code in toolboxv2/utils/system/api.py
def find_highest_zip_version(name_filter: str, app_version: str = None, root_dir: str = "mods_sto", version_only=False) -> str:
    """
    Findet die höchste verfügbare ZIP-Version in einem Verzeichnis basierend auf einem Namensfilter.

    Args:
        root_dir (str): Wurzelverzeichnis für die Suche
        name_filter (str): Namensfilter für die ZIP-Dateien
        app_version (str, optional): Aktuelle App-Version für Kompatibilitätsprüfung

    Returns:
        str: Pfad zur ZIP-Datei mit der höchsten Version oder None wenn keine gefunden
    """

    # Compile the regex pattern for the file names
    pattern = fr"{name_filter}&v[0-9.]+§([0-9.]+)\.zip$"

    highest_version = None
    highest_version_file = None

    # Search the directory
    root_path = Path(root_dir)
    for file_path in root_path.rglob("*.zip"):
        if "RST$"+name_filter not in str(file_path):
            continue
        match = re.search(pattern, str(file_path).split("RST$")[-1].strip())
        if match:
            zip_version = match.group(1)

            # Check app-version compatibility if specified
            if app_version:
                file_app_version = re.search(r"&v([0-9.]+)§", str(file_path)).group(1)
                if version.parse(file_app_version) > version.parse(app_version):
                    continue

            # Compare versions
            current_version = version.parse(zip_version)
            if highest_version is None or current_version > highest_version:
                highest_version = current_version
                highest_version_file = str(file_path)
    if version_only:
        return str(highest_version)
    return highest_version_file
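The file names this matches follow the pattern RST$<name_filter>&v<app_version>§<zip_version>.zip. An illustrative call:

# Given files such as:
#   mods_sto/RST$CloudM&v0.1.21§1.0.0.zip
#   mods_sto/RST$CloudM&v0.1.21§1.2.0.zip
path = find_highest_zip_version("CloudM", app_version="0.1.21")
# -> path of the §1.2.0 archive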
find_highest_zip_version_entry(name, target_app_version=None, filepath='tbState.yaml')

Finds the entry with the highest ZIP version for a given name and an optional target app version in a YAML file.

:param name: The name of the entry to search for.
:param target_app_version: The target app version as a string (optional).
:param filepath: The path to the YAML file.
:return: The entry with the highest ZIP version within the target app version, or None if not found.

Source code in toolboxv2/utils/system/api.py
def find_highest_zip_version_entry(name, target_app_version=None, filepath='tbState.yaml'):
    """
    Findet den Eintrag mit der höchsten ZIP-Version für einen gegebenen Namen und eine optionale Ziel-App-Version in einer YAML-Datei.

    :param name: Der Name des gesuchten Eintrags.
    :param target_app_version: Die Zielversion der App als String (optional).
    :param filepath: Der Pfad zur YAML-Datei.
    :return: Den Eintrag mit der höchsten ZIP-Version innerhalb der Ziel-App-Version oder None, falls nicht gefunden.
    """
    import yaml
    highest_zip_ver = None
    highest_entry = {}

    with open(filepath) as file:
        data = yaml.safe_load(file)
        # print(data)
        app_ver_h = None
        for key, value in list(data.get('installable', {}).items())[::-1]:
            # Check whether the name is contained in the key

            if name in key:
                v = value['version']
                if len(v) == 1:
                    app_ver = v[0].split('v')[-1]
                    zip_ver = "0.0.0"
                else:
                    app_ver, zip_ver = v
                    app_ver = app_ver.split('v')[-1]
                app_ver = version.parse(app_ver)
                # If a target app version is given, compare it
                if target_app_version is None or app_ver == version.parse(target_app_version):
                    current_zip_ver = version.parse(zip_ver)
                    # print(current_zip_ver, highest_zip_ver)

                    if highest_zip_ver is None or current_zip_ver > highest_zip_ver:
                        highest_zip_ver = current_zip_ver
                        highest_entry = value

                    if app_ver_h is None or app_ver > app_ver_h:
                        app_ver_h = app_ver
                        highest_zip_ver = current_zip_ver
                        highest_entry = value
    return highest_entry
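A sketch of the tbState.yaml shape this function expects (keys and versions are illustrative):

# installable:
#   "RST$CloudM&v0.1.21§1.2.0.zip":
#     version: ["v0.1.21", "1.2.0"]   # [app version, zip version]
entry = find_highest_zip_version_entry("CloudM", target_app_version="0.1.21")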
get_uv_site_packages()

Find the site-packages directory for a uv-managed virtual environment.

Source code in toolboxv2/utils/system/api.py
def get_uv_site_packages():
    """Find the site-packages directory for a uv-managed virtual environment."""
    try:
        site_packages = subprocess.check_output(["uv", "info", "--json"], text=True)
        import json
        data = json.loads(site_packages)
        return data["venv"]["site_packages"]
    except Exception as e:
        print(f"Error finding uv site-packages: {e}")
        return None
is_uv_installed()

Check if uv is installed.

Source code in toolboxv2/utils/system/api.py
def is_uv_installed():
    """Check if uv is installed."""
    try:
        subprocess.run(["uv", "--version"], check=True, capture_output=True, text=True)
        return True
    except FileNotFoundError:
        return False
main_api_runner(debug=False, run=True)

Main function to run the API server. When debug=True, enables hot reloading and runs in debug mode.

Non-blocking!

Source code in toolboxv2/utils/system/api.py
def main_api_runner(debug=False, run=True):
    """
    Main function to run the API server.
    When debug=True, enables hot reloading and runs in debug mode.

    Non-blocking!
    """
    if not os.path.exists(os.getenv("PY_DILL", '.')):
        add_py_dill()
    if is_uv_installed():
        print(f"VIRTUAL_ENV=$ {os.getenv('VIRTUAL_ENV')} {os.getenv('PY_SITE_PACKAGES')}")
        os.environ["VIRTUAL_ENV"] = os.getenv('UV_BASE_ENV', os.getenv('VIRTUAL_ENV'))
        # os.environ["PY_SITE_PACKAGES"] = os.getenv('PY_SITE_PACKAGES')
    if debug:
        print("Starting in DEBUG mode with hot reloading enabled...")
        if check_cargo_installed():
            run_with_hot_reload()
        else:
            print("Cargo is not installed. Hot reloading requires Cargo.")
        return None

    # Release mode flow
    if exe := check_and_run_local_release(run):
        return exe

    # Step 1: Detect current OS and machine architecture
    current_os, machine = detect_os_and_arch()
    print(f"Detected OS: {current_os}, Architecture: {machine}")

    # Step 2: Attempt to download executable from remote URL
    url, file_name = query_executable_url(current_os, machine)
    downloaded_exe = download_executable(url, file_name)

    if downloaded_exe and run:
        print("Downloaded executable. Executing it...")
        run_executable(downloaded_exe)
        return None

    if downloaded_exe and not run:
        return downloaded_exe

    # Step 3: Fallback: Check for local pre-built release executable in src-core folder
    print("Remote executable not found. Searching local 'src-core' folder...")
    if exe := check_and_run_local_release():
        return exe
    else:
        print("Pre-built release executable not found locally.")

        # Step 4: If executable not found locally, check if Cargo is installed
        if not check_cargo_installed():

            print("Cargo is not installed. Please install Cargo to build the project.")
            return None

        print("Cargo is installed. Proceeding with build.")
        if not build_cargo_project(debug=False):

            print("Failed to build the Cargo project.")
            return None

        # After successful build, try running the release executable again
        if exe := check_and_run_local_release(run):
            return exe

        print("Release executable missing even after build.")
        return None
query_executable_url(current_os, machine)

Query a remote URL for a matching executable based on OS and architecture. The file name is built dynamically based on parameters.

Source code in toolboxv2/utils/system/api.py
def query_executable_url(current_os, machine):
    """
    Query a remote URL for a matching executable based on OS and architecture.
    The file name is built dynamically based on parameters.
    """
    base_url = "https://example.com/downloads"  # Replace with the actual URL
    # Windows executables have .exe extension
    if current_os == "windows":
        file_name = f"server_{current_os}_{machine}.exe"
    else:
        file_name = f"server_{current_os}_{machine}"
    full_url = f"{base_url}/{file_name}"
    return full_url, file_name
remove_release_executable()

Removes the release executable.

Source code in toolboxv2/utils/system/api.py
def remove_release_executable():
    """Removes the release executable."""
    src_core_path = os.path.join(".", "src-core")
    expected_name = "simple-core-server.exe" if platform.system().lower() == "windows" else "simple-core-server"

    # Remove from src-core root
    direct_path = os.path.join(src_core_path, expected_name)
    if os.path.exists(direct_path):
        try:
            os.remove(direct_path)
            print(f"Removed release executable: {direct_path}")
        except Exception as e:
            print(f"Failed to remove {direct_path}: {e}")

    # Remove from target/release
    release_path = os.path.join(src_core_path, "target", "release", expected_name)
    if os.path.exists(release_path):
        try:
            os.remove(release_path)
            print(f"Removed release executable: {release_path}")
        except Exception as e:
            print(f"Failed to remove {release_path}: {e}")

    return True
run_executable(file_path)

Run the executable file.

Source code in toolboxv2/utils/system/api.py
def run_executable(file_path):
    """Run the executable file."""
    try:
        print("Running it.")
        subprocess.run([os.path.abspath(file_path)], check=True)
    except subprocess.CalledProcessError as e:
        print(f"Failed to execute {file_path}: {e}")
    except KeyboardInterrupt:
        print("Exiting call from:", file_path)
run_in_debug_mode()

Run the Cargo project in debug mode.

Source code in toolboxv2/utils/system/api.py
def run_in_debug_mode():
    """Run the Cargo project in debug mode."""
    src_core_path = os.path.join(".", "src-core")
    print("Running in debug mode...")
    try:
        subprocess.run(["cargo", "run"], cwd=src_core_path)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Debug execution failed: {e}")
        return False
run_with_hot_reload()

Run the Cargo project with hot reloading.

Source code in toolboxv2/utils/system/api.py
def run_with_hot_reload():
    """Run the Cargo project with hot reloading."""
    src_core_path = os.path.join(".", "src-core")

    # Check if cargo-watch is installed
    try:
        subprocess.run(["cargo", "watch", "--version"], check=True, capture_output=True)
    except Exception:
        print("cargo-watch is not installed. Installing now...")
        try:
            subprocess.run(["cargo", "install", "cargo-watch"], check=True)
        except subprocess.CalledProcessError as e:
            print(f"Failed to install cargo-watch: {e}")
            print("Running without hot reload")
            return run_in_debug_mode()

    print("Running with hot reload in debug mode...")
    try:
        subprocess.run(["cargo", "watch", "-x", "run"], cwd=src_core_path)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Hot reload execution failed: {e}")
        return False
start_rust_server_posix(executable_path, persistent_fd)

POSIX: Starts Rust server passing the persistent_fd.

Source code in toolboxv2/utils/system/api.py
def start_rust_server_posix(executable_path: str, persistent_fd: int):
    """POSIX: Starts Rust server passing the persistent_fd."""
    abs_executable_path = Path(executable_path).resolve()
    env = os.environ.copy()
    env["PERSISTENT_LISTENER_FD"] = str(persistent_fd)
    env["LISTEN_FDS"] = "1" if  persistent_fd != 4 else str(persistent_fd)# Also set for listenfd standard mechanism
    env["LISTEN_PID"] = str(os.getpid())
    print(f"[POSIX] Starting Rust server: {abs_executable_path} using FD {persistent_fd}")
    try:
        process = subprocess.Popen(
            [str(abs_executable_path)],
            cwd=abs_executable_path.parent,
            env=env,
            pass_fds=[persistent_fd],
        )
        return process
    except Exception as e:
        print(f"[POSIX] Failed to start Rust server {abs_executable_path}: {e}")
        return None
start_rust_server_windows(executable_path)

WINDOWS: Starts Rust server normally. It will bind its own socket.

Source code in toolboxv2/utils/system/api.py
def start_rust_server_windows(executable_path: str):
    """WINDOWS: Starts Rust server normally. It will bind its own socket."""
    abs_executable_path = Path(executable_path).resolve()
    print(f"[WINDOWS] Starting Rust server: {abs_executable_path}. It will bind its own socket.")
    try:
        process = subprocess.Popen(
            [str(abs_executable_path)],
            cwd=abs_executable_path.parent,
            # No special env vars for socket needed for Windows fallback
        )
        return process
    except Exception as e:
        print(f"[WINDOWS] Failed to start Rust server {abs_executable_path}: {e}")
        return None
update_server(new_executable_path, new_version)

High-level update function, calls platform-specific logic.

Source code in toolboxv2/utils/system/api.py
def update_server(new_executable_path: str, new_version: str):
    """High-level update function, calls platform-specific logic."""
    if platform.system().lower() == "windows":
        return update_server_windows(new_executable_path, new_version)
    else: # POSIX
        return update_server_posix(new_executable_path, new_version)
update_server_posix(new_executable_path, new_version)

POSIX: Zero-downtime update using persistent FD.

Source code in toolboxv2/utils/system/api.py
def update_server_posix(new_executable_path: str, new_version: str):
    """POSIX: Zero-downtime update using persistent FD."""
    if not psutil: return False
    print(f"--- [POSIX] Starting Update to {new_version} ---")
    old_pid, old_version, old_exe_path = read_server_state()

    if not os.path.exists(PERSISTENT_FD_FILE):
        print(f"[POSIX] Error: FD file '{PERSISTENT_FD_FILE}' not found. Cannot update.")
        return False
    try:
        with open(PERSISTENT_FD_FILE) as f: persistent_fd = int(f.read().strip())
        print(f"[POSIX] Using persistent listener FD: {persistent_fd}")
    except Exception as e:
        print(f"[POSIX] Error reading FD from '{PERSISTENT_FD_FILE}': {e}")
        return False

    new_process = start_rust_server_posix(new_executable_path, persistent_fd)
    if new_process is None: return False
    time.sleep(5) # Allow new server to init
    if new_process.poll() is not None:
        print(f"[POSIX] New server (PID {new_process.pid}) died. Exit: {new_process.poll()}. Update failed.")
        return False
    print(f"[POSIX] New server (v{new_version}, PID {new_process.pid}) started.")

    if old_pid and is_process_running(old_pid):
        print(f"[POSIX] Stopping old server (v{old_version}, PID {old_pid})...")
        if not stop_process(old_pid):
            print(f"[POSIX] Warning: Failed to stop old server PID {old_pid}.")
    else:
        print("[POSIX] No old server or PID was stale.")

    write_server_state(new_process.pid, new_version, new_executable_path)
    print(f"--- [POSIX] Update to {new_version} complete. New PID: {new_process.pid} ---")
    return True
update_server_windows(new_executable_path, new_version)

WINDOWS: Graceful restart (stop old, start new).

Source code in toolboxv2/utils/system/api.py
def update_server_windows(new_executable_path: str, new_version: str):
    """WINDOWS: Graceful restart (stop old, start new)."""
    if not psutil: return False
    print(f"--- [WINDOWS] Starting Update (Graceful Restart) to {new_version} ---")
    old_pid, old_version, old_exe_path = read_server_state()

    if old_pid and is_process_running(old_pid):
        print(f"[WINDOWS] Stopping old server (v{old_version}, PID {old_pid})...")
        if not stop_process(old_pid):
            print(f"[WINDOWS] Failed to stop old server PID {old_pid}. Update aborted to prevent conflicts.")
            return False
        print("[WINDOWS] Old server stopped.")
        time.sleep(2) # Give OS time to release port
    else:
        print("[WINDOWS] No old server running or PID was stale.")

    new_process = start_rust_server_windows(new_executable_path)
    if new_process is None: return False
    time.sleep(3) # Allow new server to init
    if new_process.poll() is not None:
        print(f"[WINDOWS] New server (PID {new_process.pid}) died. Exit: {new_process.poll()}. Update failed.")
        return False
    print(f"[WINDOWS] New server (v{new_version}, PID {new_process.pid}) started.")

    write_server_state(new_process.pid, new_version, new_executable_path)
    print(f"--- [WINDOWS] Update to {new_version} complete. New PID: {new_process.pid} ---")
    return True
conda_runner
create_env_registry(env_name)

Create a JSON registry of all packages installed in the specified conda environment.

Args:
    env_name (str): Name of the conda environment

Returns:
    bool: True if registry creation was successful, False otherwise

Source code in toolboxv2/utils/system/conda_runner.py
def create_env_registry(env_name: str) -> bool:
    """
    Create a JSON registry of all packages installed in the specified conda environment.

    Args:
        env_name (str): Name of the conda environment

    Returns:
        bool: True if registry creation was successful, False otherwise
    """
    # Get list of installed packages
    command = f"conda list -n {env_name} --json"
    success, output = run_command(command, live=False)

    if not success or output is None:
        print(f"Failed to get package list for environment {env_name}")
        return False

    try:
        # Parse the JSON output
        packages = json.loads(output)

        # Create a simplified registry with package names and versions
        registry = [{"name": pkg["name"], "version": pkg["version"]} for pkg in packages]

        # Write the registry to a JSON file
        registry_file = f"{env_name}_registry.json"
        with open(registry_file, 'w') as f:
            json.dump(registry, f, indent=2)

        print(f"Registry created successfully: {registry_file}")
        return True

    except json.JSONDecodeError:
        print(f"Failed to parse package list for environment {env_name}")
        return False
    except OSError:
        print(f"Failed to write registry file for environment {env_name}")
        return False
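Usage sketch (the environment name is hypothetical):

ok = create_env_registry("tb-env")  # writes tb-env_registry.json on success
print("registry written" if ok else "registry creation failed")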
main_tool
MainTool
Source code in toolboxv2/utils/system/main_tool.py
class MainTool:
    toolID: str = ""
    # app = None
    interface = None
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.
        Do not override; use __ainit__ instead.
        """
        self.__storedargs = args, kwargs
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", None))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):

        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    def get_version(self) -> str:
        """"Returns the version"""
        return self.version

    async def get_user(self, username: str) -> Result:
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()
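The __init__/__ainit__/__await__ trio makes subclasses awaitable: synchronous setup happens in __init__, and awaiting the instance drives __ainit__ exactly once. A minimal subclass sketch (names are illustrative; assumes MainTool is importable from toolboxv2):

from toolboxv2 import MainTool

class DemoTool(MainTool):
    async def __ainit__(self, **kwargs):
        # Required keys per MainTool.__ainit__: "v" and "name"
        await super().__ainit__(v="0.1.0", name="DemoTool", **kwargs)

# Inside an async context:
#     tool = await DemoTool()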
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/system/main_tool.py
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override; use __ainit__ instead.
    """
    self.__storedargs = args, kwargs
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/system/main_tool.py
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

"Returns the version

Source code in toolboxv2/utils/system/main_tool.py
def get_version(self) -> str:
    """"Returns the version"""
    return self.version
webInstall(user_instance, construct_render)

"Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/main_tool.py
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""
get_version_from_pyproject(pyproject_path='../pyproject.toml')

Reads the version from the pyproject.toml file.

Source code in toolboxv2/utils/system/main_tool.py
def get_version_from_pyproject(pyproject_path='../pyproject.toml'):
    """Reads the version from the pyproject.toml file."""
    if not os.path.exists(pyproject_path) and pyproject_path=='../pyproject.toml':
        pyproject_path = 'pyproject.toml'
    if not os.path.exists(pyproject_path) and pyproject_path=='pyproject.toml':
        return "0.1.21"

    try:
        import toml
        # Load the pyproject.toml file
        with open(pyproject_path) as file:
            pyproject_data = toml.load(file)

        # Extract the version from the 'project' section
        version = pyproject_data.get('project', {}).get('version')

        if version is None:
            raise ValueError(f"Version not found in {pyproject_path}")

        return version
    except Exception as e:
        print(f"Error reading version: {e}")
        return "0.0.0"
state_system

The tasks of the state system are to:

1. keep track of the current state of the ToolBox and its dependencies,
2. track the shasum of every mod and runnable, and
3. track the version of every mod.

The state:

{"utils":    {"file_name": {"version": ##, "shasum": ##}},
 "mods":     {"file_name": {"version": ##, "shasum": ##, "src-url": ##}},
 "runnable": {"file_name": {"version": ##, "shasum": ##, "src-url": ##}},
 "api":      {"file_name": {"version": ##, "shasum": ##}},
 "app":      {"file_name": {"version": ##, "shasum": ##, "src-url": ##}}}

It also transforms state from one form to another.

types
AppType
Source code in toolboxv2/utils/system/types.py
class AppType:
    prefix: str
    id: str
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    def __init__(self, prefix: None | str= None, args: AppArgs | None = None):
        self.args_sto = args
        self.prefix = prefix
        """proxi attr"""

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxi attr"""

    def set_logger(self, debug=False):
        """proxi attr"""

    @property
    def debug(self):
        """proxi attr"""
        return self._debug

    def debug_rains(self, e):
        """proxi attr"""

    def set_flows(self, r):
        """proxi attr"""

    def run_flows(self, name, **kwargs):
        """proxi attr"""

    def rrun_flows(self, name, **kwargs):
        """proxi attr"""

    def idle(self):
        import time
        self.print("idle")
        try:
            while self.alive:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        self.print("a idle")
        try:
            if hasattr(self, 'daemon_app'):
                self.print("serving daemon")
                await self.daemon_app.connect(self)
            else:
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxi attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxi attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxi attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxi attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxi attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxi attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxi attr"""

    def mod_online(self, mod_name, installed=False):
        """proxi attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxi attr"""

    def save_exit(self):
        """proxi attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxi attr"""

    async def init_module(self, modular):
        return await self.load_mod(modular)

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxi attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxi attr"""

    def remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    def print_ok(self):
        """proxi attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxi attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxi attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    def exit(self):
        """proxi attr"""

    def web_context(self) -> str:
        """returns the build index ( toolbox web component )"""

    async def a_exit(self):
        """proxi attr"""

    def save_load(self, modname, spec='app'):
        """proxi attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """

    def run_a_from_sync(self, function, *args):
        """
        run a async fuction
        """

    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxi attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxi attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remote via http / https"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxi attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxi attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxi attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxi attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxi attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300):
        """proxi attr"""

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to get request if the fuction is calld from api.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)

    def print_functions(self, name=None):
        """Print the registered functions, optionally filtered by module name."""
        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                func_level = 'r' if data['level'] == -1 else data['level']
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is not None:
                print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
                helper(functions)
                return
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """proxi attr"""

    def get_autocompletion_dict(self):
        """proxi attr"""

    def get_username(self, get_input=False, default="loot") -> str:
        """proxi attr"""

    def save_registry_as_enums(self, directory: str, filename: str):
        """proxi attr"""

    async def execute_all_functions_(self, m_query='', f_query=''):
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},
                "coverage": [0, 0],
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test: list = function_data.get('do_test')
                    # print(test, module_name, function_name, function_data)
                    infos["coverage"][0] += 1
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfuction {function_name}..."):
                        params: list = function_data.get('params')
                        sig: signature = function_data.get('signature')
                        state: bool = function_data.get('state')
                        samples: bool = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                            # print(test_kwargs)
                            # print(test_kwargs[0])
                            # test_kwargs = test_kwargs_list[0]
                        # print(module_name, function_name, test_kwargs_list)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                # print(f"test Running {state=} |{module_name}.{function_name}")
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        total_coverage = sum([float(t.split(":")[-1]) for t in all_data["coverage"]]) / len(all_data["coverage"])
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    @staticmethod
    def calculate_complexity(filename_or_code):
        from radon.complexity import cc_rank, cc_visit
        if os.path.exists(filename_or_code):
            with open(filename_or_code) as file:
                code = file.read()
        else:
            code = filename_or_code

        # Calculate and print Cyclomatic Complexity
        complexity_results = cc_visit(code)
        i = -1
        avg_complexity = 0
        for block in complexity_results:
            complexity = block.complexity
            i += 1
            print(f"block: {block.name} {i} Class/Fuction/Methode : {block.letter}")
            print(f"    fullname: {block.fullname}")
            print(f"    Cyclomatic Complexity: {complexity}")
            # Optional: Get complexity rank
            avg_complexity += complexity
            rank = cc_rank(complexity)
            print(f"    Complexity Rank: {rank}")
            # print(f"    lineno: {block.lineno}")
            print(f"    endline: {block.endline}")
            print(f"    col_offset: {block.col_offset}\n")
        if i <= 0:
            i += 2
        avg_complexity = avg_complexity / i
        print(f"\nAVG Complexity: {avg_complexity:.2f}")
        print(f"Total Rank: {cc_rank(int(avg_complexity + i // 10))}")

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        start_time = time.time()
        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            try:
                result = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )

                if not isinstance(result, Result):
                    result = Result.ok(result)

                success = result.info.exec_code == 0
                execution_time = time.time() - start_time
                return success, str(result), test_kwargs, execution_time
            except Exception as e:
                execution_time = time.time() - start_time
                return False, str(e), test_kwargs, execution_time

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_function(module_name, fname, fdata, profiler)
                    for fname, fdata in functions.items()
                    if fname.startswith(f_query)
                ]

                if not tasks:
                    return module_name, ModuleInfo()

                results = await asyncio.gather(*tasks)

                # Combine results from all functions in the module
                combined_info = ModuleInfo()
                total_execution_time = 0

                for _, info in results:
                    combined_info.functions_run += info.functions_run
                    combined_info.functions_fatal_error += info.functions_fatal_error
                    combined_info.error += info.error
                    combined_info.functions_sug += info.functions_sug
                    combined_info.calls.update(info.calls)
                    combined_info.callse.update(info.callse)
                    combined_info.coverage[0] += info.coverage[0]
                    combined_info.coverage[1] += info.coverage[1]
                    total_execution_time += info.execution_time

                combined_info.execution_time = time.time() - start_time
                return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name query filter
            f_query (str): Function name query filter
            enable_profiling (bool): Enable detailed profiling information
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        items = list(self.functions.items()).copy()

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # Process all modules concurrently
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_module(mname, mfuncs, f_query, profiler)
                    for mname, mfuncs in filtered_modules
                ]

                results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Calculate coverage
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Store module info
                stats.__dict__[module_name] = info

            # Calculate total coverage
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                ps.print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)
debug property writable

Proxy attribute.

prefix = prefix instance-attribute

Proxy attribute.

a_exit() async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_exit(self):
    """proxi attr"""
a_fuction_runner(function, function_data, args, kwargs) async

parameters = function_data.get('params')
modular_name = function_data.get('module_name')
function_name = function_data.get('func_name')
mod_function_name = f"{modular_name}.{function_name}"

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
a_remove_mod(mod_name, spec='app', delete=True) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """proxi attr"""
a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """proxi attr"""
debug_rains(e)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def debug_rains(self, e):
    """proxi attr"""
disconnect(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def disconnect(*args, **kwargs):
    """proxi attr"""
execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

    m_query (str, default ''): Module name query filter
    f_query (str, default ''): Function name query filter
    enable_profiling (bool, default True): Enable detailed profiling information
Source code in toolboxv2/utils/system/types.py
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name query filter
        f_query (str): Function name query filter
        enable_profiling (bool): Enable detailed profiling information
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    items = list(self.functions.items()).copy()

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # Process all modules concurrently
        async with asyncio.Semaphore(mp.cpu_count()):
            tasks = [
                self.process_module(mname, mfuncs, f_query, profiler)
                for mname, mfuncs in filtered_modules
            ]

            results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Calculate coverage
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Store module info
            stats.__dict__[module_name] = info

        # Calculate total coverage
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            ps.print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)
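
For illustration, a hedged call from an async context; the module prefix is hypothetical and app is an AppType instance:

async def run_suite(app):
    # Run every registered function whose module name starts with "CloudM",
    # skipping profiling; the returned Result wraps the collected stats dict.
    result = await app.execute_all_functions(m_query="CloudM", enable_profiling=False)
    return result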
exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def exit(self):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
fuction_runner(function, function_data, args, kwargs, t0=0.0)

parameters = function_data.get('params')
modular_name = function_data.get('module_name')
function_name = function_data.get('func_name')
mod_function_name = f"{modular_name}.{function_name}"

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
get_all_mods(working_dir='mods', path_to='./runtime')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """proxi attr"""
get_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_autocompletion_dict(self):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function:
    metadata :: return the registered function dictionary
        stateless: (function_data, None), 0
        stateful: (function_data, higher_order_function), 0
    state :: boolean
    specification :: str, default "app"

Source code in toolboxv2/utils/system/types.py
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
get_mod(name, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """proxi attr"""
get_username(get_input=False, default='loot')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def get_username(self, get_input=False, default="loot") -> str:
    """proxi attr"""
hide_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def hide_console(*args, **kwargs):
    """proxi attr"""
inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """proxi attr"""
load_all_mods_in_file(working_dir='mods') async

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
async def load_all_mods_in_file(self, working_dir="mods"):
    """proxi attr"""
load_mod(mod_name, mlm='I', **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """proxi attr"""
mod_online(mod_name, installed=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def mod_online(self, mod_name, installed=False):
    """proxi attr"""
print(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def print(text, *args, **kwargs):
    """proxi attr"""
print_ok()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def print_ok(self):
    """proxi attr"""
    self.logger.info("OK")
reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """proxi attr"""
remove_mod(mod_name, spec='app', delete=True)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
rrun_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def rrun_flows(self, name, **kwargs):
    """proxi attr"""
run_a_from_sync(function, *args)

Run an async function.

Source code in toolboxv2/utils/system/types.py
def run_a_from_sync(self, function, *args):
    """
    run a async fuction
    """
run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """proxi attr"""
run_flows(name, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_flows(self, name, **kwargs):
    """proxi attr"""
run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """proxi attr"""
run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

Run a function remotely via HTTP/HTTPS.

Source code in toolboxv2/utils/system/types.py
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """run a function remote via http / https"""
save_autocompletion_dict()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_autocompletion_dict(self):
    """proxi attr"""
save_exit()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_exit(self):
    """proxi attr"""
save_initialized_module(tools_class, spec)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_initialized_module(self, tools_class, spec):
    """proxi attr"""
save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """proxi attr"""
save_load(modname, spec='app')

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_load(self, modname, spec='app'):
    """proxi attr"""
save_registry_as_enums(directory, filename)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def save_registry_as_enums(self, directory: str, filename: str):
    """proxi attr"""
set_flows(r)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_flows(self, r):
    """proxi attr"""
set_logger(debug=False)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def set_logger(self, debug=False):
    """proxi attr"""
show_console(*args, **kwargs) async staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
async def show_console(*args, **kwargs):
    """proxi attr"""
sprint(text, *args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

    name (str, default None): The name to register the function under. Defaults to the function's own name.
    mod_name (str, default ''): The name of the module the function belongs to.
    helper (str, default ''): A helper string providing additional information about the function.
    version (str or None, default None): The version of the function or module.
    test (bool, default True): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, default False): Flag to restrict the function in virtual mode.
    api (bool, default False): Flag to indicate if the function is part of an API.
    initial (bool, default False): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, default False): Flag to indicate if the function should be executed at exit.
    test_only (bool, default False): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, default False): Flag to enable memory caching for the function.
    request_as_kwarg (bool, default False): Flag to pass the request as a kwarg when the function is called via the API.
    file_cache (bool, default False): Flag to enable file caching for the function.
    row (bool, default False): Whether to return the raw result instead of auto-wrapping it in a Result type; the default False wraps the result.
    state (bool or None, default None): Flag to indicate if the function maintains state.
    level (int, default 0): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, default 100): Maximum size of the memory cache.
    memory_cache_ttl (int, default 300): Time-to-live for the memory cache entries.
    samples (list or dict or None, default None): Samples or examples of function usage.
    interface (str, default None): The interface type for the function.
    pre_compute (callable, default None): A function to be called before the main function.
    post_compute (callable, default None): A function to be called after the main function.
    api_methods (list[str], default None): Defaults to ["AUTO"] (GET if the function has no params, POST if it does); may also be GET, POST, PUT, or DELETE.

Returns:

    function: The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to get request if the fuction is calld from api.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
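
To make the registration flow concrete, here is a minimal hedged sketch; the module name and function body are made up, and app stands for an initialized AppType instance:

@app.tb(name="hello", mod_name="MyMod", api=True, memory_cache=True, memory_cache_ttl=60)
def hello(name: str = "world"):
    # Registered as MyMod.hello; exposed over the API (api=True) and
    # memory-cached for 60 seconds per the cache settings above.
    return f"Hello, {name}!"

Because state is left at None, state detection falls back to inspecting the first parameter name (self, state, or app), as the commented data dict in _create_decorator shows.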
watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """proxi attr"""
web_context()

Returns the build index (the toolbox web component).

Source code in toolboxv2/utils/system/types.py
def web_context(self) -> str:
    """returns the build index ( toolbox web component )"""
Headers dataclass

Class representing HTTP headers with strongly typed common fields.

Source code in toolboxv2/utils/system/types.py
@dataclass
class Headers:
    """Class representing HTTP headers with strongly typed common fields."""
    # General Headers
    accept: None | str= None
    accept_charset: None | str= None
    accept_encoding: None | str= None
    accept_language: None | str= None
    accept_ranges: None | str= None
    access_control_allow_credentials: None | str= None
    access_control_allow_headers: None | str= None
    access_control_allow_methods: None | str= None
    access_control_allow_origin: None | str= None
    access_control_expose_headers: None | str= None
    access_control_max_age: None | str= None
    access_control_request_headers: None | str= None
    access_control_request_method: None | str= None
    age: None | str= None
    allow: None | str= None
    alt_svc: None | str= None
    authorization: None | str= None
    cache_control: None | str= None
    clear_site_data: None | str= None
    connection: None | str= None
    content_disposition: None | str= None
    content_encoding: None | str= None
    content_language: None | str= None
    content_length: None | str= None
    content_location: None | str= None
    content_range: None | str= None
    content_security_policy: None | str= None
    content_security_policy_report_only: None | str= None
    content_type: None | str= None
    cookie: None | str= None
    cross_origin_embedder_policy: None | str= None
    cross_origin_opener_policy: None | str= None
    cross_origin_resource_policy: None | str= None
    date: None | str= None
    device_memory: None | str= None
    digest: None | str= None
    dnt: None | str= None
    dpr: None | str= None
    etag: None | str= None
    expect: None | str= None
    expires: None | str= None
    feature_policy: None | str= None
    forwarded: None | str= None
    from_header: None | str= None  # 'from' is a Python keyword
    host: None | str= None
    if_match: None | str= None
    if_modified_since: None | str= None
    if_none_match: None | str= None
    if_range: None | str= None
    if_unmodified_since: None | str= None
    keep_alive: None | str= None
    large_allocation: None | str= None
    last_modified: None | str= None
    link: None | str= None
    location: None | str= None
    max_forwards: None | str= None
    origin: None | str= None
    pragma: None | str= None
    proxy_authenticate: None | str= None
    proxy_authorization: None | str= None
    public_key_pins: None | str= None
    public_key_pins_report_only: None | str= None
    range: None | str= None
    referer: None | str= None
    referrer_policy: None | str= None
    retry_after: None | str= None
    save_data: None | str= None
    sec_fetch_dest: None | str= None
    sec_fetch_mode: None | str= None
    sec_fetch_site: None | str= None
    sec_fetch_user: None | str= None
    sec_websocket_accept: None | str= None
    sec_websocket_extensions: None | str= None
    sec_websocket_key: None | str= None
    sec_websocket_protocol: None | str= None
    sec_websocket_version: None | str= None
    server: None | str= None
    server_timing: None | str= None
    service_worker_allowed: None | str= None
    set_cookie: None | str= None
    sourcemap: None | str= None
    strict_transport_security: None | str= None
    te: None | str= None
    timing_allow_origin: None | str= None
    tk: None | str= None
    trailer: None | str= None
    transfer_encoding: None | str= None
    upgrade: None | str= None
    upgrade_insecure_requests: None | str= None
    user_agent: None | str= None
    vary: None | str= None
    via: None | str= None
    warning: None | str= None
    www_authenticate: None | str= None
    x_content_type_options: None | str= None
    x_dns_prefetch_control: None | str= None
    x_forwarded_for: None | str= None
    x_forwarded_host: None | str= None
    x_forwarded_proto: None | str= None
    x_frame_options: None | str= None
    x_xss_protection: None | str= None

    # Browser-specific and custom headers
    sec_ch_ua: None | str= None
    sec_ch_ua_mobile: None | str= None
    sec_ch_ua_platform: None | str= None
    sec_ch_ua_arch: None | str= None
    sec_ch_ua_bitness: None | str= None
    sec_ch_ua_full_version: None | str= None
    sec_ch_ua_full_version_list: None | str= None
    sec_ch_ua_platform_version: None | str= None

    # HTMX specific headers
    hx_boosted: None | str= None
    hx_current_url: None | str= None
    hx_history_restore_request: None | str= None
    hx_prompt: None | str= None
    hx_request: None | str= None
    hx_target: None | str= None
    hx_trigger: None | str= None
    hx_trigger_name: None | str= None

    # Additional fields can be stored in extra_headers
    extra_headers: dict[str, str] = field(default_factory=dict)

    def __post_init__(self):
        """Convert header keys with hyphens to underscores for attribute access."""
        # Handle the 'from' header specifically since it's a Python keyword
        if 'from' in self.__dict__:
            self.from_header = self.__dict__.pop('from')

        # Store any attributes that weren't explicitly defined in extra_headers
        all_attrs = self.__annotations__.keys()
        for key in list(self.__dict__.keys()):
            if key not in all_attrs and key != "extra_headers":
                self.extra_headers[key.replace("_", "-")] = getattr(self, key)
                delattr(self, key)

    @classmethod
    def from_dict(cls, headers_dict: dict[str, str]) -> 'Headers':
        """Create a Headers instance from a dictionary."""
        # Convert header keys from hyphenated to underscore format for Python attributes
        processed_headers = {}
        extra_headers = {}

        for key, value in headers_dict.items():
            # Handle 'from' header specifically
            if key.lower() == 'from':
                processed_headers['from_header'] = value
                continue

            python_key = key.replace("-", "_").lower()
            if python_key in cls.__annotations__ and python_key != "extra_headers":
                processed_headers[python_key] = value
            else:
                extra_headers[key] = value

        return cls(**processed_headers, extra_headers=extra_headers)

    def to_dict(self) -> dict[str, str]:
        """Convert the Headers object back to a dictionary."""
        result = {}

        # Add regular attributes
        for key, value in self.__dict__.items():
            if key != "extra_headers" and value is not None:
                # Handle from_header specially
                if key == "from_header":
                    result["from"] = value
                else:
                    result[key.replace("_", "-")] = value

        # Add extra headers
        result.update(self.extra_headers)

        return result
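
A short round-trip example, derived only from the from_dict/to_dict logic shown above (header values are illustrative):

h = Headers.from_dict({
    "Content-Type": "application/json",
    "From": "admin@example.com",   # 'from' is a Python keyword, so it maps to from_header
    "X-Custom-Token": "abc",       # not a declared field, so it lands in extra_headers
})
assert h.content_type == "application/json"
assert h.from_header == "admin@example.com"
assert h.extra_headers["X-Custom-Token"] == "abc"
assert h.to_dict()["from"] == "admin@example.com"
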
__post_init__()

Convert header keys with hyphens to underscores for attribute access.

Source code in toolboxv2/utils/system/types.py, lines 162-173
def __post_init__(self):
    """Convert header keys with hyphens to underscores for attribute access."""
    # Handle the 'from' header specifically since it's a Python keyword
    if 'from' in self.__dict__:
        self.from_header = self.__dict__.pop('from')

    # Store any attributes that weren't explicitly defined in extra_headers
    all_attrs = self.__annotations__.keys()
    for key in list(self.__dict__.keys()):
        if key not in all_attrs and key != "extra_headers":
            self.extra_headers[key.replace("_", "-")] = getattr(self, key)
            delattr(self, key)
from_dict(headers_dict) classmethod

Create a Headers instance from a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 175-194
@classmethod
def from_dict(cls, headers_dict: dict[str, str]) -> 'Headers':
    """Create a Headers instance from a dictionary."""
    # Convert header keys from hyphenated to underscore format for Python attributes
    processed_headers = {}
    extra_headers = {}

    for key, value in headers_dict.items():
        # Handle 'from' header specifically
        if key.lower() == 'from':
            processed_headers['from_header'] = value
            continue

        python_key = key.replace("-", "_").lower()
        if python_key in cls.__annotations__ and python_key != "extra_headers":
            processed_headers[python_key] = value
        else:
            extra_headers[key] = value

    return cls(**processed_headers, extra_headers=extra_headers)
to_dict()

Convert the Headers object back to a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 196-212
def to_dict(self) -> dict[str, str]:
    """Convert the Headers object back to a dictionary."""
    result = {}

    # Add regular attributes
    for key, value in self.__dict__.items():
        if key != "extra_headers" and value is not None:
            # Handle from_header specially
            if key == "from_header":
                result["from"] = value
            else:
                result[key.replace("_", "-")] = value

    # Add extra headers
    result.update(self.extra_headers)

    return result
MainToolType
Source code in toolboxv2/utils/system/types.py, lines 1159-1202
class MainToolType:
    toolID: str
    app: A
    interface: ToolBoxInterfaces
    spec: str

    version: str
    tools: dict  # legacy
    name: str
    logger: logging
    color: str
    todo: Callable
    _on_exit: Callable
    stuf: bool
    config: dict
    user: U | None
    description: str

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None) -> Result:
        """proxi attr"""

    def load(self):
        """proxi attr"""

    def print(self, message, end="\n", **kwargs):
        """proxi attr"""

    def add_str_to_config(self, command):
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""

    async def get_user(self, username: str) -> Result:
        return self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)
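
As a small illustration of add_str_to_config above (the tool instance here is hypothetical):

# expects a [key, value] pair; anything else logs an error and returns False
tool.add_str_to_config(["api_key", "secret"])   # sets tool.config["api_key"] = "secret"
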
load()

Proxy attribute.

Source code in toolboxv2/utils/system/types.py, lines 1186-1187
def load(self):
    """proxi attr"""
print(message, end='\n', **kwargs)

Proxy attribute.

Source code in toolboxv2/utils/system/types.py, lines 1189-1190
def print(self, message, end="\n", **kwargs):
    """proxi attr"""
return_result(error=ToolBoxError.none, exec_code=0, help_text='', data_info=None, data=None, data_to=None) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/system/types.py, lines 1177-1184
@staticmethod
def return_result(error: ToolBoxError = ToolBoxError.none,
                  exec_code: int = 0,
                  help_text: str = "",
                  data_info=None,
                  data=None,
                  data_to=None) -> Result:
    """proxi attr"""
webInstall(user_instance, construct_render)

"Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/types.py, lines 1198-1199
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""
Request dataclass

Class representing an HTTP request.

Source code in toolboxv2/utils/system/types.py, lines 215-258
@dataclass
class Request:
    """Class representing an HTTP request."""
    content_type: str
    headers: Headers
    method: str
    path: str
    query_params: dict[str, Any] = field(default_factory=dict)
    form_data: dict[str, Any] | None = None
    body: Any | None = None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'Request':
        """Create a Request instance from a dictionary."""
        headers = Headers.from_dict(data.get('headers', {}))

        # Extract other fields
        return cls(
            content_type=data.get('content_type', ''),
            headers=headers,
            method=data.get('method', ''),
            path=data.get('path', ''),
            query_params=data.get('query_params', {}),
            form_data=data.get('form_data'),
            body=data.get('body')
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert the Request object back to a dictionary."""
        result = {
            'content_type': self.content_type,
            'headers': self.headers.to_dict(),
            'method': self.method,
            'path': self.path,
            'query_params': self.query_params,
        }

        if self.form_data is not None:
            result['form_data'] = self.form_data

        if self.body is not None:
            result['body'] = self.body

        return result
from_dict(data) classmethod

Create a Request instance from a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 226-240
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'Request':
    """Create a Request instance from a dictionary."""
    headers = Headers.from_dict(data.get('headers', {}))

    # Extract other fields
    return cls(
        content_type=data.get('content_type', ''),
        headers=headers,
        method=data.get('method', ''),
        path=data.get('path', ''),
        query_params=data.get('query_params', {}),
        form_data=data.get('form_data'),
        body=data.get('body')
    )
to_dict()

Convert the Request object back to a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 242-258
def to_dict(self) -> dict[str, Any]:
    """Convert the Request object back to a dictionary."""
    result = {
        'content_type': self.content_type,
        'headers': self.headers.to_dict(),
        'method': self.method,
        'path': self.path,
        'query_params': self.query_params,
    }

    if self.form_data is not None:
        result['form_data'] = self.form_data

    if self.body is not None:
        result['body'] = self.body

    return result
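
Putting Request.from_dict and Request.to_dict together, a minimal construction sketch (values invented for illustration):

req = Request.from_dict({
    "content_type": "application/json",
    "headers": {"content-type": "application/json"},
    "method": "GET",
    "path": "/api/ping",
})
assert req.headers.content_type == "application/json"
assert "form_data" not in req.to_dict()   # None fields are omitted on serialization
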
RequestData dataclass

Main class representing the complete request data structure.

Source code in toolboxv2/utils/system/types.py, lines 303-379
@dataclass
class RequestData:
    """Main class representing the complete request data structure."""
    request: Request
    session: Session
    session_id: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
        """Create a RequestData instance from a dictionary."""
        return cls(
            request=Request.from_dict(data.get('request', {})),
            session=Session.from_dict(data.get('session', {})),
            session_id=data.get('session_id', '')
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert the RequestData object back to a dictionary."""
        return {
            'request': self.request.to_dict(),
            'session': self.session.to_dict(),
            'session_id': self.session_id
        }

    def __getattr__(self, name: str) -> Any:
        """Delegate unknown attributes to the `request` object."""
        # Only if the attribute does not exist directly on RequestData
        # and is not `session` or `session_id`
        if hasattr(self.request, name):
            return getattr(self.request, name)
        raise AttributeError(f"'RequestData' object has no attribute '{name}'")

    @classmethod
    def moc(cls):
        return cls(
            request=Request.from_dict({
                'content_type': 'application/x-www-form-urlencoded',
                'headers': {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br, zstd',
                    'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
                    'connection': 'keep-alive',
                    'content-length': '107',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': 'session=abc123',
                    'host': 'localhost:8080',
                    'hx-current-url': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'hx-request': 'true',
                    'hx-target': 'estimates-guest_1fc2c9',
                    'hx-trigger': 'config-form-guest_1fc2c9',
                    'origin': 'http://localhost:8080',
                    'referer': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"Windows"',
                    'sec-fetch-dest': 'empty',
                    'sec-fetch-mode': 'cors',
                    'sec-fetch-site': 'same-origin',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                },
                'method': 'POST',
                'path': '/api/TruthSeeker/update_estimates',
                'query_params': {},
                'form_data': {
                    'param1': 'value1',
                    'param2': 'value2'
                }
            }),
            session=Session.from_dict({
                'SiID': '29a2e258e18252e2afd5ff943523f09c82f1bb9adfe382a6f33fc6a8381de898',
                'level': '1',
                'spec': '74eed1c8de06886842e235486c3c2fd6bcd60586998ac5beb87f13c0d1750e1d',
                'user_name': 'root',
                'custom_field': 'custom_value'
            }),
            session_id='0x29dd1ac0d1e30d3f'
        )
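
The moc() fixture above also demonstrates the __getattr__ delegation: attributes missing on RequestData fall through to the wrapped Request.

rd = RequestData.moc()
assert rd.method == "POST"                            # delegated to rd.request.method
assert rd.path == "/api/TruthSeeker/update_estimates"
assert rd.headers.hx_request == "true"                # the Headers object is reachable too
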
__getattr__(name)

Delegate unknown attributes to the request object.

Source code in toolboxv2/utils/system/types.py, lines 327-333
def __getattr__(self, name: str) -> Any:
    """Delegate unknown attributes to the `request` object."""
    # Only if the attribute does not exist directly on RequestData
    # and is not `session` or `session_id`
    if hasattr(self.request, name):
        return getattr(self.request, name)
    raise AttributeError(f"'RequestData' object has no attribute '{name}'")
from_dict(data) classmethod

Create a RequestData instance from a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 310-317
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
    """Create a RequestData instance from a dictionary."""
    return cls(
        request=Request.from_dict(data.get('request', {})),
        session=Session.from_dict(data.get('session', {})),
        session_id=data.get('session_id', '')
    )
to_dict()

Convert the RequestData object back to a dictionary.

Source code in toolboxv2/utils/system/types.py, lines 319-325
def to_dict(self) -> dict[str, Any]:
    """Convert the RequestData object back to a dictionary."""
    return {
        'request': self.request.to_dict(),
        'session': self.session.to_dict(),
        'session_id': self.session_id
    }
Result
Source code in toolboxv2/utils/system/types.py, lines 621-1084
class Result:
    _task = None
    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        return self

    def as_dict(self):
        return {
            "error":self.error.value if isinstance(self.error, Enum) else self.error,
        "result" : {
            "data_to":self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
            "data_info":self.result.data_info,
            "data":self.result.data,
            "data_type":self.result.data_type
        } if self.result else None,
        "info" : {
            "exec_code" : self.info.exec_code,  # exec_code umwandel in http resposn codes
        "help_text" : self.info.help_text
        } if self.info else None,
        "origin" : self.origin
        }

    def set_origin(self, origin):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        if self.info.exec_code == 200:
            return False
        return True

    def is_data(self):
        return self.result.data is not None

    def to_api_result(self):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # map exec_code to HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list or None or str):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: Union[dict, None] = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Union[
                   Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Union[
                Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type."""
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        if not row and not '"<div class="main-content""' in data:
            data = f'<div class="main-content frosted-glass">{data}<div>'
        if not row and not get_app().web_context() in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        data = '\n' + f"{((prifix + 'Data: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{data if not data.endswith('NO Data') else ''}")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "\n------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        return self._task
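
Before the per-method reference below, a compact sketch of everyday Result handling, using only constructors and accessors from the class above:

res = Result.ok(data={"answer": 42}, data_info="demo")
if not res.is_error():
    value = res.get("answer")      # dict payloads support key lookup via get()
assert value == 42

err = Result.default_user_error(info="bad input")
assert err.is_error()              # input_error with exec_code -3
err.lazy_return('logg')            # logs the error instead of raising; 'raise' would raise ValueError
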
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py, lines 879-900
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

    data (required): File data as bytes or base64 string.
    filename (required): Name of the file for download.
    content_type (default: None): MIME type of the file (auto-detected if None).
    info (default: 'OK'): Response info text.
    interface (default: remote): Target interface.

Returns:

    Result object configured for file download.

Source code in toolboxv2/utils/system/types.py, lines 902-944
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
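
Usage sketch, following the docstring above (the path is illustrative):

with open("report.pdf", "rb") as fh:
    res = Result.file(fh.read(), filename="report.pdf")
# content_type is auto-detected from the filename ("application/pdf" here),
# and the bytes are base64-encoded as the Rust server expects.
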
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py, lines 847-860
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)
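
For example, status_code takes precedence over exec_code when provided:

res = Result.json({"ok": True}, status_code=201)
assert res.info.exec_code == 201
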
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py, lines 946-959
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

    stream_generator (Any, required): A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.
    info (str, default: 'OK'): Optional help text for the Result.
    interface (ToolBoxInterfaces, default: remote): Optional ToolBoxInterface to target.
    cleanup_func (Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None], default: None): Optional cleanup function to run when the stream ends or is cancelled.
    http_headers (commented out in the signature): Optional dictionary of custom HTTP headers for the SSE response.

Returns:

    A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py, lines 805-838
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Union[
            Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
        #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
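
A hedged end-to-end sketch: an async generator source whose items are formatted as SSE events (the event name and payloads are illustrative):

import asyncio

async def ticks():
    for i in range(3):
        yield {"event": "tick", "data": {"n": i}}
        await asyncio.sleep(1)

def tick_endpoint():
    return Result.sse(stream_generator=ticks(), info="tick stream")
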
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

    stream_generator (Any, required): Any stream source (async generator, sync generator, iterable, or single item).
    content_type (str, default: 'text/event-stream'): Content-Type header (default: text/event-stream for SSE).
    headers (Union[dict, None], default: None): Additional HTTP headers for the response.
    info (str, default: 'OK'): Help text for the result.
    interface (ToolBoxInterfaces, default: remote): Interface to send data to.
    cleanup_func (Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None], default: None): Optional function for cleanup.

Returns:

    A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py, lines 726-803
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: Union[dict, None] = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Union[
               Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
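
And a non-SSE counterpart: a plain sync generator is wrapped into an async one via SSEGenerator.wrap_sync_generator, per the branch above:

def chunks():
    yield "part1"
    yield "part2"

res = Result.stream(chunks(), content_type="text/plain")
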
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py, lines 862-877
@classmethod
def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with specific content type."""
    if headers is not None:
        return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )

    return cls(error=error, info=info_obj, result=result)
SSEGenerator

Production-ready SSE generator that converts any data source to properly formatted Server-Sent Events compatible with browsers.

Source code in toolboxv2/utils/system/types.py, lines 2005-2188
class SSEGenerator:
    """
    Production-ready SSE generator that converts any data source to
    properly formatted Server-Sent Events compatible with browsers.
    """

    @staticmethod
    def format_sse_event(data: Any) -> str:
        """Format any data as a proper SSE event message."""
        # Already formatted as SSE
        if isinstance(data, str) and (data.startswith('data:') or data.startswith('event:')) and '\n\n' in data:
            return data

        # Handle bytes (binary data)
        if isinstance(data, bytes):
            try:
                # Try to decode as UTF-8 first
                decoded_data_str = data.decode('utf-8')
                # If decoding works, treat it as a string for further processing
                # This allows binary data that is valid UTF-8 JSON to be processed as JSON.
                data = decoded_data_str
            except UnicodeDecodeError:
                # Binary data that is not UTF-8, encode as base64
                b64_data = base64.b64encode(data).decode('utf-8')
                return f"event: binary\ndata: {b64_data}\n\n"

        # Convert non-string objects (that are not already bytes) to a JSON string.
        # Bytes successfully decoded to UTF-8 above are processed here as strings.
        if not isinstance(data, str):
            try:
                data_str = json.dumps(data)
            except Exception:
                data_str = str(data)  # Fallback to the string representation
        else:
            data_str = data  # data is already a string

        # Handle JSON data with special event formatting
        # data_str now holds the string representation (either original string or JSON string)
        if data_str.strip().startswith('{'):
            try:
                json_data = json.loads(data_str)
                if isinstance(json_data, dict) and 'event' in json_data:
                    event_type = json_data['event']
                    event_id = json_data.get('id', None)  # Use None to distinguish from empty string

                    # Determine the actual data payload for the SSE 'data:' field
                    # If 'data' key exists in json_data, use its content.
                    # Otherwise, use the original data_str (which is the JSON of json_data).
                    if 'data' in json_data:
                        payload_content = json_data['data']
                        # If payload_content is complex, re-serialize it to JSON string
                        if isinstance(payload_content, (dict, list)):
                            sse_data_field = json.dumps(payload_content)
                        else:  # Simple type (string, number, bool)
                            sse_data_field = str(payload_content)
                    else:
                        # If original data was complex (e.g. dict) and became json_data,
                        # and no 'data' key in it, then use the full json_data as payload.
                        # If original data was a simple string that happened to be JSON parsable
                        # but without 'event' key, it would have been handled by "Regular JSON without event"
                        # or "Plain text" later.
                        # This path implies original data was a dict with 'event' key.
                        sse_data_field = data_str

                    sse_lines = []
                    if event_type:  # Should always be true here
                        sse_lines.append(f"event: {event_type}")
                    if event_id is not None:  # Check for None, allow empty string id
                        sse_lines.append(f"id: {event_id}")

                    # Handle multi-line data for the data field
                    for line in sse_data_field.splitlines():
                        sse_lines.append(f"data: {line}")

                    return "\n".join(sse_lines) + "\n\n"
                else:
                    # Regular JSON without special 'event' key
                    sse_lines = []
                    for line in data_str.splitlines():
                        sse_lines.append(f"data: {line}")
                    return "\n".join(sse_lines) + "\n\n"
            except json.JSONDecodeError:
                # Not valid JSON, treat as plain text
                sse_lines = []
                for line in data_str.splitlines():
                    sse_lines.append(f"data: {line}")
                return "\n".join(sse_lines) + "\n\n"
        else:
            # Plain text
            sse_lines = []
            for line in data_str.splitlines():
                sse_lines.append(f"data: {line}")
            return "\n".join(sse_lines) + "\n\n"

    @classmethod
    async def wrap_sync_generator(cls, generator):
        """Convert a synchronous generator to an async generator."""
        for item in generator:
            yield item
            # Allow other tasks to run
            await asyncio.sleep(0)

    @classmethod
    async def create_sse_stream(
        cls,
        source: Any,  # Changed from positional arg to keyword for clarity in Result.stream
        cleanup_func: Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None
    ) -> AsyncGenerator[str, None]:
        """
        Convert any source to a properly formatted SSE stream.

        Args:
            source: Can be async generator, sync generator, iterable, or a single item.
            cleanup_func: Optional function to call when the stream ends or is cancelled.
                          Can be a synchronous function, async function, or async generator.

        Yields:
            Properly formatted SSE messages (strings).
        """
        # Send stream start event
        # This structure ensures data field contains {"id":"0"}
        yield cls.format_sse_event({"event": "stream_start", "data": {"id": "0"}})

        try:
            # Handle different types of sources
            if inspect.isasyncgen(source):
                # Source is already an async generator
                async for item in source:
                    yield cls.format_sse_event(item)
            elif inspect.isgenerator(source) or (not isinstance(source, str) and hasattr(source, '__iter__')):
                # Source is a sync generator or iterable (but not a string)
                # Strings are iterable but should be treated as single items unless explicitly made a generator
                async for item in cls.wrap_sync_generator(source):
                    yield cls.format_sse_event(item)
            else:
                # Single item (including strings)
                yield cls.format_sse_event(source)
        except asyncio.CancelledError:
            # Client disconnected
            yield cls.format_sse_event({"event": "cancelled", "data": {"id": "cancelled"}})
            raise
        except Exception as e:
            # Error in stream
            error_info = {
                "event": "error",
                "data": {  # Ensure payload is under 'data' key for the new format_sse_event logic
                    "message": str(e),
                    "traceback": traceback.format_exc()
                }
            }
            yield cls.format_sse_event(error_info)
        finally:
            # Always send end event
            yield cls.format_sse_event({"event": "stream_end", "data": {"id": "final"}})

            # Execute cleanup function if provided
            if cleanup_func:
                try:
                    if inspect.iscoroutinefunction(cleanup_func):  # Check if it's an async def function
                        await cleanup_func()
                    elif inspect.isasyncgenfunction(cleanup_func) or inspect.isasyncgen(
                        cleanup_func):  # Check if it's an async def generator function or already an async generator
                        # If it's a function, call it to get the generator
                        gen_to_exhaust = cleanup_func() if inspect.isasyncgenfunction(cleanup_func) else cleanup_func
                        async for _ in gen_to_exhaust:
                            pass  # Exhaust the generator to ensure cleanup completes
                    else:
                        # Synchronous function
                        cleanup_func()
                except Exception as e:
                    # Log cleanup errors but don't propagate them to client
                    error_info_cleanup = {
                        "event": "cleanup_error",
                        "data": {  # Ensure payload is under 'data' key
                            "message": str(e),
                            "traceback": traceback.format_exc()
                        }
                    }
                    # We can't yield here as the stream is already closing/closed.
                    # Instead, log the error.
                    # In a real app, use a proper logger.
                    print(f"SSE cleanup error: {cls.format_sse_event(error_info_cleanup)}", flush=True)
create_sse_stream(source, cleanup_func=None) async classmethod

Convert any source to a properly formatted SSE stream.

Parameters:

source (Any, required):
    Can be async generator, sync generator, iterable, or a single item.

cleanup_func (Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None], default None):
    Optional function to call when the stream ends or is cancelled.
    Can be a synchronous function, async function, or async generator.

Yields:

AsyncGenerator[str, None]:
    Properly formatted SSE messages (strings).

Source code in toolboxv2/utils/system/types.py (lines 2109-2188)
@classmethod
async def create_sse_stream(
    cls,
    source: Any,  # Changed from positional arg to keyword for clarity in Result.stream
    cleanup_func: Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None
) -> AsyncGenerator[str, None]:
    """
    Convert any source to a properly formatted SSE stream.

    Args:
        source: Can be async generator, sync generator, iterable, or a single item.
        cleanup_func: Optional function to call when the stream ends or is cancelled.
                      Can be a synchronous function, async function, or async generator.

    Yields:
        Properly formatted SSE messages (strings).
    """
    # Send stream start event
    # This structure ensures data field contains {"id":"0"}
    yield cls.format_sse_event({"event": "stream_start", "data": {"id": "0"}})

    try:
        # Handle different types of sources
        if inspect.isasyncgen(source):
            # Source is already an async generator
            async for item in source:
                yield cls.format_sse_event(item)
        elif inspect.isgenerator(source) or (not isinstance(source, str) and hasattr(source, '__iter__')):
            # Source is a sync generator or iterable (but not a string)
            # Strings are iterable but should be treated as single items unless explicitly made a generator
            async for item in cls.wrap_sync_generator(source):
                yield cls.format_sse_event(item)
        else:
            # Single item (including strings)
            yield cls.format_sse_event(source)
    except asyncio.CancelledError:
        # Client disconnected
        yield cls.format_sse_event({"event": "cancelled", "data": {"id": "cancelled"}})
        raise
    except Exception as e:
        # Error in stream
        error_info = {
            "event": "error",
            "data": {  # Ensure payload is under 'data' key for the new format_sse_event logic
                "message": str(e),
                "traceback": traceback.format_exc()
            }
        }
        yield cls.format_sse_event(error_info)
    finally:
        # Always send end event
        yield cls.format_sse_event({"event": "stream_end", "data": {"id": "final"}})

        # Execute cleanup function if provided
        if cleanup_func:
            try:
                if inspect.iscoroutinefunction(cleanup_func):  # Check if it's an async def function
                    await cleanup_func()
                elif inspect.isasyncgenfunction(cleanup_func) or inspect.isasyncgen(
                    cleanup_func):  # Check if it's an async def generator function or already an async generator
                    # If it's a function, call it to get the generator
                    gen_to_exhaust = cleanup_func() if inspect.isasyncgenfunction(cleanup_func) else cleanup_func
                    async for _ in gen_to_exhaust:
                        pass  # Exhaust the generator to ensure cleanup completes
                else:
                    # Synchronous function
                    cleanup_func()
            except Exception as e:
                # Log cleanup errors but don't propagate them to client
                error_info_cleanup = {
                    "event": "cleanup_error",
                    "data": {  # Ensure payload is under 'data' key
                        "message": str(e),
                        "traceback": traceback.format_exc()
                    }
                }
                # We can't yield here as the stream is already closing/closed.
                # Instead, log the error.
                # In a real app, use a proper logger.
                print(f"SSE cleanup error: {cls.format_sse_event(error_info_cleanup)}", flush=True)
format_sse_event(data) staticmethod

Format any data as a proper SSE event message.

Source code in toolboxv2/utils/system/types.py (lines 2011-2099)
@staticmethod
def format_sse_event(data: Any) -> str:
    """Format any data as a proper SSE event message."""
    # Already formatted as SSE
    if isinstance(data, str) and (data.startswith('data:') or data.startswith('event:')) and '\n\n' in data:
        return data

    # Handle bytes (binary data)
    if isinstance(data, bytes):
        try:
            # Try to decode as UTF-8 first
            decoded_data_str = data.decode('utf-8')
            # If decoding works, treat it as a string for further processing
            # This allows binary data that is valid UTF-8 JSON to be processed as JSON.
            data = decoded_data_str
        except UnicodeDecodeError:
            # Binary data that is not UTF-8, encode as base64
            b64_data = base64.b64encode(data).decode('utf-8')
            return f"event: binary\ndata: {b64_data}\n\n"

    # Convert non-string objects (that are not already bytes) to a JSON string.
    # Bytes successfully decoded to UTF-8 above are processed here as strings.
    if not isinstance(data, str):
        try:
            data_str = json.dumps(data)
        except Exception:
            data_str = str(data)  # Fallback to the string representation
    else:
        data_str = data  # data is already a string

    # Handle JSON data with special event formatting
    # data_str now holds the string representation (either original string or JSON string)
    if data_str.strip().startswith('{'):
        try:
            json_data = json.loads(data_str)
            if isinstance(json_data, dict) and 'event' in json_data:
                event_type = json_data['event']
                event_id = json_data.get('id', None)  # Use None to distinguish from empty string

                # Determine the actual data payload for the SSE 'data:' field
                # If 'data' key exists in json_data, use its content.
                # Otherwise, use the original data_str (which is the JSON of json_data).
                if 'data' in json_data:
                    payload_content = json_data['data']
                    # If payload_content is complex, re-serialize it to JSON string
                    if isinstance(payload_content, (dict, list)):
                        sse_data_field = json.dumps(payload_content)
                    else:  # Simple type (string, number, bool)
                        sse_data_field = str(payload_content)
                else:
                    # If original data was complex (e.g. dict) and became json_data,
                    # and no 'data' key in it, then use the full json_data as payload.
                    # If original data was a simple string that happened to be JSON parsable
                    # but without 'event' key, it would have been handled by "Regular JSON without event"
                    # or "Plain text" later.
                    # This path implies original data was a dict with 'event' key.
                    sse_data_field = data_str

                sse_lines = []
                if event_type:  # Should always be true here
                    sse_lines.append(f"event: {event_type}")
                if event_id is not None:  # Check for None, allow empty string id
                    sse_lines.append(f"id: {event_id}")

                # Handle multi-line data for the data field
                for line in sse_data_field.splitlines():
                    sse_lines.append(f"data: {line}")

                return "\n".join(sse_lines) + "\n\n"
            else:
                # Regular JSON without special 'event' key
                sse_lines = []
                for line in data_str.splitlines():
                    sse_lines.append(f"data: {line}")
                return "\n".join(sse_lines) + "\n\n"
        except json.JSONDecodeError:
            # Not valid JSON, treat as plain text
            sse_lines = []
            for line in data_str.splitlines():
                sse_lines.append(f"data: {line}")
            return "\n".join(sse_lines) + "\n\n"
    else:
        # Plain text
        sse_lines = []
        for line in data_str.splitlines():
            sse_lines.append(f"data: {line}")
        return "\n".join(sse_lines) + "\n\n"
wrap_sync_generator(generator) async classmethod

Convert a synchronous generator to an async generator.

Source code in toolboxv2/utils/system/types.py (lines 2101-2107)
@classmethod
async def wrap_sync_generator(cls, generator):
    """Convert a synchronous generator to an async generator."""
    for item in generator:
        yield item
        # Allow other tasks to run
        await asyncio.sleep(0)
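
Example: a sketch of driving a synchronous generator from async code:

import asyncio

def numbers():
    yield from range(3)

async def consume():
    async for n in SSEGenerator.wrap_sync_generator(numbers()):
        print(n)

asyncio.run(consume())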
Session dataclass

Class representing a session.

Source code in toolboxv2/utils/system/types.py (lines 261-300)
@dataclass
class Session:
    """Class representing a session."""
    SiID: str
    level: str
    spec: str
    user_name: str
    # Allow for additional fields
    extra_data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'Session':
        """Create a Session instance from a dictionary with default values."""
        known_fields = {
            'SiID': data.get('SiID', '#0'),
            'level': data.get('level', -1),
            'spec': data.get('spec', 'app'),
            'user_name': data.get('user_name', 'anonymous'),
        }

        extra_data = {k: v for k, v in data.items() if k not in known_fields}
        return cls(**known_fields, extra_data=extra_data)

    def to_dict(self) -> dict[str, Any]:
        """Convert the Session object back to a dictionary."""
        result = {
            'SiID': self.SiID,
            'level': self.level,
            'spec': self.spec,
            'user_name': self.user_name,
        }

        # Add extra data
        result.update(self.extra_data)

        return result

    @property
    def valid(self):
        return int(self.level) > 0
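
Example: a round-trip sketch; unknown keys are preserved in extra_data:

s = Session.from_dict({"SiID": "abc", "level": "2", "user_name": "alice", "theme": "dark"})
assert s.valid                           # int("2") > 0
assert s.extra_data == {"theme": "dark"}
assert s.to_dict()["theme"] == "dark"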
from_dict(data) classmethod

Create a Session instance from a dictionary with default values.

Source code in toolboxv2/utils/system/types.py (lines 271-282)
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'Session':
    """Create a Session instance from a dictionary with default values."""
    known_fields = {
        'SiID': data.get('SiID', '#0'),
        'level': data.get('level', -1),
        'spec': data.get('spec', 'app'),
        'user_name': data.get('user_name', 'anonymous'),
    }

    extra_data = {k: v for k, v in data.items() if k not in known_fields}
    return cls(**known_fields, extra_data=extra_data)
to_dict()

Convert the Session object back to a dictionary.

Source code in toolboxv2/utils/system/types.py (lines 284-296)
def to_dict(self) -> dict[str, Any]:
    """Convert the Session object back to a dictionary."""
    result = {
        'SiID': self.SiID,
        'level': self.level,
        'spec': self.spec,
        'user_name': self.user_name,
    }

    # Add extra data
    result.update(self.extra_data)

    return result
parse_request_data(data)

Parse the incoming request data into a strongly typed structure.

Source code in toolboxv2/utils/system/types.py (lines 383-385)
def parse_request_data(data: dict[str, Any]) -> RequestData:
    """Parse the incoming request data into a strongly typed structure."""
    return RequestData.from_dict(data)
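
Example: a sketch, assuming RequestData.from_dict accepts the raw request mapping; the payload keys shown are hypothetical:

request = parse_request_data({"method": "GET", "path": "/api/ping", "headers": {}})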

toolbox

Main module.

App
Source code in toolboxv2/utils/toolbox.py (lines 43-2099)
class App(AppType, metaclass=Singleton):

    def __init__(self, prefix: str = "", args=AppArgs().default()):
        super().__init__(prefix, args)
        self._web_context = None
        t0 = time.perf_counter()
        abspath = os.path.abspath(__file__)
        self.system_flag = system()  # platform.system(): 'Linux', 'Darwin' (macOS) or 'Windows'

        self.appdata = (
            os.getenv('APPDATA') if os.name == 'nt'
            else (os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config')) if os.name == 'posix'
            else None
        )

        if self.system_flag == "Darwin" or self.system_flag == "Linux":
            dir_name = os.path.dirname(abspath).replace("/utils", "")
        else:
            dir_name = os.path.dirname(abspath).replace("\\utils", "")

        self.start_dir = str(dir_name)

        self.bg_tasks = []

        lapp = dir_name + '\\.data\\'

        if not prefix:
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt") as prefix_file:
                cont = prefix_file.read()
                if cont:
                    prefix = cont.rstrip()
        else:
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt", "w") as prefix_file:
                prefix_file.write(prefix)

        self.prefix = prefix

        node_ = node()

        if 'localhost' in node_ and (host := os.getenv('HOSTNAME', 'localhost')) != 'localhost':
            node_ = node_.replace('localhost', host)
        self.id = prefix + '-' + node_
        self.globals = {
            "root": {**globals()},
        }
        self.locals = {
            "user": {'app': self, **locals()},
        }

        identification = self.id

        if "test" in prefix:
            if self.system_flag == "Darwin" or self.system_flag == "Linux":
                start_dir = self.start_dir.replace("ToolBoxV2/toolboxv2", "toolboxv2")
            else:
                start_dir = self.start_dir.replace("ToolBoxV2\\toolboxv2", "toolboxv2")
            self.data_dir = start_dir + '\\.data\\' + "test"
            self.config_dir = start_dir + '\\.config\\' + "test"
            self.info_dir = start_dir + '\\.info\\' + "test"
        elif identification.startswith('collective-'):
            collective_identification = identification.split('-')[1]
            self.data_dir = self.start_dir + '\\.data\\' + collective_identification
            self.config_dir = self.start_dir + '\\.config\\' + collective_identification
            self.info_dir = self.start_dir + '\\.info\\' + collective_identification
        else:
            self.data_dir = self.start_dir + '\\.data\\' + identification
            self.config_dir = self.start_dir + '\\.config\\' + identification
            self.info_dir = self.start_dir + '\\.info\\' + identification

        if self.appdata is None:
            self.appdata = self.data_dir
        else:
            self.appdata += "/ToolBoxV2"

        if not os.path.exists(self.appdata):
            os.makedirs(self.appdata, exist_ok=True)
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir, exist_ok=True)
        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir, exist_ok=True)
        if not os.path.exists(self.info_dir):
            os.makedirs(self.info_dir, exist_ok=True)

        print(f"Starting ToolBox as {prefix} from :", Style.Bold(Style.CYAN(f"{os.getcwd()}")))

        logger_info_str, self.logger, self.logging_filename = self.set_logger(args.debug)

        print("Logger " + logger_info_str)
        print("================================")
        self.logger.info("Logger initialized")
        get_logger().info(Style.GREEN("Starting Application instance"))
        if args.init and self.start_dir not in sys.path:
            sys.path.append(self.start_dir)


        __version__ = get_version_from_pyproject()

        self.version = __version__

        self.keys = {
            "MACRO": "macro~~~~:",
            "MACRO_C": "m_color~~:",
            "HELPER": "helper~~~:",
            "debug": "debug~~~~:",
            "id": "name-spa~:",
            "st-load": "mute~load:",
            "comm-his": "comm-his~:",
            "develop-mode": "dev~mode~:",
            "provider::": "provider::",
        }

        defaults = {
            "MACRO": ['Exit'],
            "MACRO_C": {},
            "HELPER": {},
            "debug": args.debug,
            "id": self.id,
            "st-load": False,
            "comm-his": [[]],
            "develop-mode": False,
        }
        self.config_fh = FileHandler(self.id + ".config", keys=self.keys, defaults=defaults)
        self.config_fh.load_file_handler()
        self._debug = args.debug
        self.flows = {}
        self.dev_modi = self.config_fh.get_file_handler(self.keys["develop-mode"])
        if self.config_fh.get_file_handler("provider::") is None:
            provider = ("http://localhost:" + str(self.args_sto.port)
                        if os.environ.get("HOSTNAME", "localhost") == "localhost"
                        else "https://simplecore.app")
            self.config_fh.add_to_save_file_handler("provider::", provider)
        self.functions = {}
        self.modules = {}

        self.interface_type = ToolBoxInterfaces.native
        self.PREFIX = Style.CYAN(f"~{node()}@>")
        self.alive = True
        self.called_exit = False, time.time()

        self.print(f"Infos:\n  {'Name':<8} -> {node()}\n  {'ID':<8} -> {self.id}\n  {'Version':<8} -> {self.version}\n")

        self.logger.info(
            Style.GREEN(
                f"Finish init up in {time.perf_counter() - t0:.2f}s"
            )
        )

        self.args_sto = args
        self.loop = None

        from .system.session import Session
        self.session: Session = Session(self.get_username())

    def get_username(self, get_input=False, default="loot") -> str:
        user_name = self.config_fh.get_file_handler("ac_user:::")
        if get_input and user_name is None:
            user_name = input("Input your username: ")
            self.config_fh.add_to_save_file_handler("ac_user:::", user_name)
        if user_name is None:
            user_name = default
            self.config_fh.add_to_save_file_handler("ac_user:::", user_name)
        return user_name

    def set_username(self, username):
        return self.config_fh.add_to_save_file_handler("ac_user:::", username)

    @staticmethod
    def exit_main(*args, **kwargs):
        """Proxy attribute placeholder."""

    @staticmethod
    def hide_console(*args, **kwargs):
        """Proxy attribute placeholder."""

    @staticmethod
    def show_console(*args, **kwargs):
        """Proxy attribute placeholder."""

    @staticmethod
    def disconnect(*args, **kwargs):
        """Proxy attribute placeholder."""

    def set_logger(self, debug=False):
        if "test" in self.prefix and not debug:
            logger, logging_filename = setup_logging(logging.NOTSET, name="toolbox-test", interminal=True,
                                                     file_level=logging.NOTSET, app_name=self.id)
            logger_info_str = "in Test Mode"
        elif "live" in self.prefix and not debug:
            logger, logging_filename = setup_logging(logging.DEBUG, name="toolbox-live", interminal=False,
                                                     file_level=logging.WARNING, app_name=self.id)
            logger_info_str = "in Live Mode"
            # setup_logging(logging.WARNING, name="toolbox-live", is_online=True
            #              , online_level=logging.WARNING).info("Logger initialized")
        elif "debug" in self.prefix or self.prefix.endswith("D"):
            self.prefix = self.prefix.replace("-debug", '').replace("debug", '')
            logger, logging_filename = setup_logging(logging.DEBUG, name="toolbox-debug", interminal=True,
                                                     file_level=logging.WARNING, app_name=self.id)
            logger_info_str = "in debug Mode"
            self.debug = True
        elif debug:
            logger, logging_filename = setup_logging(logging.DEBUG, name=f"toolbox-{self.prefix}-debug",
                                                     interminal=True,
                                                     file_level=logging.DEBUG, app_name=self.id)
            logger_info_str = "in args debug Mode"
        else:
            logger, logging_filename = setup_logging(logging.ERROR, name=f"toolbox-{self.prefix}", app_name=self.id)
            logger_info_str = "in Default"

        return logger_info_str, logger, logging_filename

    @property
    def debug(self):
        return self._debug

    @debug.setter
    def debug(self, value):
        if not isinstance(value, bool):
            self.logger.debug(f"Value must be a boolean. Got {value!r} of type {type(value)}")
            raise ValueError("Value must be a boolean.")

        # self.logger.info(f"Setting debug {value}")
        self._debug = value

    def debug_rains(self, e):
        if self.debug:
            import traceback
            x = "="*5
            x += " DEBUG "
            x += "="*5
            self.print(x)
            self.print(traceback.format_exc())
            self.print(x)
            raise e
        else:
            self.logger.error(f"Error: {e}")
            import traceback
            x = "="*5
            x += " DEBUG "
            x += "="*5
            self.print(x)
            self.print(traceback.format_exc())
            self.print(x)

    def set_flows(self, r):
        self.flows = r

    async def run_flows(self, name, **kwargs):
        from ..flows import flows_dict as flows_dict_func
        if name not in self.flows:
            self.flows = {**self.flows, **flows_dict_func(s=name, remote=True)}
        if name in self.flows:
            if asyncio.iscoroutinefunction(self.flows[name]):
                return await self.flows[name](get_app(from_="runner"), self.args_sto, **kwargs)
            else:
                return self.flows[name](get_app(from_="runner"), self.args_sto, **kwargs)
        else:
            print("Flow not found, active flows:", len(self.flows.keys()))

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):

        mode = 'xb'
        self.logger.info(f" coppy mod {mod_name} to {new_mod_dir} size : {sys.getsizeof(content) / 8388608:.3f} mb")

        if not os.path.exists(new_mod_dir):
            os.makedirs(new_mod_dir)
            with open(f"{new_mod_dir}/__init__.py", "w") as nmd:
                nmd.write(f"__version__ = '{self.version}'")

        if os.path.exists(f"{new_mod_dir}/{mod_name}.{file_type}"):
            mode = False

            with open(f"{new_mod_dir}/{mod_name}.{file_type}", 'rb') as d:
                runtime_mod = d.read()  # Compared by length below; simple but not byte-accurate

            if len(content) != len(runtime_mod):
                mode = 'wb'

        if mode:
            with open(f"{new_mod_dir}/{mod_name}.{file_type}", mode) as f:
                f.write(content)

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        working_dir = self.id.replace(".", "_")
        lib_mod_dir = f"toolboxv2.runtime.{working_dir}.mod_lib."

        self.logger.info(f"pre_lib_mod {mod_name} from {lib_mod_dir}")

        postfix = "_dev" if self.dev_modi else ""
        mod_file_dir = f"./mods{postfix}/{mod_name}.{file_type}"
        new_mod_dir = f"{path_to}/{working_dir}/mod_lib"
        with open(mod_file_dir, "rb") as c:
            content = c.read()
        self._coppy_mod(content, new_mod_dir, mod_name, file_type=file_type)
        return lib_mod_dir

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        loc = self._pre_lib_mod(mod_name, file_type)
        return self.inplace_load_instance(mod_name, loc=loc, **kwargs)

    def helper_install_pip_module(self, module_name):
        if 'main' in self.id:
            return
        self.print(f"Installing {module_name} GREEDY")
        os.system(f"{sys.executable} -m pip install {module_name}")

    def python_module_import_classifier(self, mod_name, error_message):

        if error_message.startswith("No module named 'toolboxv2.utils"):
            return Result.default_internal_error(f"404 {error_message.split('utils')[1]} not found")
        if error_message.startswith("No module named 'toolboxv2.mods"):
            if mod_name.startswith('.'):
                return
            return self.run_a_from_sync(self.a_run_any, ("CloudM", "install"), module_name=mod_name)
        if error_message.startswith("No module named '"):
            pip_requ = error_message.split("'")[1].replace("'", "").strip()
            # if 'y' in input(f"\t\t\tAuto install {pip_requ} Y/n").lower:
            return self.helper_install_pip_module(pip_requ)
            # return Result.default_internal_error(f"404 {pip_requ} not found")

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True, mfo=None):
        if self.dev_modi and loc == "toolboxv2.mods.":
            loc = "toolboxv2.mods_dev."
        if self.mod_online(mod_name):
            self.logger.info(f"Reloading mod from : {loc + mod_name}")
            self.remove_mod(mod_name, spec=spec, delete=False)

        if (os.path.exists(self.start_dir + '/mods/' + mod_name) or os.path.exists(
            self.start_dir + '/mods/' + mod_name + '.py')) and (
            os.path.isdir(self.start_dir + '/mods/' + mod_name) or os.path.isfile(
            self.start_dir + '/mods/' + mod_name + '.py')):
            try:
                if mfo is None:
                    modular_file_object = import_module(loc + mod_name)
                else:
                    modular_file_object = mfo
                self.modules[mod_name] = modular_file_object
            except ModuleNotFoundError as e:
                self.logger.error(Style.RED(f"module {loc + mod_name} not found (module names are case-sensitive): {e}"))
                self.print(Style.RED(f"module {loc + mod_name} not found (module names are case-sensitive): {e}"))
                if self.debug or self.args_sto.sysPrint:
                    self.python_module_import_classifier(mod_name, str(e))
                self.debug_rains(e)
                return None
        else:
            self.print(f"module {loc + mod_name} is not valid")
            return None
        if hasattr(modular_file_object, "Tools"):
            tools_class = modular_file_object.Tools
        else:
            if hasattr(modular_file_object, "name"):
                tools_class = modular_file_object
                modular_file_object = import_module(loc + mod_name)
            else:
                tools_class = None

        modular_id = None
        instance = modular_file_object
        app_instance_type = "file/application"

        if tools_class is None:
            modular_id = modular_file_object.Name if hasattr(modular_file_object, "Name") else mod_name

        if tools_class is None and modular_id is None:
            modular_id = str(modular_file_object.__name__)
            self.logger.warning(f"Unknown instance loaded {mod_name}")
            return modular_file_object

        if tools_class is not None:
            tools_class = self.save_initialized_module(tools_class, spec)
            modular_id = tools_class.name
            app_instance_type = "functions/class"
        else:
            instance.spec = spec
        # if private:
        #     self.functions[modular_id][f"{spec}_private"] = private

        if not save:
            return instance if tools_class is None else tools_class

        return self.save_instance(instance, modular_id, spec, app_instance_type, tools_class=tools_class)

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):

        if modular_id in self.functions and tools_class is None:
            if self.functions[modular_id].get(f"{spec}_instance", None) is None:
                self.functions[modular_id][f"{spec}_instance"] = instance
                self.functions[modular_id][f"{spec}_instance_type"] = instance_type
            else:
                self.print("ERROR OVERRIDE")
                raise ImportError(f"Module already known {modular_id}")

        elif tools_class is not None:
            if modular_id not in self.functions:
                self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = tools_class
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

            try:
                if not hasattr(tools_class, 'tools'):
                    tools_class.tools = {"Version": tools_class.get_version, 'name': tools_class.name}
                for function_name in list(tools_class.tools.keys()):
                    t_function_name = function_name.lower()
                    if t_function_name != "all" and t_function_name != "name":
                        self.tb(function_name, mod_name=modular_id)(tools_class.tools.get(function_name))
                self.functions[modular_id][f"{spec}_instance_type"] += "/BC"
            except Exception as e:
                self.logger.error(f"Starting Module {modular_id} compatibility failed with : {e}")
                pass
        elif modular_id not in self.functions and tools_class is None:
            self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = instance
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

        else:
            raise ImportError(f"Modular {modular_id} is not a valid mod")
        on_start = self.functions[modular_id].get("on_start")
        if on_start is not None:
            i = 1
            for f in on_start:
                try:
                    f_, e = self.get_function((modular_id, f), state=True, specification=spec)
                    if e == 0:
                        self.logger.info(Style.GREY(f"Running On start {f} {i}/{len(on_start)}"))
                        if asyncio.iscoroutinefunction(f_):
                            self.print(f"Async on start is only in Tool claas supported for {modular_id}.{f}" if tools_class is None else f"initialization starting soon for {modular_id}.{f}")
                        else:
                            o = f_()
                            if o is not None:
                                self.print(f"Function {modular_id} On start result: {o}")
                    else:
                        self.logger.warning(f"starting function not found {e}")
                except Exception as e:
                    self.logger.debug(Style.YELLOW(
                        Style.Bold(f"modular:{modular_id}.{f} on_start error {i}/{len(on_start)} -> {e}")))
                    self.debug_rains(e)
                finally:
                    i += 1
        return instance if tools_class is None else tools_class

    def save_initialized_module(self, tools_class, spec):
        tools_class.spec = spec
        live_tools_class = tools_class(app=self)
        return live_tools_class

    def mod_online(self, mod_name, installed=False):
        if installed and mod_name not in self.functions:
            self.save_load(mod_name)
        return mod_name in self.functions

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):

        if as_str is None and isinstance(name, Enum):
            modular_id = str(name.NAME.value)
            function_id = str(name.value)
        elif as_str is None and isinstance(name, list):
            modular_id, function_id = name[0], name[1]
        else:
            modular_id, function_id = as_str

        self.logger.info(f"getting function : {specification}.{modular_id}.{function_id}")

        if modular_id not in self.functions:
            if r == 0:
                self.save_load(modular_id, spec=specification)
                return self.get_function(name=(modular_id, function_id),
                                         state=state,
                                         specification=specification,
                                         metadata=metadata,
                                         r=1)
            self.logger.warning(f"function modular not found {modular_id} 404")
            return "404", 404

        if function_id not in self.functions[modular_id]:
            self.logger.warning(f"function data not found {modular_id}.{function_id} 404")
            return "404", 404

        function_data = self.functions[modular_id][function_id]

        if isinstance(function_data, list):
            print(f"functions {function_id} : {function_data}")
            function_data = self.functions[modular_id][function_data[-1]]

        function = function_data.get("func")
        params = function_data.get("params")

        state_ = function_data.get("state")
        if state_ is not None and state != state_:
            state = state_

        if function is None:
            self.logger.warning("No function found")
            return "404", 404

        if params is None:
            self.logger.warning("No function (params) found")
            return "404", 301

        if metadata and not state:
            self.logger.info("returning metadata stateless")
            return (function_data, function), 0

        if not state:  # means a stateless function
            self.logger.info("returning stateless function")
            return function, 0

        instance = self.functions[modular_id].get(f"{specification}_instance")

        # instance_type = self.functions[modular_id].get(f"{specification}_instance_type", "functions/class")

        if params[0] == 'app':
            instance = get_app(from_=f"function {specification}.{modular_id}.{function_id}")

        if instance is None and self.alive:
            self.inplace_load_instance(modular_id, spec=specification)
            instance = self.functions[modular_id].get(f"{specification}_instance")

        if instance is None:
            self.logger.warning("No live Instance found")
            return "404", 400

        # if instance_type.endswith("/BC"):  # for backwards compatibility  functions/class/BC old modules
        #     # returning as stateless
        #     # return "422", -1
        #     self.logger.info(
        #         f"returning stateless function, cant find tools class for state handling found {instance_type}")
        #     if metadata:
        #         self.logger.info(f"returning metadata stateless")
        #         return (function_data, function), 0
        #     return function, 0

        self.logger.info("wrapping in higher_order_function")

        self.logger.info(f"returned fuction {specification}.{modular_id}.{function_id}")
        higher_order_function = partial(function, instance)

        if metadata:
            self.logger.info("returning metadata stateful")
            return (function_data, higher_order_function), 0

        self.logger.info("returning stateful function")
        return higher_order_function, 0

    def save_exit(self):
        self.logger.info(f"save exiting saving data to {self.config_fh.file_handler_filename} states of {self.debug=}")
        self.config_fh.add_to_save_file_handler(self.keys["debug"], str(self.debug))

    def init_mod(self, mod_name, spec='app'):
        if '.' in mod_name:
            mod_name = mod_name.split('.')[0]
        return self.loop_gard().run_until_complete(self.a_init_mod(mod_name, spec))

    def run_bg_task(self, task):
        """
        Run a task in the background that will properly handle nested asyncio operations.
        This implementation ensures that asyncio.create_task() and asyncio.gather() work
        correctly within the background task.

        Args:
            task: A callable function that can be synchronous or asynchronous
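
        Example (illustrative sketch; assumes an initialized ``app`` instance):
            async def ping():
                await asyncio.sleep(0.1)
                print("pong")

            worker = app.run_bg_task(ping)  # returns the worker thread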
        """
        if not callable(task) and not asyncio.iscoroutine(task):
            self.logger.warning("Task is not callable or awaitable!")
            return None

        # Function that will run in a separate thread with its own event loop
        def thread_target(task_):
            # Create a new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Determine how to run the task based on its type
                if asyncio.iscoroutinefunction(task_):
                    # If it's an async function, run it directly
                    loop.run_until_complete(task_())
                elif asyncio.iscoroutine(task_):
                    # If it's already a coroutine object
                    loop.run_until_complete(task_)
                else:
                    # If it's a synchronous function that might create async tasks internally
                    async def wrapper():
                        # Run potentially blocking synchronous code in an executor
                        return await loop.run_in_executor(None, task_)

                    loop.run_until_complete(wrapper())

                self.logger.debug("Background task completed successfully")
            except Exception as e:
                self.logger.error(f"Background task failed with error: {str(e)}")
            finally:
                # Clean up any pending tasks
                pending = asyncio.all_tasks(loop)
                if pending:
                    # Cancel any remaining tasks
                    for task_ in pending:
                        task_.cancel()

                    # Allow tasks to finish cancellation
                    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

                loop.close()

        # Create and start a non-daemon thread that will run to completion
        # Using non-daemon thread ensures the task completes even if main thread exits
        t = threading.Thread(target=thread_target, args=(task,))
        t.daemon = False  # Non-daemon thread will keep program alive until it completes
        self.bg_tasks.append(t)
        t.start()
        return t

    # Alternative implementation that may be needed if your function creates many nested tasks
    def run_bg_task_advanced(self, task, *args, **kwargs):
        """
        Alternative implementation for complex async scenarios where the task creates
        nested asyncio tasks using create_task() and gather().

        This version ensures proper execution of nested tasks by maintaining the thread
        and its event loop throughout the lifetime of all child tasks.

        Args:
            task: A callable function that can be synchronous or asynchronous
            *args, **kwargs: Arguments to pass to the task
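
        Example (illustrative sketch; ``fetch_data`` is a hypothetical coroutine):
            app.run_bg_task_advanced(fetch_data, "https://example.com", retries=3)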
        """
        if not callable(task) and not asyncio.iscoroutine(task):
            self.logger.warning("Task is not callable or awaitable!")
            return None

        # Create a dedicated thread with its own event loop
        async def async_wrapper():
            try:
                if asyncio.iscoroutinefunction(task):
                    return await task(*args, **kwargs)
                elif asyncio.iscoroutine(task):
                    return await task
                else:
                    # Run in executor to avoid blocking
                    loop = asyncio.get_event_loop()
                    return await loop.run_in_executor(None, lambda: task(*args, **kwargs))
            except Exception as e:
                self.logger.error(f"Background task error: {str(e)}")
                raise

        def thread_target():
            # Create new event loop for this thread
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Run the task to completion with all its nested tasks
                loop.run_until_complete(async_wrapper())
            except Exception as e:
                self.logger.error(f"Background task thread failed: {str(e)}")
            finally:
                # Clean up any pending tasks that might still be running
                try:
                    pending = asyncio.all_tasks(loop)
                    if pending:
                        # Allow tasks time to clean up
                        loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
                except Exception:
                    pass

                loop.close()

        # Use a non-daemon thread so it will run to completion
        t = threading.Thread(target=thread_target, daemon=False)
        self.bg_tasks.append(t)
        t.start()
        return t

    # Helper method to wait for background tasks to complete (optional)
    def wait_for_bg_tasks(self, timeout=None):
        """
        Wait for all background tasks to complete.

        Args:
            timeout: Maximum time to wait (in seconds) for each remaining task.
                     None means wait indefinitely.

        Returns:
            bool: True if all tasks completed, False if timeout occurred
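
        Example (illustrative sketch; ``refresh`` is a hypothetical callable):
            app.run_bg_task(refresh)
            all_done = app.wait_for_bg_tasks(timeout=5)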
        """
        active_tasks = [t for t in self.bg_tasks if t.is_alive()]

        for task in active_tasks:
            task.join(timeout=timeout)
            if task.is_alive():
                return False

        return True

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, *args, request=None, running_function_coro=None, **kwargs):
        """
        Run a function with support for SSE streaming in both
        threaded and non-threaded contexts.
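
        Example (illustrative sketch; module/function names are hypothetical):
            api_payload = app.run(("CloudM", "Version"))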
        """
        if running_function_coro is None:
            mn, fn = args[0]
            if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
                kwargs["request"] = RequestData.from_dict(request)
                if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                    kwargs["request"].data = kwargs["request"].body = kwargs['data']
                    del kwargs['data']
                if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                           []):
                    kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                    del kwargs['form_data']

        # Create the coroutine
        coro = running_function_coro or self.a_run_any(*args, **kwargs)

        # Get or create an event loop
        try:
            loop = asyncio.get_event_loop()
            is_running = loop.is_running()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            is_running = False

        # If the loop is already running, run in a separate thread
        if is_running:
            # Create thread pool executor as needed
            if not hasattr(self.__class__, '_executor'):
                self.__class__._executor = ThreadPoolExecutor(max_workers=4)

            def run_in_new_thread():
                # Set up a new loop in this thread
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)

                try:
                    # Run the coroutine
                    return new_loop.run_until_complete(coro)
                finally:
                    new_loop.close()

            # Run in thread and get result
            thread_result = self.__class__._executor.submit(run_in_new_thread).result()

            # Handle streaming results from thread
            if isinstance(thread_result, dict) and thread_result.get("is_stream"):
                # Create a new SSE stream in the main thread
                async def stream_from_function():
                    # Re-run the function with direct async access
                    stream_result = await self.a_run_any(*args, **kwargs)

                    if (isinstance(stream_result, Result) and
                        getattr(stream_result.result, 'data_type', None) == "stream"):
                        # Get and forward data from the original generator
                        original_gen = stream_result.result.data.get("generator")
                        if inspect.isasyncgen(original_gen):
                            async for item in original_gen:
                                yield item

                # Return a new streaming Result
                return Result.stream(
                    stream_generator=stream_from_function(),
                    headers=thread_result.get("headers", {})
                )

            result = thread_result
        else:
            # Direct execution when loop is not running
            result = loop.run_until_complete(coro)

        # Process the final result
        if isinstance(result, Result):
            if 'debug' in self.id:
                result.print()
            if getattr(result.result, 'data_type', None) == "stream":
                return result
            return result.to_api_result().model_dump(mode='json')

        return result

    def loop_gard(self):
        if self.loop is None:
            self.loop = asyncio.get_event_loop()
        if self.loop.is_closed():
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
        return self.loop

    async def a_init_mod(self, mod_name, spec='app'):
        mod = self.save_load(mod_name, spec=spec)
        if hasattr(mod, "__initobj") and not mod.async_initialized:
            await mod
        return mod


    def load_mod(self, mod_name: str, mlm='I', **kwargs):

        action_list_helper = ['I (inplace load, dill on error, python)',
                              # 'C (copy py file to runtime dir)',
                              # 'S (save py file to dill)',
                              # 'CS (copy and save py file)',
                              # 'D (development mode, inplace load py file)'
                              ]
        action_list = {"I": lambda: self.inplace_load_instance(mod_name, **kwargs),
                       "C": lambda: self._copy_load(mod_name, **kwargs)
                       }

        try:
            if mlm in action_list:

                return action_list.get(mlm)()
            else:
                self.logger.critical(
                    f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
                raise ValueError(f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
        except ValueError as e:
            self.logger.warning(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except ImportError as e:
            self.logger.error(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except Exception as e:
            self.logger.critical(Style.RED(f"Error Loading Module '{mod_name}', with critical error :{e}"))
            print(Style.RED(f"Error Loading Module '{mod_name}'"))
            self.debug_rains(e)

        return Result.default_internal_error(info="info's in logs.")

    async def load_all_mods_in_file(self, working_dir="mods"):
        print(f"LOADING ALL MODS FROM FOLDER : {working_dir}")
        t0 = time.perf_counter()
        # Get the list of all modules
        module_list = self.get_all_mods(working_dir)
        open_modules = self.functions.keys()
        start_len = len(open_modules)

        for om in open_modules:
            if om in module_list:
                module_list.remove(om)

        tasks: set[Task] = set()

        _ = {tasks.add(asyncio.create_task(asyncio.to_thread(self.save_load, mod, 'app'))) for mod in module_list}
        for t in asyncio.as_completed(tasks):
            try:
                result = await t
                if hasattr(result, 'Name'):
                    print('Opened :', result.Name)
                elif hasattr(result, 'name'):
                    if hasattr(result, 'async_initialized'):
                        if not result.async_initialized:
                            async def _(result=result):  # bind the current result; the loop rebinds it each iteration
                                try:
                                    if asyncio.iscoroutine(result):
                                        await result
                                    if hasattr(result, 'Name'):
                                        print('Opened :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Opened :', result.name)
                                except Exception as e:
                                    self.debug_rains(e)
                                    if hasattr(result, 'Name'):
                                        print('Error opening :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Error opening :', result.name)
                            asyncio.create_task(_())
                        else:
                            print('Opened :', result.name)
                else:
                    print('Opened :', result)
            except Exception as e:
                self.logger.error(Style.RED(f"An Error occurred while opening all modules error: {str(e)}"))
                self.debug_rains(e)
        opened = len(self.functions.keys()) - start_len

        self.logger.info(f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s")
        return f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s"

    def get_all_mods(self, working_dir="mods", path_to="./runtime", use_wd=True):
        self.logger.info(f"collating all mods in working directory {working_dir}")

        pr = "_dev" if self.dev_modi else ""
        if working_dir == "mods" and use_wd:
            working_dir = f"{self.start_dir}/mods{pr}"
        elif use_wd:
            pass
        else:
            w_dir = self.id.replace(".", "_")
            working_dir = f"{path_to}/{w_dir}/mod_lib{pr}/"
        res = os.listdir(working_dir)

        self.logger.info(f"found : {len(res)} files")

        def do_helper(_mod):
            if "mainTool" in _mod:
                return False
            # if not _mod.endswith(".py"):
            #     return False
            if _mod.startswith("__"):
                return False
            if _mod.startswith("."):
                return False
            return not _mod.startswith("test_")

        def r_endings(word: str):
            if word.endswith(".py"):
                return word[:-3]
            return word

        mods_list = list(map(r_endings, filter(do_helper, res)))

        self.logger.info(f"found : {len(mods_list)} Modules")
        return mods_list

    def remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    def remove_mod(self, mod_name, spec='app', delete=True):
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return
        on_exit = self.functions[mod_name].get("on_exit")

        def helper():
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    self.exit_tasks.append(instance.on_exit)
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1
        for f in on_exit:
            try:
                f_, e = self.get_function((mod_name, f), state=True, specification=spec)
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        self.exit_tasks.append(f_)
                        o = None
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    async def a_remove_all_modules(self, delete=False):
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return
        on_exit = self.functions[mod_name].get("on_exit")

        def helper():
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    await instance.on_exit()
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1
        for f in on_exit:
            try:
                f_, e = self.get_function((mod_name, f), state=True, specification=spec)
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        o = await f_()
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    def exit(self, remove_all=True):
        if not self.alive:
            return
        if self.args_sto.debug:
            self.hide_console()
        self.disconnect()
        if remove_all:
            self.remove_all_modules()
        self.logger.info("Exiting ToolBox interface")
        self.alive = False
        self.called_exit = True, time.time()
        self.save_exit()
        try:
            self.config_fh.save_file_handler()
        except SystemExit:
            print("If u ar testing this is fine else ...")

        if hasattr(self, 'daemon_app'):
            import threading

            for thread in threading.enumerate()[::-1]:
                if thread.name == "MainThread":
                    continue
                try:
                    with Spinner(f"closing Thread {thread.name:^50}|", symbols="s", count_down=True,
                                 time_in_s=0.751 if not self.debug else 0.6):
                        thread.join(timeout=0.751 if not self.debug else 0.6)
                except TimeoutError as e:
                    self.logger.error(f"Timeout error on exit {thread.name} {str(e)}")
                    print(str(e), f"Timeout {thread.name}")
                except KeyboardInterrupt:
                    print("Unsave Exit")
                    break
        if hasattr(self, 'loop') and self.loop is not None:
            with Spinner("closing Event loop:", symbols="+"):
                self.loop.stop()

    async def a_exit(self):
        await self.a_remove_all_modules()
        results = await asyncio.gather(
            *[asyncio.create_task(f()) for f in self.exit_tasks if asyncio.iscoroutinefunction(f)])
        for result in results:
            self.print(f"Function On Exit result: {result}")
        self.exit(remove_all=False)

    def save_load(self, modname, spec='app'):
        self.logger.debug(f"Save load module {modname}")
        if not modname:
            self.logger.warning("no filename specified")
            return False
        try:
            return self.load_mod(modname, spec=spec)
        except ModuleNotFoundError as e:
            self.logger.error(Style.RED(f"Module {modname} not found"))
            self.debug_rains(e)

        return False

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs are forwarded to ``_get_function``:
            metadata (bool): also return the registered function dictionary
                stateless: ((function_data, function), 0)
                stateful: ((function_data, higher_order_function), 0)
            state (bool): request a stateful (instance-bound) function
            specification (str): instance specification, default 'app'
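
        Example (illustrative sketch; module/function names are hypothetical):
            func, err = app.get_function(("CloudM", "Version"), state=False)
            if err == 0:
                func()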
        """
        if isinstance(name, tuple):
            return self._get_function(None, as_str=name, **kwargs)
        else:
            return self._get_function(name, **kwargs)

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 404:
            mod = self.get_mod(modular_name)
            if hasattr(mod, "async_initialized") and not mod.async_initialized:
                await mod
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 404:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == 300:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            return await self.a_fuction_runner(function, function_data, args, kwargs, t0)
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 1 or error_code == 3 or error_code == 400:
            self.get_mod(modular_name)
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 2:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == -1:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            raise ValueError(f"Fuction {function_name} is Async use a_run_any")
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_a_from_sync(self, function, *args, **kwargs):
        # Initialize self.loop if not already set.
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                self.loop = asyncio.new_event_loop()

        # If the loop is running, offload the coroutine to a new thread.
        if self.loop.is_running():
            result_future = Future()

            def run_in_new_loop():
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)
                try:
                    result = new_loop.run_until_complete(function(*args, **kwargs))
                    result_future.set_result(result)
                except Exception as e:
                    result_future.set_exception(e)
                finally:
                    new_loop.close()

            thread = threading.Thread(target=run_in_new_loop)
            thread.start()
            thread.join()  # Block until the thread completes.
            return result_future.result()
        else:
            # If the loop is not running, schedule and run the coroutine directly.
            future = self.loop.create_task(function(*args, **kwargs))
            return self.loop.run_until_complete(future)
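
    # Usage sketch (illustrative; runs an async API from synchronous code,
    # module/function names are hypothetical):
    #
    #     result = app.run_a_from_sync(app.a_run_any, ("CloudM", "Version"))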

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        if_self_state = 1 if 'self' in parameters else 0

        try:
            if len(parameters) == 0:
                res = function()
            elif len(parameters) == len(args) + if_self_state:
                res = function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = function(**kwargs)
            else:
                res = function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)
            self.print(f"! Function ERROR: in {modular_name}.{function_name} ")



        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        if_self_state = 1 if 'self' in parameters else 0

        try:
            if len(parameters) == 0:
                res = await function()
            elif len(parameters) == len(args) + if_self_state:
                res = await function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = await function(**kwargs)
            else:
                res = await function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)

        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None,
                       args_=None,
                       kwargs_=None, method="GET",
                       *args, **kwargs):
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        modular_name = mod_function_name

        if isinstance(mod_function_name, str) and isinstance(function_name, str):
            mod_function_name = (mod_function_name, function_name)

        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value

        r = await self.session.fetch(f"/api/{modular_name}/{function_name}{'?' + args_ if args_ is not None else ''}",
                                     data=kwargs, method=method)
        try:
            if not r:
                print("§ Session server Offline!", self.session.base)
                return Result.default_internal_error(info="Session fetch failed").as_dict()

            content_type = r.headers.get('Content-Type', '').lower()
            raw = await r.read()
            encoding = r.get_encoding() or 'utf-8'
            text = raw.decode(encoding, errors='ignore')

            # Attempt JSON
            if 'application/json' in content_type:
                try:
                    return await r.json()
                except Exception as e:
                    print("⚠ JSON decode error:", e)

            # Attempt YAML
            if 'yaml' in content_type or text.strip().startswith('---'):
                try:
                    import yaml
                    return yaml.safe_load(text)
                except Exception as e:
                    print("⚠ YAML decode error:", e)

            # Attempt XML
            if 'xml' in content_type or text.strip().startswith('<?xml'):
                try:
                    import xmltodict
                    return xmltodict.parse(text)
                except Exception as e:
                    print("⚠ XML decode error:", e)

            # Fallback: return plain text
            return Result.default_internal_error(data={'raw_text': text, 'content_type': content_type}).as_dict()

        except Exception as e:
            print("❌ Fatal error during API call:", e)
            return Result.default_internal_error(str(e)).as_dict()
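
    # Usage sketch (illustrative; assumes a connected session and hypothetical
    # endpoint names):
    #
    #     data = await app.run_http("CloudM", "Version", method="GET")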

    def run_local(self, *args, **kwargs):
        return self.run_any(*args, **kwargs)

    async def a_run_local(self, *args, **kwargs):
        return await self.a_run_any(*args, **kwargs)

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = self.run_function(mod_function_name,
                                        tb_run_function_with_state=tb_run_function_with_state,
                                        tb_run_with_specification=tb_run_with_specification,
                                        args_=args, kwargs_=kwargs).as_result()
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.log(show_data=False)

        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res
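
    # Usage sketch (illustrative; module/function names are hypothetical):
    #
    #     version = app.run_any(("CloudM", "Version"))
    #     res = app.run_any(("CloudM", "Version"), get_results=True)  # Result object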

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = await self.a_run_function(mod_function_name,
                                                tb_run_function_with_state=tb_run_function_with_state,
                                                tb_run_with_specification=tb_run_with_specification,
                                                args_=args, kwargs_=kwargs)
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            if isinstance(res, Result):
                res.print()
                res.log(show_data=False)
            else:
                self.logger.debug(res)
        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res


    def web_context(self):
        if self._web_context is None:
            try:
                with open("./dist/helper.html", encoding="utf-8") as f:
                    self._web_context = f.read()
            except Exception as e:
                self.logger.error(f"Could not load web context: {e}")
                self._web_context = "<div><h1>Web Context not found</h1></div>"
        return self._web_context

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        if spec != "app":
            self.print(f"Getting Module {name} spec: {spec}")
        if name not in self.functions:
            mod = self.save_load(name, spec=spec)
            if mod is False or (isinstance(mod, Result) and mod.is_error()):
                self.logger.warning(f"Could not find {name} in {list(self.functions.keys())}")
                raise ValueError(f"Could not find {name} in {list(self.functions.keys())} pleas install the module, or its posibly broken use --debug for infos")
        # private = self.functions[name].get(f"{spec}_private")
        # if private is not None:
        #     if private and spec != 'app':
        #         raise ValueError("Module is private")
        if name not in self.functions:
            self.logger.warning(f"Module '{name}' is not found")
            return None
        instance = self.functions[name].get(f"{spec}_instance")
        if instance is None:
            return self.load_mod(name, spec=spec)
        return self.functions[name].get(f"{spec}_instance")
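
    # Usage sketch (illustrative; the module name is hypothetical):
    #
    #     mod = app.get_mod("CloudM")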

    def print(self, text, *args, **kwargs):
        # self.logger.info(f"Output : {text}")
        if 'live' in self.id:
            return
        if self.sprint(None):
            print(Style.CYAN(f"System${self.id}:"), end=" ")
        print(text, *args, **kwargs)

    def sprint(self, text, *args, **kwargs):
        if text is None:
            return True
        if 'live' in self.id:
            return
        # self.logger.info(f"Output : {text}")
        print(Style.CYAN(f"System${self.id}:"), end=" ")
        if isinstance(text, str) and kwargs == {} and text:
            stram_print(text + ' '.join(args))
            print()
        else:
            print(text, *args, **kwargs)

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        self.remove_mod(mod_name, delete=True)
        if mod_name not in self.modules:
            self.logger.warning(f"Module '{mod_name}' is not found")
            return
        if hasattr(self.modules[mod_name], 'reload_save') and self.modules[mod_name].reload_save:
            def reexecute_module_code(x):
                return x
        else:
            def reexecute_module_code(module_name):
                if isinstance(module_name, str):
                    module = import_module(module_name)
                else:
                    module = module_name
                # Get the source code of the module
                try:
                    source = inspect.getsource(module)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    return module
                # Compile the source code
                try:
                    code = compile(source, module.__file__, 'exec')
                    # Execute the code in the module's namespace
                    exec(code, module.__dict__)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    pass
                return module

        if not is_file:
            mods = self.get_all_mods("./mods/" + mod_name)
            def recursive_reload(package_name):
                package = import_module(package_name)

                # First, reload all submodules
                if hasattr(package, '__path__'):
                    for _finder, name, _ispkg in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
                        try:
                            mod = import_module(name)
                            reexecute_module_code(mod)
                            reload(mod)
                        except Exception as e:
                            print(f"Error reloading module {name}: {e}")
                            break

                # Finally, reload the package itself
                reexecute_module_code(package)
                reload(package)

            for mod in mods:
                if mod.endswith(".txt") or mod.endswith(".yaml"):
                    continue
                try:
                    recursive_reload(loc + mod_name + '.' + mod)
                    self.print(f"Reloaded {mod_name}.{mod}")
                except ImportError:
                    self.print(f"Could not load {mod_name}.{mod}")
        reexecute_module_code(self.modules[mod_name])
        if mod_name in self.functions:
            if "on_exit" in self.functions[mod_name]:
                self.functions[mod_name]["on_exit"] = []
            if "on_start" in self.functions[mod_name]:
                self.functions[mod_name]["on_start"] = []
        self.inplace_load_instance(mod_name, spec=spec, mfo=reload(self.modules[mod_name]) if mod_name in self.modules else None)

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None, on_reload=None):
        if path_name is None:
            path_name = mod_name
        is_file = os.path.isfile(self.start_dir + '/mods/' + path_name + '.py')
        import watchfiles
        def helper():
            paths = f'mods/{path_name}' + ('.py' if is_file else '')
            self.print(f'Watching Path: {paths}')
            for changes in watchfiles.watch(paths):
                if not changes:
                    continue
                self.reload_mod(mod_name, spec, is_file, loc)
                if on_reload:
                    on_reload()

        if not use_thread:
            helper()
        else:
            threading.Thread(target=helper, daemon=True).start()
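
    # Usage sketch (illustrative; watches the module source and hot-reloads it
    # on change; the module name is hypothetical):
    #
    #     app.watch_mod("CloudM", on_reload=lambda: print("CloudM reloaded"))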

    def _register_function(self, module_name, func_name, data):
        if module_name not in self.functions:
            self.functions[module_name] = {}
        if func_name in self.functions[module_name]:
            self.print(f"Overriding function {func_name} from {module_name}", end="\r")
        self.functions[module_name][func_name] = data

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial: bool=False,
                          exit_f: bool=False,
                          test: bool=True,
                          samples:list[dict[str, Any]] | None=None,
                          state:bool | None=None,
                          pre_compute:Callable | None=None,
                          post_compute:Callable[[], Result] | None=None,
                          api_methods:list[str] | None=None,
                          memory_cache: bool=False,
                          file_cache: bool=False,
                          request_as_kwarg: bool=False,
                          row: bool=False,
                          memory_cache_max_size:int=100,
                          memory_cache_ttl:int=300):

        if isinstance(type_, Enum):
            type_ = type_.value

        if memory_cache and file_cache:
            raise ValueError("Don't use both cash at the same time for the same fuction")

        use_cache = memory_cache or file_cache
        cache = {}
        if file_cache:
            cache = FileCache(folder=self.data_dir + f'\\cache\\{mod_name}\\',
                              filename=self.data_dir + f'\\cache\\{mod_name}\\{name}cache.db')
        if memory_cache:
            cache = MemoryCache(maxsize=memory_cache_max_size, ttl=memory_cache_ttl)

        version = self.version if version is None else self.version + ':' + version

        def a_additional_process(func):

            async def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = await pre_compute(*args, **kwargs)
                if asyncio.iscoroutinefunction(func):
                    result = await func(*args, **kwargs)
                else:
                    result = func(*args, **kwargs)
                if post_compute is not None:
                    result = await post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            async def wrapper(*args, **kwargs):

                if not use_cache:
                    return await executor(*args, **kwargs)

                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = await executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def additional_process(func):

            def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = pre_compute(*args, **kwargs)
                # Both branches were identical; this synchronous path only
                # receives non-coroutine functions (see the decorator below).
                result = func(*args, **kwargs)
                if post_compute is not None:
                    result = post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            def wrapper(*args, **kwargs):

                if not use_cache:
                    return executor(*args, **kwargs)

                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def decorator(func):
            sig = signature(func)
            params = list(sig.parameters)
            module_name = mod_name if mod_name else func.__module__.split('.')[-1]
            func_name = name if name else func.__name__
            if func_name == 'on_start':
                func_name = 'on_startup'
            if func_name == 'on_exit':
                func_name = 'on_close'
            if api or pre_compute is not None or post_compute is not None or memory_cache or file_cache:
                if asyncio.iscoroutinefunction(func):
                    func = a_additional_process(func)
                else:
                    func = additional_process(func)
            if api and str(sig.return_annotation) == 'Result':
                raise ValueError(f"Fuction {module_name}.{func_name} registered as "
                                 f"Api fuction but uses {str(sig.return_annotation)}\n"
                                 f"Please change the sig from ..)-> Result to ..)-> ApiResult")
            data = {
                "type": type_,
                "module_name": module_name,
                "func_name": func_name,
                "level": level,
                "restrict_in_virtual_mode": restrict_in_virtual_mode,
                "func": func,
                "api": api,
                "helper": helper,
                "version": version,
                "initial": initial,
                "exit_f": exit_f,
                "api_methods": api_methods if api_methods is not None else ["AUTO"],
                "__module__": func.__module__,
                "signature": sig,
                "params": params,
                "row": row,
                "state": (
                    False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
                "do_test": test,
                "samples": samples,
                "request_as_kwarg": request_as_kwarg,

            }
            self._register_function(module_name, func_name, data)
            if exit_f:
                if "on_exit" not in self.functions[module_name]:
                    self.functions[module_name]["on_exit"] = []
                self.functions[module_name]["on_exit"].append(func_name)
            if initial:
                if "on_start" not in self.functions[module_name]:
                    self.functions[module_name]["on_start"] = []
                self.functions[module_name]["on_start"].append(func_name)

            return func

        decorator.tb_init = True

        return decorator

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str | None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           request_as_kwarg: bool = False,
           row: bool = False,
           state: bool | None = None,
           level: int = -1,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list | dict | None = None,
           interface: ToolBoxInterfaces | str | None = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when the function is called from the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return the raw result without wrapping it in a Result type. Defaults to False, meaning results are wrapped.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): HTTP methods for the API endpoint: GET, POST, PUT or DELETE. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise).

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      request_as_kwarg=request_as_kwarg,
                                      row=row,
                                      api_methods=api_methods,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)

    def save_autocompletion_dict(self):
        autocompletion_dict = {}
        for module_name, _module in self.functions.items():
            data = {}
            for function_name, function_data in self.functions[module_name].items():
                if not isinstance(function_data, dict):
                    continue
                data[function_name] = {arg: None for arg in
                                       function_data.get("params", [])}
                if len(data[function_name].keys()) == 0:
                    data[function_name] = None
            autocompletion_dict[module_name] = data if len(data.keys()) > 0 else None
        self.config_fh.add_to_save_file_handler("auto~~~~~~", str(autocompletion_dict))

    def get_autocompletion_dict(self):
        return self.config_fh.get_file_handler("auto~~~~~~")

    def save_registry_as_enums(self, directory: str, filename: str):
        # Create the directory if it does not exist
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Build the file path
        filepath = os.path.join(directory, filename)

        # Generate the enum classes as strings
        enum_classes = [f'"""Automatically generated by ToolBox v = {self.version}"""'
                        f'\nfrom enum import Enum\nfrom dataclasses import dataclass'
                        f'\n\n\n']
        for module, functions in self.functions.items():
            if module.startswith("APP_INSTANCE"):
                continue
            class_name = module
            enum_members = "\n    ".join(
                [
                    f"{func_name.upper().replace('-', '')}"
                    f" = '{func_name}' "
                    f"# Input: ({fuction_data['params'] if isinstance(fuction_data, dict) else ''}),"
                    f" Output: {fuction_data['signature'].return_annotation if isinstance(fuction_data, dict) else 'None'}"
                    for func_name, fuction_data in functions.items()])
            enum_class = (f'@dataclass\nclass {class_name.upper().replace(".", "_").replace("-", "")}(Enum):'
                          f"\n    NAME = '{class_name}'\n    {enum_members}")
            enum_classes.append(enum_class)

        # Write the enums to the file
        data = "\n\n\n".join(enum_classes)
        if len(data) < 12:
            raise ValueError(
                "Invalid enums: content would be lost. Please delete the file yourself "
                "(utils/system/all_functions_enums.py) or add more new functions :}")
        with open(filepath, 'w') as file:
            file.write(data)

        print(Style.Bold(Style.BLUE(f"Enums saved to {filepath}")))
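
Example (a minimal sketch of exporting the registry; get_app is assumed to be the application factory exposed by toolboxv2):

from toolboxv2 import get_app  # assumed entry point

app = get_app()
# Writes one Enum class per registered module into the target file
app.save_registry_as_enums("utils/system", "all_functions_enums.py")
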
disconnect(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def disconnect(*args, **kwargs):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function:
    metadata: return the registered function dictionary
        stateless: (function_data, None), 0
        stateful: (function_data, higher_order_function), 0
    state: boolean
    specification: str, default "app"

Source code in toolboxv2/utils/toolbox.py
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
    if isinstance(name, tuple):
        return self._get_function(None, as_str=name, **kwargs)
    else:
        return self._get_function(name, **kwargs)
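
Example (a minimal sketch; `app` is an initialized App instance and the module/function names are placeholders):

# Lookup by (module, function) tuple; routed through _get_function via as_str
meta = app.get_function(("MyMod", "my_func"), metadata=True)

# Stateful lookup bound to a specification
bound = app.get_function(("MyMod", "my_func"), state=True, specification="app")
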
hide_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
215
216
217
@staticmethod
def hide_console(*args, **kwargs):
    """proxi attr"""
run(*args, request=None, running_function_coro=None, **kwargs)

Run a function with support for SSE streaming in both threaded and non-threaded contexts.

Source code in toolboxv2/utils/toolbox.py
def run(self, *args, request=None, running_function_coro=None, **kwargs):
    """
    Run a function with support for SSE streaming in both
    threaded and non-threaded contexts.
    """
    if running_function_coro is None:
        mn, fn = args[0]
        if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
            kwargs["request"] = RequestData.from_dict(request)
            if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                kwargs["request"].data = kwargs["request"].body = kwargs['data']
                del kwargs['data']
            if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                       []):
                kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                del kwargs['form_data']

    # Create the coroutine
    coro = running_function_coro or self.a_run_any(*args, **kwargs)

    # Get or create an event loop
    try:
        loop = asyncio.get_event_loop()
        is_running = loop.is_running()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        is_running = False

    # If the loop is already running, run in a separate thread
    if is_running:
        # Create thread pool executor as needed
        if not hasattr(self.__class__, '_executor'):
            self.__class__._executor = ThreadPoolExecutor(max_workers=4)

        def run_in_new_thread():
            # Set up a new loop in this thread
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)

            try:
                # Run the coroutine
                return new_loop.run_until_complete(coro)
            finally:
                new_loop.close()

        # Run in thread and get result
        thread_result = self.__class__._executor.submit(run_in_new_thread).result()

        # Handle streaming results from thread
        if isinstance(thread_result, dict) and thread_result.get("is_stream"):
            # Create a new SSE stream in the main thread
            async def stream_from_function():
                # Re-run the function with direct async access
                stream_result = await self.a_run_any(*args, **kwargs)

                if (isinstance(stream_result, Result) and
                    getattr(stream_result.result, 'data_type', None) == "stream"):
                    # Get and forward data from the original generator
                    original_gen = stream_result.result.data.get("generator")
                    if inspect.isasyncgen(original_gen):
                        async for item in original_gen:
                            yield item

            # Return a new streaming Result
            return Result.stream(
                stream_generator=stream_from_function(),
                headers=thread_result.get("headers", {})
            )

        result = thread_result
    else:
        # Direct execution when loop is not running
        result = loop.run_until_complete(coro)

    # Process the final result
    if isinstance(result, Result):
        if 'debug' in self.id:
            result.print()
        if getattr(result.result, 'data_type', None) == "stream":
            return result
        return result.to_api_result().model_dump(mode='json')

    return result
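
Example (a hedged sketch; module and function names are placeholders):

# Works inside or outside a running event loop; streaming Results
# (data_type == "stream") are returned as-is, everything else is
# serialized via to_api_result().model_dump(mode='json').
payload = app.run(("MyMod", "my_func"), text="hello")
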
run_bg_task(task)

Run a task in the background that will properly handle nested asyncio operations. This implementation ensures that asyncio.create_task() and asyncio.gather() work correctly within the background task.

Parameters:

    task: A callable function that can be synchronous or asynchronous (required)
Source code in toolboxv2/utils/toolbox.py
def run_bg_task(self, task):
    """
    Run a task in the background that will properly handle nested asyncio operations.
    This implementation ensures that asyncio.create_task() and asyncio.gather() work
    correctly within the background task.

    Args:
        task: A callable function that can be synchronous or asynchronous
    """
    if not callable(task):
        self.logger.warning("Task is not callable!")
        return None

    # Function that will run in a separate thread with its own event loop
    def thread_target(task_):
        # Create a new event loop for this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Determine how to run the task based on its type
            if asyncio.iscoroutinefunction(task_):
                # If it's an async function, run it directly
                loop.run_until_complete(task_())
            elif asyncio.iscoroutine(task_):
                # If it's already a coroutine object
                loop.run_until_complete(task_)
            else:
                # If it's a synchronous function that might create async tasks internally
                async def wrapper():
                    # Run potentially blocking synchronous code in an executor
                    return await loop.run_in_executor(None, task_)

                loop.run_until_complete(wrapper())

            self.logger.debug("Background task completed successfully")
        except Exception as e:
            self.logger.error(f"Background task failed with error: {str(e)}")
        finally:
            # Clean up any pending tasks
            pending = asyncio.all_tasks(loop)
            if pending:
                # Cancel any remaining tasks
                for task_ in pending:
                    task_.cancel()

                # Allow tasks to finish cancellation
                loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

            loop.close()

    # Create and start a non-daemon thread that will run to completion
    # Using non-daemon thread ensures the task completes even if main thread exits
    t = threading.Thread(target=thread_target, args=(task,))
    t.daemon = False  # Non-daemon thread will keep program alive until it completes
    self.bg_tasks.append(t)
    t.start()
    return t
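
Example (a minimal sketch with an async task; `app` and `cleanup_job` are placeholders):

import asyncio

async def cleanup_job():
    await asyncio.sleep(1)

worker = app.run_bg_task(cleanup_job)  # returns the non-daemon worker thread
worker.join()                          # optional: block until the task finishes
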
run_bg_task_advanced(task, *args, **kwargs)

Alternative implementation for complex async scenarios where the task creates nested asyncio tasks using create_task() and gather().

This version ensures proper execution of nested tasks by maintaining the thread and its event loop throughout the lifetime of all child tasks.

Parameters:

    task: A callable function that can be synchronous or asynchronous (required)
    *args, **kwargs: Arguments to pass to the task
Source code in toolboxv2/utils/toolbox.py
def run_bg_task_advanced(self, task, *args, **kwargs):
    """
    Alternative implementation for complex async scenarios where the task creates
    nested asyncio tasks using create_task() and gather().

    This version ensures proper execution of nested tasks by maintaining the thread
    and its event loop throughout the lifetime of all child tasks.

    Args:
        task: A callable function that can be synchronous or asynchronous
        *args, **kwargs: Arguments to pass to the task
    """
    if not callable(task):
        self.logger.warning("Task is not callable!")
        return None

    # Create a dedicated thread with its own event loop
    async def async_wrapper():
        try:
            if asyncio.iscoroutinefunction(task):
                return await task(*args, **kwargs)
            elif asyncio.iscoroutine(task):
                return await task
            else:
                # Run in executor to avoid blocking
                loop = asyncio.get_event_loop()
                return await loop.run_in_executor(None, lambda: task(*args, **kwargs))
        except Exception as e:
            self.logger.error(f"Background task error: {str(e)}")
            raise

    def thread_target():
        # Create new event loop for this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Run the task to completion with all its nested tasks
            loop.run_until_complete(async_wrapper())
        except Exception as e:
            self.logger.error(f"Background task thread failed: {str(e)}")
        finally:
            # Clean up any pending tasks that might still be running
            try:
                pending = asyncio.all_tasks(loop)
                if pending:
                    # Allow tasks time to clean up
                    loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
            except Exception:
                pass

            loop.close()

    # Use a non-daemon thread so it will run to completion
    t = threading.Thread(target=thread_target, daemon=False)
    self.bg_tasks.append(t)
    t.start()
    return t
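
Example (a sketch of a task that fans out into nested asyncio tasks; `fan_out` is a placeholder):

import asyncio

async def fan_out(n):
    # create_task/gather work here because the thread's event loop
    # stays alive until every child task completes
    tasks = [asyncio.create_task(asyncio.sleep(0.1)) for _ in range(n)]
    await asyncio.gather(*tasks)

app.run_bg_task_advanced(fan_out, 5)
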
show_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
@staticmethod
def show_console(*args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, request_as_kwarg=False, row=False, state=None, level=-1, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

    name (str, default None): The name to register the function under. Defaults to the function's own name.
    mod_name (str, default ''): The name of the module the function belongs to.
    helper (str, default ''): A helper string providing additional information about the function.
    version (str or None, default None): The version of the function or module.
    test (bool, default True): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, default False): Flag to restrict the function in virtual mode.
    api (bool, default False): Flag to indicate if the function is part of an API.
    initial (bool, default False): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, default False): Flag to indicate if the function should be executed at exit.
    test_only (bool, default False): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, default False): Flag to enable memory caching for the function.
    request_as_kwarg (bool, default False): Flag to pass the request object as a keyword argument when the function is called from the API.
    file_cache (bool, default False): Flag to enable file caching for the function.
    row (bool, default False): Whether to return the raw result without wrapping it in a Result type.
    state (bool or None, default None): Flag to indicate if the function maintains state.
    level (int, default -1): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, default 100): Maximum size of the memory cache.
    memory_cache_ttl (int, default 300): Time-to-live for the memory cache entries.
    samples (list or dict or None, default None): Samples or examples of function usage.
    interface (str, default None): The interface type for the function.
    pre_compute (callable, default None): A function to be called before the main function.
    post_compute (callable, default None): A function to be called after the main function.
    api_methods (list[str], default None): HTTP methods for the API endpoint: GET, POST, PUT or DELETE. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise).

Returns:

    function: The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/toolbox.py
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str | None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       request_as_kwarg: bool = False,
       row: bool = False,
       state: bool | None = None,
       level: int = -1,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list | dict | None = None,
       interface: ToolBoxInterfaces | str | None = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when the function is called from the API.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): Whether to return the raw result without wrapping it in a Result type. Defaults to False, meaning results are wrapped.
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): HTTP methods for the API endpoint: GET, POST, PUT or DELETE. Defaults to ["AUTO"] (GET if the function has no parameters, POST otherwise).

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  request_as_kwarg=request_as_kwarg,
                                  row=row,
                                  api_methods=api_methods,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
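
Example (an illustrative sketch; the module and function names are invented and get_app is assumed to be the application factory):

from toolboxv2 import get_app

app = get_app()

@app.tb(mod_name="demo", api=True, memory_cache=True)
def greet(name: str = "world"):
    # Plain return values are wrapped in Result.ok(...) unless row=True;
    # with api=True the wrapper converts them via to_api_result()
    return f"Hello {name}"
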
wait_for_bg_tasks(timeout=None)

Wait for all background tasks to complete.

Parameters:

    timeout: Maximum time to wait (in seconds) for all tasks to complete. None means wait indefinitely. (default: None)

Returns:

    bool: True if all tasks completed, False if a timeout occurred

Source code in toolboxv2/utils/toolbox.py
def wait_for_bg_tasks(self, timeout=None):
    """
    Wait for all background tasks to complete.

    Args:
        timeout: Maximum time to wait (in seconds) for all tasks to complete.
                 None means wait indefinitely.

    Returns:
        bool: True if all tasks completed, False if timeout occurred
    """
    active_tasks = [t for t in self.bg_tasks if t.is_alive()]

    for task in active_tasks:
        task.join(timeout=timeout)
        if task.is_alive():
            return False

    return True
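
Example (a minimal sketch):

app.run_bg_task(lambda: print("flushing"))
if not app.wait_for_bg_tasks(timeout=10):
    app.logger.warning("Background tasks still running after 10s")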

toolboxv2.show_console(show=True)

Source code in toolboxv2/utils/extras/show_and_hide_console.py
def show_console(show=True):
    """Shows or hides the console window (Windows only)."""
    global TBRUNNER_console_viabel
    try:
        if show and not TBRUNNER_console_viabel:
            # Show console
            ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 4)
            TBRUNNER_console_viabel = True
            return True
        elif not show and TBRUNNER_console_viabel:
            # Hide console
            ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
            TBRUNNER_console_viabel = False
            return True
    except Exception:
        print(f"Could not show_console {show=}")
        return False
    return False
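
Example (a minimal sketch; Windows only, since the implementation relies on ctypes.windll):

from toolboxv2 import show_console

show_console(False)  # hide the console window
show_console(True)   # bring it back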

Logging

toolboxv2.get_logger()

Source code in toolboxv2/utils/system/tb_logger.py
def get_logger() -> logging.Logger:
    return logging.getLogger(loggerNameOfToolboxv2)

toolboxv2.setup_logging(level, name=loggerNameOfToolboxv2, online_level=None, is_online=False, file_level=None, interminal=False, logs_directory='../logs', app_name='main')

Source code in toolboxv2/utils/system/tb_logger.py
def setup_logging(level: int, name=loggerNameOfToolboxv2, online_level=None, is_online=False, file_level=None,
                  interminal=False, logs_directory="../logs", app_name="main"):
    global loggerNameOfToolboxv2

    if not online_level:
        online_level = level

    if not file_level:
        file_level = level

    if not os.path.exists(logs_directory):
        os.makedirs(logs_directory, exist_ok=True)
    if not os.path.exists(logs_directory + "/Logs.info"):
        open(f"{logs_directory}/Logs.info", "a").close()

    loggerNameOfToolboxv2 = name

    available_log_levels = [logging.CRITICAL, logging.FATAL, logging.ERROR, logging.WARNING, logging.WARN, logging.INFO,
                            logging.DEBUG, logging.NOTSET]

    if level not in available_log_levels:
        raise ValueError(f"level must be one of {available_log_levels}, but logging level is {level}")

    if online_level not in available_log_levels:
        raise ValueError(f"online_level must be one of {available_log_levels}, but logging level is {online_level}")

    if file_level not in available_log_levels:
        raise ValueError(f"file_level must be one of {available_log_levels}, but logging level is {file_level}")

    log_date = datetime.datetime.today().strftime('%Y-%m-%d')
    log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
    log_level_index = log_levels.index(logging.getLevelName(level))

    filename = f"Logs-{name}-{log_date}-{log_levels[log_level_index]}"
    log_filename = f"{logs_directory}/{filename}.log"

    log_info_data = {
        filename: 0,
        "H": "localhost",
        "P": 62435
    }

    with open(f"{logs_directory}/Logs.info") as li:
        log_info_data_str = li.read()
        try:
            log_info_data = eval(log_info_data_str)
        except SyntaxError:
            if log_info_data_str:
                print(Style.RED(Style.Bold("Could not parse log info data")))

        if filename not in log_info_data:
            log_info_data[filename] = 0

        if not os.path.exists(log_filename):
            log_info_data[filename] = 0
            print("new log file")

        if os.path.exists(log_filename):
            log_info_data[filename] += 1

            while os.path.exists(f"{logs_directory}/{filename}#{log_info_data[filename]}.log"):
                log_info_data[filename] += 1

            try:
                os.rename(log_filename,
                          f"{logs_directory}/{filename}#{log_info_data[filename]}.log")
            except PermissionError:
                print(Style.YELLOW(Style.Bold(f"Could not rename log file appending on {filename}")))

    with open(f"{logs_directory}/Logs.info", "w") as li:
        if len(log_info_data.keys()) >= 7:
            log_info_data = {
                filename: log_info_data[filename],
                "H": log_info_data["H"],
                "P": log_info_data["P"]
            }
        li.write(str(log_info_data))

    try:
        with open(log_filename, "a"):
            pass
    except OSError:
        log_filename = f"{logs_directory}/Logs-Test-{log_date}-{log_levels[log_level_index]}.log"
        with open(log_filename, "a"):
            pass

    logger = logging.getLogger(name)

    logger.setLevel(level)
    # Prevent logger from propagating to parent loggers
    logger.propagate = False

    terminal_format = f"{app_name} %(asctime)s %(levelname)s %(name)s - %(message)s"
    file_format = f"{app_name} %(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s:%(lineno)d - %(message)s"

    # Configure handlers
    handlers = []

    # File handler (always added)
    file_handler = logging.FileHandler(log_filename)
    file_handler.setFormatter(logging.Formatter(file_format))
    file_handler.setLevel(file_level)
    handlers.append(file_handler)

    # Terminal handler (if requested)
    if interminal:
        terminal_handler = logging.StreamHandler()
        terminal_handler.setFormatter(logging.Formatter(terminal_format))
        terminal_handler.setLevel(level)
        handlers.append(terminal_handler)

    # Socket handler (if requested)
    if is_online:
        socket_handler = SocketHandler(log_info_data["H"], log_info_data["P"])
        socket_handler.setFormatter(logging.Formatter(file_format))
        socket_handler.setLevel(online_level)
        handlers.append(socket_handler)

    # Add all handlers to logger
    for handler in handlers:
        logger.addHandler(handler)

    return logger, filename
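
Example (a minimal sketch):

import logging
from toolboxv2 import setup_logging, get_logger

logger, log_file = setup_logging(logging.INFO, interminal=True, logs_directory="./logs")
get_logger().info("toolboxv2 logging configured")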

Styling & Console Output

toolboxv2.Style

Source code in toolboxv2/utils/extras/Style.py
class Style:
    _END = '\33[0m'
    _BLACK = '\33[30m'
    _RED = '\33[31m'
    _GREEN = '\33[32m'
    _YELLOW = '\33[33m'
    _BLUE = '\33[34m'
    _MAGENTA = '\33[35m'
    _CYAN = '\33[36m'
    _WHITE = '\33[37m'

    _Bold = '\33[1m'
    _ITALIC = '\33[3m'
    _Underline = '\33[4m'
    _BLINK = '\33[5m'
    _BLINK2 = '\33[6m'
    _Reversed = '\33[7m'

    _BLACKBG = '\33[40m'
    _REDBG = '\33[41m'
    _GREENBG = '\33[42m'
    _YELLOWBG = '\33[43m'
    _BLUEBG = '\33[44m'
    _VIOLETBG = '\33[45m'
    _BEIGEBG = '\33[46m'
    _WHITEBG = '\33[47m'

    _GREY = '\33[90m'
    _RED2 = '\33[91m'
    _GREEN2 = '\33[92m'
    _YELLOW2 = '\33[93m'
    _BLUE2 = '\33[94m'
    _VIOLET2 = '\33[95m'
    _BEIGE2 = '\33[96m'
    _WHITE2 = '\33[97m'

    _GREYBG = '\33[100m'
    _REDBG2 = '\33[101m'
    _GREENBG2 = '\33[102m'
    _YELLOWBG2 = '\33[103m'
    _BLUEBG2 = '\33[104m'
    _VIOLETBG2 = '\33[105m'
    _BEIGEBG2 = '\33[106m'
    _WHITEBG2 = '\33[107m'

    style_dic = {
        "END": _END,
        "BLACK": _BLACK,
        "RED": _RED,
        "GREEN": _GREEN,
        "YELLOW": _YELLOW,
        "BLUE": _BLUE,
        "MAGENTA": _MAGENTA,
        "CYAN": _CYAN,
        "WHITE": _WHITE,
        "Bold": _Bold,
        "Underline": _Underline,
        "Reversed": _Reversed,

        "ITALIC": _ITALIC,
        "BLINK": _BLINK,
        "BLINK2": _BLINK2,
        "BLACKBG": _BLACKBG,
        "REDBG": _REDBG,
        "GREENBG": _GREENBG,
        "YELLOWBG": _YELLOWBG,
        "BLUEBG": _BLUEBG,
        "VIOLETBG": _VIOLETBG,
        "BEIGEBG": _BEIGEBG,
        "WHITEBG": _WHITEBG,
        "GREY": _GREY,
        "RED2": _RED2,
        "GREEN2": _GREEN2,
        "YELLOW2": _YELLOW2,
        "BLUE2": _BLUE2,
        "VIOLET2": _VIOLET2,
        "BEIGE2": _BEIGE2,
        "WHITE2": _WHITE2,
        "GREYBG": _GREYBG,
        "REDBG2": _REDBG2,
        "GREENBG2": _GREENBG2,
        "YELLOWBG2": _YELLOWBG2,
        "BLUEBG2": _BLUEBG2,
        "VIOLETBG2": _VIOLETBG2,
        "BEIGEBG2": _BEIGEBG2,
        "WHITEBG2": _WHITEBG2,

    }

    @staticmethod
    def END_():
        print(Style._END)

    @staticmethod
    def GREEN_():
        print(Style._GREEN)

    @staticmethod
    def BLUE(text: str):
        return Style._BLUE + text + Style._END

    @staticmethod
    def BLACK(text: str):
        return Style._BLACK + text + Style._END

    @staticmethod
    def RED(text: str):
        return Style._RED + text + Style._END

    @staticmethod
    def GREEN(text: str):
        return Style._GREEN + text + Style._END

    @staticmethod
    def YELLOW(text: str):
        return Style._YELLOW + text + Style._END

    @staticmethod
    def MAGENTA(text: str):
        return Style._MAGENTA + text + Style._END

    @staticmethod
    def CYAN(text: str):
        return Style._CYAN + text + Style._END

    @staticmethod
    def WHITE(text: str):
        return Style._WHITE + text + Style._END

    @staticmethod
    def Bold(text: str):
        return Style._Bold + text + Style._END

    @staticmethod
    def Underline(text: str):
        return Style._Underline + text + Style._END

    @staticmethod
    def Reversed(text: str):
        return Style._Reversed + text + Style._END

    @staticmethod
    def ITALIC(text: str):
        return Style._ITALIC + text + Style._END

    @staticmethod
    def BLINK(text: str):
        return Style._BLINK + text + Style._END

    @staticmethod
    def BLINK2(text: str):
        return Style._BLINK2 + text + Style._END

    @staticmethod
    def BLACKBG(text: str):
        return Style._BLACKBG + text + Style._END

    @staticmethod
    def REDBG(text: str):
        return Style._REDBG + text + Style._END

    @staticmethod
    def GREENBG(text: str):
        return Style._GREENBG + text + Style._END

    @staticmethod
    def YELLOWBG(text: str):
        return Style._YELLOWBG + text + Style._END

    @staticmethod
    def BLUEBG(text: str):
        return Style._BLUEBG + text + Style._END

    @staticmethod
    def VIOLETBG(text: str):
        return Style._VIOLETBG + text + Style._END

    @staticmethod
    def BEIGEBG(text: str):
        return Style._BEIGEBG + text + Style._END

    @staticmethod
    def WHITEBG(text: str):
        return Style._WHITEBG + text + Style._END

    @staticmethod
    def GREY(text: str):
        return Style._GREY + text + Style._END

    @staticmethod
    def RED2(text: str):
        return Style._RED2 + text + Style._END

    @staticmethod
    def GREEN2(text: str):
        return Style._GREEN2 + text + Style._END

    @staticmethod
    def YELLOW2(text: str):
        return Style._YELLOW2 + text + Style._END

    @staticmethod
    def BLUE2(text: str):
        return Style._BLUE2 + text + Style._END

    @staticmethod
    def VIOLET2(text: str):
        return Style._VIOLET2 + text + Style._END

    @staticmethod
    def BEIGE2(text: str):
        return Style._BEIGE2 + text + Style._END

    @staticmethod
    def WHITE2(text: str):
        return Style._WHITE2 + text + Style._END

    @staticmethod
    def GREYBG(text: str):
        return Style._GREYBG + text + Style._END

    @staticmethod
    def REDBG2(text: str):
        return Style._REDBG2 + text + Style._END

    @staticmethod
    def GREENBG2(text: str):
        return Style._GREENBG2 + text + Style._END

    @staticmethod
    def YELLOWBG2(text: str):
        return Style._YELLOWBG2 + text + Style._END

    @staticmethod
    def BLUEBG2(text: str):
        return Style._BLUEBG2 + text + Style._END

    @staticmethod
    def VIOLETBG2(text: str):
        return Style._VIOLETBG2 + text + Style._END

    @staticmethod
    def BEIGEBG2(text: str):
        return Style._BEIGEBG2 + text + Style._END

    @staticmethod
    def WHITEBG2(text: str):
        return Style._WHITEBG2 + text + Style._END

    @staticmethod
    def loading_al(text: str):
        b = f"{text} /"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} -"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} \\"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} |"
        print(b)
        sleep(0.05)
        cls()

    @property
    def END(self):
        return self._END

    def color_demo(self):
        for color in self.style_dic:
            print(f"{color} -> {self.style_dic[color]}Effect{self._END}")

    @property
    def Underline2(self):
        return self._Underline
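
Example (a minimal sketch):

from toolboxv2 import Style

print(Style.GREEN("ok"), Style.Bold(Style.RED("failed")))
# Nested calls emit both ANSI codes before the text, so styles combine
print(Style.YELLOWBG(Style.BLACK(" warning ")))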

toolboxv2.Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Initialize spinner with flexible configuration."""
        # Resolve symbol set.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Default symbols if not provided.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Test mode symbol set.
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering management.
        self._is_primary = False
        self._start_time = 0

        # Central manager.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Generate the primary render line."""
        current_time = time.time()
        if self.contd:
            remaining = max(0, self.max_t - (current_time - self._start_time))
            time_display = f"{remaining:.2f}"
        else:
            time_display = f"{current_time - self._start_time:.2f}"

        symbol = next(self.spinner)
        return f"{symbol} {self.message} | {time_display}"

    def _generate_secondary_info(self):
        """Generate secondary spinner info for additional spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Start the spinner."""
        self.running = True
        self._start_time = time.time()
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        # Clear the spinner's line if it was the primary spinner.
        if self._is_primary:
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()

__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __enter__(self):
    """Start the spinner."""
    self.running = True
    self._start_time = time.time()
    self.manager.register_spinner(self)
    return self

__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()

__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration."""
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set.
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False
    self._start_time = 0

    # Central manager.
    self.manager = SpinnerManager()
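
Example (a minimal sketch):

import time
from toolboxv2 import Spinner

# Rendering is handled by the shared SpinnerManager on a background thread
with Spinner("Downloading...", symbols="d", count_down=True, time_in_s=5):
    time.sleep(5)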

toolboxv2.remove_styles(text, infos=False)

Source code in toolboxv2/utils/extras/Style.py
def remove_styles(text: str, infos=False):
    in_ = []
    for key, style in Style.style_dic.items():
        if style in text:
            text = text.replace(style, '')
            if infos:
                in_.append([key for key, st in Style.style_dic.items() if style == st][0])
    if infos:
        if "END" in in_:
            in_.remove('END')
        return text, in_
    return text
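
Example (a minimal sketch):

from toolboxv2 import Style, remove_styles

colored = Style.GREEN("ok")
print(remove_styles(colored))              # 'ok'
print(remove_styles(colored, infos=True))  # ('ok', ['GREEN'])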

Data Types & Structures

toolboxv2.AppArgs

Source code in toolboxv2/utils/system/types.py
class AppArgs:
    init = None
    init_file = 'init.config'
    get_version = False
    mm = False
    sm = False
    lm = False
    modi = 'cli'
    kill = False
    remote = False
    remote_direct_key = None
    background_application = False
    background_application_runner = False
    docker = False
    build = False
    install = None
    remove = None
    update = None
    name = 'main'
    port = 5000
    host = '0.0.0.0'
    load_all_mod_in_files = False
    mods_folder = 'toolboxv2.mods.'
    debug = None
    test = None
    profiler = None
    hot_reload = False
    live_application = True
    sysPrint = False
    kwargs = {}
    session = None

    def default(self):
        return self

    def set(self, name, value):
        setattr(self, name, value)
        return self
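
Example (a minimal sketch; both default() and set() return the instance, so calls chain):

from toolboxv2 import AppArgs

args = AppArgs().default().set("port", 8080).set("debug", True)
print(args.port, args.debug)  # 8080 True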

toolboxv2.Result

Source code in toolboxv2/utils/system/types.py
class Result:
    _task = None
    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        return self

    def as_dict(self):
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # exec_code maps onto HTTP response codes
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        if self.info.exec_code == 200:
            return False
        return True

    def is_data(self):
        return self.result.data is not None

    def to_api_result(self):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # exec_code maps onto HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list | str | None):
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=error.value if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to').value if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: Union[dict, None] = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Union[
                   Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Union[
                Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain", exec_code=None, status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with a specific content type."""
        if headers is not None:
            return cls.html(text_data, status=exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html", status=200, headers=None, row=False):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and not get_app().web_context() in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        data = '\n' + f"{((prifix + 'Data: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{data if not data.endswith('NO Data') else ''}")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "\n------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        return self._task
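
A minimal usage sketch of the class above (the payload and handler context are hypothetical):

res = Result.ok(data={"user": "root"}, data_info="demo payload")
assert not res.is_error()
print(res.get("user"))           # "root"
print(res.get("missing", "-"))   # "-" (falls back to the default)

err = Result.default_user_error(info="bad input")
if err.is_error():
    err.log()  # logs the formatted result via the toolboxv2 logger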

binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)
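
For instance, a handler might return raw bytes with a download hint; a sketch with placeholder image data:

def chart_endpoint():
    png_bytes = b"\x89PNG..."  # placeholder bytes, not a real image
    return Result.binary(png_bytes, content_type="image/png", download_name="chart.png")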

file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

Name Type Description Default
data

File data as bytes or base64 string

required
filename

Name of the file for download

required
content_type

MIME type of the file (auto-detected if None)

None
info

Response info text

'OK'
interface

Target interface

remote

Returns:

Type Description

Result object configured for file download

Source code in toolboxv2/utils/system/types.py
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
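
A sketch of serving a file from disk; the path is hypothetical and the MIME type is auto-detected from the filename:

def report_endpoint():
    with open("reports/summary.pdf", "rb") as f:  # hypothetical path
        return Result.file(f.read(), filename="summary.pdf")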

json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)
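
A sketch of a typical JSON endpoint built on this helper:

def status_endpoint():
    return Result.json({"status": "ok", "items": [1, 2, 3]})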

redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)
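
A sketch of redirecting after a successful action (the target URL is illustrative):

def after_login():
    return Result.redirect("/dashboard", status_code=303)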

sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

Name Type Description Default
stream_generator Any

A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.

required
info str

Optional help text for the Result.

'OK'
interface ToolBoxInterfaces

Optional ToolBoxInterface to target.

remote
cleanup_func Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None]

Optional cleanup function to run when the stream ends or is cancelled.

None
#http_headers

Optional dictionary of custom HTTP headers for the SSE response.

required

Returns:

Type Description

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Union[
            Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
        #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
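
A sketch of an SSE endpoint driven by an async generator; the event payloads and cleanup hook are illustrative:

import asyncio

async def ticks(n=3):
    for i in range(n):
        yield {"tick": i}       # each yielded item becomes one SSE event
        await asyncio.sleep(1.0)

def sse_endpoint():
    return Result.sse(stream_generator=ticks(),
                      cleanup_func=lambda: print("stream closed"))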

stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

Name Type Description Default
stream_generator Any

Any stream source (async generator, sync generator, iterable, or single item).

required
content_type str

Content-Type header (default: text/event-stream for SSE).

'text/event-stream'
headers Union[dict, None]

Additional HTTP headers for the response.

None
info str

Help text for the result.

'OK'
interface ToolBoxInterfaces

Interface to send data to.

remote
cleanup_func Union[Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None]

Optional function for cleanup.

None

Returns:

Type Description

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: Union[dict, None] = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Union[
               Callable[[], None], Callable[[], T], Callable[[], AsyncGenerator[T, None]], None] = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
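
For a non-SSE stream, e.g. chunked plain text, a sync generator is wrapped into an async one automatically; a sketch with a hypothetical extra header:

def chunks():
    yield "part 1\n"
    yield "part 2\n"

def stream_endpoint():
    return Result.stream(chunks(), content_type="text/plain", headers={"X-Demo": "1"})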

text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with a specific content type.

Source code in toolboxv2/utils/system/types.py
@classmethod
def text(cls, text_data, content_type="text/plain", exec_code=None, status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with a specific content type."""
    if headers is not None:
        return cls.html(text_data, status=exec_code or status, info=info, headers=headers)
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )

    return cls(error=error, info=info_obj, result=result)
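
A sketch of returning plain text with an explicit content type:

def robots_endpoint():
    return Result.text("User-agent: *\nDisallow:", content_type="text/plain")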

toolboxv2.ApiResult

Bases: BaseModel

Source code in toolboxv2/utils/system/types.py
class ApiResult(BaseModel):
    error: None | str = None
    origin: Any | None
    result: ToolBoxResultBM | None = None
    info: ToolBoxInfoBM | None

    def as_result(self):
        return Result(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResult(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfo(
                exec_code=self.info.exec_code,
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def to_api_result(self):
        return self

    def print(self, *args, **kwargs):
        res = self.as_result().print(*args, **kwargs)
        if not isinstance(res, str):
            res = res.to_api_result()
        return res
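
The two types convert into each other; a sketch of the round trip:

res = Result.ok(data={"n": 1})
api = res.to_api_result()   # Pydantic model, e.g. for serialization over HTTP
back = api.as_result()      # plain Result again
assert back.get("n") == 1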

toolboxv2.RequestData dataclass

Main class representing the complete request data structure.

Source code in toolboxv2/utils/system/types.py
@dataclass
class RequestData:
    """Main class representing the complete request data structure."""
    request: Request
    session: Session
    session_id: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
        """Create a RequestData instance from a dictionary."""
        return cls(
            request=Request.from_dict(data.get('request', {})),
            session=Session.from_dict(data.get('session', {})),
            session_id=data.get('session_id', '')
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert the RequestData object back to a dictionary."""
        return {
            'request': self.request.to_dict(),
            'session': self.session.to_dict(),
            'session_id': self.session_id
        }

    def __getattr__(self, name: str) -> Any:
        """Delegate unknown attributes to the `request` object."""
        # Only if the attribute does not exist directly on RequestData
        # and is not `session` or `session_id` either
        if hasattr(self.request, name):
            return getattr(self.request, name)
        raise AttributeError(f"'RequestData' object has no attribute '{name}'")

    @classmethod
    def moc(cls):
        return cls(
            request=Request.from_dict({
                'content_type': 'application/x-www-form-urlencoded',
                'headers': {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br, zstd',
                    'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
                    'connection': 'keep-alive',
                    'content-length': '107',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': 'session=abc123',
                    'host': 'localhost:8080',
                    'hx-current-url': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'hx-request': 'true',
                    'hx-target': 'estimates-guest_1fc2c9',
                    'hx-trigger': 'config-form-guest_1fc2c9',
                    'origin': 'http://localhost:8080',
                    'referer': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"Windows"',
                    'sec-fetch-dest': 'empty',
                    'sec-fetch-mode': 'cors',
                    'sec-fetch-site': 'same-origin',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                },
                'method': 'POST',
                'path': '/api/TruthSeeker/update_estimates',
                'query_params': {},
                'form_data': {
                    'param1': 'value1',
                    'param2': 'value2'
                }
            }),
            session=Session.from_dict({
                'SiID': '29a2e258e18252e2afd5ff943523f09c82f1bb9adfe382a6f33fc6a8381de898',
                'level': '1',
                'spec': '74eed1c8de06886842e235486c3c2fd6bcd60586998ac5beb87f13c0d1750e1d',
                'user_name': 'root',
                'custom_field': 'custom_value'
            }),
            session_id='0x29dd1ac0d1e30d3f'
        )

__getattr__(name)

Delegate unknown attributes to the request object.

Source code in toolboxv2/utils/system/types.py
def __getattr__(self, name: str) -> Any:
    """Delegate unknown attributes to the `request` object."""
    # Only if the attribute does not exist directly on RequestData
    # and is not `session` or `session_id` either
    if hasattr(self.request, name):
        return getattr(self.request, name)
    raise AttributeError(f"'RequestData' object has no attribute '{name}'")

from_dict(data) classmethod

Create a RequestData instance from a dictionary.

Source code in toolboxv2/utils/system/types.py
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
    """Create a RequestData instance from a dictionary."""
    return cls(
        request=Request.from_dict(data.get('request', {})),
        session=Session.from_dict(data.get('session', {})),
        session_id=data.get('session_id', '')
    )

to_dict()

Convert the RequestData object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
def to_dict(self) -> dict[str, Any]:
    """Convert the RequestData object back to a dictionary."""
    return {
        'request': self.request.to_dict(),
        'session': self.session.to_dict(),
        'session_id': self.session_id
    }
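
A sketch of building a RequestData from a raw dict and using the attribute delegation; the field values are illustrative (compare the moc() example above):

rd = RequestData.from_dict({
    "request": {"method": "GET", "path": "/api/ping", "headers": {}},
    "session": {"user_name": "root"},
    "session_id": "0xabc",
})
print(rd.method)                   # "GET", delegated to rd.request via __getattr__
print(rd.to_dict()["session_id"])  # "0xabc"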

Security

toolboxv2.Code

Source code in toolboxv2/utils/security/cryp.py
class Code:

    @staticmethod
    def DK():
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):

        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):

        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Erzeugt eine zufällige Zahl als Seed.

        Returns:
            int: Eine zufällige Zahl.
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

        Args:
            text (str): Der zu hashende Text.
            salt (str): Der Salt-Wert.
            pepper (str): Der Pepper-Wert.
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            str: Der resultierende Hash-Wert.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key() -> str:
        """
        Generiert einen Schlüssel für die symmetrische Verschlüsselung.

        Returns:
            str: Der generierte Schlüssel.
        """
        return Fernet.generate_key().decode()

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.

        Returns:
            str: Der verschlüsselte Text.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

        Args:
            encrypted_text (str): Der zu entschlüsselnde Text.
            key (str): Der symmetrische Schlüssel.
            to_str (bool): default true returns str if false returns bytes
        Returns:
            str: Der entschlüsselte Text.
        """

        if isinstance(key, str):
            key = key.encode()

        #try:
        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()
        # except Exception as e:
        #     get_logger().error(f"Error decrypt_symmetric {e}")
        #     if not mute:
        #         raise e
        #     if not to_str:
        #         return f"Error decoding".encode()
        #     return f"Error decoding"

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

        Args:
            seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel.
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize the keys
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Speichert die generierten Schlüssel in separate Dateien.
        Der private Schlüssel wird mit dem Device Key verschlüsselt.

        Args:
            public_key (str): Der öffentliche Schlüssel im PEM-Format
            private_key (str): Der private Schlüssel im PEM-Format
            directory (str): Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen
        """
        # Erstelle das Verzeichnis, falls es nicht existiert
        os.makedirs(directory, exist_ok=True)

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Verschlüssele den privaten Schlüssel mit dem Device Key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Speichere den öffentlichen Schlüssel
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Speichere den verschlüsselten privaten Schlüssel
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Lädt die Schlüssel aus den Dateien.
        Der private Schlüssel wird mit dem Device Key entschlüsselt.

        Args:
            directory (str): Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

        Returns:
            (str, str): Ein Tupel aus öffentlichem und privatem Schlüssel

        Raises:
            FileNotFoundError: Wenn die Schlüsseldateien nicht gefunden werden können
        """
        # Pfade zu den Schlüsseldateien
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Prüfe ob die Dateien existieren
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Hole den Device Key
        device_key = DEVICE_KEY()

        # Lade den öffentlichen Schlüssel
        with open(public_key_path) as f:
            public_key = f.read()

        # Lade und entschlüssele den privaten Schlüssel
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

        Args:
            text (str): Der zu verschlüsselnde Text.
            public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

        Returns:
            str: Der verschlüsselte Text.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

        Args:
            encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
            private_key_str (str): Der private Schlüssel als String.

        Returns:
            str: Der entschlüsselte Text.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                # padding=padding.PSS(
                #    mgf=padding.MGF1(hashes.SHA512()),
                #    salt_length=padding.PSS.MAX_LENGTH
                # ),
                signature_algorithm=signature_algorithm
            )
            return True
        except Exception:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

        Args:
            pem_key (str): Der PEM-kodierte öffentliche Schlüssel.

        Returns:
            PublicKey: Das PublicKey-Objekt.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

        Args:
            public_key (PublicKey): Das PublicKey-Objekt.

        Returns:
            str: Der PEM-kodierte öffentliche Schlüssel.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
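
A sketch of the device-bound convenience codec: encode_code/decode_code fall back to the device key when no key is passed, so a value encoded on one machine only decodes there:

code = Code()
token = code.encode_code("secret-value")        # encrypted with the device key
assert code.decode_code(token) == "secret-value"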

decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Decrypts a text with a given private key.

Parameters:

Name Type Description Default
encrypted_text_hex str

The encrypted text as a hex string.

required
private_key_str str

The private key as a string.

required

Returns:

Name Type Description
str str

The decrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

    Args:
        encrypted_text_hex (str): Der verschlüsselte Text als Hex-String.
        private_key_str (str): Der private Schlüssel als String.

    Returns:
        str: Der entschlüsselte Text.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"

decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Decrypts a text with a given symmetric key.

Parameters:

Name Type Description Default
encrypted_text str

The text to decrypt.

required
key str

The symmetric key.

required
to_str bool

If True (default) return str, otherwise bytes.

True

Returns: str: The decrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        encrypted_text (str): Der zu entschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.
        to_str (bool): default true returns str if false returns bytes
    Returns:
        str: Der entschlüsselte Text.
    """

    if isinstance(key, str):
        key = key.encode()

    #try:
    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()

encrypt_asymmetric(text, public_key_str) staticmethod

Encrypts a text with a given public key.

Parameters:

Name Type Description Default
text str

The text to encrypt.

required
public_key_str str

The public key as a string in PEM format.

required

Returns:

Name Type Description
str str

The encrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        public_key_str (str): Der öffentliche Schlüssel als String oder im pem format.

    Returns:
        str: Der verschlüsselte Text.
    """
    # try:
    #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
    #  except Exception as e:
    #     get_logger().error(f"Error encrypt_asymmetric {e}")
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
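
A sketch of the full RSA round trip using the helpers above:

pub, priv = Code.generate_asymmetric_keys()
cipher_hex = Code.encrypt_asymmetric("hello", pub)
assert Code.decrypt_asymmetric(cipher_hex, priv) == "hello"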

encrypt_symmetric(text, key) staticmethod

Encrypts a text with a given symmetric key.

Parameters:

Name Type Description Default
text str

The text to encrypt.

required
key str

The symmetric key.

required

Returns:

Name Type Description
str str

The encrypted text.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

    Args:
        text (str): Der zu verschlüsselnde Text.
        key (str): Der symmetrische Schlüssel.

    Returns:
        str: Der verschlüsselte Text.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
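
A sketch of the symmetric (Fernet) round trip:

key = Code.generate_symmetric_key()
token = Code.encrypt_symmetric("hello", key)
assert Code.decrypt_symmetric(token, key) == "hello"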

generate_asymmetric_keys() staticmethod

Generates a public/private key pair for asymmetric encryption.

Returns:

Type Description
(str, str)

A tuple of public and private key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_asymmetric_keys() -> tuple[str, str]:
    """
    Generates a pair of public and private keys for asymmetric encryption.

    Returns:
        tuple[str, str]: A tuple of (public key, private key), PEM-encoded.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialize the keys to PEM
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key

generate_seed() staticmethod

Generates a random number to use as a seed.

Returns:

    int: A random number between 2**32 - 1 and 2**64 - 1.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_seed() -> int:
    """
    Generates a random number to use as a seed.

    Returns:
        int: A random number between 2**32 - 1 and 2**64 - 1.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

generate_symmetric_key() staticmethod

Generates a key for symmetric (Fernet) encryption.

Returns:

    str: The generated key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def generate_symmetric_key() -> str:
    """
    Generates a key for symmetric (Fernet) encryption.

    Returns:
        str: The generated key.
    """
    return Fernet.generate_key().decode()

load_keys_from_files(directory='keys') staticmethod

Loads the keys from files. The private key is decrypted with the device key.

Parameters:

    directory (str): The directory to load the keys from. [default: 'keys']

Returns:

    (str, str): A tuple of the public and the private key; two empty strings if the key files cannot be found.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def load_keys_from_files(directory: str = "keys") -> tuple[str, str]:
    """
    Loads the keys from files.
    The private key is decrypted with the device key.

    Args:
        directory (str): The directory to load the keys from

    Returns:
        tuple[str, str]: A tuple of public and private key, or two empty
        strings if the key files cannot be found
    """
    # Paths to the key files
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Check whether both files exist
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Fetch the device key
    device_key = DEVICE_KEY()

    # Load the public key
    with open(public_key_path) as f:
        public_key = f.read()

    # Load and decrypt the private key
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key

one_way_hash(text, salt='', pepper='') staticmethod

Generates a hash of a given text with salt and pepper.

Parameters:

    text (str): The text to hash. [required]
    salt (str): The salt value. [default: '']
    pepper (str): The pepper value. [default: '']

Returns:

    str: The resulting hash value (SHA-256, hex-encoded).

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Generates a hash of a given text with salt and pepper.

    Args:
        text (str): The text to hash.
        salt (str): The salt value.
        pepper (str): The pepper value.

    Returns:
        str: The resulting hash value (SHA-256, hex-encoded).
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
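
Because the hash is deterministic for a fixed salt and pepper, verification is a recompute-and-compare; a short sketch (import path as above):

from toolboxv2.utils.security.cryp import Code

stored = Code.one_way_hash("password123", salt="per-user-salt", pepper="app-pepper")
candidate = Code.one_way_hash("password123", salt="per-user-salt", pepper="app-pepper")
assert candidate == stored  # same text, salt, and pepper -> same digest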

pem_to_public_key(pem_key) staticmethod

Converts a PEM-encoded public key into a PublicKey object.

Parameters:

    pem_key (str): The PEM-encoded public key. [required]

Returns:

    PublicKey: The PublicKey object.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Converts a PEM-encoded public key into a PublicKey object.

    Args:
        pem_key (str): The PEM-encoded public key.

    Returns:
        PublicKey: The PublicKey object.
    """
    public_key = serialization.load_pem_public_key(pem_key.encode())
    return public_key

public_key_to_pem(public_key) staticmethod

Converts a PublicKey object into a PEM-encoded string.

Parameters:

    public_key (RSAPublicKey): The PublicKey object. [required]

Returns:

    str: The PEM-encoded public key.

Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Converts a PublicKey object into a PEM-encoded string.

    Args:
        public_key (RSAPublicKey): The PublicKey object.

    Returns:
        str: The PEM-encoded public key.
    """
    pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem.decode()

save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Saves the generated keys to separate files. The private key is encrypted with the device key.

Parameters:

    public_key (str): The public key in PEM format. [required]
    private_key (str): The private key in PEM format. [required]
    directory (str): The directory to store the keys in. [default: 'keys']
Source code in toolboxv2/utils/security/cryp.py
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Saves the generated keys to separate files.
    The private key is encrypted with the device key.

    Args:
        public_key (str): The public key in PEM format
        private_key (str): The private key in PEM format
        directory (str): The directory to store the keys in
    """
    # Create the directory if it does not exist
    os.makedirs(directory, exist_ok=True)

    # Fetch the device key
    device_key = DEVICE_KEY()

    # Encrypt the private key with the device key
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    # Store the public key
    public_key_path = os.path.join(directory, "public_key.pem")
    with open(public_key_path, "w") as f:
        f.write(public_key)

    # Store the encrypted private key
    private_key_path = os.path.join(directory, "private_key.pem")
    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in", directory)

Modules & Flows

toolboxv2.mods

Canvas

Tools

Bases: MainTool, EventManagerClass if EVENT_MANAGER_AVAILABLE else object

Source code in toolboxv2/mods/Canvas.py
class Tools(MainTool, EventManagerClass if EVENT_MANAGER_AVAILABLE else object):  # Inherit EventManagerClass
    def __init__(self, app: App):
        self.app = app  # keep a reference to the app; it is used before MainTool init below
        self.name = MOD_NAME
        self.version = VERSION
        self.color = "GREEN"
        self.tools_dict = {"name": MOD_NAME, "Version": self.show_version}  # Renamed to avoid conflict

        self.app.logger.info(f"Canvas MainTool part initialized for app {self.app.id if self.app else 'None'}")

        if EVENT_MANAGER_AVAILABLE:
            # EventManagerClass initialization
            # Determine identification: 'CanvasP0' if central, 'CanvasPN' (Peer Node) otherwise.
            # This logic might need to be more sophisticated based on your deployment.
            canvas_identification = "CanvasPN"
            if hasattr(self.app, 'args_sto') and self.app.args_sto.background_application_runner:
                # Assuming background_application_runner implies it's a central/P0-like instance for Canvas
                canvas_identification = "CanvasP0"

            canvas_source_id = f"CanvasInstance.{self.app.id}"  # Unique source ID for this Canvas instance

            EventManagerClass.__init__(self, source_id=canvas_source_id, _identification=canvas_identification)
            self.app.logger.info(
                f"Canvas EventManager part initialized (SID: {self.source_id}, Ident: {self.identification})")

            # Name for the locally registered event handler, specific to this EventManager source_id
            self.canvas_internal_handler_event_name = f"{CANVAS_INTERNAL_BROADCAST_HANDLER_EVENT_PREFIX}{self.source_id}"
        else:
            self.app.logger.warning(
                "Canvas EventManager part NOT initialized (module unavailable). Collaboration will be local only.")
            self.source_id = f"CanvasInstance.{self.app.id}_LOCAL_ONLY"  # Dummy for logging
            self.identification = "LOCAL_ONLY"
            self.canvas_internal_handler_event_name = f"{CANVAS_INTERNAL_BROADCAST_HANDLER_EVENT_PREFIX}{self.source_id}"

        # Canvas specific state
        self.live_canvas_sessions: Dict[str, List[asyncio.Queue]] = defaultdict(list)
        self.active_user_previews: Dict[str, Dict[str, Any]] = defaultdict(lambda: defaultdict(dict))
        self.previews_lock = asyncio.Lock()


        MainTool.__ainit__(self, load=on_start, v=self.version, tool=self.tools_dict, name=self.name,
                          color=self.color, app=app)
    @property
    def db_mod(self):
        return self.app.get_mod("DB")

    @db_mod.setter
    def db_mod(self, val):  # a property setter must reuse the property name to take effect
        pass

    async def _handle_canvas_action_for_sse_broadcast(self, event_manager_event_id: EventID):
        """
        This method is called by this instance's EventManager when the
        `self.canvas_internal_handler_event_name` is triggered.
        Its job is to take the payload and use the original SSE broadcasting logic.
        """
        if not EVENT_MANAGER_AVAILABLE: return Result.default_internal_error("EventManager not available")

        try:
            broadcast_details = event_manager_event_id.payload
            if not isinstance(broadcast_details, dict):
                self.app.logger.error(
                    f"CanvasSSEBroadcast: Invalid payload type from EM: {type(broadcast_details)}. Expected dict. Payload: {broadcast_details}")
                return Result.default_internal_error("Invalid payload for SSE broadcast from EM")

            canvas_id = broadcast_details.get("canvas_id")
            sse_event_type = broadcast_details.get("sse_event_type")
            sse_data = broadcast_details.get("sse_data")
            originator_user_id = broadcast_details.get("originator_user_id")

            if not all([canvas_id, sse_event_type, sse_data is not None]):
                self.app.logger.error(
                    f"CanvasSSEBroadcast: Missing details in EM payload: C:{canvas_id}, E:{sse_event_type}, D_is_None:{sse_data is None}, O:{originator_user_id}")
                return Result.default_internal_error("Missing data for SSE broadcast from EM")

            self.app.logger.debug(
                f"Canvas: EM triggered internal SSE broadcast for C:{canvas_id}, Event:'{sse_event_type}', Originator:{originator_user_id}")

            await self._broadcast_to_canvas_listeners(
                canvas_id=canvas_id,
                event_type=sse_event_type,
                data=sse_data,
                originator_user_id=originator_user_id
            )
            return Result.ok(info="Broadcast dispatched to local SSE listeners via EM.")
        except Exception as e:
            self.app.logger.error(f"Canvas: Error in _handle_canvas_action_for_sse_broadcast: {e}", exc_info=True)
            return Result.default_internal_error(f"Error handling SSE broadcast via EM: {e}")

    async def _broadcast_to_canvas_listeners(self, canvas_id: str, event_type: str, data: Dict[str, Any],
                                             originator_user_id: Optional[str] = None):
        # This method remains the same as your previous version that puts messages on asyncio.Queues
        # It's now called by _handle_canvas_action_for_sse_broadcast
        message_obj = {
            "event": event_type,
            "data": json.dumps({
                "canvas_id": canvas_id,
                "originator_user_id": originator_user_id,
                **data
            })
        }
        queues_to_remove = []
        # Ensure thread-safe iteration if live_canvas_sessions could be modified elsewhere.
        # defaultdict list append is thread-safe, but iteration during modification is not.
        # Creating a copy of the list of queues for iteration is safer.
        current_queues_for_canvas = list(self.live_canvas_sessions.get(canvas_id, []))

        if not current_queues_for_canvas:
            # self.app.logger.debug(f"No SSE listeners for canvas {canvas_id} to broadcast '{event_type}'.")
            return

        # self.app.logger.debug(f"Canvas: Broadcasting '{event_type}' to {len(current_queues_for_canvas)} SSE listeners for C:{canvas_id}")
        for q in current_queues_for_canvas:
            try:
                q.put_nowait(message_obj)
            except asyncio.QueueFull:
                self.app.logger.warning(
                    f"SSE queue full for canvas {canvas_id} ({event_type}). Message dropped for one client.")
                queues_to_remove.append(q)
            except Exception as e:
                self.app.logger.error(f"Error putting message on SSE queue for canvas {canvas_id} ({event_type}): {e}")
                queues_to_remove.append(q)

        if queues_to_remove:
            # This modification needs to be careful.
            if canvas_id in self.live_canvas_sessions:
                self.live_canvas_sessions[canvas_id] = [
                    q for q in self.live_canvas_sessions[canvas_id] if q not in queues_to_remove
                ]
                if not self.live_canvas_sessions[canvas_id]:
                    async with self.previews_lock:  # Protect active_user_previews
                        if canvas_id in self.live_canvas_sessions:  # Check again before del
                            del self.live_canvas_sessions[canvas_id]
                        if canvas_id in self.active_user_previews:
                            del self.active_user_previews[canvas_id]
                    self.app.logger.info(f"SSE: All listeners removed for canvas {canvas_id} due to queue issues.")


    def show_version(self):
        self.app.logger.info(f"{self.name} Version: {self.version} (EventManager SID: {self.source_id})")
        return self.version


    async def _get_user_specific_db_key(self, request: RequestData, base_key: str) -> Optional[str]:
        return f"{base_key}_public"  # Simplified from original

CloudM

check_multiple_processes(pids)

Checks the status of multiple processes in a single system call. Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).

Source code in toolboxv2/mods/CloudM/mini.py
def check_multiple_processes(pids: list[int]) -> dict[int, str]:
    """
    Checks the status of multiple processes in a single system call.
    Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).
    """
    if not pids:
        return {}

    pid_status = {}

    if os.name == 'nt':  # Windows
        try:
            # Run plain `tasklist` and filter the output in Python; a per-PID
            # /FI filter would require a separate invocation for each PID
            command = 'tasklist'

            # Add encoding handling for Windows
            result = subprocess.run(
                command,
                capture_output=True,
                text=True,
                shell=True,
                encoding='cp850'  # Use cp850 for Windows console output
            )
            # Collect running PIDs from the output (plain substring match, so a
            # short PID can false-positive against a longer one on the same line)
            running_pids = set()
            for line in result.stdout.lower().split('\n'):
                for pid in pids:
                    if str(pid) in line:
                        running_pids.add(pid)
            # Assign status based on whether PID was found in output
            for pid in pids:
                if pid in running_pids:
                    pid_status[pid] = GREEN_CIRCLE
                else:
                    pid_status[pid] = RED_CIRCLE

        except subprocess.SubprocessError as e:
            print(f"SubprocessError: {e}")  # For debugging
            # Mark all as YELLOW_CIRCLE if there's an error running the command
            for pid in pids:
                pid_status[pid] = YELLOW_CIRCLE
        except UnicodeDecodeError as e:
            print(f"UnicodeDecodeError: {e}")  # For debugging
            # Try alternate encoding if cp850 fails
            try:
                result = subprocess.run(
                    command,
                    capture_output=True,
                    text=True,
                    shell=True,
                    encoding='utf-8'
                )
                running_pids = set()
                for line in result.stdout.lower().split('\n'):
                    for pid in pids:
                        if str(pid) in line:
                            running_pids.add(pid)

                for pid in pids:
                    pid_status[pid] = GREEN_CIRCLE if pid in running_pids else RED_CIRCLE
            except Exception as e:
                print(f"Failed with alternate encoding: {e}")  # For debugging
                for pid in pids:
                    pid_status[pid] = YELLOW_CIRCLE

    else:  # Unix/Linux/Mac
        try:
            pids_str = ','.join(str(pid) for pid in pids)
            command = f'ps -p {pids_str} -o pid='

            result = subprocess.run(
                command,
                capture_output=True,
                text=True,
                shell=True,
                encoding='utf-8'
            )
            running_pids = set(int(pid) for pid in result.stdout.strip().split())

            for pid in pids:
                pid_status[pid] = GREEN_CIRCLE if pid in running_pids else RED_CIRCLE

        except subprocess.SubprocessError as e:
            print(f"SubprocessError: {e}")  # For debugging
            for pid in pids:
                pid_status[pid] = YELLOW_CIRCLE

    return pid_status
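
A quick usage sketch (import path per the source location above; the circle markers are module-level string constants):

import os
from toolboxv2.mods.CloudM.mini import check_multiple_processes

statuses = check_multiple_processes([os.getpid(), 99999999])
print(statuses)  # e.g. {<own pid>: GREEN_CIRCLE, 99999999: RED_CIRCLE}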

get_service_pids(info_dir)

Extracts service names and PIDs from pid files.

Source code in toolboxv2/mods/CloudM/mini.py
def get_service_pids(info_dir):
    """Extracts service names and PIDs from pid files."""
    services = {}
    pid_files = [f for f in os.listdir(info_dir) if re.match(r'(.+)-(.+)\.pid', f)]
    for pid_file in pid_files:
        match = re.match(r'(.+)-(.+)\.pid', pid_file)
        if match:
            services_type, service_name = match.groups()
            # Read the PID from the file
            with open(os.path.join(info_dir, pid_file)) as file:
                pid = file.read().strip()
                # Store the PID using a formatted key
                services[f"{service_name} - {services_type}"] = int(pid)
    return services

get_service_status(dir)

Displays the status of all services.

Source code in toolboxv2/mods/CloudM/mini.py
def get_service_status(dir: str) -> str:
    """Displays the status of all services."""
    # Refresh the cached service map at most every 30 seconds
    if time.time() - services_data_sto_last_update_time[0] > 30:
        services = get_service_pids(dir)
        services_data_sto[0] = services
        services_data_sto_last_update_time[0] = time.time()
    else:
        services = services_data_sto[0]
    if not services:
        return "No services found"

    # Get status for all PIDs in a single call
    pid_statuses = check_multiple_processes(list(services.values()))

    # Build the status string
    res_s = "Service(s):" + ("\n" if len(services) > 1 else ' ')
    for service_name, pid in services.items():
        status = pid_statuses.get(pid, YELLOW_CIRCLE)
        res_s += f"{status} {service_name} (PID: {pid})\n"
    services_data_display[0] = res_s.strip()
    return res_s.rstrip()

ModManager

create_and_pack_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None)

Creates a Python module and packs it into a ZIP file.

Parameters:

    path (str): Path to the folder or file to include in the module. [required]
    module_name (str): Name of the module. [default: '']
    version (str): Version of the module. [default: '-.-.-']
    additional_dirs (dict): Additional directories to add. [default: None]

Returns:

    str: Path to the created ZIP file.

Source code in toolboxv2/mods/CloudM/ModManager.py
def create_and_pack_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None):
    """
    Erstellt ein Python-Modul und packt es in eine ZIP-Datei.

    Args:
        path (str): Pfad zum Ordner oder zur Datei, die in das Modul aufgenommen werden soll.
        additional_dirs (dict): Zusätzliche Verzeichnisse, die hinzugefügt werden sollen.
        version (str): Version des Moduls.
        module_name (str): Name des Moduls.

    Returns:
        str: Pfad zur erstellten ZIP-Datei.
    """
    if additional_dirs is None:
        additional_dirs = {}
    if yaml_data is None:
        yaml_data = {}

    os.makedirs("./mods_sto/temp/", exist_ok=True)

    module_path = os.path.join(path, module_name)
    print("module_pathmodule_pathmodule_path", module_path)
    if not os.path.exists(module_path):
        module_path += '.py'

    temp_dir = tempfile.mkdtemp(dir=os.path.join("./mods_sto", "temp"))
    zip_file_name = f"RST${module_name}&{__version__}§{version}.zip"
    zip_path = f"./mods_sto/{zip_file_name}"

    # Modulverzeichnis erstellen, falls es nicht existiert
    if not os.path.exists(module_path):
        return False

    if os.path.isdir(module_path):
        # tbConfig.yaml erstellen
        config_path = os.path.join(module_path, "tbConfig.yaml")
        with open(config_path, 'w') as config_file:
            yaml.dump({"version": version, "module_name": module_name,
                       "dependencies_file": f"./mods/{module_name}/requirements.txt",
                       "zip": zip_file_name, **yaml_data}, config_file)

        generate_requirements(module_path, os.path.join(module_path, "requirements.txt"))
    # Datei oder Ordner in das Modulverzeichnis kopieren
    if os.path.isdir(module_path):
        shutil.copytree(module_path, os.path.join(temp_dir, os.path.basename(module_path)), dirs_exist_ok=True)
    else:
        shutil.copy2(module_path, temp_dir)
        config_path = os.path.join(temp_dir, f"{module_name}.yaml")
        with open(config_path, 'w') as config_file:
            yaml.dump({"version": version, "dependencies_file": f"./mods/{module_name}/requirements.txt",
                       "module_name": module_name, **yaml_data}, config_file)
        generate_requirements(temp_dir, os.path.join(temp_dir, "requirements.txt"))
    # Zusätzliche Verzeichnisse hinzufügen
    for dir_name, dir_paths in additional_dirs.items():
        if isinstance(dir_paths, str):
            dir_paths = [dir_paths]
        for dir_path in dir_paths:
            full_path = os.path.join(temp_dir, dir_name)
            if os.path.isdir(dir_path):
                shutil.copytree(dir_path, full_path, dirs_exist_ok=True)
            elif os.path.isfile(dir_path):
                # Stellen Sie sicher, dass das Zielverzeichnis existiert
                os.makedirs(full_path, exist_ok=True)
                # Kopieren Sie die Datei statt des Verzeichnisses
                shutil.copy2(dir_path, full_path)
            else:
                print(f"Der Pfad {dir_path} ist weder ein Verzeichnis noch eine Datei.")

    # Modul in eine ZIP-Datei packen
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, _dirs, files in os.walk(temp_dir):
            for file in files:
                file_path = os.path.join(root, file)
                zipf.write(file_path, os.path.relpath(file_path, temp_dir))

    # Temperatures Modulverzeichnis löschen
    shutil.rmtree(temp_dir)

    return zip_path
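
The resulting archive follows the RST${module_name}&{toolbox_version}§{module_version}.zip naming scheme; an illustrative call (paths and names are hypothetical):

# Packs ./mods/MyMod (or ./mods/MyMod.py) into ./mods_sto/RST$MyMod&<tb-version>§v1.0.0.zip
zip_path = create_and_pack_module("./mods", module_name="MyMod", version="v1.0.0")
print(zip_path)
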
download_files(urls, directory, desc, print_func, filename=None)

Helper function for downloading files.

Source code in toolboxv2/mods/CloudM/ModManager.py
def download_files(urls, directory, desc, print_func, filename=None):
    """ Helper function for downloading files. """
    os.makedirs(directory, exist_ok=True)
    name = filename
    for url in tqdm(urls, desc=desc):
        # Derive the file name per URL, so multiple URLs do not all reuse the first name
        name = filename if filename is not None else os.path.basename(url)
        print_func(f"Download {name}")
        print_func(f"{url} -> {directory}/{name}")
        urllib.request.urlretrieve(url, f"{directory}/{name}")
    return f"{directory}/{name}"
handle_requirements(requirements_url, module_name, print_func)

Processes and installs requirements.

Source code in toolboxv2/mods/CloudM/ModManager.py
def handle_requirements(requirements_url, module_name, print_func):
    """ Verarbeitet und installiert Requirements. """
    if requirements_url:
        requirements_filename = f"{module_name}-requirements.txt"
        print_func(f"Download requirements {requirements_filename}")
        urllib.request.urlretrieve(requirements_url, requirements_filename)

        print_func("Install requirements")
        run_command(
            [sys.executable, "-m", "pip", "install", "-r", requirements_filename])

        os.remove(requirements_filename)
increment_version(version_str, max_value=99)

Increments a version number in the format "vX.Y.Z".

Parameters:

    version_str (str): The current version number, e.g. "v0.0.1". [required]
    max_value (int): The maximum value per position. [default: 99]

Returns:

    str: The incremented version number.

Source code in toolboxv2/mods/CloudM/ModManager.py
def increment_version(version_str: str, max_value: int = 99) -> str:
    """
    Inkrementiert eine Versionsnummer im Format "vX.Y.Z".

    Args:
        version_str (str): Die aktuelle Versionsnummer, z. B. "v0.0.1".
        max_value (int): Die maximale Zahl pro Stelle (default: 99).

    Returns:
        str: Die inkrementierte Versionsnummer.
    """
    if not version_str.startswith("v"):
        raise ValueError("Die Versionsnummer muss mit 'v' beginnen, z. B. 'v0.0.1'.")

    # Entferne das führende 'v' und parse die Versionsnummer
    version_core = version_str[1:]
    try:
        version = Version(version_core)
    except ValueError as e:
        raise ValueError(f"Ungültige Versionsnummer: {version_core}") from e

    # Extrahiere die Versionsteile und konvertiere sie zu einer Liste
    parts = list(version.release)

    # Inkrementiere die letzte Stelle
    for i in range(len(parts) - 1, -1, -1):
        if parts[i] < max_value:
            parts[i] += 1
            break
        else:
            parts[i] = 0
            # Schleife fährt fort, um die nächsthöhere Stelle zu inkrementieren
    else:
        # Wenn alle Stellen auf "max_value" sind, füge eine neue Stelle hinzu
        parts.insert(0, 1)

    # Baue die neue Version
    new_version = "v" + ".".join(map(str, parts))
    return new_version
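
Two concrete cases of the carry logic, traced against the code above:

print(increment_version("v0.0.1"))   # -> "v0.0.2"
print(increment_version("v0.0.99"))  # -> "v0.1.0" (99 rolls over, carry increments the middle digit)
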
installer(app, module_name, build_state=True) async

Installs or updates a module based on the remote version.

Source code in toolboxv2/mods/CloudM/ModManager.py
@export(mod_name=Name, name="install", test=False)
async def installer(app: App | None, module_name: str, build_state=True):
    """
    Installs or updates a module based on the remote version.
    """
    if app is None:
        app = get_app(f"{Name}.installer")

    if not app.session.valid and not await app.session.login():
        return Result.default_user_error("Please login with CloudM login")

    # Fetch only the highest available version from the server
    response = await app.session.fetch(f"/installer/version?name={module_name}", method="GET")
    remote_version: str = await response.text()
    remote_version = remote_version.split('"')[1]
    if remote_version == "None":
        remote_version = None
    # Find the local version
    local_version = find_highest_zip_version(
        module_name, version_only=True
    )

    if not local_version and not remote_version:
        return Result.default_user_error(f"404 mod {module_name} not found")

    # Compare versions
    local_ver = pv.parse(local_version) if local_version else pv.parse("0.0.0")
    remote_ver = pv.parse(remote_version)

    app.print(f"Mod versions - Local: {local_ver}, Remote: {remote_ver}")

    if remote_ver > local_ver:
        # Build the URL directly from module name and version
        mod_url = f"/installer/mods_sto/RST${module_name}&{app.version}§{remote_version}.zip"
        download_path = Path(app.start_dir) / 'mods_sto'

        app.print(f"Fetching Mod from {app.session.base + mod_url}")
        if not await app.session.download_file(mod_url, str(download_path)):
            app.print("Failed to download mod")
            if 'y' not in input("Download manually and place in mods_sto folder. Done? (y/n) ").lower():
                return Result.default_user_error("Installation cancelled")

        # Normalize the downloaded file name
        zip_name = mod_url.split('/')[-1]
        clean_name = zip_name.replace("$", '').replace("&", '').replace("§", '')
        with contextlib.suppress(FileExistsError):
            os.rename(
                str(download_path / clean_name),
                str(download_path / zip_name)
            )

        with Spinner("Installing from zip"):
            report = install_from_zip(app, zip_name)

        if not report:
            return Result.default_user_error("Setup error occurred")

        if build_state:
            get_state_from_app(app)

        return report

    app.print("Module is already up to date")
    return Result.ok()
run_command(command, cwd=None)

Executes a command and returns its output.

Source code in toolboxv2/mods/CloudM/ModManager.py
def run_command(command, cwd=None):
    """Führt einen Befehl aus und gibt den Output zurück."""
    result = subprocess.run(command, cwd=cwd, capture_output=True, text=True, check=True,
                            encoding='cp850')
    return result.stdout
uninstall_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None)

Uninstalls a Python module by removing the module directory and its packed ZIP file.

Parameters:

    path (str): Path to the folder or file that belongs to the module. [required]
    module_name (str): Name of the module. [default: '']
    version (str): Version of the module. [default: '-.-.-']
    additional_dirs (dict): Additional directories to remove. [default: None]
Source code in toolboxv2/mods/CloudM/ModManager.py
def uninstall_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None):
    """
    Deinstalliert ein Python-Modul, indem es das Modulverzeichnis oder die ZIP-Datei entfernt.

    Args:
        path (str): Pfad zum Ordner oder zur Datei, die in das Modul aufgenommen werden soll.
        additional_dirs (dict): Zusätzliche Verzeichnisse, die hinzugefügt werden sollen.
        version (str): Version des Moduls.
        module_name (str): Name des Moduls.

    """
    if additional_dirs is None:
        additional_dirs = {}
    if yaml_data is None:
        yaml_data = {}

    os.makedirs("./mods_sto/temp/", exist_ok=True)

    base_path = os.path.dirname(path)
    module_path = os.path.join(base_path, module_name)
    zip_path = f"./mods_sto/RST${module_name}&{__version__}§{version}.zip"

    # Modulverzeichnis erstellen, falls es nicht existiert
    if not os.path.exists(module_path):
        print("Module %s already uninstalled")
        return False

    # Datei oder Ordner in das Modulverzeichnis kopieren
    shutil.rmtree(module_path)

    # Zusätzliche Verzeichnisse hinzufügen
    for _dir_name, dir_paths in additional_dirs.items():
        if isinstance(dir_paths, str):
            dir_paths = [dir_paths]
        for dir_path in dir_paths:
            shutil.rmtree(dir_path)
            print(f"Der Pfad {dir_path} wurde entfernt")

    # Ursprüngliches Modulverzeichnis löschen
    shutil.rmtree(zip_path)
unpack_and_move_module(zip_path, base_path='./mods', module_name='')

Unpacks a ZIP file and moves its contents into place. Overwrites existing files to support updates.

Parameters:

    zip_path (str): Path to the ZIP file to unpack. [required]
    base_path (str): Base path under which the module is stored. [default: './mods']
    module_name (str): Name of the module (optional; otherwise extracted from the ZIP name). [default: '']

Returns:

    str: Name of the installed module.

Source code in toolboxv2/mods/CloudM/ModManager.py
def unpack_and_move_module(zip_path: str, base_path: str = './mods', module_name: str = '') -> str:
    """
    Entpackt eine ZIP-Datei und verschiebt die Inhalte an die richtige Stelle.
    Überschreibt existierende Dateien für Update-Unterstützung.

    Args:
        zip_path (str): Pfad zur ZIP-Datei, die entpackt werden soll
        base_path (str): Basispfad, unter dem das Modul gespeichert werden soll
        module_name (str): Name des Moduls (optional, wird sonst aus ZIP-Namen extrahiert)

    Returns:
        str: Name des installierten Moduls
    """
    # Konvertiere Pfade zu Path-Objekten für bessere Handhabung
    zip_path = Path(zip_path)
    base_path = Path(base_path)

    # Extrahiere Modulnamen falls nicht angegeben
    if not module_name:
        module_name = zip_path.name.split('$')[1].split('&')[0]

    module_path = base_path / module_name
    temp_base = Path('./mods_sto/temp')

    try:
        # Erstelle temporäres Verzeichnis
        temp_base.mkdir(parents=True, exist_ok=True)
        with tempfile.TemporaryDirectory(dir=str(temp_base)) as temp_dir:
            temp_dir = Path(temp_dir)

            with Spinner(f"Extracting {zip_path.name}"):
                # Entpacke ZIP-Datei
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall(temp_dir)

            # Behandle Modul-Verzeichnis
            source_module = temp_dir / module_name
            if source_module.exists():
                with Spinner(f"Installing module to {module_path}"):
                    if module_path.exists():
                        # Lösche existierendes Modul-Verzeichnis für sauberes Update
                        shutil.rmtree(module_path)
                    # Verschiebe neues Modul-Verzeichnis
                    shutil.copytree(source_module, module_path, dirs_exist_ok=True)

            # Behandle zusätzliche Dateien im Root
            with Spinner("Installing additional files"):
                for item in temp_dir.iterdir():
                    if item.name == module_name:
                        continue

                    target = Path('./') / item.name
                    if item.is_dir():
                        with Spinner(f"Installing directory {item.name}"):
                            if target.exists():
                                shutil.rmtree(target)
                            shutil.copytree(item, target, dirs_exist_ok=True)
                    else:
                        with Spinner(f"Installing file {item.name}"):
                            shutil.copy2(item, target)

            print(f"Successfully installed/updated module {module_name} to {module_path}")
            return module_name

    except Exception as e:
        print(f"Error during installation: {str(e)}")
        # Cleanup bei Fehler
        if module_path.exists():
            shutil.rmtree(module_path)
        raise
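
Module-name extraction relies on the RST$name&tbv§ver.zip scheme used by create_and_pack_module; a quick illustration with a hypothetical file name:

from pathlib import Path

zip_name = Path("RST$MyMod&0.1.21§v1.0.0.zip").name
module_name = zip_name.split('$')[1].split('&')[0]
print(module_name)  # -> "MyMod"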

UserAccountManager

get_current_user_from_request_api_wrapper(app, request) async

API callable version of get_current_user_from_request for tbjs/admin panel

Source code in toolboxv2/mods/CloudM/UserAccountManager.py
@export(mod_name=Name, api=True, version=version, request_as_kwarg=True, row=False)  # row=False to return JSON
async def get_current_user_from_request_api_wrapper(app: App, request: RequestData):
    """ API callable version of get_current_user_from_request for tbjs/admin panel """
    user = await get_current_user_from_request(app, request)
    if not user:
        # Return error that tbjs can handle
        return Result.default_user_error(info="User not authenticated or found.", data=None, exec_code=401)
    user_dict = asdict(user)
    pub_user_data = {}
    for key in ['name','pub_key','email','creation_time','is_persona','level','log_level','settings']:
        pub_user_data[key] = user_dict.get(key, None)
    return Result.ok(data=pub_user_data)

email_services

send_email_verification_email(app, user_email, username, verification_url)

Sends an email verification link to the user.

Source code in toolboxv2/mods/CloudM/email_services.py
@s_export
def send_email_verification_email(app: App, user_email: str, username: str, verification_url: str):
    """Sends an email verification link to the user."""
    sender = EmailSender(app)
    subject = f"Verify Your Email for {APP_NAME}"
    preview_text = f"Almost there, {username}! Just one more step to activate your account."

    content_html = f"""
        <h2>Hi {username},</h2>
        <p>Thanks for signing up for {APP_NAME}! To complete your registration, please verify your email address by clicking the button below.</p>
        <a href="{verification_url}" class="button">Verify Email Address</a>
        <p>If you didn't create an account with {APP_NAME}, you can safely ignore this email.</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{verification_url}</span></p>
        <p>Sincerely,<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(user_email, subject, content_html, preview_text)

send_magic_link_email(app, user_email, magic_link_url, username=None)

Sends a magic link email for login.

Source code in toolboxv2/mods/CloudM/email_services.py
@s_export
def send_magic_link_email(app: App, user_email: str, magic_link_url: str, username: str = None):
    """Sends a magic link email for login."""
    sender = EmailSender(app)
    greeting_name = f", {username}" if username else ""
    subject = f"Your Magic Login Link for {APP_NAME}"
    preview_text = "Securely access your account with this one-time link."

    content_html = f"""
        <h2>Hello{greeting_name}!</h2>
        <p>You requested a magic link to sign in to your {APP_NAME} account.</p>
        <p>Click the button below to log in. This link is temporary and will expire shortly.</p>
        <a href="{magic_link_url}" class="button">Log In Securely</a>
        <p> Invitation key: {magic_link_url.split('?key=')[1].split('&name=')[0].replace('%23', '#')}</p>
        <p>If you did not request this link, please ignore this email. Your account is safe.</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{magic_link_url}</span></p>
        <p>Thanks,<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(user_email, subject, content_html, preview_text)
send_signup_invitation_email(app, invited_user_email, invited_username, inviter_username=None)

Generates an invitation link and sends it via email.

Source code in toolboxv2/mods/CloudM/email_services.py
@s_export
def send_signup_invitation_email(app: App, invited_user_email: str, invited_username: str,
                                 inviter_username: str = None):
    """Generates an invitation link and sends it via email."""
    sender = EmailSender(app)

    # Generate invitation code as specified in the prompt
    # This uses the Code class, assuming TB_R_KEY is set in the environment
    invitation_code = Code.one_way_hash(invited_username, "00#", os.getenv("TB_R_KEY", "pepper123"))[:12] + str(
        uuid.uuid4())[:6]

    # Construct the signup link URL (adjust your frontend signup path as needed)
    signup_link_url = f"{APP_BASE_URL}/web/assets/signup.html?invitation={quote(invitation_code)}&email={quote(invited_user_email)}&username={quote(invited_username)}"

    subject = f"You're Invited to Join {APP_NAME}!"
    preview_text = f"{inviter_username or 'A friend'} has invited you to {APP_NAME}!"
    inviter_line = f"<p>{inviter_username} has invited you to join.</p>" if inviter_username else "<p>You've been invited to join.</p>"

    content_html = f"""
        <h2>Hello {invited_username},</h2>
        {inviter_line}
        <p>{APP_NAME} is an exciting platform, and we'd love for you to be a part of it!</p>
        <p>Click the button below to accept the invitation and create your account:</p>
        <a href="{signup_link_url}" class="button">Accept Invitation & Sign Up</a>
        <p>This invitation is unique to you : {invitation_code}</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{signup_link_url}</span></p>
        <p>We look forward to seeing you there!<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(invited_user_email, subject, content_html, preview_text)
send_waiting_list_confirmation_email(app, user_email)

Sends a confirmation email for joining the waiting list.

Source code in toolboxv2/mods/CloudM/email_services.py
@s_export
def send_waiting_list_confirmation_email(app: App, user_email: str):
    """Sends a confirmation email for joining the waiting list."""
    sender = EmailSender(app)
    subject = f"You're on the Waiting List for {APP_NAME}!"
    preview_text = "Thanks for your interest! We'll keep you updated."

    content_html = f"""
        <h2>You're In!</h2>
        <p>Thank you for joining the waiting list for {APP_NAME}. We're working hard to get things ready and appreciate your interest.</p>
        <p>We'll notify you as soon as we have updates or when access becomes available.</p>
        <p>In the meantime, you can follow our progress or learn more at <a href="{APP_BASE_URL}" class="link-in-text">{APP_BASE_URL}</a>.</p>
        <p>Stay tuned,<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(user_email, subject, content_html, preview_text,
                                  recipient_email_for_unsubscribe=user_email, show_unsubscribe_link=True)
send_welcome_email(app, user_email, username, welcome_action_url=None)

Sends a welcome email to a new user.

Source code in toolboxv2/mods/CloudM/email_services.py
@s_export  # Changed to native, api=False as it's a backend function
def send_welcome_email(app: App, user_email: str, username: str, welcome_action_url: str = None):
    """Sends a welcome email to a new user."""
    sender = EmailSender(app)
    subject = f"Welcome to {APP_NAME}, {username}!"
    preview_text = f"We're thrilled to have you, {username}!"
    action_url = welcome_action_url or f"{APP_BASE_URL}/dashboard"  # Default to dashboard

    content_html = f"""
        <h2>Welcome Aboard, {username}!</h2>
        <p>Thank you for signing up for {APP_NAME}. We're excited to have you join our community!</p>
        <p>Here are a few things you might want to do next:</p>
        <ul>
            <li>Explore your new account features.</li>
            <li>Customize your profile.</li>
        </ul>
        <p>Click the button below to get started:</p>
        <a href="{action_url}" class="button">Go to Your Dashboard</a>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{action_url}</span></p>
        <p>Best regards,<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(user_email, subject, content_html, preview_text,
                                  recipient_email_for_unsubscribe=user_email, show_unsubscribe_link=True)


module

hash_password(password)

Hash a password for storing.

Source code in toolboxv2/mods/CloudM/module.py
def hash_password(password):
    """Hash a password for storing."""
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    pwdhash = hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), salt,
                                  100000)
    pwdhash = binascii.hexlify(pwdhash)
    return (salt + pwdhash).decode('ascii')
verify_password(stored_password, provided_password)

Verify a stored password against one provided by the user.

Source code in toolboxv2/mods/CloudM/module.py
def verify_password(stored_password, provided_password):
    """Verify a stored password against one provided by user"""
    salt = stored_password[:64]
    stored_password = stored_password[64:]
    pwdhash = hashlib.pbkdf2_hmac('sha512', provided_password.encode('utf-8'),
                                  salt.encode('ascii'), 100000)
    pwdhash = binascii.hexlify(pwdhash).decode('ascii')
    return pwdhash == stored_password
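
A round-trip sketch, assuming the import path shown above:

from toolboxv2.mods.CloudM.module import hash_password, verify_password

stored = hash_password("correct horse battery staple")
assert verify_password(stored, "correct horse battery staple")
assert not verify_password(stored, "wrong password")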

CodeVerification

VerificationSystem

Source code in toolboxv2/mods/CodeVerification.py
class VerificationSystem:
    def __init__(self, tools_db, scope="main"):
        """
        Initialize VerificationSystem with DB Tools integration

        Args:
            tools_db (Tools): Database tools from toolboxv2.mods.DB
            scope (str, optional): Scope for templates and codes. Defaults to "main".
        """
        self.tools_db = tools_db
        self.scope = scope
        self.tidmp = {}
        self._ensure_scope_templates()

    def get(self):
        return self

    def reset_scope_templates(self):
        """
        Reset the templates dictionary for the current scope to an empty dict in the database
        """
        templates_key = f"verification_templates_{self.scope}"

        self.tools_db.set(templates_key, json.dumps({}))

    def _ensure_scope_templates(self):
        """
        Ensure a templates dictionary exists for the current scope in the database
        """
        templates_key = f"verification_templates_{self.scope}"

        # Check if templates exist for this scope
        templates_exist = self.tools_db.if_exist(templates_key)

        if templates_exist.is_error() and not templates_exist.is_data():
            # Initialize empty templates dictionary if not exists
            self.tools_db.set(templates_key, json.dumps({}))
        else:
            allt = self.get_all_templates()

            for k, v in allt.items():
                if 'name' not in v:
                    continue
                self.tidmp[v['name']] = k

    def add_config_template(self, template: ConfigTemplate) -> str:
        """
        Add a new configuration template to the database

        Args:
            template (ConfigTemplate): The configuration template

        Returns:
            str: Unique identifier of the template
        """
        # Ensure template has the current scope
        template.scope = self.scope

        # Generate a unique template ID
        template_id = secrets.token_urlsafe(8)

        # Get existing templates for this scope
        templates = self.get_all_templates()

        # Add new template
        self.tidmp[template.name] = template_id
        templates[template_id] = asdict(template)

        # Save updated templates back to database
        templates_key = f"verification_templates_{self.scope}"
        save_result = self.tools_db.set(templates_key, json.dumps(templates))

        if save_result.is_error():
            raise ValueError("Could not save template")

        return template_id

    def get_all_templates(self):
        templates_key = f"verification_templates_{self.scope}"
        templates_result = self.tools_db.get(templates_key)

        if not templates_result.is_error() and templates_result.is_data():
            try:
                templates_result.result.data = json.loads(templates_result.get())
            except Exception as e:
                templates_result.print()
                print(f"Errro loding template data curupted : {str(e)}")
                templates_result.result.data = {}
        else:
            templates_result.result.data = {}
        if not isinstance(templates_result, dict):
            templates_result = templates_result.result.data
        return templates_result

    def generate_code(self, template_id: str) -> str:
        """
        Generate a code based on the configuration template

        Args:
            template_id (str): ID of the configuration template

        Returns:
            str: Generated verification code
        """
        # Get templates for this scope
        templates = self.get_all_templates()
        print(templates, self.tidmp, template_id)
        if template_id not in templates:
            template_id = self.tidmp.get(template_id, template_id)
        if template_id not in templates:
            raise ValueError("Invalid configuration template")

        template_dict = templates[template_id]
        ConfigTemplate(**template_dict)  # validate that the stored template has the expected fields

        # Generate a random code with max 16 characters
        code = secrets.token_urlsafe(10)[:16]

        # Prepare code information
        code_info = {
            'template_id': template_id,
            'created_at': time.time(),
            'uses_count': 0,
            'scope': self.scope
        }

        # Store code information in database
        codes_key = f"verification_codes_{self.scope}"
        existing_codes_result = self.tools_db.get(codes_key)

        existing_codes = {}
        if not existing_codes_result.is_error() and existing_codes_result.is_data():
            d = existing_codes_result.get()
            if isinstance(d, list):
                d = d[0]
            existing_codes = json.loads(d)

        existing_codes[code] = code_info

        save_result = self.tools_db.set(codes_key, json.dumps(existing_codes))

        if save_result.is_error():
            raise ValueError("Could not save generated code")

        return code

    def validate_code(self, code: str) -> dict[str, Any] | None:
        """
        Validate a code and return template information

        Args:
            code (str): Code to validate

        Returns:
            Optional[Dict[str, Any]]: Template information for valid code, else None
        """
        # Get codes for this scope
        codes_key = f"verification_codes_{self.scope}"
        codes_result = self.tools_db.get(codes_key)

        if codes_result.is_error() or not codes_result.is_data():
            return None

        d = codes_result.get()
        if isinstance(d, list):
            d = d[0]
        existing_codes = json.loads(d)

        if code not in existing_codes:
            return None

        code_info = existing_codes[code]

        # Check if code is from the same scope
        if code_info.get('scope') != self.scope:
            return None

        # Get templates for this scope
        templates = self.get_all_templates()
        template_id = code_info['template_id']

        if template_id not in templates:
            return None

        template_dict = templates[template_id]
        template = ConfigTemplate(**template_dict)

        # Check usage count
        if code_info['uses_count'] >= template.max_uses:
            del existing_codes[code]
            self.tools_db.set(codes_key, json.dumps(existing_codes))
            return None

        # Check time validity for timed codes
        if template.usage_type == 'timed':
            current_time = time.time()
            if template.valid_duration and (current_time - code_info['created_at']) > template.valid_duration:
                del existing_codes[code]
                self.tools_db.set(codes_key, json.dumps(existing_codes))
                return None

        # Update uses count
        existing_codes[code]['uses_count'] += 1
        uses_count = existing_codes[code].get('uses_count', 1)
        # Remove code if it's a one-time use
        if template.usage_type == 'one_time':
            del existing_codes[code]

        # Save updated codes
        self.tools_db.set(codes_key, json.dumps(existing_codes))

        return {
            'template_name': template.name,
            'usage_type': template.usage_type,
            'uses_count': uses_count
        }
__init__(tools_db, scope='main')

Initialize VerificationSystem with DB Tools integration

Parameters:

Name      Type   Description                                          Default
tools_db  Tools  Database tools from toolboxv2.mods.DB                required
scope     str    Scope for templates and codes. Defaults to "main".   'main'
Source code in toolboxv2/mods/CodeVerification.py (lines 27-38)
def __init__(self, tools_db, scope="main"):
    """
    Initialize VerificationSystem with DB Tools integration

    Args:
        tools_db (Tools): Database tools from toolboxv2.mods.DB
        scope (str, optional): Scope for templates and codes. Defaults to "main".
    """
    self.tools_db = tools_db
    self.scope = scope
    self.tidmp = {}
    self._ensure_scope_templates()
add_config_template(template)

Add a new configuration template to the database

Parameters:

Name      Type            Description                 Default
template  ConfigTemplate  The configuration template  required

Returns:

Type  Description
str   Unique identifier of the template

Source code in toolboxv2/mods/CodeVerification.py (lines 71-101)
def add_config_template(self, template: ConfigTemplate) -> str:
    """
    Add a new configuration template to the database

    Args:
        template (ConfigTemplate): The configuration template

    Returns:
        str: Unique identifier of the template
    """
    # Ensure template has the current scope
    template.scope = self.scope

    # Generate a unique template ID
    template_id = secrets.token_urlsafe(8)

    # Get existing templates for this scope
    templates = self.get_all_templates()

    # Add new template
    self.tidmp[template.name] = template_id
    templates[template_id] = asdict(template)

    # Save updated templates back to database
    templates_key = f"verification_templates_{self.scope}"
    save_result = self.tools_db.set(templates_key, json.dumps(templates))

    if save_result.is_error():
        raise ValueError("Could not save template")

    return template_id
generate_code(template_id)

Generate a code based on the configuration template

Parameters:

Name         Type  Description                       Default
template_id  str   ID of the configuration template  required

Returns:

Type  Description
str   Generated verification code

Source code in toolboxv2/mods/CodeVerification.py (lines 120-170)
def generate_code(self, template_id: str) -> str:
    """
    Generate a code based on the configuration template

    Args:
        template_id (str): ID of the configuration template

    Returns:
        str: Generated verification code
    """
    # Get templates for this scope
    templates = self.get_all_templates()
    print(templates, self.tidmp, template_id)
    if template_id not in templates:
        template_id = self.tidmp.get(template_id, template_id)
    if template_id not in templates:
        raise ValueError("Invalid configuration template")

    template_dict = templates[template_id]
    ConfigTemplate(**template_dict)  # validate that the stored template has the expected fields

    # Generate a random code with max 16 characters
    code = secrets.token_urlsafe(10)[:16]

    # Prepare code information
    code_info = {
        'template_id': template_id,
        'created_at': time.time(),
        'uses_count': 0,
        'scope': self.scope
    }

    # Store code information in database
    codes_key = f"verification_codes_{self.scope}"
    existing_codes_result = self.tools_db.get(codes_key)

    existing_codes = {}
    if not existing_codes_result.is_error() and existing_codes_result.is_data():
        d = existing_codes_result.get()
        if isinstance(d, list):
            d = d[0]
        existing_codes = json.loads(d)

    existing_codes[code] = code_info

    save_result = self.tools_db.set(codes_key, json.dumps(existing_codes))

    if save_result.is_error():
        raise ValueError("Could not save generated code")

    return code
reset_scope_templates()

Reset the templates dictionary for the current scope in the database to an empty state

Source code in toolboxv2/mods/CodeVerification.py (lines 43-49)
def reset_scope_templates(self):
    """
    Reset the templates dictionary for the current scope in the database to an empty state
    """
    templates_key = f"verification_templates_{self.scope}"

    self.tools_db.set(templates_key, json.dumps({}))
validate_code(code)

Validate a code and return template information

Parameters:

Name  Type  Description       Default
code  str   Code to validate  required

Returns:

Type                   Description
dict[str, Any] | None  Template information for valid code, else None

Source code in toolboxv2/mods/CodeVerification.py (lines 172-241)
def validate_code(self, code: str) -> dict[str, Any] | None:
    """
    Validate a code and return template information

    Args:
        code (str): Code to validate

    Returns:
        Optional[Dict[str, Any]]: Template information for valid code, else None
    """
    # Get codes for this scope
    codes_key = f"verification_codes_{self.scope}"
    codes_result = self.tools_db.get(codes_key)

    if codes_result.is_error() or not codes_result.is_data():
        return None

    d = codes_result.get()
    if isinstance(d, list):
        d = d[0]
    existing_codes = json.loads(d)

    if code not in existing_codes:
        return None

    code_info = existing_codes[code]

    # Check if code is from the same scope
    if code_info.get('scope') != self.scope:
        return None

    # Get templates for this scope
    templates = self.get_all_templates()
    template_id = code_info['template_id']

    if template_id not in templates:
        return None

    template_dict = templates[template_id]
    template = ConfigTemplate(**template_dict)

    # Check usage count
    if code_info['uses_count'] >= template.max_uses:
        del existing_codes[code]
        self.tools_db.set(codes_key, json.dumps(existing_codes))
        return None

    # Check time validity for timed codes
    if template.usage_type == 'timed':
        current_time = time.time()
        if template.valid_duration and (current_time - code_info['created_at']) > template.valid_duration:
            del existing_codes[code]
            self.tools_db.set(codes_key, json.dumps(existing_codes))
            return None

    # Update uses count
    existing_codes[code]['uses_count'] += 1
    uses_count = existing_codes[code].get('uses_count', 1)
    # Remove code if it's a one-time use
    if template.usage_type == 'one_time':
        del existing_codes[code]

    # Save updated codes
    self.tools_db.set(codes_key, json.dumps(existing_codes))

    return {
        'template_name': template.name,
        'usage_type': template.usage_type,
        'uses_count': uses_count
    }
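
Putting the pieces together, a minimal end-to-end sketch (tools_db stands in for the DB tools from toolboxv2.mods.DB; the ConfigTemplate field names are inferred from the validate_code logic above and may not match the dataclass exactly):

vs = VerificationSystem(tools_db, scope="signup")
template_id = vs.add_config_template(ConfigTemplate(
    name="invite", usage_type="one_time", max_uses=1,
    valid_duration=None, scope="signup"))
code = vs.generate_code(template_id)   # <= 16 URL-safe characters
info = vs.validate_code(code)          # {'template_name': 'invite', ...}
assert vs.validate_code(code) is None  # one_time codes are consumed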

DB

local_instance

load_from_json(filename)

Loads data from a JSON file.

:param filename: The file name or path of the file to load. :return: The loaded data.

Source code in toolboxv2/mods/DB/local_instance.py (lines 137-148)
def load_from_json(filename):
    """
    Lädt Daten aus einer JSON-Datei.

    :param filename: Der Dateiname oder Pfad der zu ladenden Datei.
    :return: Die geladenen Daten.
    """
    if not os.path.exists(filename):
        return {'data': ''}

    with open(filename) as file:
        return json.load(file)
save_to_json(data, filename)

Saves the given data to a JSON file.

:param data: The data to save. :param filename: The file name or path where the data should be stored.

Source code in toolboxv2/mods/DB/local_instance.py (lines 123-134)
def save_to_json(data, filename):
    """
    Speichert die übergebenen Daten in einer JSON-Datei.

    :param data: Die zu speichernden Daten.
    :param filename: Der Dateiname oder Pfad, in dem die Daten gespeichert werden sollen.
    """
    if not os.path.exists(filename):
        open(filename, 'a').close()

    with open(filename, 'w+') as file:
        json.dump(data, file, indent=4)
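
A short round-trip with the two helpers (the file name is illustrative):

save_to_json({"users": ["alice", "bob"]}, "state.json")
restored = load_from_json("state.json")
assert restored == {"users": ["alice", "bob"]}
load_from_json("missing.json")  # returns {'data': ''} for absent files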

reddis_instance

sync_redis_databases(source_url, target_url)

Synchronize keys from the source Redis database to the target Redis database. This function scans all keys in the source DB and uses DUMP/RESTORE to replicate data to the target.

Parameters:

Name        Type  Description                             Default
source_url  str   The Redis URL of the source database.   required
target_url  str   The Redis URL of the target database.   required

Returns:

Type  Description
int   The number of keys successfully synchronized.

Source code in toolboxv2/mods/DB/reddis_instance.py (lines 17-56)
def sync_redis_databases(source_url, target_url):
    """Synchronize keys from the source Redis database to the target Redis database.
    This function scans all keys in the source DB and uses DUMP/RESTORE to replicate data to the target.

    Args:
        source_url (str): The Redis URL of the source database.
        target_url (str): The Redis URL of the target database.

    Returns:
        int: The number of keys successfully synchronized.
    """
    try:
        src_client = redis.from_url(source_url)
        tgt_client = redis.from_url(target_url)
    except Exception as e:
        print(f"Error connecting to one of the Redis instances: {e}")
        return 0

    total_synced = 0
    cursor = 0
    try:
        while True:
            cursor, keys = src_client.scan(cursor=cursor, count=100)
            for key in keys:
                try:
                    serialized_value = src_client.dump(key)
                    if serialized_value is None:
                        continue
                    # Restore key with TTL=0 and replace existing key
                    tgt_client.restore(key, 0, serialized_value, replace=True)
                    total_synced += 1
                except Exception as e:
                    print(f"Error syncing key {key}: {e}")
            if cursor == 0:
                break
    except Exception as scan_error:
        print(f"Error during scanning keys: {scan_error}")

    print(f"Synced {total_synced} keys from {source_url} to {target_url}")
    return total_synced
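
A hypothetical invocation (the URLs are placeholders; note that DUMP/RESTORE payloads are version-sensitive, so source and target should run compatible Redis releases):

synced = sync_redis_databases(
    "redis://localhost:6379/0",  # source
    "redis://localhost:6380/0",  # target
)
print(f"{synced} keys replicated")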

tb_adapter

DB

Bases: ABC

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 54-77)
class DB(ABC):
    @abc.abstractmethod
    def get(self, query: str) -> Result:
        """get data"""

    @abc.abstractmethod
    def set(self, query: str, value) -> Result:
        """set data"""

    @abc.abstractmethod
    def append_on_set(self, query: str, value) -> Result:
        """append set data"""

    @abc.abstractmethod
    def delete(self, query: str, matching=False) -> Result:
        """delete data"""

    @abc.abstractmethod
    def if_exist(self, query: str) -> bool:
        """return True if query exists"""

    @abc.abstractmethod
    def exit(self) -> Result:
        """Close DB connection and optional save data"""
append_on_set(query, value) abstractmethod

append set data

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 63-65)
@abc.abstractmethod
def append_on_set(self, query: str, value) -> Result:
    """append set data"""
delete(query, matching=False) abstractmethod

delete data

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 67-69)
@abc.abstractmethod
def delete(self, query: str, matching=False) -> Result:
    """delete data"""
exit() abstractmethod

Close DB connection and optionally save data

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 75-77)
@abc.abstractmethod
def exit(self) -> Result:
    """Close DB connection and optional save data"""
get(query) abstractmethod

get data

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 55-57)
@abc.abstractmethod
def get(self, query: str) -> Result:
    """get data"""
if_exist(query) abstractmethod

return True if query exists

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 71-73)
@abc.abstractmethod
def if_exist(self, query: str) -> bool:
    """return True if query exists"""
set(query, value) abstractmethod

set data

Source code in toolboxv2/mods/DB/tb_adapter.py (lines 59-61)
@abc.abstractmethod
def set(self, query: str, value) -> Result:
    """set data"""

EventManager

module

EventManagerClass
Source code in toolboxv2/mods/EventManager/module.py (lines 197-719)
class EventManagerClass:
    events: set[Event] = set()
    source_id: str
    _name: str
    _identification: str

    routes_client: dict[str, ProxyRout] = {}
    routers_servers: dict[str, DaemonRout] = {}
    routers_servers_tasks: list[Any] = []
    routers_servers_tasks_running_flag: bool = False

    receiver_que: queue.Queue
    response_que: queue.Queue

    def add_c_route(self, name, route: ProxyRout):
        self.routes_client[name] = route

    async def receive_all_client_data(self):

        close_connections = []
        add_ev = []
        for name, client in self.routes_client.items():
            if client.client is None or not client.client.get('alive', False):
                close_connections.append(name)
                continue
            data = client.r

            if isinstance(data, str) and data == "No data":
                continue
            elif isinstance(data, EventID) and len(data.get_source()) != 0:
                await self.trigger_event(data)
            elif isinstance(data, EventID) and len(data.get_source()) == 0:
                print(f"Event returned {data.payload}")
                self.response_que.put(data)
            elif isinstance(data,
                            dict) and 'error' in data and 'origin' in data and 'result' in data and 'info' in data:

                self.response_que.put(Result.result_from_dict(**data).print())
            elif isinstance(data,
                            dict) and 'source' in data and 'path' in data and 'ID' in data and 'identifier' in data:
                del data['identifier']
                ev_id = EventID(**data)
                await self.trigger_event(ev_id)
            elif isinstance(data, Event):
                print("Event:", str(data.event_id), data.name)
                add_ev.append(data)
            elif isinstance(data, Result):
                self.response_que.put(data.print())
            else:
                print(f"Unknown Data {data}")

        for ev in add_ev:
            await self.register_event(ev)

        for client_name in close_connections:
            print(f"Client {client_name} closing connection")
            self.remove_c_route(client_name)

    def remove_c_route(self, name):
        self.routes_client[name].close()
        del self.routes_client[name]

    def crate_rout(self, source, addr=None):
        if addr is None:
            addr = ('0.0.0.0', 6588)
        host, port = addr
        if isinstance(port, str):
            port = int(port)
        return Rout(
            _from=self.source_id,
            _to=source,
            _from_port=int(os.getenv("TOOLBOXV2_BASE_PORT", 6588)),
            _from_host=os.getenv("TOOLBOXV2_BASE_HOST"),
            _to_port=port,
            _to_host=host,
            routing_function=self.routing_function_router,
        )

    def __init__(self, source_id, _identification="PN"):
        self.bo = False
        self.running = False
        self.source_id = source_id
        self.receiver_que = queue.Queue()
        self.response_que = queue.Queue()
        self._identification = _identification
        self._name = self._identification + '-' + str(uuid.uuid4()).split('-')[1]
        self.routes = {}
        self.logger = get_logger()

    @property
    def identification(self) -> str:
        return self._identification

    @identification.setter
    def identification(self, _identification: str):
        self.stop()
        self._identification = _identification
        self._name = self._identification + '-' + str(uuid.uuid4()).split('-')[1]

    async def identity_post_setter(self):

        do_reconnect = len(list(self.routers_servers.keys())) > 0
        if self._identification == "P0":
            await self.add_server_route(self._identification, ('0.0.0.0', 6568))
        if self._identification == "P0|S0":
            await self.add_server_route(self._identification, ('0.0.0.0', 6567))

        await asyncio.sleep(0.1)
        self.start()
        await asyncio.sleep(0.1)
        if do_reconnect:
            self.reconnect("ALL")

    async def open_connection_server(self, port):
        await self.add_server_route(self._identification, ('0.0.0.0', port))

    def start(self):
        self.running = True
        threading.Thread(target=async_test(self.receiver), daemon=True).start()

    def make_event_from_fuction(self, fuction, name, *args, source_types=SourceTypes.F,
                                scope=Scope.local,
                                exec_in=ExecIn.local,
                                threaded=False, **kwargs):

        return Event(source=fuction,
                     name=name,
                     event_id=EventID.crate_with_source(self.source_id), args=args,
                     kwargs_=kwargs,
                     source_types=source_types,
                     scope=scope,
                     exec_in=exec_in,
                     threaded=threaded,
                     )

    async def add_client_route(self, source_id, addr):
        if source_id in self.routes_client:
            if self.routes_client[source_id].client is None or not self.routes_client[source_id].client.get('alive'):
                await self.routes_client[source_id].reconnect()
                return True
            print("Already connected")
            return False
        try:
            pr = await ProxyRout.toProxy(rout=self.crate_rout(source_id, addr=addr), name=source_id)
            await asyncio.sleep(0.1)
            await pr.client.get('sender')({"id": self._identification,
                                           "continue": False,
                                           "key": os.getenv('TB_R_KEY', 'root@remote')})
            await asyncio.sleep(0.1)
            self.add_c_route(source_id, pr)
            return True
        except Exception as e:
            print(f"Check the port {addr} Sever likely not Online : {e}")
            return False

    async def add_mini_client(self, name: str, addr: tuple[str, int]):

        mini_proxy = await ProxyRout(class_instance=None, timeout=15, app=get_app(),
                                     remote_functions=[""], peer=False, name=name, do_connect=False)

        async def _(x):
            return await self.routers_servers[self._identification].send(x, addr)

        mini_proxy.put_data = _
        mini_proxy.connect = lambda *x, **_: None
        mini_proxy.reconnect = lambda *x, **_: None
        mini_proxy.close = lambda *x, **_: None
        mini_proxy.client = {'alive': True}
        mini_proxy.r = "No data"
        self.routes_client[name] = mini_proxy

    async def on_register(self, id_, data):
        try:
            if "unknown" not in self.routes:
                self.routes["unknown"] = {}

            if id_ != "new_con" and 'id' in data:
                id_data = data.get('id')
                id_ = eval(id_)
                c_host, c_pot = id_
                print(f"Registering: new client {id_data} : {c_host, c_pot}")
                if id_data not in self.routes_client:
                    await self.add_mini_client(id_data, (c_host, c_pot))
                    self.routes[str((c_host, c_pot))] = id_data

            # print("self.routes:", self.routes)
        except Exception as e:
            print("Error in on_register", str(e))

    def on_client_exit(self, id_):

        if isinstance(id_, str):
            id_ = eval(id_)

        c_name = self.routes.get(id_)

        if c_name is None:
            return

        if c_name in self.routes_client:
            self.remove_c_route(c_name)
            print(f"Removed route to {c_name}")

    async def add_server_route(self, source_id, addr=None):
        if addr is None:
            addr = ('0.0.0.0', 6588)
        try:
            self.routers_servers[source_id] = await DaemonRout(rout=self.crate_rout(source_id, addr=addr),
                                                               name=source_id,
                                                               on_r=self.on_register)
            self.routers_servers_tasks.append(self.routers_servers[source_id].online)
        except Exception as e:
            print(f"Sever already Online : {e}")

        if not self.routers_servers_tasks_running_flag:
            self.routers_servers_tasks_running_flag = True
            threading.Thread(target=self.server_route_runner, daemon=True).start()

    def server_route_runner(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        # Collect all results together
        results = loop.run_until_complete(asyncio.gather(*self.routers_servers_tasks))

        for result in results:
            print(result)

        loop.close()
        self.routers_servers_tasks_running_flag = False

    async def add_js_route(self, source_id="js:web"):
        await self.add_server_route(source_id, ("./web/scripts/tb_socket.sock", 0))

    async def register_event(self, event: Event):

        if event in self.events:
            return Result.default_user_error("Event registration failed Event already registered")

        print(f"Registration new Event : {event.name}, {str(event.event_id)}")
        self.events.add(event)

        if event.scope.name == Scope.instance.name:
            return

        if event.scope.name == Scope.local.name:
            if not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                            "localhost") != "localhost":
                await self.add_client_route("P0", (os.getenv("TOOLBOXV2_BASE_HOST", "localhost"),
                                                   os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
                self.bo = True
            return

        if event.scope.name == Scope.local_network.name:
            if self.identification == "P0" and not self.bo:
                t0 = threading.Thread(target=self.start_brodcast_router_local_network, daemon=True)
                t0.start()
            elif not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                              "localhost") == "localhost":
                self.bo = True
                # self.add_server_route(self.identification, ("127.0.0.1", 44667))
                with Spinner(message="Sercheing for Rooter instance", count_down=True, time_in_s=6):
                    with ThreadPoolExecutor(max_workers=1) as executor:
                        t0 = executor.submit(make_known, self.identification)
                        try:
                            data = t0.result(timeout=6)
                        except TimeoutError:
                            print("No P0 found in network or on device")
                            return
                    print(f"Found P0 on {type(data)} {data.get('host')}")
                    await self.add_client_route("P0", (data.get("host"), os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
            elif not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                              "localhost") != "localhost":
                do = await self.add_client_route("P0", (
                    os.getenv("TOOLBOXV2_BASE_HOST", "localhost"), os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
                self.bo = do
                if not do:
                    print("Connection failed")
                    os.environ["TOOLBOXV2_BASE_HOST"] = "localhost"

        if event.scope.name == Scope.global_network.name:
            await self.add_server_route(self.source_id, ('0.0.0.0', os.getenv("TOOLBOXV2_REMOTE_PORT", 6587)))

    async def connect_to_remote(self, host=os.getenv("TOOLBOXV2_REMOTE_IP"),
                                port=os.getenv("TOOLBOXV2_REMOTE_PORT", 6587)):
        await self.add_client_route("S0", (host, port))

    def start_brodcast_router_local_network(self):
        self.bo = True

        # print("Starting brodcast router 0")
        router = start_client(get_local_ip())
        # print("Starting brodcast router 1")
        # next(router)
        # print("Starting brodcast router")
        while self.running:
            source_id, connection = next(router)
            print(f"Infos :{source_id}, connection :{connection}")
            self.routes[source_id] = connection[0]
            router.send(self.running)

        router.send("e")
        router.close()

    def _get_event_by_id_or_name(self, event_id: str | EventID):
        if isinstance(event_id, str):
            events = [e for e in self.events if e.name == event_id]
            if len(events) < 1:
                return Result.default_user_error("Event not registered")
            event = events[0]

        elif isinstance(event_id, EventID):
            events = [e for e in self.events if e.event_id.ID == event_id.ID]
            if len(events) < 1:
                events = [e for e in self.events if e.name == event_id.ID]
            if len(events) < 1:
                return Result.default_user_error("Event not registered")
            event = events[0]

        elif isinstance(event_id, Event):
            if event_id not in self.events:
                return Result.default_user_error("Event not registered")
            event = event_id

        else:
            event = Result.default_user_error("Event not registered")

        return event

    def remove_event(self, event: Event | EventID | str):

        event = self._get_event_by_id_or_name(event)
        if isinstance(event, Event):
            self.events.remove(event)
        else:
            return event

    async def _trigger_local(self, event_id: EventID):
        """
        Exec source based on

        source_types
            F -> call directly
            R -> use get_app(str(event_id)).run_any(*args, **kwargs)
            S -> evaluate string
        scope
            instance -> _trigger_local
            local -> if you are a proxy app, run the event through get_app(str(event_id)).run_any(TBEF.EventManager._trigger_local, args=args, kwargs=kwargs, get_result=True)
            local_network -> use the proxy0 app to communicate with Daemon0, then local
            global_network ->
        exec_in
        event_id
        threaded

                       """
        event = self._get_event_by_id_or_name(event_id)

        if isinstance(event, Result):
            event.print()
            if self.identification == "P0":
                return event
            print(f"Routing to P0 {self.events}")
            if self.source_id not in self.routes_client:
                # self.routers[self.source_id] = DaemonRout(rout=self.crate_rout(self.source_id))
                await self.add_client_route("P0", ('127.0.0.1', 6568))
            return await self.route_event_id(event_id)

        # if event.threaded:
        #    threading.Thread(target=self.runner, args=(event, event_id), daemon=True).start()
        #    return "Event running In Thread"
        # else:

        return await self.runner(event, event_id)

    async def runner(self, event, event_id):

        if event.source_types.name is SourceTypes.P.name:
            return event.source(*event.args, payload=event_id, **event.kwargs_)

        if event.source_types.name is SourceTypes.F.name:
            return event.source(*event.args, **event.kwargs_)

        if event.source_types.name is SourceTypes.R.name:
            return get_app(str(event_id)).run_any(mod_function_name=event.source, get_results=True, args_=event.args,
                                                  kwargs_=event.kwargs_)

        if event.source_types.name is SourceTypes.AP.name:
            return await event.source(*event.args, payload=event_id, **event.kwargs_)

        if event.source_types.name is SourceTypes.AF.name:
            return await event.source(*event.args, **event.kwargs_)

        if event.source_types.name is SourceTypes.AR.name:
            return await get_app(str(event_id)).run_any(mod_function_name=event.source, get_results=True,
                                                        args_=event.args,
                                                        kwargs_=event.kwargs_)

        if event.source_types.name is SourceTypes.S.name:
            return eval(event.source, {'app': get_app(str(event_id)), 'event': event, 'eventManagerC': self})

    async def routing_function_router(self, event_id: EventID):

        result = await self.trigger_event(event_id)

        if result is None:
            result = Result.default_user_error("Invalid Event ID")

        if isinstance(result, bytes | dict):
            pass
        elif isinstance(result, Result):
            result.result.data_info = str(event_id)
        elif isinstance(result, EventID):
            result = Result.default_internal_error("Event not found", data=result)
        else:
            result = Result.ok(data=result, data_info="<automatic>", info=str(event_id.path))

        if isinstance(result, str):
            result = result.encode()

        return result

    async def trigger_evnet_by_name(self, name: str):
        await self.trigger_event(EventID.crate_name_as_id(name=name))

    async def trigger_event(self, event_id: EventID):
        """
        Exec source based on

        source_types
            F -> call directly
            R -> use get_app(str(event_id)).run_any(*args, **kwargs)
            S -> evaluate string
        scope
            instance -> _trigger_local
            local -> if you are a proxy app, run the event through get_app(str(event_id)).run_any(TBEF.EventManager._trigger_local, args=args, kwargs=kwargs, get_result=True)
            local_network -> use the proxy0 app to communicate with Daemon0, then local
            global_network ->
        exec_in
        event_id
        threaded

                       """
        # print(f"event-id Ptah : {event_id.get_path()}")
        # print(f"testing trigger_event for {event_id.get_source()} {event_id.get_source()[-1] == self.source_id} ")
        print(str(event_id))
        if event_id.get_source()[-1] == self.source_id:
            payload = await self._trigger_local(event_id)
            event_id.set_payload(payload)
            if len(event_id.path) > 1:
                event_id.source = ':'.join([e.split(':')[0] for e in event_id.get_path() if e != "E"])
                res = await self.route_event_id(event_id)
                if isinstance(res, Result):
                    res.print()
                else:
                    print(res)
            return payload
        return await self.route_event_id(event_id)

    async def route_event_id(self, event_id: EventID):

        # print(f"testing route_event_id for {event_id.get_source()[-1]}")
        if event_id.get_source()[-1] == '*':  # self.identification == "P0" and
            responses = []
            event_id.source = ':'.join(event_id.get_source()[:-1])
            event_id.add_path(f"{self._name}({self.source_id})")
            data = asdict(event_id)
            for name, rout_ in self.routes_client.items():
                if name in event_id.path:
                    continue
                ret = await rout_.put_data(data)
                responses.append(ret)
            return responses
        route = self.routes_client.get(event_id.get_source()[-1])
        # print("route:", route)
        if route is None:
            route = self.routes_client.get(event_id.get_path()[-1])
        if route is None:
            return event_id.add_path(("" if len(event_id.get_source()) == 1 else "404#")+self.identification)
        time.sleep(0.25)
        event_id.source = ':'.join(event_id.get_source()[:-1])
        event_id.add_path(f"{self._name}({self.source_id})")
        return await route.put_data(asdict(event_id))

    async def receiver(self):

        t0 = time.time()

        while self.running:
            time.sleep(0.25)
            if not self.receiver_que.empty():
                event_id = self.receiver_que.get()
                print("Receiver Event", str(event_id))
                await self.trigger_event(event_id)

            if time.time() - t0 > 5:
                await self.receive_all_client_data()
                t0 = time.time()

    def info(self):
        return {"source": self.source_id, "known_routs:": self.routers_servers, "_router": self.routes_client,
                "events": self.events}

    def stop(self):
        self.running = False
        list(map(lambda x: x.disconnect(), self.routes_client.values()))
        list(map(lambda x: x.stop(), self.routers_servers.values()))

    def reconnect(self, name):
        if name is None:
            pass
        elif name in self.routes_client:
            self.routes_client[name].reconnect()
            return
        list(map(lambda x: x.reconnect(), self.routes_client.values()))

    async def verify(self, name):
        if name is None:
            pass
        elif name in self.routes_client:
            await self.routes_client[name].verify()
            return
        for x in self.routes_client.values():
            await x.verify()
trigger_event(event_id) async

Exec source based on

source_types
    F -> call directly
    R -> use get_app(str(event_id)).run_any(*args, **kwargs)
    S -> evaluate string
scope
    instance -> _trigger_local
    local -> if you are a proxy app, run the event through get_app(str(event_id)).run_any(TBEF.EventManager._trigger_local, args=args, kwargs=kwargs, get_result=True)
    local_network -> use the proxy0 app to communicate with Daemon0, then local
    global_network ->
exec_in
event_id
threaded

Source code in toolboxv2/mods/EventManager/module.py (lines 621-653)
async def trigger_event(self, event_id: EventID):
    """
    Exec source based on

    source_types
        F -> call directly
        R -> use get_app(str(event_id)).run_any(*args, **kwargs)
        S -> evaluate string
    scope
        instance -> _trigger_local
        local -> if you are a proxy app, run the event through get_app(str(event_id)).run_any(TBEF.EventManager._trigger_local, args=args, kwargs=kwargs, get_result=True)
        local_network -> use the proxy0 app to communicate with Daemon0, then local
        global_network ->
    exec_in
    event_id
    threaded

                   """
    # print(f"event-id Ptah : {event_id.get_path()}")
    # print(f"testing trigger_event for {event_id.get_source()} {event_id.get_source()[-1] == self.source_id} ")
    print(str(event_id))
    if event_id.get_source()[-1] == self.source_id:
        payload = await self._trigger_local(event_id)
        event_id.set_payload(payload)
        if len(event_id.path) > 1:
            event_id.source = ':'.join([e.split(':')[0] for e in event_id.get_path() if e != "E"])
            res = await self.route_event_id(event_id)
            if isinstance(res, Result):
                res.print()
            else:
                print(res)
        return payload
    return await self.route_event_id(event_id)
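
As a minimal instance-scoped sketch (assuming Event, EventID, Scope, and SourceTypes are importable from toolboxv2.mods.EventManager.module, and that EventID.crate_with_source sets the event source to the given id as the listing above suggests):

import asyncio

def greet():
    return "hello from event"

async def demo():
    em = EventManagerClass(source_id="app0", _identification="PN")
    # scope=Scope.instance keeps the event local, so no routes are opened
    ev = em.make_event_from_fuction(greet, "greet", scope=Scope.instance)
    await em.register_event(ev)
    payload = await em.trigger_event(ev.event_id)  # resolves and runs locally
    print(payload)  # "hello from event"
    em.stop()

asyncio.run(demo())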
Rout dataclass
Source code in toolboxv2/mods/EventManager/module.py (lines 140-166)
@dataclass
class Rout:
    _from: str
    _to: str

    _from_port: int
    _from_host: str

    _to_port: int
    _to_host: str

    routing_function: Callable

    @property
    def to_host(self):
        return self._to_host

    @property
    def to_port(self):
        return self._to_port

    async def put_data(self, event_id_data: dict[str, str]):
        event_id: EventID = EventID(**event_id_data)
        return await self.routing_function(event_id)

    def close(self):
        """ Close """
close()

Close

Source code in toolboxv2/mods/EventManager/module.py (lines 165-166)
def close(self):
    """ Close """

FastApi

fast_api_install

FileBrowser
Source code in toolboxv2/mods/FastApi/fast_api_install.py (lines 390-443)
class FileBrowser:
    ALLOWED_DIRECTORIES: set[str] = {"mods_sto", "flows", "static", "apps"}

    def __init__(self, start_dir: str):
        self.static_dir = pathlib.Path(start_dir).resolve()
        self.current_container = None

    def is_path_allowed(self, file_path: pathlib.Path) -> bool:
        """Check if the path is within allowed directories."""
        if not file_path.is_relative_to(self.static_dir):
            return False

        relative_parts = file_path.parts[len(self.static_dir.parts):]
        return any(part in self.ALLOWED_DIRECTORIES for part in relative_parts)

    async def download_file(self, file_path: pathlib.Path) -> None:
        """Handle file download."""
        if not file_path.is_file() or not self.is_path_allowed(file_path):
            ui.notify('Access denied or file not found', type='negative')
            return

        # Use NiceGUI's download function
        await ui.download(str(file_path))

    def refresh_view(self, path: pathlib.Path) -> None:
        """Refresh the file browser view."""
        if self.current_container:
            self.current_container.clear()

        with self.current_container:
            # Add header with current path
            ui.label(f'Current directory: {path.relative_to(self.static_dir)}').classes('text-h6')

            # Add parent directory link if not at root
            if path != self.static_dir and path.parent.is_relative_to(self.static_dir):
                with ui.row().classes('w-full items-center'):
                    ui.button('..', on_click=lambda p=path.parent: self.refresh_view(p)) \
                        .classes('bg-blue-100 px-4 py-2 rounded')

            # List directories first
            for item in sorted(path.iterdir()):
                if not self.is_path_allowed(item):
                    continue

                with ui.row().classes('w-full items-center gap-2'):
                    if item.is_dir():
                        ui.button(f'📁 {item.name}/',
                                  on_click=lambda p=item: self.refresh_view(p)) \
                            .classes('bg-blue-100 px-4 py-2 rounded')
                    else:
                        ui.label(f'📄 {item.name}').classes('flex-grow')
                        ui.button('Download',
                                  on_click=lambda p=item: self.download_file(p)) \
                            .classes('bg-green-100 px-4 py-2 rounded')
download_file(file_path) async

Handle file download.

Source code in toolboxv2/mods/FastApi/fast_api_install.py (lines 405-412)
async def download_file(self, file_path: pathlib.Path) -> None:
    """Handle file download."""
    if not file_path.is_file() or not self.is_path_allowed(file_path):
        ui.notify('Access denied or file not found', type='negative')
        return

    # Use NiceGUI's download function
    await ui.download(str(file_path))
is_path_allowed(file_path)

Check if the path is within allowed directories.

Source code in toolboxv2/mods/FastApi/fast_api_install.py (lines 397-403)
def is_path_allowed(self, file_path: pathlib.Path) -> bool:
    """Check if the path is within allowed directories."""
    if not file_path.is_relative_to(self.static_dir):
        return False

    relative_parts = file_path.parts[len(self.static_dir.parts):]
    return any(part in self.ALLOWED_DIRECTORIES for part in relative_parts)
refresh_view(path)

Refresh the file browser view.

Source code in toolboxv2/mods/FastApi/fast_api_install.py (lines 414-443)
def refresh_view(self, path: pathlib.Path) -> None:
    """Refresh the file browser view."""
    if self.current_container:
        self.current_container.clear()

    with self.current_container:
        # Add header with current path
        ui.label(f'Current directory: {path.relative_to(self.static_dir)}').classes('text-h6')

        # Add parent directory link if not at root
        if path != self.static_dir and path.parent.is_relative_to(self.static_dir):
            with ui.row().classes('w-full items-center'):
                ui.button('..', on_click=lambda p=path.parent: self.refresh_view(p)) \
                    .classes('bg-blue-100 px-4 py-2 rounded')

        # List directories first
        for item in sorted(path.iterdir()):
            if not self.is_path_allowed(item):
                continue

            with ui.row().classes('w-full items-center gap-2'):
                if item.is_dir():
                    ui.button(f'📁 {item.name}/',
                              on_click=lambda p=item: self.refresh_view(p)) \
                        .classes('bg-blue-100 px-4 py-2 rounded')
                else:
                    ui.label(f'📄 {item.name}').classes('flex-grow')
                    ui.button('Download',
                              on_click=lambda p=item: self.download_file(p)) \
                        .classes('bg-green-100 px-4 py-2 rounded')
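
A quick sketch of the allow-list check (the directory layout is illustrative; only paths under start_dir whose relative parts touch one of the ALLOWED_DIRECTORIES pass):

import pathlib

fb = FileBrowser("./web")
print(fb.is_path_allowed(pathlib.Path("./web/static/logo.png").resolve()))  # True: under "static"
print(fb.is_path_allowed(pathlib.Path("./web/secret/key.pem").resolve()))   # False: not an allowed directory
print(fb.is_path_allowed(pathlib.Path("/etc/passwd").resolve()))            # False: outside start_dir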

fast_lit

APIRequestHelper
Source code in toolboxv2/mods/FastApi/fast_lit.py (lines 89-119)
class APIRequestHelper:
    def __init__(self, token_secret: str):
        self.token_secret = token_secret

    async def make_api_request(self, endpoint: str, method: str, data: dict | None = None,
                               headers: dict | None = None, session_token: str | None = None) -> Any:
        """
        Make API requests while maintaining session context
        """
        import httpx

        if headers is None:
            headers = {}

        if session_token:
            try:
                session_data = jwt.decode(session_token, self.token_secret, algorithms=["HS256"])
                headers['X-Session-ID'] = session_data.get('session_id')
                headers['Authorization'] = f'Bearer {session_token}'
            except jwt.InvalidTokenError:
                raise ValueError("Invalid session token")

        async with httpx.AsyncClient() as client:
            response = await client.request(
                method=method,
                url=endpoint,
                json=data,
                headers=headers
            )

            return response.json()
make_api_request(endpoint, method, data=None, headers=None, session_token=None) async

Make API requests while maintaining session context

Source code in toolboxv2/mods/FastApi/fast_lit.py (lines 93-119)
async def make_api_request(self, endpoint: str, method: str, data: dict | None = None,
                           headers: dict | None = None, session_token: str | None = None) -> Any:
    """
    Make API requests while maintaining session context
    """
    import httpx

    if headers is None:
        headers = {}

    if session_token:
        try:
            session_data = jwt.decode(session_token, self.token_secret, algorithms=["HS256"])
            headers['X-Session-ID'] = session_data.get('session_id')
            headers['Authorization'] = f'Bearer {session_token}'
        except jwt.InvalidTokenError:
            raise ValueError("Invalid session token")

    async with httpx.AsyncClient() as client:
        response = await client.request(
            method=method,
            url=endpoint,
            json=data,
            headers=headers
        )

        return response.json()
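
A hypothetical call (endpoint URL and secret are placeholders; session_token must be an HS256 JWT signed with the same secret, otherwise make_api_request raises ValueError):

import asyncio

helper = APIRequestHelper(token_secret="your-secret-key")

async def fetch_profile(session_token: str):
    return await helper.make_api_request(
        endpoint="http://localhost:8000/api/profile",  # placeholder URL
        method="GET",
        session_token=session_token,
    )

# data = asyncio.run(fetch_profile(token))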
BidirectionalStreamlitAppManager

Bases: BaseHTTPMiddleware

Source code in toolboxv2/mods/FastApi/fast_lit.py (lines 122-209)
class BidirectionalStreamlitAppManager(BaseHTTPMiddleware, metaclass=Singleton):
    def __init__(self, app: FastAPI, streamlit_apps_dir: str = "./apps"):
        super().__init__(app)
        self.streamlit_manager = StreamlitAppManager()
        self.streamlit_apps_dir = streamlit_apps_dir
        self.token_secret = os.getenv("TOKEN_SECRET", "your-secret-key")
        self.api_helper = APIRequestHelper(self.token_secret)

        # Run cleanup task
        asyncio.create_task(self.periodic_cleanup())

    #def add_ws(self, fast_app):
        # Register WebSocket routes
     #   fast_app.add_api_websocket_route("/ws/{session_id}/{app_id}", self.websocket_endpoint, "StWebSocket")

    async def periodic_cleanup(self):
        while True:
            self.streamlit_manager.cleanup_inactive_apps()
            await asyncio.sleep(3600)

    def create_streamlit_token(self, session_data: dict, app_name: str) -> str:
        payload = {
            "app_name": app_name,
            "session_id": session_data.get("ID"),
            "user_data": session_data.get("live_data"),
            "exp": datetime.utcnow() + timedelta(hours=1)
        }
        return jwt.encode(payload, self.token_secret, algorithm="HS256")

    #async def websocket_endpoint(self, websocket: WebSocket, session_id: str, app_id: str):
    #    await self.streamlit_manager.ws_manager.connect(websocket, session_id, app_id)
    #    try:
    #        while True:
    #            message = await websocket.receive_json()
    #            await self.streamlit_manager.ws_manager.handle_message(session_id, message)
    #    except WebSocketDisconnect:
    #        await self.streamlit_manager.ws_manager.disconnect(session_id, app_id)

    async def resolve_session_token(self, request: Request) -> str | None:
        """
        Extract and validate session token from request
        """
        token = request.headers.get('Authorization', '').replace('Bearer ', '')
        if not token:
            token = request.query_params.get('token')

        if token:
            try:
                jwt.decode(token, self.token_secret, algorithms=["HS256"])
                return token
            except jwt.InvalidTokenError:
                return None
        return None

    async def dispatch(self, request: Request, call_next) -> Response:
        # Handle API routes with session token resolution
        if request.url.path.startswith("/api/"):
            session_token = await self.resolve_session_token(request)
            if session_token:
                # Inject session data into request state
                request.state.session_token = session_token
                request.state.api_helper = self.api_helper

        # Handle Streamlit routes
        elif request.url.path.startswith("/apps/"):
            app_name = request.url.path.split("/")[-1]
            app_path = os.path.join(self.streamlit_apps_dir, f"{app_name}.py")

            # Verify session is valid
            if 'public' not in app_name and not request.session.get("valid", False):
                return JSONResponse(
                    status_code=401,
                    content={"message": "Invalid session"}
                )

            if not os.path.exists(app_path):
                return JSONResponse(
                    status_code=401,
                    content={"message": "no app found"}
                )

            streamlit_token = self.create_streamlit_token(request.session, app_name)
            port = await self.streamlit_manager.start_app(app_path, request.session.get("ID") + app_name)
            # Build the redirect URL against the requesting host
            streamlit_url = f"http://{request.url.hostname}:{port}?token={streamlit_token}"
            return RedirectResponse(url=streamlit_url)

        response = await call_next(request)
        return response
resolve_session_token(request) async

Extract and validate session token from request

Source code in toolboxv2/mods/FastApi/fast_lit.py
async def resolve_session_token(self, request: Request) -> str | None:
    """
    Extract and validate session token from request
    """
    token = request.headers.get('Authorization', '').replace('Bearer ', '')
    if not token:
        token = request.query_params.get('token')

    if token:
        try:
            jwt.decode(token, self.token_secret, algorithms=["HS256"])
            return token
        except jwt.InvalidTokenError:
            return None
    return None
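
Example — a minimal, hypothetical client sketch for this method: it mints a token with PyJWT using the same TOKEN_SECRET the middleware reads, then presents it both ways the method accepts. The endpoint path, host, and the use of httpx are illustrative assumptions.

import os
from datetime import datetime, timedelta

import httpx
import jwt

secret = os.getenv("TOKEN_SECRET", "your-secret-key")
token = jwt.encode(
    {"app_name": "demo", "exp": datetime.utcnow() + timedelta(hours=1)},
    secret,
    algorithm="HS256",
)

# Header transport (checked first by resolve_session_token) ...
httpx.get("http://localhost:5000/api/demo", headers={"Authorization": f"Bearer {token}"})
# ... with the ?token= query parameter as fallback.
httpx.get("http://localhost:5000/api/demo", params={"token": token})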
inject_custom_css(css_file_path='./web/assets/styles.css')

Reads a CSS file and injects it into the Streamlit app.

Source code in toolboxv2/mods/FastApi/fast_lit.py
def inject_custom_css(css_file_path="./web/assets/styles.css"):
    """
    Liest eine CSS-Datei ein und injiziert sie in die Streamlit-App.
    """
    import streamlit as st
    try:
        with open(css_file_path) as f:
            css_content = f.read()

        # CSS in einen <style>-Tag einbetten
        css_injection = f"<style>{css_content}</style>"

        # CSS in Streamlit injizieren
        st.markdown(css_injection, unsafe_allow_html=True)
    except Exception as e:
        st.error(f"Fehler beim Laden des CSS: {e}")

    st.markdown("""
        <style>
            .reportview-container {
                margin-top: -2em;
            }
            #MainMenu {visibility: hidden;}
            .stDeployButton {display:none;}
            footer {visibility: hidden;}
            #stDecoration {display:none;}
        </style>
    """, unsafe_allow_html=True)
make_api_request(endpoint, method='GET', data=None) async

Helper function for making API requests from Streamlit apps

Source code in toolboxv2/mods/FastApi/fast_lit.py
async def make_api_request(endpoint: str, method: str = "GET", data: dict | None = None):
    """Helper function for making API requests from Streamlit apps"""
    import streamlit as st

    if not hasattr(st.session_state, 'token'):
        st.error("No valid session token found")
        st.stop()

    headers = {
        'Authorization': f'Bearer {st.session_state.token}',
        'Content-Type': 'application/json'
    }

    try:
        api_helper = APIRequestHelper(os.getenv("TOKEN_SECRET", "your-secret-key"))
        response = await api_helper.make_api_request(
            endpoint=endpoint,
            method=method,
            data=data,
            headers=headers,
            session_token=st.session_state.token
        )
        return response
    except Exception as e:
        st.error(f"API request failed: {str(e)}")
        return None
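
Example — a hedged usage sketch from inside a Streamlit script. Streamlit scripts run synchronously, so the coroutine is driven with asyncio.run(); the endpoint is illustrative (it reuses the /api/CloudM/openui route registered by the NiceGUI manager below), and the import path is assumed from the listing's location.

import asyncio

import streamlit as st

from toolboxv2.mods.FastApi.fast_lit import make_api_request

result = asyncio.run(make_api_request("/api/CloudM/openui", method="GET"))
if result is not None:
    st.json(result)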

fast_nice

NiceGUIManager
Source code in toolboxv2/mods/FastApi/fast_nice.py
class NiceGUIManager(metaclass=Singleton):
    init = False
    def __init__(self, fastapi_app: FastAPI = None, styles_path: str = "./web/assets/styles.css"):

        if fastapi_app is None:
            return None
        self.admin_password = os.getenv("TB_R_KEY", "root@admin")
        self.app = fastapi_app
        self.styles_path = styles_path
        self.registered_guis: dict[str, dict[str, Any]] = {}
        self.ws_connections: dict[str, dict[str, WebSocket]] = {}
        self.mount_path = "/gui"
        self.endpoints: list[UIEndpoint] = []

        self.helper_contex = open("./dist/helper.html", encoding="utf-8").read()

        self.app.add_middleware(BaseHTTPMiddleware, dispatch=self.middleware_dispatch)

        # Add WebSocket endpoint
        self.app.websocket("/ws/{session_id}/{gui_id}")(self.websocket_endpoint)
        self._setup_admin_gui()
        self._setup_endpoints_api()

    def _setup_endpoints_api(self):
        @self.app.get("/api/CloudM/openui")
        def get_ui_endpoints(request: Request) -> list[dict]:
            def _(endpoint):
                add_true = True
                if endpoint.only_valid:
                    add_true = request.session['valid']

                if add_true and endpoint.only_root:
                    add_true = request.session.get('live_data', {}).get('user_name') == 'root'
                return add_true
            return [{"path": endpoint.path,
    "title": endpoint.title,
    "description": endpoint.description} for endpoint in self.endpoints if endpoint.show and _(endpoint)]

    def _setup_admin_gui(self):
        """Setup the admin GUI interface"""

        @ui.page('/admin')
        def admin_gui(user=None):
            print("admin_gui;", user)
            if user is None or user.name != "root":
                return

            with ui.card().style("background-color: var(--background-color) !important").classes('w-full'):
                ui.label('NiceGUI Manager Admin Interface').classes('text-2xl font-bold mb-4')

                # GUI Management Section
                with ui.tabs().style("background-color: var(--background-color) !important") as tabs:
                    ui.tab('Registered GUIs')
                    ui.tab('Add New GUI')
                    ui.tab('System Status')

                with ui.tab_panels(tabs, value='Registered GUIs').style(
                    "background-color: var(--background-color) !important"):
                    with ui.tab_panel('Registered GUIs'):
                        self._show_registered_guis()

                    with ui.tab_panel('Add New GUI'):
                        self._show_add_gui_form()

                    with ui.tab_panel('System Status'):
                        self._show_system_status()

        self.register_gui("admin", admin_gui, "/admin", only_root=True)

    def _show_registered_guis(self):
        """Show list of registered GUIs with management options"""
        with ui.column().classes('w-full gap-4'):
            for gui_id, gui_info in self.registered_guis.items():
                with ui.card().classes('w-full').style("background-color: var(--background-color) !important"):
                    with ui.row().classes('w-full items-center justify-between').style(
                        "background-color: var(--background-color) !important"):
                        ui.label(f'GUI ID: {gui_id}').classes('font-bold')
                        ui.label(f'Path: {gui_info["path"]}')

                        created_at = gui_info['created_at'].strftime('%Y-%m-%d %H:%M:%S')
                        ui.label(f'Created: {created_at}')

                        with ui.row().classes('gap-2').style("background-color: var(--background-color) !important"):
                            ui.button('View', on_click=lambda g=gui_info['path']: ui.navigate.to(g))
                            ui.button('Remove', on_click=lambda g=gui_id: self._handle_gui_removal(g))
                            ui.button('Restart', on_click=lambda g=gui_id: self._handle_gui_restart(g))

                    # Show connection status
                    active_connections = sum(
                        1 for connections in self.ws_connections.values()
                        if gui_id in connections
                    )
                    ui.label(f'Active Connections: {active_connections}')

    def _show_add_gui_form(self):
        """Show form for adding new GUI"""
        with ui.card().classes('w-full').style("background-color: var(--background-color) !important"):
            gui_id = ui.input('GUI ID').classes('w-full')
            mount_path = ui.input('Mount Path (optional)').classes('w-full')

            # Code editor for GUI setup
            code_editor = ui.editor(
                value='def setup_gui():\n    ui.label("New GUI")\n',
            ).classes('w-full h-64')

            def add_new_gui():
                try:
                    # Create setup function from code
                    setup_code = code_editor.value
                    setup_namespace = {}
                    exec(setup_code, {'ui': ui}, setup_namespace)
                    setup_func = setup_namespace['setup_gui']

                    # Register the new GUI
                    self.register_gui(
                        gui_id.value,
                        setup_func,
                        mount_path.value if mount_path.value else None
                    )

                    ui.notify('GUI added successfully')
                    ui.navigate.to('/admin')  # Refresh page
                except Exception as e:
                    ui.notify(f'Error adding GUI: {str(e)}', color='negative')

            ui.button('Add GUI', on_click=add_new_gui).classes('w-full mt-4')

    def _show_system_status(self):
        """Show system status information"""
        with ui.card().classes('w-full').style("background-color: var(--background-color) !important"):
            ui.label('System Status').classes('text-xl font-bold mb-4')

            # System stats
            ui.label(f'Total GUIs: {len(self.registered_guis)}')
            ui.label(f'Total WebSocket Connections: {sum(len(conns) for conns in self.ws_connections.values())}')

            # Memory usage
            import psutil
            process = psutil.Process()
            memory_usage = process.memory_info().rss / 1024 / 1024  # MB
            ui.label(f'Memory Usage: {memory_usage:.2f} MB')

            # Add refresh button
            ui.button('Refresh Stats', on_click=lambda: ui.navigate.to('/admin'))

    def _handle_gui_removal(self, gui_id: str):
        """Handle GUI removal with confirmation"""

        def confirm_remove():
            if self.remove_gui(gui_id):
                ui.notify(f'GUI {gui_id} removed successfully')
                ui.navigate.to('/admin')  # Refresh page
            else:
                ui.notify('Error removing GUI', color='negative')

        ui.notify('Are you sure?',
                  actions=[{'label': 'Yes', 'on_click': confirm_remove},
                           {'label': 'No'}])

    def _handle_gui_restart(self, gui_id: str):
        """Handle GUI restart"""
        try:
            if gui_id in self.registered_guis:
                gui_info = self.registered_guis[gui_id]
                # Re-register the GUI with the same setup
                self.register_gui(gui_id, gui_info['setup'], gui_info['path'])
                ui.notify(f'GUI {gui_id} restarted successfully')
            else:
                ui.notify('GUI not found', color='negative')
        except Exception as e:
            ui.notify(f'Error restarting GUI: {str(e)}', color='negative')

    def _load_styles(self) -> str:
        """Load custom styles from CSS file"""
        try:
            with open(self.styles_path) as f:
                return f.read()
        except Exception as e:
            print(f"Error loading styles: {e}")
            return ""

    def register_gui(self, gui_id: str, setup_func: Callable, mount_path: str | None = None, additional: str | None = None, title: str | None = None, description: str | None = None, **kwargs) -> bool:
        """Register a new NiceGUI application"""
        path = mount_path or f"/{gui_id}"
        self.endpoints.append(UIEndpoint(path=self.mount_path + path, title=title if title is not None else path.replace('/', ''), description=description if description is not None else '', **kwargs))
        if additional is None:
            additional = ""

        def has_parameters(func, *params):
            """
            Überprüft, ob die Funktion bestimmte Parameter hat.

            :param func: Die zu analysierende Funktion.
            :param params: Eine Liste der zu suchenden Parameter.
            :return: Ein Dictionary mit den Parametern und einem booleschen Wert.
            """
            signature = inspect.signature(func)
            func_params = signature.parameters.keys()
            return {param: param in func_params for param in params}

        async def request_to_request_session(request):
            jk = request.json()
            if asyncio.iscoroutine(jk):
                with contextlib.suppress(Exception):
                    jk = await jk
            def js():
                return jk
            return RequestSession(
                session=request.session,
                body=request.body,
                json=js,
                row=request,
            )

        get_app()

        @ui.page(path)
        async def wrapped_gui(request: Request):
            # Inject custom styles
            ui.add_body_html(self.helper_contex + additional)
            # ui.switch('Dark').bind_value(ui, 'dark_mode')
            # ui.add_css("q-card {background-color: var(--background-color)} !important")
            # ui.add_body_html('<script src="../index.js" type="module" defer></script>')

            # Initialize the GUI
            params_ = {}
            params = has_parameters(setup_func, 'request', 'user', 'session', 'spec', 'sid')

            if params.get('request'):
                params_['request'] = await request_to_request_session(request)
            if params.get('user'):
                params_['user'] = await get_user_from_request(get_app(), request)
            if params.get('session'):
                params_['session'] = request.session
            if params.get('spec'):
                params_['spec'] = get_spec(request)
            if params.get('sid'):
                params_['sid'] = get_s_id(request)

            async def task():
                if asyncio.iscoroutinefunction(setup_func):

                    # Wire up the dark-mode toggle button
                    await ui.run_javascript('''
                            Quasar.Dark.set("auto");
                            tailwind.config.darkMode = "media";
                        ''')

                    await ui.run_javascript("""
                    document.getElementById('darkModeToggle').addEventListener('click', function () {
                    const toggleLabel = document.getElementById('toggleLabel')
                    if (toggleLabel.innerHTML == `<span class="material-symbols-outlined">
dark_mode
</span>`){
                            Quasar.Dark.set(true);
                            tailwind.config.darkMode = "class";
                            document.body.classList.add("dark");
                        }else{
                            Quasar.Dark.set(false);
                            tailwind.config.darkMode = "class"
                            document.body.classList.remove("dark");
                        }
                    });
                    """)

                    if not params_:
                        await setup_func()
                    else:
                        await setup_func(**params_)
                else:
                    if not params_:
                        setup_func()
                    else:
                        setup_func(**params_)

            await task()
            # return result

        self.registered_guis[gui_id] = {
            'path': path,
            'setup': setup_func,
            'created_at': datetime.now()
        }

        print("Registered GUI:", self.registered_guis[gui_id])
        return True

    def remove_gui(self, gui_id: str) -> bool:
        """Remove a registered GUI application"""
        if gui_id in self.registered_guis:
            # Remove from registry
            del self.registered_guis[gui_id]

            # Clean up any WebSocket connections
            for session_id in self.ws_connections:
                if gui_id in self.ws_connections[session_id]:
                    del self.ws_connections[session_id][gui_id]

            return True
        return False

    async def websocket_endpoint(self, websocket: WebSocket, session_id: str, gui_id: str):
        """Handle WebSocket connections for real-time updates"""
        await websocket.accept()

        if session_id not in self.ws_connections:
            self.ws_connections[session_id] = {}
        self.ws_connections[session_id][gui_id] = websocket

        try:
            while True:
                data = await websocket.receive_json()
                # Handle incoming WebSocket messages
                await self.handle_ws_message(session_id, gui_id, data)
        except WebSocketDisconnect:
            if session_id in self.ws_connections:
                if gui_id in self.ws_connections[session_id]:
                    del self.ws_connections[session_id][gui_id]

    async def handle_ws_message(self, session_id: str, gui_id: str, message: dict):
        """Handle incoming WebSocket messages"""
        # Implement custom WebSocket message handling
        if message.get('type') == 'update':
            # Broadcast updates to all connected clients for this GUI
            await self.broadcast_to_gui(gui_id, {
                'type': 'update',
                'data': message.get('data')
            })

    async def broadcast_to_gui(self, gui_id: str, message: dict):
        """Broadcast a message to all sessions connected to a specific GUI"""
        for session_connections in self.ws_connections.values():
            if gui_id in session_connections:
                await session_connections[gui_id].send_json(message)

    async def middleware_dispatch(self, request: Request, call_next) -> Response:
        """Custom middleware for session handling and authentication"""
        async def callN():
            response = await call_next(request)
            return response

        if not request.url.path.startswith(self.mount_path):
            return await callN()

        if request.url.path.endswith("/favicon.ico"):
            return await callN()
        if "_nicegui" in request.url.path and "static" in request.url.path:
            return await callN()
        if "_nicegui" in request.url.path and "components" in request.url.path:
            return await callN()
        if "_nicegui" in request.url.path and "codehilite" in request.url.path:
            return await callN()
        if "_nicegui" in request.url.path and "libraries" in request.url.path:
            return await callN()

        if "open" in request.url.path:
            return await callN()

        # Verify session if needed
        if not request.session.get("valid", False):
            return RedirectResponse(f"/web/login?next={request.url.path}")

        response = await call_next(request)
        return response

    def init_app(self) -> None:
        """Initialize the FastAPI application with NiceGUI integration"""
        self.init = True
        ui.run_with(
            self.app,
            mount_path=self.mount_path,
            favicon=os.getenv("FAVI"), # "/root/Toolboxv2/toolboxv2/favicon.ico"
            show_welcome_message=False,
            # prod_js=False,
        )
broadcast_to_gui(gui_id, message) async

Broadcast a message to all sessions connected to a specific GUI

Source code in toolboxv2/mods/FastApi/fast_nice.py
async def broadcast_to_gui(self, gui_id: str, message: dict):
    """Broadcast a message to all sessions connected to a specific GUI"""
    for session_connections in self.ws_connections.values():
        if gui_id in session_connections:
            await session_connections[gui_id].send_json(message)
handle_ws_message(session_id, gui_id, message) async

Handle incoming WebSocket messages

Source code in toolboxv2/mods/FastApi/fast_nice.py
async def handle_ws_message(self, session_id: str, gui_id: str, message: dict):
    """Handle incoming WebSocket messages"""
    # Implement custom WebSocket message handling
    if message.get('type') == 'update':
        # Broadcast updates to all connected clients for this GUI
        await self.broadcast_to_gui(gui_id, {
            'type': 'update',
            'data': message.get('data')
        })
init_app()

Initialize the FastAPI application with NiceGUI integration

Source code in toolboxv2/mods/FastApi/fast_nice.py
def init_app(self) -> None:
    """Initialize the FastAPI application with NiceGUI integration"""
    self.init = True
    ui.run_with(
        self.app,
        mount_path=self.mount_path,
        favicon=os.getenv("FAVI"), # "/root/Toolboxv2/toolboxv2/favicon.ico"
        show_welcome_message=False,
        # prod_js=False,
    )
middleware_dispatch(request, call_next) async

Custom middleware for session handling and authentication

Source code in toolboxv2/mods/FastApi/fast_nice.py
async def middleware_dispatch(self, request: Request, call_next) -> Response:
    """Custom middleware for session handling and authentication"""
    async def callN():
        response = await call_next(request)
        return response

    if not request.url.path.startswith(self.mount_path):
        return await callN()

    if request.url.path.endswith("/favicon.ico"):
        return await callN()
    if "_nicegui" in request.url.path and "static" in request.url.path:
        return await callN()
    if "_nicegui" in request.url.path and "components" in request.url.path:
        return await callN()
    if "_nicegui" in request.url.path and "codehilite" in request.url.path:
        return await callN()
    if "_nicegui" in request.url.path and "libraries" in request.url.path:
        return await callN()

    if "open" in request.url.path:
        return await callN()

    # Verify session if needed
    if not request.session.get("valid", False):
        return RedirectResponse(f"/web/login?next={request.url.path}")

    response = await call_next(request)
    return response
register_gui(gui_id, setup_func, mount_path=None, additional=None, title=None, description=None, **kwargs)

Register a new NiceGUI application

Source code in toolboxv2/mods/FastApi/fast_nice.py
    def register_gui(self, gui_id: str, setup_func: Callable, mount_path: str | None = None, additional: str | None = None, title: str | None = None, description: str | None = None, **kwargs) -> bool:
        """Register a new NiceGUI application"""
        path = mount_path or f"/{gui_id}"
        self.endpoints.append(UIEndpoint(path=self.mount_path + path, title=title if title is not None else path.replace('/', ''), description=description if description is not None else '', **kwargs))
        if additional is None:
            additional = ""

        def has_parameters(func, *params):
            """
            Überprüft, ob die Funktion bestimmte Parameter hat.

            :param func: Die zu analysierende Funktion.
            :param params: Eine Liste der zu suchenden Parameter.
            :return: Ein Dictionary mit den Parametern und einem booleschen Wert.
            """
            signature = inspect.signature(func)
            func_params = signature.parameters.keys()
            return {param: param in func_params for param in params}

        async def request_to_request_session(request):
            jk = request.json()
            if asyncio.iscoroutine(jk):
                with contextlib.suppress(Exception):
                    jk = await jk
            def js():
                return jk
            return RequestSession(
                session=request.session,
                body=request.body,
                json=js,
                row=request,
            )

        get_app()

        @ui.page(path)
        async def wrapped_gui(request: Request):
            # Inject custom styles
            ui.add_body_html(self.helper_contex + additional)
            # ui.switch('Dark').bind_value(ui, 'dark_mode')
            # ui.add_css("q-card {background-color: var(--background-color)} !important")
            # ui.add_body_html('<script src="../index.js" type="module" defer></script>')

            # Initialize the GUI
            params_ = {}
            params = has_parameters(setup_func, 'request', 'user', 'session', 'spec', 'sid')

            if params.get('request'):
                params_['request'] = await request_to_request_session(request)
            if params.get('user'):
                params_['user'] = await get_user_from_request(get_app(), request)
            if params.get('session'):
                params_['session'] = request.session
            if params.get('spec'):
                params_['spec'] = get_spec(request)
            if params.get('sid'):
                params_['sid'] = get_s_id(request)

            async def task():
                if asyncio.iscoroutinefunction(setup_func):

                    # Wire up the dark-mode toggle button
                    await ui.run_javascript('''
                            Quasar.Dark.set("auto");
                            tailwind.config.darkMode = "media";
                        ''')

                    await ui.run_javascript("""
                    document.getElementById('darkModeToggle').addEventListener('click', function () {
                    const toggleLabel = document.getElementById('toggleLabel')
                    if (toggleLabel.innerHTML == `<span class="material-symbols-outlined">
dark_mode
</span>`){
                            Quasar.Dark.set(true);
                            tailwind.config.darkMode = "class";
                            document.body.classList.add("dark");
                        }else{
                            Quasar.Dark.set(false);
                            tailwind.config.darkMode = "class"
                            document.body.classList.remove("dark");
                        }
                    });
                    """)

                    if not params_:
                        await setup_func()
                    else:
                        await setup_func(**params_)
                else:
                    if not params_:
                        setup_func()
                    else:
                        setup_func(**params_)

            await task()
            # return result

        self.registered_guis[gui_id] = {
            'path': path,
            'setup': setup_func,
            'created_at': datetime.now()
        }

        print("Registered GUI:", self.registered_guis[gui_id])
        return True
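
Example — a hedged usage sketch: the wrapper inspects the setup function's signature and injects only the parameters it declares (request, user, session, spec, sid), so a page can simply ask for what it needs. The page content is illustrative; only_valid is one of the UIEndpoint flags consumed by the /api/CloudM/openui endpoint above, and manager is an initialized NiceGUIManager.

from nicegui import ui

def dashboard(user, session):
    ui.label(f"Hello {user.name}")
    ui.label(f"Session valid: {session.get('valid', False)}")

manager.register_gui(
    "dashboard",
    dashboard,
    mount_path="/dashboard",
    title="Dashboard",
    description="Per-user dashboard",
    only_valid=True,
)
# The page is then served under the manager's mount path, e.g. /gui/dashboard.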
remove_gui(gui_id)

Remove a registered GUI application

Source code in toolboxv2/mods/FastApi/fast_nice.py
def remove_gui(self, gui_id: str) -> bool:
    """Remove a registered GUI application"""
    if gui_id in self.registered_guis:
        # Remove from registry
        del self.registered_guis[gui_id]

        # Clean up any WebSocket connections
        for session_id in self.ws_connections:
            if gui_id in self.ws_connections[session_id]:
                del self.ws_connections[session_id][gui_id]

        return True
    return False
websocket_endpoint(websocket, session_id, gui_id) async

Handle WebSocket connections for real-time updates

Source code in toolboxv2/mods/FastApi/fast_nice.py
async def websocket_endpoint(self, websocket: WebSocket, session_id: str, gui_id: str):
    """Handle WebSocket connections for real-time updates"""
    await websocket.accept()

    if session_id not in self.ws_connections:
        self.ws_connections[session_id] = {}
    self.ws_connections[session_id][gui_id] = websocket

    try:
        while True:
            data = await websocket.receive_json()
            # Handle incoming WebSocket messages
            await self.handle_ws_message(session_id, gui_id, data)
    except WebSocketDisconnect:
        if session_id in self.ws_connections:
            if gui_id in self.ws_connections[session_id]:
                del self.ws_connections[session_id][gui_id]
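
Example — a hypothetical client for the /ws/{session_id}/{gui_id} route, assuming the third-party websockets package; host, port, and IDs are illustrative. A message of type "update" is re-broadcast by handle_ws_message() to every session connected to the same gui_id, including the sender.

import asyncio
import json

import websockets

async def push_update():
    async with websockets.connect("ws://localhost:5000/ws/session-1/admin") as ws:
        await ws.send(json.dumps({"type": "update", "data": {"counter": 1}}))
        print(await ws.recv())  # receives the broadcast echo

asyncio.run(push_update())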
create_nicegui_manager(app, token_secret=None)

Create and initialize a NiceGUI manager instance

Source code in toolboxv2/mods/FastApi/fast_nice.py
def create_nicegui_manager(app: FastAPI, token_secret: str | None = None) -> NiceGUIManager:
    """Create and initialize a NiceGUI manager instance"""
    # NiceGUIManager.__init__ takes (fastapi_app, styles_path), so token_secret is not forwarded here
    manager = NiceGUIManager(app)
    manager.init_app()
    manager_online[0] = True
    return manager
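
Example — a minimal wiring sketch, assuming a plain FastAPI app: create_nicegui_manager() builds the singleton manager, mounts NiceGUI under /gui, and flips the module's manager_online flag.

from fastapi import FastAPI

from toolboxv2.mods.FastApi.fast_nice import create_nicegui_manager

app = FastAPI()
manager = create_nicegui_manager(app)
# GUIs registered via manager.register_gui(...) are then reachable under /gui/<path>.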

manager

Tools

Bases: MainTool, FileHandler

A production-ready API Manager for running, monitoring, and managing FastAPI instances.

This class allows you to:
  • Start API instances (live, development, debug)
  • Stop and restart running APIs
  • Update configuration for APIs
  • Get live diagnostic info about running APIs
Source code in toolboxv2/mods/FastApi/manager.py
class Tools(MainTool, FileHandler):
    """
    A production-ready API Manager for running, monitoring, and managing FastAPI instances.

    This class allows you to:
      - Start API instances (live, development, debug)
      - Stop and restart running APIs
      - Update configuration for APIs
      - Get live diagnostic info about running APIs
    """

    def __init__(self, app: Any | None = None) -> None:
        # Running APIs are stored as a mapping from api_name to multiprocessing.Process
        self.running_apis: dict[str, multiprocessing.Process] = {}
        self.api_config: dict[str, dict[str, str | int]] = {}
        self.version: str = VERSION
        self.name: str = NAME
        self.logger: logging.Logger = app.logger if app else logging.getLogger(__name__)
        self.color: str = "WHITE"
        self.keys: dict[str, str] = {"Apis": "api~config"}
        # In case app is not passed in, ensure that we have a dummy object with required properties

        # Define available tool commands
        self.tools: dict[str, Any] = {
            "all": [
                ["Version", "Shows current Version"],
                ["edit-api", "Set default API for name, host and port"],
                ["start-api", "Start an API instance"],
                ["stop-api", "Stop a running API instance"],
                ["restart-api", "Restart an API instance"],
                ["info", "Show API configurations and running APIs"],
            ],
            "name": "api_manager",
            "Version": self.show_version,
            "edit-api": self.conf_api,
            "stop-api": self.stop_api,
            "start": self.start_live,
            "startE": self._start_api,
            "startDev": self.start_dev,
            "startDUG": self.start_debug,
            "info": self.show_running,
            "restart-api": self.restart_api,
        }

        # Initialize FileHandler with default configuration data
        default_config = {
            "Apis": {
                'main': {
                    "Name": 'main',
                    "version": self.version,
                    "port": 5000,
                    "host": '127.0.0.1'
                }
            }
        }
        FileHandler.__init__(self, "apis.config", self.app.id, self.keys, default_config)
        MainTool.__init__(
            self,
            load=self.on_start,
            v=self.version,
            tool=self.tools,
            name=self.name,
            logs=self.logger,
            color=self.color,
            on_exit=self.on_exit,
        )
        os.makedirs("./.data", exist_ok=True)

    @staticmethod
    def _get_pid_file_path(api_name: str) -> str:
        """Get the path to the PID file for an API."""
        return os.path.join("./.data", f"api_pid_{api_name}")


    def show_version(self) -> str:
        """Display and return the current version."""
        self.logger.info("Version: %s", self.version)
        return self.version

    def info(self) -> dict[str, Any]:
        """
        Return diagnostic information about API configurations and currently running APIs.
        """
        config_info = {name: cfg for name, cfg in self.api_config.items()}
        running_info = {name: proc.pid for name, proc in self.running_apis.items() if proc.is_alive()}
        self.logger.info("API Configurations: %s", config_info)
        self.logger.info("Running APIs: %s", running_info)
        # Optionally, print to console as well
        for api_name, cfg in config_info.items():
            print(f"Configured API - Name: {api_name}, Config: {cfg}")
        print("Running APIs:")
        for api_name, pid in running_info.items():
            print(f"API: {api_name}, Process ID: {pid}")
        return {"configurations": config_info, "running": running_info}

    def conf_api(self, api_name: str, host: str = "localhost", port: int = 5000) -> None:
        """
        Update or create an API configuration.

        Args:
            api_name (str): The name of the API.
            host (str): The host address (default "localhost"). Use "lh" for "127.0.0.1" or "0" for "0.0.0.0".
            port (int): The port number (default 5000; use "0" for port 8000).
        """
        if host.lower() == "lh":
            host = "127.0.0.1"
        if host == "0":
            host = "0.0.0.0"
        if str(port) == "0":
            port = 8000

        self.api_config[api_name] = {
            "Name": api_name,
            "version": self.version,
            "port": int(port),
            "host": host,
        }
        self.logger.info("Updated API configuration for '%s': %s", api_name, self.api_config[api_name])
        print(f"API configuration updated: {self.api_config[api_name]}")

    def start_dev(self, api_name: str, *modules: str, **kwargs: Any) -> str | None:
        """
        Start an API in development mode.

        If additional modules are provided, they are stored in a BlobFile for later use.

        Args:
            api_name (str): The API name.
            *modules (str): Additional modules for the API.

        Returns:
            Optional[str]: Status message.
        """
        if modules:
            api_name_dev = f"{api_name}_D"
            with BlobFile(f"FastApi/{api_name_dev}/dev", mode='w') as f:
                f.write_json({'modules': modules})
            api_name = api_name_dev

        return self._start_api(api_name, live=False, reload=False, test_override=False, host="localhost")

    def start_live(self, api_name: str) -> str | None:
        """
        Start an API in live mode.
        """
        return self._start_api(api_name, live=True, reload=False, test_override=False)

    def start_debug(self, api_name: str) -> str | None:
        """
        Start an API in debug mode.
        """
        return self._start_api(api_name, live=False, reload=True, test_override=True, host="localhost")

    def _start_api(
        self,
        api_name: str,
        live: bool = False,
        reload: bool = False,
        test_override: bool = False,
        host: str = "localhost"
    ) -> str | None:
        """
        Start an API process with the given configuration.

        Args:
            api_name (str): The API name.
            live (bool): Whether to run in live mode.
            reload (bool): Whether to enable auto-reload.
            test_override (bool): If True, allow start even if running in a test environment.
            host (str): Host to bind the API on.

        Returns:
            Optional[str]: A status message or error message.
        """
        # Prevent starting an API if in test mode unless explicitly overridden.
        if 'test' in self.app.id and not test_override:
            msg = "No API allowed in test mode"
            self.logger.warning(msg)
            return msg

        if not api_name:
            self.logger.error("No API name provided.")
            return None

        # Check if API is already running.
        if api_name in self.running_apis and self.running_apis[api_name].is_alive():
            msg = f"API '{api_name}' is already running."
            self.logger.info(msg)
            return msg

        # Ensure that live and reload are not both enabled.
        if live and reload:
            raise ValueError("Live mode and reload mode cannot be enabled simultaneously.")

        # If configuration does not exist, add it automatically.
        if api_name not in self.api_config:
            self.api_config[api_name] = {
                "Name": api_name,
                "version": self.version,
                "port": self.app.args_sto.port,
                "host": host if host and isinstance(host, str) else "localhost",
            }
            if live:
                self.api_config[api_name]['host'] = "0.0.0.0"
            self.logger.info("Auto-added API configuration for '%s': %s", api_name, self.api_config[api_name])

        # For live mode, always bind to all interfaces.
        if live:
            self.api_config[api_name]['host'] = "0.0.0.0"

        api_data = self.api_config[api_name]

        # Check for required frontend dependencies.
        node_modules_path = os.path.join(self.app.start_dir, "web", "node_modules")
        if not os.path.exists(node_modules_path):
            self.logger.info("Node modules folder not found. Installing dependencies in '%s'", node_modules_path)
            os.system("npm install --prefix ./web ./web")

        # Build the uvicorn command.
        cmd_parts: list[str] = [
            # sys.executable,
            # "-m",
            "uvicorn",
            "toolboxv2.mods.FastApi.fast_api_main:app",
            f"--host {api_data['host']}",
            f"--port {api_data['port']}",
            f"--header data:{self.app.debug}:{api_name}"
        ]
        if reload:
            # Reload directories can be adjusted as needed.
            cmd_parts.append("--reload")
            cmd_parts.append("--reload-dir ./utils")
            cmd_parts.append("--reload-dir ./mods/FastApi")
        command: str = " ".join(cmd_parts)
        self.logger.info("Starting API '%s' with command: %s", api_name, command)

        print(command)

        # Print QR codes for local and public IPs for convenience.
        protocol = "http"  # Adjust if SSL is configured
        local_url = f"{protocol}://{get_local_ip()}:{api_data['port']}"
        public_url = f"{protocol}://{get_public_ip()}:{api_data['port']}"
        print_qrcode_to_console(local_url)
        print_qrcode_to_console(public_url)

        try:

            process = multiprocessing.Process(
                target=os.system,
                args=(command,),
                # daemon=True
            )
            process.start()

            # Store the process
            self.running_apis[api_name] = process

            # Save PID to file
            with open(self._get_pid_file_path(api_name), "w") as f:
                f.write(str(process.pid))

            # Store process info in file handler
            self.add_to_save_file_handler(
                key=f"pr{api_name}",
                value=json.dumps({
                    "pid": process.pid,
                    "start_time": datetime.now().isoformat(),
                    "host": api_data['host'],
                    "port": api_data['port']
                })
            )

            msg = f"Starting API '{api_name}' at {api_data['host']}:{api_data['port']} (PID: {process.pid})"
            self.logger.info(msg)
            return msg
        except Exception as e:
            self.logger.exception("Failed to start API '%s': %s", api_name, e)
            return f"Failed to start API '{api_name}': {e}"

    async def stop_api(self, api_name: str, delete: bool = True) -> str:
        """
        Stop a running API and clean up resources.
        """
        if api_name not in self.api_config:
            msg = f"API with the name '{api_name}' is not configured."
            self.logger.warning(msg)
            return msg

        pid_file = self._get_pid_file_path(api_name)
        if not os.path.exists(pid_file):
            self.logger.warning("No pid file found for API '%s'", api_name)
            return f"No pid file found for API '{api_name}'."

        try:
            # Read PID from file
            with open(pid_file) as f:
                api_pid = int(f.read().strip())

            # Try graceful shutdown first
            if 'core' in self.app.id:
                if not await self.app.session.login():
                    self.logger.warning("Could not login with username '%s'", self.app.get_username())
                try:
                    response = await self.app.session.fetch(f"/api/exit/{api_pid}", method="POST")
                    self.logger.info("Exit response for API '%s': %s", api_name, response)
                except Exception as e:
                    self.logger.warning("Failed to stop API gracefully: %s", e)

            # Force kill if process still exists
            process = self.running_apis.get(api_name)
            if process and process.is_alive():
                process.terminate()
                process.join(timeout=5)
                if process.is_alive():
                    process.kill()

            # Fallback to system commands if needed
            try:
                if system() == "Windows":
                    os.system(f"taskkill /pid {api_pid} /F")
                else:
                    os.kill(api_pid, signal.SIGKILL)
            except ProcessLookupError:
                pass  # Process already terminated

            # Cleanup
            if os.path.exists(pid_file):
                os.remove(pid_file)
            if delete and api_name in self.running_apis:
                del self.running_apis[api_name]

            # Update file handler
            self.add_to_save_file_handler(
                key=f"pr{api_name}",
                value=json.dumps({
                    "stop_time": datetime.now().isoformat(),
                    "status": "stopped"
                })
            )
            self.save_file_handler()

            msg = f"Stopped API '{api_name}'."
            self.logger.info(msg)
            return msg

        except Exception as e:
            self.logger.exception("Error stopping API '%s': %s", api_name, e)
            return f"Error stopping API '{api_name}': {e}"

    def nf(self, name):
        """Normalize a name to exactly 10 characters (truncate, or right-pad with '~')."""
        if len(name) > 10:
            return name[:10]
        elif len(name) < 10:
            return name + '~' * (10 - len(name))
        else:
            return name

    def show_running(self) -> list[str]:
        """
        Display and return the list of currently running APIs with their status.
        """
        self.on_start()
        running_list = []
        print(self.api_config)
        for api_name in self.api_config:

            # Get stored process info
            process_info = self.get_file_handler(f"pr{api_name}")
            print('#',api_name, '#',process_info)
            if process_info is None:
                process_info = {}
            status = {
                "name": api_name,
                "online": api_name in self.running_apis,
                "start_time": process_info.get("start_time", "offline"),
                "pid": process_info.get("pid", ''),
                "host": process_info.get("host", ''),
                "port": process_info.get("port", '')
            }
            running_list.append(status)

        # Log and print current status
        self.logger.info("APIs: %s", running_list)
        print("\nAPIs:")
        for api in running_list:
            print(f"- {api['name']}: at {api['host']}:{api['port']}")
            print(f"  Started: {api['start_time']}")

        return [api["name"] for api in running_list]

    async def restart_api(self, api_name: str) -> str:
        """
        Restart the given API by stopping it and starting it again.

        Args:
            api_name (str): The name of the API to restart.

        Returns:
            str: A status message.
        """
        stop_message = await self.stop_api(api_name)
        self.logger.info("Restart: %s", stop_message)
        # Allow some time for the process to fully terminate.
        time.sleep(4)
        start_message = self._start_api(api_name)
        return f"Restarting API '{api_name}': {start_message}"

    def on_start(self) -> None:
        """
        Load API configuration from file when the tool starts.
        """
        self.load_file_handler()
        data = self.get_file_handler(self.keys["Apis"])
        try:
            if isinstance(data, str):
                self.api_config = json.loads(data)
            else:
                self.api_config = data
            self.logger.info("Loaded API configuration: %s", self.api_config)
        except Exception as e:
            self.logger.exception("Error loading API configuration: %s", e)
            self.api_config = {}

    async def on_exit(self) -> None:
        """
        Gracefully stop all running APIs and save configuration upon exit.
        """
        # Save configuration data.
        if len(self.api_config) != 0:
            self.add_to_save_file_handler(self.keys["Apis"], json.dumps(self.api_config))
        # Attempt to stop all running APIs.
        # for api_name in list(self.running_apis.keys()):
        #     await self.stop_api(api_name, delete=False)
        self.running_apis = {}
        self.save_file_handler()
        self.logger.info("Exiting API Manager. All running APIs stopped and configuration saved.")
conf_api(api_name, host='localhost', port=5000)

Update or create an API configuration.

Parameters:

    api_name (str): The name of the API. [required]
    host (str): The host address. Use "lh" for "127.0.0.1" or "0" for "0.0.0.0". [default: 'localhost']
    port (int): The port number; use 0 to select port 8000. [default: 5000]
Source code in toolboxv2/mods/FastApi/manager.py
def conf_api(self, api_name: str, host: str = "localhost", port: int = 5000) -> None:
    """
    Update or create an API configuration.

    Args:
        api_name (str): The name of the API.
        host (str): The host address (default "localhost"). Use "lh" for "127.0.0.1" or "0" for "0.0.0.0".
        port (int): The port number (default 5000; use "0" for port 8000).
    """
    if host.lower() == "lh":
        host = "127.0.0.1"
    if host == "0":
        host = "0.0.0.0"
    if str(port) == "0":
        port = 8000

    self.api_config[api_name] = {
        "Name": api_name,
        "version": self.version,
        "port": int(port),
        "host": host,
    }
    self.logger.info("Updated API configuration for '%s': %s", api_name, self.api_config[api_name])
    print(f"API configuration updated: {self.api_config[api_name]}")
info()

Return diagnostic information about API configurations and currently running APIs.

Source code in toolboxv2/mods/FastApi/manager.py
def info(self) -> dict[str, Any]:
    """
    Return diagnostic information about API configurations and currently running APIs.
    """
    config_info = {name: cfg for name, cfg in self.api_config.items()}
    running_info = {name: proc.pid for name, proc in self.running_apis.items() if proc.is_alive()}
    self.logger.info("API Configurations: %s", config_info)
    self.logger.info("Running APIs: %s", running_info)
    # Optionally, print to console as well
    for api_name, cfg in config_info.items():
        print(f"Configured API - Name: {api_name}, Config: {cfg}")
    print("Running APIs:")
    for api_name, pid in running_info.items():
        print(f"API: {api_name}, Process ID: {pid}")
    return {"configurations": config_info, "running": running_info}
on_exit() async

Gracefully stop all running APIs and save configuration upon exit.

Source code in toolboxv2/mods/FastApi/manager.py
async def on_exit(self) -> None:
    """
    Gracefully stop all running APIs and save configuration upon exit.
    """
    # Save configuration data.
    if len(self.api_config) != 0:
        self.add_to_save_file_handler(self.keys["Apis"], json.dumps(self.api_config))
    # Attempt to stop all running APIs.
    # for api_name in list(self.running_apis.keys()):
    #     await self.stop_api(api_name, delete=False)
    self.running_apis = {}
    self.save_file_handler()
    self.logger.info("Exiting API Manager. All running APIs stopped and configuration saved.")
on_start()

Load API configuration from file when the tool starts.

Source code in toolboxv2/mods/FastApi/manager.py
def on_start(self) -> None:
    """
    Load API configuration from file when the tool starts.
    """
    self.load_file_handler()
    data = self.get_file_handler(self.keys["Apis"])
    try:
        if isinstance(data, str):
            self.api_config = json.loads(data)
        else:
            self.api_config = data
        self.logger.info("Loaded API configuration: %s", self.api_config)
    except Exception as e:
        self.logger.exception("Error loading API configuration: %s", e)
        self.api_config = {}
restart_api(api_name) async

Restart the given API by stopping it and starting it again.

Parameters:

    api_name (str): The name of the API to restart. [required]

Returns:

    str: A status message.

Source code in toolboxv2/mods/FastApi/manager.py
async def restart_api(self, api_name: str) -> str:
    """
    Restart the given API by stopping it and starting it again.

    Args:
        api_name (str): The name of the API to restart.

    Returns:
        str: A status message.
    """
    stop_message = await self.stop_api(api_name)
    self.logger.info("Restart: %s", stop_message)
    # Allow some time for the process to fully terminate.
    time.sleep(4)
    start_message = self._start_api(api_name)
    return f"Restarting API '{api_name}': {start_message}"
show_running()

Display and return the list of currently running APIs with their status.

Source code in toolboxv2/mods/FastApi/manager.py
def show_running(self) -> list[str]:
    """
    Display and return the list of currently running APIs with their status.
    """
    self.on_start()
    running_list = []
    print(self.api_config)
    for api_name in self.api_config:

        # Get stored process info
        process_info = self.get_file_handler(f"pr{api_name}")
        print('#',api_name, '#',process_info)
        if process_info is None:
            process_info = {}
        status = {
            "name": api_name,
            "online": api_name in self.running_apis,
            "start_time": process_info.get("start_time", "offline"),
            "pid": process_info.get("pid", ''),
            "host": process_info.get("host", ''),
            "port": process_info.get("port", '')
        }
        running_list.append(status)

    # Log and print current status
    self.logger.info("APIs: %s", running_list)
    print("\nAPIs:")
    for api in running_list:
        print(f"- {api['name']}: at {api['host']}:{api['port']}")
        print(f"  Started: {api['start_time']}")

    return [api["name"] for api in running_list]
show_version()

Display and return the current version.

Source code in toolboxv2/mods/FastApi/manager.py
def show_version(self) -> str:
    """Display and return the current version."""
    self.logger.info("Version: %s", self.version)
    return self.version
start_debug(api_name)

Start an API in debug mode.

Source code in toolboxv2/mods/FastApi/manager.py
def start_debug(self, api_name: str) -> str | None:
    """
    Start an API in debug mode.
    """
    return self._start_api(api_name, live=False, reload=True, test_override=True, host="localhost")
start_dev(api_name, *modules, **kwargs)

Start an API in development mode.

If additional modules are provided, they are stored in a BlobFile for later use.

Parameters:

    api_name (str): The API name. [required]
    *modules (str): Additional modules for the API. [default: ()]

Returns:

    str | None: Status message.

Source code in toolboxv2/mods/FastApi/manager.py
def start_dev(self, api_name: str, *modules: str, **kwargs: Any) -> str | None:
    """
    Start an API in development mode.

    If additional modules are provided, they are stored in a BlobFile for later use.

    Args:
        api_name (str): The API name.
        *modules (str): Additional modules for the API.

    Returns:
        Optional[str]: Status message.
    """
    if modules:
        api_name_dev = f"{api_name}_D"
        with BlobFile(f"FastApi/{api_name_dev}/dev", mode='w') as f:
            f.write_json({'modules': modules})
        api_name = api_name_dev

    return self._start_api(api_name, live=False, reload=False, test_override=False, host="localhost")
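A short sketch of a development start, assuming the same manager instance (module names are illustrative); passing modules persists them and switches to the "_D" variant of the API name:

# Writes {'modules': ('CloudM', 'FileWidget')} to BlobFile("FastApi/main_D/dev")
# and then starts the API under the name "main_D" on localhost.
msg = manager.start_dev("main", "CloudM", "FileWidget")
print(msg)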
start_live(api_name)

Start an API in live mode.

Source code in toolboxv2/mods/FastApi/manager.py
def start_live(self, api_name: str) -> str | None:
    """
    Start an API in live mode.
    """
    return self._start_api(api_name, live=True, reload=False, test_override=False)
stop_api(api_name, delete=True) async

Stop a running API and clean up resources.

Source code in toolboxv2/mods/FastApi/manager.py
async def stop_api(self, api_name: str, delete: bool = True) -> str:
    """
    Stop a running API and clean up resources.
    """
    if api_name not in self.api_config:
        msg = f"API with the name '{api_name}' is not configured."
        self.logger.warning(msg)
        return msg

    pid_file = self._get_pid_file_path(api_name)
    if not os.path.exists(pid_file):
        self.logger.warning("No pid file found for API '%s'", api_name)
        return f"No pid file found for API '{api_name}'."

    try:
        # Read PID from file
        with open(pid_file) as f:
            api_pid = int(f.read().strip())

        # Try graceful shutdown first
        if 'core' in self.app.id:
            if not await self.app.session.login():
                self.logger.warning("Could not login with username '%s'", self.app.get_username())
            try:
                response = await self.app.session.fetch(f"/api/exit/{api_pid}", method="POST")
                self.logger.info("Exit response for API '%s': %s", api_name, response)
            except Exception as e:
                self.logger.warning("Failed to stop API gracefully: %s", e)

        # Force kill if process still exists
        process = self.running_apis.get(api_name)
        if process and process.is_alive():
            process.terminate()
            process.join(timeout=5)
            if process.is_alive():
                process.kill()

        # Fallback to system commands if needed
        try:
            if system() == "Windows":
                os.system(f"taskkill /pid {api_pid} /F")
            else:
                os.kill(api_pid, signal.SIGKILL)
        except ProcessLookupError:
            pass  # Process already terminated

        # Cleanup
        if os.path.exists(pid_file):
            os.remove(pid_file)
        if delete and api_name in self.running_apis:
            del self.running_apis[api_name]

        # Update file handler
        self.add_to_save_file_handler(
            key=f"pr{api_name}",
            value=json.dumps({
                "stop_time": datetime.now().isoformat(),
                "status": "stopped"
            })
        )
        self.save_file_handler()

        msg = f"Stopped API '{api_name}'."
        self.logger.info(msg)
        return msg

    except Exception as e:
        self.logger.exception("Error stopping API '%s': %s", api_name, e)
        return f"Error stopping API '{api_name}': {e}"

FileWidget

FileUploadHandler

Source code in toolboxv2/mods/FileWidget.py
class FileUploadHandler:
    def __init__(self, upload_dir: str = 'uploads'):
        self.upload_dir = Path(upload_dir)
        self.upload_dir.mkdir(parents=True, exist_ok=True)
        # self.app = get_app().app # If logger is needed here

    def save_file(self, chunk_info: ChunkInfo, storage: BlobStorage) -> str:
        """Speichert die Datei oder Chunk. Chunks werden lokal gespeichert, dann zu BlobStorage gemerged."""
        final_blob_path = Path(chunk_info.filename).name  # Use only filename part for security within blob storage

        if chunk_info.total_chunks == 1:
            # Store the complete file directly in BlobStorage
            # print(f"Saving single part file: {final_blob_path} to BlobStorage directly.") # Debug
            with BlobFile(final_blob_path, 'w', storage=storage) as bf:
                bf.write(chunk_info.content)
        else:
            # Store the chunk locally
            # Sanitize filename for local path (original chunk_info.filename might contain path parts client-side)
            safe_base_filename = "".join(
                c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(chunk_info.filename).name)
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{chunk_info.chunk_index}"
            # print(f"Saving chunk: {chunk_path} locally. Total chunks: {chunk_info.total_chunks}") # Debug

            with open(chunk_path, 'wb') as f:
                f.write(chunk_info.content)

            if self._all_chunks_received(safe_base_filename, chunk_info.total_chunks):
                # print(f"All chunks received for {safe_base_filename}. Merging to BlobStorage path: {final_blob_path}") # Debug
                self._merge_chunks_to_blob(safe_base_filename, chunk_info.total_chunks, final_blob_path, storage)
                self._cleanup_chunks(safe_base_filename, chunk_info.total_chunks)
            # else:
            # print(f"Still waiting for more chunks for {safe_base_filename}.") # Debug

        return final_blob_path  # Path within BlobStorage

    def _all_chunks_received(self, safe_base_filename: str, total_chunks: int) -> bool:
        for i in range(total_chunks):
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
            if not chunk_path.exists():
                # print(f"Chunk {i} for {safe_base_filename} not found. Path: {chunk_path}") # Debug
                return False
        # print(f"All {total_chunks} chunks found for {safe_base_filename}.") # Debug
        return True

    def _merge_chunks_to_blob(self, safe_base_filename: str, total_chunks: int, final_blob_path: str,
                              storage: BlobStorage):
        # print(f"Merging {total_chunks} chunks for {safe_base_filename} into Blob: {final_blob_path}") # Debug
        with BlobFile(final_blob_path, 'w', storage=storage) as outfile:
            for i in range(total_chunks):
                chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
                # print(f"Appending chunk {i} ({chunk_path}) to Blob.") # Debug
                with open(chunk_path, 'rb') as chunk_file:
                    outfile.write(chunk_file.read())
        # print(f"Finished merging chunks for {safe_base_filename} to Blob: {final_blob_path}") # Debug

    def _cleanup_chunks(self, safe_base_filename: str, total_chunks: int):
        # print(f"Cleaning up {total_chunks} chunks for {safe_base_filename}.") # Debug
        for i in range(total_chunks):
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
            if chunk_path.exists():
                # print(f"Removing chunk: {chunk_path}") # Debug
                try:
                    os.remove(chunk_path)
                except OSError as e:
                    # self.app.logger.error(f"Error removing chunk {chunk_path}: {e}") # If logger available
                    print(f"Error removing chunk {chunk_path}: {e}")
save_file(chunk_info, storage)

Saves the file or chunk. Chunks are stored locally, then merged into BlobStorage.

Source code in toolboxv2/mods/FileWidget.py
def save_file(self, chunk_info: ChunkInfo, storage: BlobStorage) -> str:
    """Speichert die Datei oder Chunk. Chunks werden lokal gespeichert, dann zu BlobStorage gemerged."""
    final_blob_path = Path(chunk_info.filename).name  # Use only filename part for security within blob storage

    if chunk_info.total_chunks == 1:
        # Store the complete file directly in BlobStorage
        # print(f"Saving single part file: {final_blob_path} to BlobStorage directly.") # Debug
        with BlobFile(final_blob_path, 'w', storage=storage) as bf:
            bf.write(chunk_info.content)
    else:
        # Store the chunk locally
        # Sanitize filename for local path (original chunk_info.filename might contain path parts client-side)
        safe_base_filename = "".join(
            c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(chunk_info.filename).name)
        chunk_path = self.upload_dir / f"{safe_base_filename}.part{chunk_info.chunk_index}"
        # print(f"Saving chunk: {chunk_path} locally. Total chunks: {chunk_info.total_chunks}") # Debug

        with open(chunk_path, 'wb') as f:
            f.write(chunk_info.content)

        if self._all_chunks_received(safe_base_filename, chunk_info.total_chunks):
            # print(f"All chunks received for {safe_base_filename}. Merging to BlobStorage path: {final_blob_path}") # Debug
            self._merge_chunks_to_blob(safe_base_filename, chunk_info.total_chunks, final_blob_path, storage)
            self._cleanup_chunks(safe_base_filename, chunk_info.total_chunks)
        # else:
        # print(f"Still waiting for more chunks for {safe_base_filename}.") # Debug

    return final_blob_path  # Path within BlobStorage
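To illustrate the chunking contract, a sketch that feeds a file to save_file in fixed-size parts; the handler instance, the BlobStorage instance, the file name, and the chunk size are assumptions for the example (ChunkInfo fields follow the usage above):

handler = FileUploadHandler(upload_dir='uploads')
with open('report.pdf', 'rb') as src:            # 'report.pdf' is a placeholder
    data = src.read()
CHUNK = 1024 * 1024                              # 1 MiB parts, illustrative
parts = [data[i:i + CHUNK] for i in range(0, len(data), CHUNK)]

for idx, part in enumerate(parts):
    info = ChunkInfo(filename='report.pdf', chunk_index=idx,
                     total_chunks=len(parts), content=part)
    # Parts are buffered under uploads/ until the last one arrives,
    # then merged into BlobStorage as Path('report.pdf').name.
    blob_path = handler.save_file(info, storage)  # 'storage' is an assumed BlobStorage
print(blob_path)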

access_shared_file(self, request, share_id, filename=None, row=None) async

Accesses a shared file via its share_id. The URL takes a form like /api/FileWidget/shared/{share_id_value}; the 'share_id: str' parameter in the signature implies that ToolBoxV2 extracts it from the path.

Source code in toolboxv2/mods/FileWidget.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="open_shared", api_methods=['GET'],
        request_as_kwarg=True, level=-1, row=True)
async def access_shared_file(self, request: RequestData, share_id: str, filename: str = None, row=None) -> Result:  # share_id from query params
    """
    Accesses a shared file via its share_id.
    The URL for this would be like /api/FileWidget/shared/{share_id_value}
    The 'share_id: str' in signature implies ToolBoxV2 extracts it from path.
    """
    if not share_id:
        return Result.html(data="Share ID is missing in path.", status=302)

    share_info = self.shares.get(share_id)
    if not share_info:
        return Result.html(data="Share link is invalid or has expired.", status=404)

    owner_uid = share_info["owner_uid"]
    file_path_in_owner_storage = share_info["file_path"]

    try:
        # Get BlobStorage for the owner, not the current request's user (if any)
        owner_storage = await self.get_blob_storage(
            owner_uid_override=owner_uid)  # Crucially, pass request=None if not needed
        self.app.logger.info(
            f"Accessing shared file via link {share_id}: owner {owner_uid}, path {file_path_in_owner_storage}")
        result = await _prepare_file_response(self, owner_storage, file_path_in_owner_storage, row=row is not None)
        if result.is_error():
            self.app.logger.error(f"Error preparing shared file response for {share_id}: {result.info.help_text}")
            return Result.html(data=f"Failed to prepare shared file for download. {result.info.help_text} {result.result.data_info}")
        return result
    except ValueError as e:  # From get_blob_storage if owner_uid is invalid for some reason
        self.app.logger.error(f"Error getting owner's storage for shared file {share_id} (owner {owner_uid}): {e}",
                              exc_info=True)
        return Result.html(data="Could not access owner's storage for shared file.")
    except Exception as e:
        self.app.logger.error(
            f"Error accessing shared file {share_id} (owner {owner_uid}, path {file_path_in_owner_storage}): {e}",
            exc_info=True)
        return Result.html(data="Could not retrieve shared file.")

get_main_ui(self) async

Serves the main HTML UI for the FileWidget.

Source code in toolboxv2/mods/FileWidget.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="ui", api_methods=['GET'])
async def get_main_ui(self) -> Result:
    """Serves the main HTML UI for the FileWidget."""
    html_content = get_template_content()
    return Result.html(data=html_content)

handle_upload(self, request, form_data=None) async

Handles file uploads. Expects chunked data via the form_data kwarg from the Rust server. The 'form_data' structure (from Rust's parsing of the multipart body) is produced after the client sends FormData with the fields 'file' (the blob), 'fileName', 'chunkIndex', and 'totalChunks'.

Expected form_data in this Python function:

{
    "file": {  // This 'file' key is the NAME of the form field that held the file blob
        "filename": "original_file_name_for_this_chunk.txt", // from Content-Disposition of the 'file' field part
        "content_type": "mime/type_of_chunk",
        "content_base64": "BASE64_ENCODED_CHUNK_CONTENT"
    },
    "fileName": "overall_final_filename.txt", // From a separate form field named 'fileName'
    "chunkIndex": "0",                        // From a separate form field named 'chunkIndex'
    "totalChunks": "5"                        // From a separate form field named 'totalChunks'
}

Source code in toolboxv2/mods/FileWidget.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="upload", api_methods=['POST'], request_as_kwarg=True)
async def handle_upload(self, request: RequestData, form_data: Optional[Dict[str, Any]] = None) -> Result:
    """
    Handles file uploads. Expects chunked data via form_data kwarg from Rust server.
    'form_data' structure (from Rust's parsing of multipart) after client sends FormData with fields:
    'file' (the blob), 'fileName', 'chunkIndex', 'totalChunks'.

    Expected `form_data` in this Python function:
    {
        "file": {  // This 'file' key is the NAME of the form field that held the file blob
            "filename": "original_file_name_for_this_chunk.txt", // from Content-Disposition of the 'file' field part
            "content_type": "mime/type_of_chunk",
            "content_base64": "BASE64_ENCODED_CHUNK_CONTENT"
        },
        "fileName": "overall_final_filename.txt", // From a separate form field named 'fileName'
        "chunkIndex": "0",                        // From a separate form field named 'chunkIndex'
        "totalChunks": "5"                        // From a separate form field named 'totalChunks'
    }
    """
    self.app.logger.debug(
        f"FileWidget: handle_upload called. Received form_data keys: {list(form_data.keys()) if form_data else 'None'}"
    )
    self.app.logger.debug(f"FileWidget: handle_upload called. Received form_data: {request.to_dict()}")
    # self.app.logger.debug(f"Full form_data: {form_data}") # For deeper debugging if needed

    if not form_data:
        return Result.default_user_error(info="No form data received for upload.", exec_code=400)

    try:
        storage = await self.get_blob_storage(request)

        # Extract data from form_data (populated by Rust server from multipart)
        file_field_data = form_data.get('file')  # This is the dict from UploadedFile struct
        # The 'file_field_data.get('filename')' is the name of the chunk part,
        # which the JS client sets to be the same as the original file's name.
        # This is fine for FileUploadHandler.save_file's chunk_info.filename if total_chunks > 1,
        # as it will be used to create temporary part files like "original_file_name.txt.part0".

        overall_filename_from_form = form_data.get('fileName') # This is the target filename for the assembled file.
        chunk_index_str = form_data.get('chunkIndex')
        total_chunks_str = form_data.get('totalChunks')

        if not all([
            file_field_data, isinstance(file_field_data, dict),
            overall_filename_from_form,
            chunk_index_str is not None, # Check for presence, not just truthiness (0 is valid)
            total_chunks_str is not None # Check for presence
        ]):
            missing = []
            if not file_field_data or not isinstance(file_field_data, dict): missing.append("'file' object field")
            if not overall_filename_from_form: missing.append("'fileName' field")
            if chunk_index_str is None: missing.append("'chunkIndex' field")
            if total_chunks_str is None: missing.append("'totalChunks' field")

            self.app.logger.error(
                f"Missing critical form data fields for upload: {missing}. Received form_data: {form_data}")
            return Result.default_user_error(info=f"Incomplete upload data. Missing: {', '.join(missing)}",
                                             exec_code=400)

        content_base64 = file_field_data.get('content_base64')
        if not content_base64:
            return Result.default_user_error(info="File content (base64) not found in 'file' field data.",
                                             exec_code=400)

        try:
            content_bytes = base64.b64decode(content_base64)
        except base64.binascii.Error as b64_error:
            self.app.logger.error(f"Base64 decoding failed for upload: {b64_error}")
            return Result.default_user_error(info="Invalid file content encoding.", exec_code=400)

        try:
            chunk_index = int(chunk_index_str)
            total_chunks = int(total_chunks_str)
        except ValueError:
            return Result.default_user_error(info="Invalid chunk index or total chunks value. Must be integers.", exec_code=400)

        # Use the 'overall_filename_from_form' for the ChunkInfo.filename,
        # as this is the intended final name in blob storage.
        # FileUploadHandler will use Path(this_name).name to ensure it's just a filename.
        chunk_info_to_save = ChunkInfo(
            filename=overall_filename_from_form, # THIS IS THE KEY CHANGE FOR CONSISTENCY
            chunk_index=chunk_index,
            total_chunks=total_chunks,
            content=content_bytes
        )

        self.app.logger.info(
            f"Processing chunk {chunk_index + 1}/{total_chunks} for final file '{overall_filename_from_form}'. " # Log the intended final name
            f"Size: {len(content_bytes)} bytes."
        )

        saved_blob_path = self.upload_handler.save_file(chunk_info_to_save, storage) # saved_blob_path will be Path(overall_filename_from_form).name

        msg = f"Chunk {chunk_index + 1}/{total_chunks} for '{saved_blob_path}' saved."
        if chunk_info_to_save.chunk_index == chunk_info_to_save.total_chunks - 1:
            # Check if fully assembled
            # The 'safe_base_filename' in FileUploadHandler is derived from ChunkInfo.filename,
            # which we've now set to 'overall_filename_from_form'.
            # So, this check should work correctly.
            safe_base_filename_for_check = "".join(
                c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(overall_filename_from_form).name)

            # A slight delay might be needed if file system operations are not instantly consistent across threads/processes
            # For now, assume direct check is okay.
            # await asyncio.sleep(0.1) # Optional small delay if race conditions are suspected with file system

            if self.upload_handler._all_chunks_received(safe_base_filename_for_check, total_chunks):
                msg = f"File '{saved_blob_path}' upload complete and assembled."
                self.app.logger.info(msg)
            else:
                msg = f"Final chunk for '{saved_blob_path}' saved, but assembly check failed or is pending."
                self.app.logger.warning(msg + f" (Could not verify all chunks for '{safe_base_filename_for_check}' immediately after final one)")


        return Result.ok(data={"message": msg, "path": saved_blob_path}) # Return the blob-relative path

    except ValueError as e:
        self.app.logger.error(f"Upload processing error: {e}", exc_info=True)
        return Result.default_user_error(info=f"Upload error: {str(e)}",
                                         exec_code=400 if "authentication" in str(e).lower() else 400)
    except Exception as e:
        self.app.logger.error(f"Unexpected error during file upload: {e}", exc_info=True)
        return Result.default_internal_error(info="An unexpected error occurred during upload.")
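To make the expected payload concrete, a sketch that assembles one chunk's form_data exactly as the handler checks it; all values are illustrative:

import base64

chunk_bytes = b"...one part of the file..."      # placeholder content
form_data = {
    "file": {
        "filename": "report.pdf",
        "content_type": "application/pdf",
        "content_base64": base64.b64encode(chunk_bytes).decode("ascii"),
    },
    "fileName": "report.pdf",   # final assembled name in BlobStorage
    "chunkIndex": "0",
    "totalChunks": "5",
}
# Passed as the form_data kwarg, this produces one ChunkInfo and a
# "Chunk 1/5 for 'report.pdf' saved." style message.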

POA

module

ActionManagerEnhanced
Source code in toolboxv2/mods/POA/module.py
class ActionManagerEnhanced:
    DB_ITEMS_PREFIX = "donext_items"
    DB_HISTORY_PREFIX = "donext_history"
    DB_CURRENT_ITEM_PREFIX = "donext_current_item"
    DB_UNDO_LOG_PREFIX = "donext_undo_log"
    DB_SETTINGS_PREFIX = "donext_settings"  # Added for user settings

    def __init__(self, app: App, user_id: str):
        self.app = app
        self.user_id = user_id
        self.db = app.get_mod("DB")
        self.isaa = app.get_mod("isaa")

        self.settings: UserSettings = UserSettings(user_id=user_id)  # Initialize with defaults
        self.items: List[ActionItem] = []
        self.history: List[HistoryEntry] = []
        self.current_item: Optional[ActionItem] = None
        self.undo_log: List[UndoLogEntry] = []

        self._load_settings()  # Load settings first as they might affect item loading
        self._load_data()

    def _get_db_key(self, prefix: str) -> str:
        return f"{prefix}_{self.user_id}"

    def get_user_timezone(self) -> pytz.BaseTzInfo:
        try:
            return pytz.timezone(self.settings.timezone)
        except pytz.UnknownTimeZoneError:
            return pytz.utc

    def _load_settings(self):
        settings_key = self._get_db_key(self.DB_SETTINGS_PREFIX)
        try:
            settings_data = self.db.get(settings_key)
            if settings_data.is_data() and settings_data.get():
                loaded_settings = json.loads(settings_data.get()[0]) if isinstance(settings_data.get(),
                                                                                   list) else json.loads(
                    settings_data.get())
                self.settings = UserSettings.model_validate_json_safe(loaded_settings)
            else:  # Save default settings if not found
                self._save_settings()
        except Exception as e:
            self.app.logger.error(f"Error loading settings for user {self.user_id}: {e}. Using defaults.")
            self.settings = UserSettings(user_id=self.user_id)  # Fallback to defaults
            self._save_settings()  # Attempt to save defaults

    def _save_settings(self):
        try:
            self.db.set(self._get_db_key(self.DB_SETTINGS_PREFIX), json.dumps(self.settings.model_dump_json_safe()))
        except Exception as e:
            self.app.logger.error(f"Error saving settings for user {self.user_id}: {e}")

    def update_user_settings(self, settings_data: Dict[str, Any]) -> UserSettings:
        # Ensure user_id is not changed by malicious input
        current_user_id = self.settings.user_id
        updated_settings = UserSettings.model_validate(
            {**self.settings.model_dump(), **settings_data, "user_id": current_user_id})
        self.settings = updated_settings
        self._save_settings()
        # Potentially re-process items if timezone change affects interpretations, though this is complex.
        # For now, new items will use the new timezone. Existing UTC times remain.
        self.app.logger.info(f"User {self.user_id} settings updated: Timezone {self.settings.timezone}")
        return self.settings

    def _load_data(self):
        items_key = self._get_db_key(self.DB_ITEMS_PREFIX)
        history_key = self._get_db_key(self.DB_HISTORY_PREFIX)
        current_item_key = self._get_db_key(self.DB_CURRENT_ITEM_PREFIX)
        undo_log_key = self._get_db_key(self.DB_UNDO_LOG_PREFIX)
        user_tz_str = self.settings.timezone  # For model_validate_json_safe context

        try:
            items_data = self.db.get(items_key)
            if items_data.is_data() and items_data.get():
                loaded_items_raw = json.loads(items_data.get()[0]) if isinstance(items_data.get(),
                                                                                 list) else json.loads(items_data.get())
                self.items = [ActionItem.model_validate_json_safe(item_dict, user_timezone_str=user_tz_str) for
                              item_dict in loaded_items_raw]

            history_data = self.db.get(history_key)
            if history_data.is_data() and history_data.get():
                loaded_history_raw = json.loads(history_data.get()[0]) if isinstance(history_data.get(),
                                                                                     list) else json.loads(
                    history_data.get())
                self.history = [HistoryEntry.model_validate_json_safe(entry_dict) for entry_dict in loaded_history_raw]

            current_item_data = self.db.get(current_item_key)
            if current_item_data.is_data() and current_item_data.get():
                current_item_dict = json.loads(current_item_data.get()[0]) if isinstance(current_item_data.get(),
                                                                                         list) else json.loads(
                    current_item_data.get())
                if current_item_dict:
                    self.current_item = ActionItem.model_validate_json_safe(current_item_dict,
                                                                            user_timezone_str=user_tz_str)

            undo_log_data = self.db.get(undo_log_key)
            if undo_log_data.is_data() and undo_log_data.get():
                loaded_undo_raw = json.loads(undo_log_data.get()[0]) if isinstance(undo_log_data.get(),
                                                                                   list) else json.loads(
                    undo_log_data.get())
                self.undo_log = [UndoLogEntry.model_validate_json_safe(entry_dict) for entry_dict in loaded_undo_raw]

        except Exception as e:
            self.app.logger.error(f"Error loading data for user {self.user_id}: {e}")
            self.items, self.history, self.current_item, self.undo_log = [], [], None, []
        self._recalculate_next_due_for_all()

    def _save_data(self):
        try:
            self.db.set(self._get_db_key(self.DB_ITEMS_PREFIX),
                        json.dumps([item.model_dump_json_safe() for item in self.items]))
            self.db.set(self._get_db_key(self.DB_HISTORY_PREFIX),
                        json.dumps([entry.model_dump_json_safe() for entry in self.history]))
            self.db.set(self._get_db_key(self.DB_CURRENT_ITEM_PREFIX),
                        json.dumps(self.current_item.model_dump_json_safe() if self.current_item else None))
            self.db.set(self._get_db_key(self.DB_UNDO_LOG_PREFIX),
                        json.dumps([entry.model_dump_json_safe() for entry in self.undo_log]))
        except Exception as e:
            self.app.logger.error(f"Error saving data for user {self.user_id}: {e}")

    def _add_history_entry(self, item: ActionItem, status_override: Optional[ActionStatus] = None,
                           notes: Optional[str] = None):
        entry = HistoryEntry(
            item_id=item.id, item_title=item.title, item_type=item.item_type,
            status_changed_to=status_override or item.status,
            parent_id=item.parent_id, notes=notes
        )
        self.history.append(entry)

    def _datetime_to_user_tz(self, dt_utc: Optional[datetime]) -> Optional[datetime]:
        if not dt_utc: return None
        if dt_utc.tzinfo is None: dt_utc = pytz.utc.localize(dt_utc)  # Should already be UTC
        return dt_utc.astimezone(self.get_user_timezone())

    def _datetime_from_user_input_str(self, dt_str: Optional[str]) -> Optional[datetime]:
        if not dt_str: return None
        try:
            dt = isoparse(dt_str)
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:  # Naive
                return self.get_user_timezone().localize(dt).astimezone(pytz.utc)
            return dt.astimezone(pytz.utc)  # Aware, convert to UTC
        except ValueError:
            self.app.logger.warning(f"Could not parse datetime string: {dt_str}")
            return None

    def _recalculate_next_due(self, item: ActionItem):
        now_utc = datetime.now(pytz.utc)
        user_tz = self.get_user_timezone()

        if item.status == ActionStatus.COMPLETED and item.item_type == ItemType.TASK:
            if item.frequency and item.frequency != Frequency.ONE_TIME:
                base_time_utc = item.last_completed or now_utc  # last_completed is already UTC

                # If item had a fixed_time, align next_due to that time of day in user's timezone
                if item.fixed_time:
                    original_fixed_time_user_tz = item.fixed_time.astimezone(user_tz)
                    # Start from last_completed (or now if missing) in user's timezone for calculation
                    base_time_user_tz = base_time_utc.astimezone(user_tz)

                    # Ensure base_time_user_tz is at least original_fixed_time_user_tz for alignment
                    # but calculations should project from last completion.
                    # For example, if daily task due 9am was completed at 11am, next one is tomorrow 9am.
                    # If completed at 8am, next one is today 9am (if fixed_time was today 9am) or tomorrow 9am.

                    # Let's use last_completed as the primary anchor for when the *next* cycle starts.
                    # The original fixed_time's time component is used for the *time of day* of the next due.

                    current_anchor_user_tz = base_time_user_tz

                    # Calculate next occurrence based on frequency
                    if item.frequency == Frequency.DAILY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=1)).date()
                    elif item.frequency == Frequency.WEEKLY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(weeks=1)).date()
                    elif item.frequency == Frequency.MONTHLY:  # Simplified
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=30)).date()
                    elif item.frequency == Frequency.ANNUALLY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=365)).date()
                    else:  # Should not happen for recurring
                        item.next_due = None
                        return

                    # Combine with original time of day
                    next_due_user_tz = datetime.combine(next_due_user_tz_date, original_fixed_time_user_tz.time(),
                                                        tzinfo=user_tz)
                    item.next_due = next_due_user_tz.astimezone(pytz.utc)

                else:  # No original fixed_time, so recur based on current time of completion
                    if item.frequency == Frequency.DAILY:
                        item.next_due = base_time_utc + timedelta(days=1)
                    elif item.frequency == Frequency.WEEKLY:
                        item.next_due = base_time_utc + timedelta(weeks=1)
                    elif item.frequency == Frequency.MONTHLY:
                        item.next_due = base_time_utc + timedelta(days=30)
                    elif item.frequency == Frequency.ANNUALLY:
                        item.next_due = base_time_utc + timedelta(days=365)

                # Advance until future if needed (e.g., completing an overdue recurring task)
                # This loop must operate on user's local time perception of "next day"
                while item.next_due and item.next_due < now_utc:
                    next_due_user = item.next_due.astimezone(user_tz)
                    original_time_comp = next_due_user.time()  # Preserve time of day

                    if item.frequency == Frequency.DAILY:
                        next_due_user_adv = next_due_user + timedelta(days=1)
                    elif item.frequency == Frequency.WEEKLY:
                        next_due_user_adv = next_due_user + timedelta(weeks=1)
                    # For monthly/annually, simple timedelta might shift day of month. Using replace for date part.
                    elif item.frequency == Frequency.MONTHLY:
                        # This simplified logic might need dateutil.relativedelta for accuracy
                        year, month = (next_due_user.year, next_due_user.month + 1) if next_due_user.month < 12 else (
                            next_due_user.year + 1, 1)
                        try:
                            next_due_user_adv = next_due_user.replace(year=year, month=month)
                        except ValueError:  # Handle e.g. trying to set Feb 30
                            import calendar
                            last_day = calendar.monthrange(year, month)[1]
                            next_due_user_adv = next_due_user.replace(year=year, month=month, day=last_day)

                    elif item.frequency == Frequency.ANNUALLY:
                        try:
                            next_due_user_adv = next_due_user.replace(year=next_due_user.year + 1)
                        except ValueError:  # Handle leap day if original was Feb 29
                            next_due_user_adv = next_due_user.replace(year=next_due_user.year + 1,
                                                                      day=28)  # Or March 1st
                    else:
                        break

                    item.next_due = user_tz.localize(
                        datetime.combine(next_due_user_adv.date(), original_time_comp)).astimezone(pytz.utc)

                item.status = ActionStatus.NOT_STARTED  # Reset for next occurrence
            else:  # One-time task
                item.next_due = None
        elif item.status == ActionStatus.NOT_STARTED and item.fixed_time and not item.next_due:
            item.next_due = item.fixed_time  # fixed_time is already UTC

        # If task is not completed, not started, and has a next_due in the past, but also a fixed_time in the future
        # (e.g. recurring task whose current instance was missed, but fixed_time points to a specific time for all instances)
        # ensure next_due is not before fixed_time if fixed_time is relevant for setting.
        # This logic is complex. Current setup: fixed_time is the "template", next_due is the "instance".

    def _recalculate_next_due_for_all(self):
        for item in self.items:
            self._recalculate_next_due(item)

    def add_item(self, item_data: Dict[str, Any], by_ai: bool = False, imported: bool = False) -> ActionItem:
        item_data['_user_timezone_str'] = self.settings.timezone  # For validation context
        item = ActionItem.model_validate(
            item_data)  # Pydantic handles string->datetime, then model_validator converts to UTC
        item.created_by_ai = by_ai
        item.updated_at = datetime.now(pytz.utc)  # Ensure update

        # Initial next_due for new items if not already set by iCal import logic
        if not item.next_due and item.fixed_time and item.status == ActionStatus.NOT_STARTED:
            item.next_due = item.fixed_time

        self.items.append(item)
        self._add_history_entry(item, status_override=ActionStatus.NOT_STARTED,
                                notes="Item created" + (" by AI" if by_ai else "") + (
                                    " via import" if imported else ""))
        if by_ai:
            self._log_ai_action("ai_create_item", [item.id])

        self._save_data()
        return item

    def get_item_by_id(self, item_id: str) -> Optional[ActionItem]:
        return next((item for item in self.items if item.id == item_id), None)

    def update_item(self, item_id: str, update_data: Dict[str, Any], by_ai: bool = False) -> Optional[ActionItem]:
        item = self.get_item_by_id(item_id)
        if not item: return None

        previous_data_json = item.model_dump_json() if by_ai else None

        # Pass user timezone for validation context if datetime strings are present
        update_data_with_tz_context = {**update_data, '_user_timezone_str': self.settings.timezone}

        updated_item_dict = item.model_dump()
        updated_item_dict.update(update_data_with_tz_context)

        try:
            # Re-validate the whole model to ensure consistency and proper conversions
            new_item_state = ActionItem.model_validate(updated_item_dict)
            # Preserve original ID and created_at, apply new state
            new_item_state.id = item.id
            new_item_state.created_at = item.created_at
            self.items[self.items.index(item)] = new_item_state
            item = new_item_state
        except Exception as e:
            self.app.logger.error(f"Error validating updated item data: {e}. Update aborted for item {item_id}.")
            return None  # Or raise error

        item.updated_at = datetime.now(pytz.utc)
        item.created_by_ai = by_ai

        self._recalculate_next_due(item)
        self._add_history_entry(item, notes="Item updated" + (" by AI" if by_ai else ""))

        if by_ai:
            self._log_ai_action("ai_modify_item", [item.id],
                                {item.id: previous_data_json} if previous_data_json else None)

        self._save_data()
        return item

    def remove_item(self, item_id: str, record_history: bool = True) -> bool:
        item = self.get_item_by_id(item_id)
        if not item: return False

        children_ids = [child.id for child in self.items if child.parent_id == item_id]
        for child_id in children_ids:
            self.remove_item(child_id, record_history=record_history)

        self.items = [i for i in self.items if i.id != item_id]
        if self.current_item and self.current_item.id == item_id:
            self.current_item = None

        if record_history:
            self._add_history_entry(item, status_override=ActionStatus.CANCELLED, notes="Item removed")
        self._save_data()
        return True

    def set_current_item(self, item_id: str) -> Optional[ActionItem]:
        item = self.get_item_by_id(item_id)
        if not item: return None
        if item.status == ActionStatus.COMPLETED and item.item_type == ItemType.TASK and item.frequency == Frequency.ONE_TIME:
            return None

        self.current_item = item
        if item.status == ActionStatus.NOT_STARTED:
            item.status = ActionStatus.IN_PROGRESS
            item.updated_at = datetime.now(pytz.utc)
            self._add_history_entry(item, notes="Set as current, status to In Progress")
        else:
            self._add_history_entry(item, notes="Set as current")
        self._save_data()
        return item

    def complete_current_item(self) -> Optional[ActionItem]:
        if not self.current_item: return None

        item_to_complete = self.current_item
        item_to_complete.status = ActionStatus.COMPLETED
        item_to_complete.last_completed = datetime.now(pytz.utc)
        item_to_complete.updated_at = datetime.now(pytz.utc)

        self._recalculate_next_due(item_to_complete)
        self._add_history_entry(item_to_complete, status_override=ActionStatus.COMPLETED, notes="Marked as completed")

        self.current_item = None  # Clear current item after completion
        self._save_data()
        return item_to_complete

    def get_suggestions(self, count: int = 2) -> List[ActionItem]:
        # Prioritize AI suggestions if ISAA is available
        if self.isaa:
            active_items_for_ai = []
            for item in self.items:
                if item.status != ActionStatus.COMPLETED and item.status != ActionStatus.CANCELLED:
                    # Convert datetimes to user's local timezone string for AI context
                    item_dump = item.model_dump_json_safe()  # This is already UTC ISO
                    # Optionally, convert to user's timezone string if AI is better with local times
                    # For now, UTC ISO is fine.
                    active_items_for_ai.append(item_dump)

            MAX_ITEMS_FOR_CONTEXT = 20
            if len(active_items_for_ai) > MAX_ITEMS_FOR_CONTEXT:
                active_items_for_ai.sort(
                    key=lambda x: (x.get('priority', 3), x.get('next_due') or '9999-12-31T23:59:59Z'))
                active_items_for_ai = active_items_for_ai[:MAX_ITEMS_FOR_CONTEXT]

            now_user_tz_str = datetime.now(self.get_user_timezone()).isoformat()

            prompt = (
                f"User's current time: {now_user_tz_str} (Timezone: {self.settings.timezone}). "
                f"Active items (tasks/notes) are provided below (datetimes are in UTC ISO format). "
                f"Suggest the top {count} item IDs to focus on. Consider priority, due dates (next_due), "
                f"and if a current item is set (current_item_id), its sub-items might be relevant. "
                f"Tasks are generally more actionable. Focus on 'not_started' or 'in_progress'.\n\n"
                f"Active Items (JSON):\n{json.dumps(active_items_for_ai, indent=2)}\n\n"
                f"Current Item ID: {self.current_item.id if self.current_item else 'None'}\n\n"
                f"Return JSON: {{ \"suggested_item_ids\": [\"id1\", \"id2\"] }}."
            )

            class SuggestedIds(BaseModel):
                suggested_item_ids: List[str]

            try:
                structured_response = asyncio.run(
                    self.isaa.format_class(SuggestedIds, prompt, agent_name="TaskCompletion"))
                if structured_response and isinstance(structured_response, dict):
                    suggested_ids_model = SuggestedIds(**structured_response)
                    ai_suggestions = [self.get_item_by_id(id_str) for id_str in suggested_ids_model.suggested_item_ids
                                      if self.get_item_by_id(id_str)]
                    if ai_suggestions: return ai_suggestions[:count]
            except Exception as e:
                self.app.logger.error(f"Error getting AI suggestions: {e}")

        # Fallback to basic suggestions
        return self._get_basic_suggestions(count)

    def _get_basic_suggestions(self, count: int = 2) -> List[ActionItem]:
        now_utc = datetime.now(pytz.utc)
        available_items = [
            item for item in self.items
            if item.status in [ActionStatus.NOT_STARTED, ActionStatus.IN_PROGRESS]
        ]

        if self.current_item:
            sub_items = [item for item in available_items if item.parent_id == self.current_item.id]
            # If current item has actionable sub-items, prioritize them
            if any(s.next_due and s.next_due < (now_utc + timedelta(hours=2)) for s in sub_items) or \
                any(s.priority <= 2 for s in sub_items):  # Urgent sub-items (due soon or high priority)
                available_items = sub_items  # Focus on sub-items
            # If no urgent sub-items, consider other items too, but maybe give slight preference to other sub-items.
            # For simplicity now, if current_item is set, and it has sub-items, suggestions come from sub-items.
            # If no sub-items, or current_item is not set, consider all available_items.
            elif sub_items:  # Has sub-items, but none are "urgent" by above criteria
                available_items = sub_items
            # If current_item has no sub_items, then general pool is used.

        def sort_key(item: ActionItem):
            # Sort by: 1. Due Date (earlier is better, None is last) 2. Priority (lower num is higher)
            due_date_utc = item.next_due if item.next_due else datetime.max.replace(tzinfo=pytz.utc)
            return (due_date_utc, item.priority)

        available_items.sort(key=sort_key)
        return available_items[:count]

    def get_history(self, limit: int = 50) -> List[HistoryEntry]:
        return sorted(self.history, key=lambda x: x.timestamp, reverse=True)[:limit]

    def get_all_items_hierarchy(self) -> Dict[str, List[Dict[str, Any]]]:
        # This method remains largely the same, just ensure model_dump_json_safe is used.
        # Datetimes will be ISO UTC strings. Client JS needs to handle display in user's local time.
        hierarchy = {"root": []}
        item_map = {item.id: item.model_dump_json_safe() for item in self.items}  # Uses UTC ISO dates

        # This part seems fine, it builds hierarchy based on parent_id
        processed_ids = set()
        root_items_temp = []

        for item_id, item_dict in item_map.items():
            parent_id = item_dict.get("parent_id")
            if parent_id and parent_id in item_map:
                if "children" not in item_map[parent_id]:
                    item_map[parent_id]["children"] = []
                item_map[parent_id]["children"].append(item_dict)
            else:
                root_items_temp.append(item_dict)
        hierarchy["root"] = root_items_temp

        def sort_children_recursive(node_list):
            for node_dict in node_list:
                if "children" in node_dict:
                    # Sort children by priority, then creation date
                    node_dict["children"].sort(key=lambda x: (x.get('priority', 3), isoparse(x.get('created_at'))))
                    sort_children_recursive(node_dict["children"])

        # Sort root items
        hierarchy["root"].sort(key=lambda x: (x.get('priority', 3), isoparse(x.get('created_at'))))
        sort_children_recursive(hierarchy["root"])
        return hierarchy

    # --- AI Specific Methods ---
    async def ai_create_item_from_text(self, text: str) -> Optional[ActionItem]:
        if not self.isaa:
            self.app.logger.warning("ISAA module not available for AI item creation.")
            return None

        class ParsedItemFromText(BaseModel):
            item_type: Literal["task", "note"] = "task"
            title: str
            description: Optional[str] = None
            priority: Optional[int] = Field(default=3, ge=1, le=5)
            due_date_str: Optional[str] = None  # e.g., "tomorrow", "next monday at 5pm", "2024-12-25 17:00"
            frequency_str: Optional[str] = Field(default="one_time",
                                                 description="e.g. 'daily', 'weekly', 'one_time', 'every friday'")

        user_tz = self.get_user_timezone()
        current_time_user_tz_str = datetime.now(user_tz).strftime('%Y-%m-%d %H:%M:%S %Z%z')
        prompt = (
            f"User's current time is {current_time_user_tz_str}. Parse the input into a structured item. "
            f"For due_date_str, interpret relative dates/times based on this current time and output "
            f"a specific date string like 'YYYY-MM-DD HH:MM:SS'. If time is omitted, assume a default like 9 AM. "
            f"If date is omitted but time is given (e.g. 'at 5pm'), assume today if 5pm is future, else tomorrow. "
            f"User input: \"{text}\"\n\n"
            f"Format as JSON for ParsedItemFromText."
        )
        try:
            raw_response = await self.isaa.mini_task_completion(prompt, agent_name="TaskCompletion")
            if not raw_response: self.app.logger.error("AI parsing returned empty."); return None

            json_str = raw_response
            if "```json" in json_str: json_str = json_str.split("```json")[1].split("```")[0].strip()
            parsed_dict = json.loads(json_str)
            parsed_data_model = ParsedItemFromText(**parsed_dict)

            item_constructor_data = {
                "item_type": ItemType(parsed_data_model.item_type),
                "title": parsed_data_model.title,
                "description": parsed_data_model.description,
                "priority": parsed_data_model.priority or 3,
            }

            if parsed_data_model.due_date_str:
                # ISAA is prompted to return YYYY-MM-DD HH:MM:SS.
                # This string is assumed to be in the user's local timezone.
                # The ActionItem model_validator will convert this to UTC.
                item_constructor_data["fixed_time"] = parsed_data_model.due_date_str  # Pass as string

            # Frequency parsing (simplified)
            if parsed_data_model.frequency_str:
                freq_str_lower = parsed_data_model.frequency_str.lower()
                if "daily" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.DAILY
                elif "weekly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.WEEKLY
                elif "monthly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.MONTHLY
                elif "annually" in freq_str_lower or "yearly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.ANNUALLY
                else:
                    item_constructor_data["frequency"] = Frequency.ONE_TIME

            return self.add_item(item_constructor_data, by_ai=True)
        except Exception as e:
            self.app.logger.error(
                f"Error creating item with AI: {e}. Raw: {raw_response if 'raw_response' in locals() else 'N/A'}")
            return None

    def _log_ai_action(self, action_type: Literal["ai_create_item", "ai_modify_item", "ical_import"],
                       item_ids: List[str], previous_data_map: Optional[Dict[str, str]] = None):
        entry = UndoLogEntry(action_type=action_type, item_ids=item_ids, previous_data_json_map=previous_data_map)
        self.undo_log.append(entry)
        if len(self.undo_log) > 20: self.undo_log = self.undo_log[-20:]
        # _save_data called by caller

    async def undo_last_ai_action(self) -> bool:  # Also handles iCal import undo
        if not self.undo_log: return False
        last_action = self.undo_log.pop()
        action_undone_count = 0

        if last_action.action_type in ["ai_create_item", "ical_import"]:
            for item_id in last_action.item_ids:
                if self.remove_item(item_id, record_history=False):  # Don't double-log removal for undo
                    action_undone_count += 1
        elif last_action.action_type == "ai_modify_item":
            if last_action.previous_data_json_map:
                for item_id, prev_data_json in last_action.previous_data_json_map.items():
                    try:
                        prev_data = ActionItem.model_validate_json_safe(json.loads(prev_data_json),
                                                                        user_timezone_str=self.settings.timezone)
                        # Replace item
                        found = False
                        for i, item_in_list in enumerate(self.items):
                            if item_in_list.id == item_id:
                                self.items[i] = prev_data
                                if self.current_item and self.current_item.id == item_id:
                                    self.current_item = prev_data
                                found = True
                                break
                        if found:
                            action_undone_count += 1
                        else:
                            self.app.logger.warning(f"Could not find item {item_id} to restore during AI undo.")
                    except Exception as e:
                        self.app.logger.error(f"Error restoring item {item_id} during undo: {e}")
            else:  # Should not happen for modify
                self.app.logger.warning(
                    f"Undo for AI modify action on item(s) {last_action.item_ids} had no previous_data_json_map.")

        if action_undone_count > 0:
            # Create a generic history entry for the undo action
            generic_undo_item_title = f"Related to {len(last_action.item_ids)} item(s)"
            if len(last_action.item_ids) == 1:
                item_for_title = self.get_item_by_id(last_action.item_ids[0])  # Might be None if it was a create undo
                generic_undo_item_title = item_for_title.title if item_for_title else "N/A (Undone Action)"

            self.history.append(HistoryEntry(
                item_id=last_action.item_ids[0],  # Representative item
                item_title=generic_undo_item_title,
                item_type=ItemType.TASK,  # Generic
                status_changed_to=ActionStatus.CANCELLED,  # Generic status for undo
                notes=f"Undid action: {last_action.action_type} for {len(last_action.item_ids)} item(s)."
            ))
            self._save_data()
            return True

        # If nothing was undone, put action back to log
        self.undo_log.append(last_action)
        return False

    # --- iCalendar Methods ---
    def _parse_ical_dt(self, dt_ical: Union[vDatetime, vDate], user_tz: pytz.BaseTzInfo) -> Optional[datetime]:
        """Converts icalendar vDatetime or vDate to UTC datetime."""
        if not dt_ical: return None
        dt_val = dt_ical.dt

        if isinstance(dt_val, datetime):
            if dt_val.tzinfo is None:  # Naive datetime, assume user's local timezone as per iCal spec for floating
                return user_tz.localize(dt_val).astimezone(pytz.utc)
            return dt_val.astimezone(pytz.utc)  # Aware datetime
        elif isinstance(dt_val, date):  # All-day event, represent as start of day in user's TZ, then UTC
            return user_tz.localize(datetime.combine(dt_val, datetime.min.time())).astimezone(pytz.utc)
        return None

    def _map_ical_priority_to_app(self, ical_priority: Optional[int]) -> int:
        if ical_priority is None: return 3  # Default
        if 1 <= ical_priority <= 4: return 1  # High
        if ical_priority == 5: return 3  # Medium
        if 6 <= ical_priority <= 9: return 5  # Low
        return 3  # Default for 0 or other values

    def _map_app_priority_to_ical(self, app_priority: int) -> int:
        if app_priority == 1: return 1  # High
        if app_priority == 2: return 3
        if app_priority == 3: return 5  # Medium
        if app_priority == 4: return 7
        if app_priority == 5: return 9  # Low
        return 0  # No priority

    def _map_rrule_to_frequency(self, rrule_prop: Optional[vRecur]) -> Tuple[Frequency, Optional[str]]:
        if not rrule_prop:
            return Frequency.ONE_TIME, None

        rrule_dict = rrule_prop.to_dict()
        freq = rrule_dict.get('FREQ')
        original_rrule_str = vRecur.from_dict(rrule_dict).to_ical().decode('utf-8')

        if freq == 'DAILY': return Frequency.DAILY, original_rrule_str
        if freq == 'WEEKLY': return Frequency.WEEKLY, original_rrule_str
        if freq == 'MONTHLY': return Frequency.MONTHLY, original_rrule_str
        if freq == 'YEARLY': return Frequency.ANNUALLY, original_rrule_str

        # If RRULE is complex or not a direct match, import as ONE_TIME for each instance
        # but store the original RRULE string for reference or future advanced handling.
        return Frequency.ONE_TIME, original_rrule_str

    def import_ical_events(self, ical_string: str) -> List[ActionItem]:
        imported_items: List[ActionItem] = []
        try:
            cal = iCalCalendar.from_ical(ical_string)
            user_tz = self.get_user_timezone()
            now_utc = datetime.now(pytz.utc)
            import_limit_date_utc = now_utc + timedelta(days=RECURRING_IMPORT_WINDOW_DAYS)

            processed_uids_for_session = set()  # To avoid processing same base recurring event multiple times in one import

            for component in cal.walk():
                if component.name == "VEVENT":
                    uid = component.get('uid')
                    if not uid:
                        uid = str(uuid.uuid4())  # Generate a UID if missing
                    else:
                        uid = uid.to_ical().decode('utf-8')

                    summary = component.get('summary', 'Untitled Event').to_ical().decode('utf-8')
                    description = component.get('description', '').to_ical().decode('utf-8')
                    location = component.get('location', '').to_ical().decode('utf-8')
                    dtstart_ical = component.get('dtstart')
                    dtend_ical = component.get('dtend')  # Can be used for duration if needed
                    ical_priority_val = component.get('priority')
                    ical_priority = int(ical_priority_val.to_ical().decode('utf-8')) if ical_priority_val else None

                    rrule_prop = component.get('rrule')  # This is a vRecur object or None

                    start_time_utc = self._parse_ical_dt(dtstart_ical, user_tz)
                    if not start_time_utc:
                        self.app.logger.warning(f"Skipping event '{summary}' due to missing/invalid DTSTART.")
                        continue

                    app_priority = self._map_ical_priority_to_app(ical_priority)

                    # UID handling: recurring events are expanded into one item per
                    # occurrence, with UID = base UID + occurrence timestamp. Robust
                    # in-place updates (e.g. comparing SEQUENCE numbers) are not yet implemented.

                    if rrule_prop:
                        if uid in processed_uids_for_session:  # Already processed this recurring event's base
                            continue
                        processed_uids_for_session.add(uid)

                        # Handle recurring event
                        rrule_str = rrule_prop.to_ical().decode('utf-8')
                        # dateutil's rrulestr needs an explicit start time; the icalendar
                        # component's DTSTART is already timezone-aware, so use it
                        # directly as the recurrence anchor.
                        try:
                            rrule_obj = rrulestr(rrule_str, dtstart=dtstart_ical.dt)
                        except Exception as e_rr:
                            self.app.logger.error(
                                f"Could not parse RRULE '{rrule_str}' for event '{summary}': {e_rr}")
                            continue

                        occurrences_imported = 0
                        # Iterate occurrences in UTC, from the event's start or now
                        # (whichever is later) up to the end of the import window.
                        iteration_start_utc = max(now_utc, start_time_utc)

                        for occ_dt_aware in rrule_obj.between(iteration_start_utc, import_limit_date_utc, inc=True):
                            if occurrences_imported >= MAX_RECURRING_INSTANCES_TO_IMPORT:
                                break

                            # occ_dt_aware is usually from dateutil.rrule, may need tzinfo set or conversion.
                            # If rrulestr was given an aware dtstart, occurrences should be aware.
                            # Ensure it's UTC for our system.
                            occ_utc = occ_dt_aware.astimezone(pytz.utc) if occ_dt_aware.tzinfo else pytz.utc.localize(
                                occ_dt_aware)

                            instance_uid = f"{uid}-{occ_utc.strftime('%Y%m%dT%H%M%S%Z')}"
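                            # e.g. "<base-uid>-20250101T090000UTC": one item per occurrence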

                            # Check if this specific instance already exists
                            existing_instance = next((item for item in self.items if item.ical_uid == instance_uid),
                                                     None)
                            if existing_instance:
                                self.app.logger.info(
                                    f"Instance {instance_uid} for '{summary}' already exists. Skipping.")
                                continue

                            item_data = {
                                "title": summary, "description": description, "location": location,
                                "item_type": ItemType.TASK, "fixed_time": occ_utc,
                                "frequency": Frequency.ONE_TIME,  # Each imported instance is one-time in our system
                                "priority": app_priority, "ical_uid": instance_uid,  # Instance-specific UID
                                "status": ActionStatus.NOT_STARTED,
                                "ical_rrule_original": rrule_str  # Store original rule for reference
                            }
                            new_item = self.add_item(item_data, imported=True)
                            imported_items.append(new_item)
                            occurrences_imported += 1

                        # Note: rrule.between(..., inc=True) already includes DTSTART when it
                        # falls inside the import window, so a recurring event whose only
                        # upcoming instance is its start needs no separate handling here.


                    else:  # Non-recurring event
                        # Only import if it's upcoming or started within the last day.
                        if start_time_utc < (now_utc - timedelta(days=1)) and not dtend_ical:
                            # Too old, and no end time to check against
                            self.app.logger.info(f"Skipping old non-recurring event '{summary}' (UID: {uid})")
                            continue
                        if dtend_ical:
                            end_time_utc = self._parse_ical_dt(dtend_ical, user_tz)
                            if end_time_utc and end_time_utc < now_utc:  # Event has already ended
                                self.app.logger.info(f"Skipping past event '{summary}' (UID: {uid}) that has ended.")
                                continue

                        existing_item = next((item for item in self.items if item.ical_uid == uid), None)
                        if existing_item:  # Simplistic update: remove old, add new. Better: update in place.
                            self.app.logger.info(
                                f"Event with UID {uid} ('{summary}') already exists. Re-importing (simple replace).")
                            self.remove_item(existing_item.id, record_history=False)

                        item_data = {
                            "title": summary, "description": description, "location": location,
                            "item_type": ItemType.TASK, "fixed_time": start_time_utc,
                            "frequency": Frequency.ONE_TIME, "priority": app_priority,
                            "ical_uid": uid, "status": ActionStatus.NOT_STARTED
                        }
                        new_item = self.add_item(item_data, imported=True)
                        imported_items.append(new_item)

            if imported_items:
                self._log_ai_action("ical_import", [item.id for item in imported_items])
            self._save_data()  # Ensure all changes are saved
            self.app.logger.info(f"Imported {len(imported_items)} items from iCalendar data.")

        except Exception as e:
            self.app.logger.error(f"Failed to parse iCalendar string: {e}", exc_info=True)
            # Potentially re-raise or return empty list with error status
        return imported_items

    def import_ical_from_url(self, url: str) -> List[ActionItem]:
        try:
            headers = {'User-Agent': 'POA_App/1.0 (+https://yourdomain.com/poa_app_info)'}  # Be a good internet citizen
            response = requests.get(url, timeout=10, headers=headers)
            response.raise_for_status()  # Raises HTTPError for bad responses (4XX or 5XX)
            return self.import_ical_events(response.text)
        except requests.exceptions.RequestException as e:
            self.app.logger.error(f"Error fetching iCalendar from URL {url}: {e}")
            return []
        except Exception as e:  # Catch other errors like parsing
            self.app.logger.error(f"Error processing iCalendar from URL {url}: {e}")
            return []

    def import_ical_from_file_content(self, file_content: bytes) -> List[ActionItem]:
        try:
            # Try to decode as UTF-8, but iCal can have other encodings.
            # Standard is UTF-8. `icalendar` lib handles encoding detection mostly.
            ical_string = file_content.decode('utf-8', errors='replace')
            return self.import_ical_events(ical_string)
        except UnicodeDecodeError as e:
            self.app.logger.error(f"Encoding error reading iCalendar file: {e}. Try ensuring UTF-8 encoding.")
            # Try with 'latin-1' as a common fallback for some older files
            try:
                ical_string = file_content.decode('latin-1', errors='replace')
                return self.import_ical_events(ical_string)
            except Exception as e_fallback:
                self.app.logger.error(f"Fallback decoding also failed for iCalendar file: {e_fallback}")
                return []
        except Exception as e:
            self.app.logger.error(f"Error processing iCalendar file content: {e}")
            return []

    def export_to_ical_string(self) -> str:
        cal = iCalCalendar()
        cal.add('prodid', '-//POA App//yourdomain.com//')
        cal.add('version', '2.0')
        user_tz = self.get_user_timezone()

        for item in self.items:
            if item.item_type == ItemType.TASK and item.fixed_time:
                event = iCalEvent()
                event.add('summary', item.title)

                # Ensure fixed_time is UTC for iCal standard practice
                dtstart_utc = item.fixed_time
                if dtstart_utc.tzinfo is None:  # Should not happen if stored correctly
                    dtstart_utc = pytz.utc.localize(dtstart_utc)
                else:
                    dtstart_utc = dtstart_utc.astimezone(pytz.utc)
                event.add('dtstart', dtstart_utc)  # vDatetime handles UTC conversion for .to_ical()

                # Add DTEND (e.g., 1 hour duration for tasks, or based on item if available)
                # For simplicity, let's assume 1 hour duration if not specified
                event.add('dtend', dtstart_utc + timedelta(hours=1))

                event.add('dtstamp', datetime.now(pytz.utc))  # Time the event was created in iCal
                event.add('uid', item.ical_uid or item.id)  # Use original iCal UID if present, else our ID

                if item.description:
                    event.add('description', item.description)
                if item.location:
                    event.add('location', item.location)

                event.add('priority', self._map_app_priority_to_ical(item.priority))

                # Handle recurrence
                if item.frequency != Frequency.ONE_TIME:
                    if item.ical_rrule_original:  # If we have the original complex rule, use it
                        try:
                            # vRecur.from_ical requires bytes
                            event.add('rrule', vRecur.from_ical(item.ical_rrule_original.encode()))
                        except Exception as e_rrule:
                            self.app.logger.warning(
                                f"Could not parse stored original RRULE '{item.ical_rrule_original}' for item {item.id}: {e_rrule}. Exporting as simple recurrence.")
                            # Fallback to simple mapping
                            self._add_simple_rrule(event, item.frequency)
                    else:  # Map simple frequency
                        self._add_simple_rrule(event, item.frequency)

                cal.add_component(event)
        return cal.to_ical().decode('utf-8')

    def _add_simple_rrule(self, event: iCalEvent, frequency: Frequency):
        rrule_params = {}
        if frequency == Frequency.DAILY:
            rrule_params['freq'] = 'DAILY'
        elif frequency == Frequency.WEEKLY:
            rrule_params['freq'] = 'WEEKLY'
        elif frequency == Frequency.MONTHLY:
            rrule_params['freq'] = 'MONTHLY'
        elif frequency == Frequency.ANNUALLY:
            rrule_params['freq'] = 'YEARLY'

        if rrule_params:
            event.add('rrule', vRecur(rrule_params))
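
A minimal round-trip sketch for the iCalendar helpers above, assuming `manager` is an initialized instance of this planner class (the variable name is illustrative):

    # Export current tasks, then feed the result back through the importer.
    ical_text = manager.export_to_ical_string()
    items = manager.import_ical_events(ical_text)
    print(f"Re-imported {len(items)} item(s)")

    # Importing from a remote feed (placeholder URL):
    items = manager.import_ical_from_url("https://example.com/calendar.ics")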

SchedulerManager

SchedulerManagerClass

Source code in toolboxv2/mods/SchedulerManager.py
class SchedulerManagerClass:
    def __init__(self):
        self.jobs = {}
        self.thread = None
        self.running = False
        self.last_successful_jobs = deque(maxlen=3)  # Stores last 3 successful job names
        self.job_errors = {}  # Stores job names as keys and error messages as values

    def _run(self):
        while self.running:
            schedule.run_pending()
            time.sleep(1)

    def start(self):
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self._run, daemon=True)
            self.thread.start()

    def stop(self):
        self.running = False
        if self.thread is not None:
            self.thread.join()

    def job_wrapper(self, job_name: str, job_function: callable):
        """
        Wrap a job function to track success and errors.
        """
        def wrapped_job(*args, **kwargs):
            try:
                job_function(*args, **kwargs)
                # If the job ran successfully, store it in the success queue
                self.last_successful_jobs.append(job_name)
                if job_name in self.job_errors:
                    del self.job_errors[job_name]  # Remove error record if job succeeded after failing
            except Exception as e:
                # Capture any exceptions and store them
                self.job_errors[job_name] = str(e)

        return wrapped_job


    def register_job(self,
                     job_id: str,
                     second: int = -1,
                     func: Callable | str | None = None,
                     job: schedule.Job | None = None,
                     time_passer: schedule.Job | None = None,
                     object_name: str | None = None,
                     receive_job: bool = False,
                     save: bool = False,
                     max_live: bool = False,
                     serializer=serializer_default,
                     args=None, kwargs=None):
        """
            Parameters
            ----------
                job_id : str
                    id for the job for management
                second : int
                    The time interval in seconds between each call of the job.
                func : Callable or str
                    The function to be executed as the job.
                job : schedule.Job
                    An existing job object from the schedule library.
                time_passer : schedule.Job
                    A job without a function, used to specify the time interval.
                object_name : str
                    The name of the object contained in the 'func' var to be executed.
                receive_job : bool
                    A flag indicating whether the job should be retrieved from the object referenced by 'func'.
                save : bool
                    A flag indicating whether the job should be saved.
                max_live : bool
                    A flag indicating whether the job should have a maximum lifetime.
                serializer : module
                    json, pickle, or dill; must provide a dumps function.
                *args, **kwargs : Any serializable and deserializable
                    Additional arguments to be passed to the job function.

            Returns
            -------
           """

        if job is None and func is None:
            return Result.default_internal_error("Both job and func are not specified."
                                                 " Please specify either job or func.")
        if job is not None and func is not None:
            return Result.default_internal_error("Both job and func are specified. Please specify either job or func.")

        if job is not None:
            def func(x):
                return x
            return self._save_job(job_id=job_id,
                                  job=job,
                                  save=save,
                                  func=func,
                                  args=args,
                                  kwargs=kwargs,
                                  serializer=serializer)

        parsed_attr = self._parse_function(func=func, object_name=object_name)

        if parsed_attr.is_error():
            parsed_attr.result.data_info = f"Error parsing function for job : {job_id}"
            return parsed_attr

        if receive_job:
            job = parsed_attr.get()
        else:
            func = parsed_attr.get()

        time_passer = self._prepare_time_passer(time_passer=time_passer,
                                                second=second)

        job_func = self._prepare_job_func(func=func,
                                          max_live=max_live,
                                          second=second,
                                          args=args,
                                          kwargs=kwargs,
                                          job_id=job_id)

        job = self._get_final_job(job=job,
                                  func=self.job_wrapper(job_id, job_func),
                                  time_passer=time_passer,
                                  job_func=job_func,
                                  args=args,
                                  kwargs=kwargs)
        if job.is_error():
            return job

        job = job.get()

        return self._save_job(job_id=job_id,
                              job=job,
                              save=save,
                              func=func,
                              args=args,
                              kwargs=kwargs,
                              serializer=serializer)

    @staticmethod
    def _parse_function(func: str or Callable, object_name):
        if isinstance(func, str) and func.endswith('.py'):
            with open(func) as file:
                func_code = file.read()
                exec(func_code)
                func = locals()[object_name]
        elif isinstance(func, str) and func.endswith('.dill') and safety_mode == 'open':
            try:
                with open(func, 'rb') as file:
                    func = dill.load(file)
            except FileNotFoundError:
                return Result.default_internal_error(f"Function file {func} not found or dill not installed")
        elif isinstance(func, str):
            local_vars = {'app': get_app(from_=Name + f".parsing.{object_name}")}
            try:
                exec(func.strip(), {}, local_vars)
            except Exception as e:
                return Result.default_internal_error(f"Function parsing failed withe {e}")
            func = local_vars[object_name]
        elif isinstance(func, Callable):
            pass
        else:
            return Result.default_internal_error("Could not parse object scheduler_manager.parse_function")
        return Result.ok(func)

    @staticmethod
    def _prepare_time_passer(time_passer, second):
        if time_passer is None and second > 0:
            return schedule.every(second).seconds
        elif time_passer is None and second <= 0:
            raise ValueError("second must be greater than 0")
        return time_passer

    def _prepare_job_func(self, func: Callable, max_live: bool, second: float, job_id: str, args=None, kwargs=None):
        # Bind args/kwargs here so the returned callable is zero-argument and
        # can be handed to the scheduler directly.
        args = args or ()
        kwargs = kwargs or {}
        if max_live:
            end_time = datetime.now() + timedelta(seconds=second)

            def job_func():
                if datetime.now() < end_time:
                    func(*args, **kwargs)
                else:
                    job = self.jobs.get(job_id, {}).get('job')
                    if job is not None:
                        schedule.cancel_job(job)
                    else:
                        print("Error canceling job")

            return job_func

        def job_func():
            func(*args, **kwargs)

        return job_func

    @staticmethod
    def _get_final_job(job, func, time_passer, job_func, args, kwargs):
        # args/kwargs are already bound into job_func by _prepare_job_func, and
        # func is the tracking wrapper around it, so schedule func directly.
        if job is None and isinstance(func, Callable):
            job = time_passer.do(func)
        elif job is not None:
            pass
        else:
            return Result.default_internal_error("No final job found to register")
        return Result.ok(job)

    def _save_job(self, job_id, job, save, func=None, args=None, kwargs=None, serializer=serializer_default):
        if job is not None:
            self.jobs[job_id] = {'id': job_id, 'job': job, 'save': save,
                                 # Persist the function in serialized form so
                                 # save_jobs/load_jobs can round-trip it.
                                 'func': serializer.dumps(func) if save and func is not None else None,
                                 'args': args, 'kwargs': kwargs}
            f = (f"Added Job {job_id} :{' - saved' if save else ''}"
                 f"{' - args ' + str(len(args)) if args else ''}"
                 f"{' - kwargs ' + str(len(kwargs.keys())) if kwargs else ''}")
            return Result.ok(f)
        return Result.default_internal_error(job_id)

    def cancel_job(self, job_id):
        if job_id not in self.jobs:
            print("Job not found")
            return
        schedule.cancel_job(self.jobs[job_id].get('job'))
        self.jobs[job_id]["cancelled"] = True
        self.jobs[job_id]["save"] = False
        print("Job cancelled")

    def del_job(self, job_id):
        if job_id not in self.jobs:
            print("Job not found")
            return
        if not self.jobs[job_id].get("cancelled", False):
            print("Job not cancelled canceling job")
            self.cancel_job(job_id)
        del self.jobs[job_id]
        print("Job deleted")

    def save_jobs(self, file_path, serializer=serializer_default):
        with open(file_path, 'wb') as file:
            # schedule.Job objects are not reliably serializable; drop them and
            # rebuild each job from its serialized 'func' on load.
            save_jobs = [{k: v for k, v in job.items() if k != 'job'}
                         for job in self.jobs.values() if job['save']]
            serializer.dump(save_jobs, file)

    def load_jobs(self, file_path, deserializer=deserializer_default):
        with open(file_path, 'rb') as file:
            jobs = deserializer.load(file)
            for job_info in jobs:
                # Pop the keys register_job receives explicitly so they are not
                # passed twice via **job_info.
                job_info.pop('job', None)
                func = deserializer.loads(job_info.pop('func'))
                self.register_job(job_info.pop('id'), func=func, **job_info)

    def get_tasks_table(self):
        if not self.jobs:
            return "No tasks registered."

        # Calculate the maximum width for each column
        id_width = max(len("Task ID"), max(len(job_id) for job_id in self.jobs))
        next_run_width = max(len("Next Execution"), 19)  # "%Y-%m-%d %H:%M:%S" renders to 19 chars
        interval_width = max(len("Interval"),
                             max(len(self._get_interval_str(j['job'])) for j in self.jobs.values()))

        # Create the header
        header = f"| {'Task ID':<{id_width}} | {'Next Execution':<{next_run_width}} | {'Interval':<{interval_width}} |"
        separator = f"|{'-' * (id_width + 2)}|{'-' * (next_run_width + 2)}|{'-' * (interval_width + 2)}|"

        # Create the table rows
        rows = []
        for job_id, job_info in self.jobs.items():
            job = job_info['job']
            next_run = job.next_run.strftime("%Y-%m-%d %H:%M:%S") if job.next_run else "N/A"
            interval = self._get_interval_str(job)
            row = f"| {job_id:<{id_width}} | {next_run:<{next_run_width}} | {interval:<{interval_width}} |"
            rows.append(row)

        # Combine all parts of the table
        table = "\n".join([header, separator] + rows)
        return table

    def _get_interval_str(self, job):
        if job.interval == 0:
            return "Once"

        units = [
            (86400, "day"),
            (3600, "hour"),
            (60, "minute"),
            (1, "second")
        ]

        for seconds, unit in units:
            if job.interval % seconds == 0:
                count = job.interval // seconds
                return f"Every {count} {unit}{'s' if count > 1 else ''}"

        return f"Every {job.interval} seconds"
job_wrapper(job_name, job_function)

Wrap a job function to track success and errors.

Source code in toolboxv2/mods/SchedulerManager.py
def job_wrapper(self, job_name: str, job_function: callable):
    """
    Wrap a job function to track success and errors.
    """
    def wrapped_job(*args, **kwargs):
        try:
            job_function(*args, **kwargs)
            # If the job ran successfully, store it in the success queue
            self.last_successful_jobs.append(job_name)
            if job_name in self.job_errors:
                del self.job_errors[job_name]  # Remove error record if job succeeded after failing
        except Exception as e:
            # Capture any exceptions and store them
            self.job_errors[job_name] = str(e)

    return wrapped_job
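
A short sketch of the wrapper's tracking behavior, grounded in the code above (the job name and function are illustrative):

    sm = SchedulerManagerClass()

    def flaky_job():
        raise RuntimeError("boom")

    wrapped = sm.job_wrapper("flaky-job", flaky_job)
    wrapped()              # the exception is captured, not raised
    print(sm.job_errors)   # {'flaky-job': 'boom'}
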
register_job(job_id, second=-1, func=None, job=None, time_passer=None, object_name=None, receive_job=False, save=False, max_live=False, serializer=serializer_default, args=None, kwargs=None)
Parameters
job_id : str
    id for the job for management
second : int
    The time interval in seconds between each call of the job.
func : Callable or str
    The function to be executed as the job.
job : schedule.Job
    An existing job object from the schedule library.
time_passer : schedule.Job
    A job without a function, used to specify the time interval.
object_name : str
    The name of the object contained in the 'func' var to be executed.
receive_job : bool
    A flag indicating whether the job should be retrieved from the object referenced by 'func'.
save : bool
    A flag indicating whether the job should be saved.
max_live : bool
    A flag indicating whether the job should have a maximum lifetime.
serializer : module
    json, pickle, or dill; must provide a dumps function.
*args, **kwargs : Any serializable and deserializable
    Additional arguments to be passed to the job function.
Returns
Source code in toolboxv2/mods/SchedulerManager.py
def register_job(self,
                 job_id: str,
                 second: int = -1,
                 func: Callable | str | None = None,
                 job: schedule.Job | None = None,
                 time_passer: schedule.Job | None = None,
                 object_name: str | None = None,
                 receive_job: bool = False,
                 save: bool = False,
                 max_live: bool = False,
                 serializer=serializer_default,
                 args=None, kwargs=None):
    """
        Parameters
        ----------
            job_id : str
                id for the job for management
            second : int
                The time interval in seconds between each call of the job.
            func : Callable or str
                The function to be executed as the job.
            job : schedule.Job
                An existing job object from the schedule library.
            time_passer : schedule.Job
                A job without a function, used to specify the time interval.
            object_name : str
                The name of the object contained in the 'func' var to be executed.
            receive_job : bool
                A flag indicating whether the job should be retrieved from the object referenced by 'func'.
            save : bool
                A flag indicating whether the job should be saved.
            max_live : bool
                A flag indicating whether the job should have a maximum lifetime.
            serializer : module
                json, pickle, or dill; must provide a dumps function.
            *args, **kwargs : Any serializable and deserializable
                Additional arguments to be passed to the job function.

        Returns
        -------
       """

    if job is None and func is None:
        return Result.default_internal_error("Both job and func are not specified."
                                             " Please specify either job or func.")
    if job is not None and func is not None:
        return Result.default_internal_error("Both job and func are specified. Please specify either job or func.")

    if job is not None:
        def func(x):
            return x
        return self._save_job(job_id=job_id,
                              job=job,
                              save=save,
                              func=func,
                              args=args,
                              kwargs=kwargs,
                              serializer=serializer)

    parsed_attr = self._parse_function(func=func, object_name=object_name)

    if parsed_attr.is_error():
        parsed_attr.result.data_info = f"Error parsing function for job : {job_id}"
        return parsed_attr

    if receive_job:
        job = parsed_attr.get()
    else:
        func = parsed_attr.get()

    time_passer = self._prepare_time_passer(time_passer=time_passer,
                                            second=second)

    job_func = self._prepare_job_func(func=func,
                                      max_live=max_live,
                                      second=second,
                                      args=args,
                                      kwargs=kwargs,
                                      job_id=job_id)

    job = self._get_final_job(job=job,
                              func=self.job_wrapper(job_id, job_func),
                              time_passer=time_passer,
                              job_func=job_func,
                              args=args,
                              kwargs=kwargs)
    if job.is_error():
        return job

    job = job.get()

    return self._save_job(job_id=job_id,
                          job=job,
                          save=save,
                          func=func,
                          args=args,
                          kwargs=kwargs,
                          serializer=serializer)
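
A minimal registration sketch (the function and interval are illustrative):

    sm = SchedulerManagerClass()

    def heartbeat():
        print("tick")

    # Run `heartbeat` every 30 seconds until the manager is stopped.
    result = sm.register_job("heartbeat", second=30, func=heartbeat)
    sm.start()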

Tools

Bases: MainTool, SchedulerManagerClass

Source code in toolboxv2/mods/SchedulerManager.py
class Tools(MainTool, SchedulerManagerClass):
    version = version

    def __init__(self, app=None):
        self.name = Name
        self.color = "VIOLET2"

        self.keys = {"mode": "db~mode~~:"}
        self.encoding = 'utf-8'
        self.tools = {'name': Name}
        MainTool.__init__(self,
                          load=self.init_sm,
                          v=self.version,
                          name=self.name,
                          color=self.color,
                          on_exit=self.on_exit)

        SchedulerManagerClass.__init__(self)

    @export(
        mod_name=Name,
        name="Version",
        version=version,
    )
    def get_version(self):
        return self.version

    # Export the scheduler instance for use in other modules
    @export(mod_name=Name, name='init', version=version, initial=True)
    def init_sm(self):
        if os.path.exists(self.app.data_dir + '/jobs.compact'):
            print("SchedulerManager try loading from file")
            self.load_jobs(
                self.app.data_dir + '/jobs.compact'
            )
            print("SchedulerManager Successfully loaded")
        print("STARTING SchedulerManager")
        self.start()

    @export(mod_name=Name, name='clos_manager', version=version, exit_f=True)
    def on_exit(self):
        self.stop()
        self.save_jobs(self.app.data_dir + '/jobs.compact')
        return f"saved {len(self.jobs.keys())} jobs in {self.app.data_dir + '/jobs.compact'}"

    @export(mod_name=Name, name='instance', version=version)
    def get_instance(self):
        return self

    @export(mod_name=Name, name='start', version=version)
    def start_instance(self):
        return self.start()

    @export(mod_name=Name, name='stop', version=version)
    def stop_instance(self):
        return self.stop()

    @export(mod_name=Name, name='cancel', version=version)
    def cancel_instance(self, job_id):
        return self.cancel_job(job_id)

    @export(mod_name=Name, name='dealt', version=version)
    def dealt_instance(self, job_id):
        return self.del_job(job_id)

    @export(mod_name=Name, name='add', version=version)
    def register_instance(self, job_data: dict):
        """
        example dicts :
            -----------
            {
                "job_id": "job0",
                "second": 0,
                "func": None,
                "job": None,
                "time_passer": None,
                "object_name": "tb_job_fuction",
                "receive_job": False,
                "save": False,
                "max_live": True,
                # just leave it out: "serializer": serializer_default,
                "args": [],
                "kwargs": {},
            }

            job_id : str
                id for the job for management
            second (optional): int
                The time interval in seconds between each call of the job.
            func (optional): Callable or str
                The function to be executed as the job.
            job (optional):  schedule.Job
                An existing job object from the schedule library.
            time_passer (optional):  schedule.Job
                A job without a function, used to specify the time interval.
            object_name (optional): str
                The name of the object contained in the 'func' var to be executed.
            receive_job (optional): bool
                A flag indicating whether the job should be retrieved from the object referenced by 'func'.
            save (optional): bool
                A flag indicating whether the job should be saved.
            max_live (optional): bool
                A flag indicating whether the job should have a maximum lifetime.
            serializer (optional): module
                json, pickle, or dill; must provide a dumps function.
            *args, **kwargs (optional):
                Additional arguments to be passed to the job function.


        Parameters
            ----------
           job_data : dict

        example usage
            ----------
            ```python
            # illustrative call; `tools` is this module's Tools instance
            tools.register_instance({"job_id": "job0", "second": 60, "func": my_job})
            ```

    """
        if job_data is None:
            self.app.logger.error("No job data provided")
            return None
        job_id = job_data["job_id"]
        second = job_data.get("second", 0)
        func = job_data.get("func")
        job = job_data.get("job")
        time_passer = job_data.get("time_passer")
        object_name = job_data.get("object_name", "tb_job_fuction")
        receive_job = job_data.get("receive_job", False)
        save = job_data.get("save", False)
        max_live = job_data.get("max_live", True)
        serializer = job_data.get("serializer", serializer_default)
        args = job_data.get("args", ())
        kwargs = job_data.get("kwargs", {})

        return self.register_job(
            job_id=job_id,
            second=second,
            func=func,
            job=job,
            time_passer=time_passer,
            object_name=object_name,
            receive_job=receive_job,
            save=save,
            max_live=max_live,
            serializer=serializer,
            args=args,
            kwargs=kwargs
        )
register_instance(job_data)
example dicts

    {
        "job_id": "job0",
        "second": 0,
        "func": None,
        "job": None,
        "time_passer": None,
        "object_name": "tb_job_fuction",
        "receive_job": False,
        "save": False,
        "max_live": True,
        # just leave it out: "serializer": serializer_default,
        "args": [],
        "kwargs": {},
    }

job_id : str
    id for the job for management
second (optional): int
    The time interval in seconds between each call of the job.
func (optional): Callable or str
    The function to be executed as the job.
job (optional): schedule.Job
    An existing job object from the schedule library.
time_passer (optional): schedule.Job
    A job without a function, used to specify the time interval.
object_name (optional): str
    The name of the object contained in the 'func' var to be executed.
receive_job (optional): bool
    A flag indicating whether the job should be retrieved from the object referenced by 'func'.
save (optional): bool
    A flag indicating whether the job should be saved.
max_live (optional): bool
    A flag indicating whether the job should have a maximum lifetime.
serializer (optional): module
    json, pickle, or dill; must provide a dumps function.
*args, **kwargs (optional):
    Additional arguments to be passed to the job function.

Parameters
----------
job_data : dict

example usage
----------

    # illustrative call; `tools` is this module's Tools instance
    tools.register_instance({"job_id": "job0", "second": 60, "func": my_job})
Source code in toolboxv2/mods/SchedulerManager.py
@export(mod_name=Name, name='add', version=version)
def register_instance(self, job_data: dict):
    """
    example dicts :
        -----------
        {
            "job_id": "job0",
            "second": 0,
            "func": None,
            "job": None,
            "time_passer": None,
            "object_name": "tb_job_fuction",
            "receive_job": False,
            "save": False,
            "max_live": True,
            # just leave it out: "serializer": serializer_default,
            "args": [],
            "kwargs": {},
        }

        job_id : str
            id for the job for management
        second (optional): int
            The time interval in seconds between each call of the job.
        func (optional): Callable or str
            The function to be executed as the job.
        job (optional):  schedule.Job
            An existing job object from the schedule library.
        time_passer (optional):  schedule.Job
            A job without a function, used to specify the time interval.
        object_name (optional): str
            The name of the object contained in the 'func' var to be executed.
        receive_job (optional): bool
            A flag indicating whether the job should be retrieved from the object referenced by 'func'.
        save (optional): bool
            A flag indicating whether the job should be saved.
        max_live (optional): bool
            A flag indicating whether the job should have a maximum lifetime.
        serializer (optional): module
            json, pickle, or dill; must provide a dumps function.
        *args, **kwargs (optional):
            Additional arguments to be passed to the job function.


    Parameters
        ----------
       job_data : dict

    example usage
        ----------
        ```python
        # illustrative call; `tools` is this module's Tools instance
        tools.register_instance({"job_id": "job0", "second": 60, "func": my_job})
        ```

"""
    if job_data is None:
        self.app.logger.error("No job data provided")
        return None
    job_id = job_data["job_id"]
    second = job_data.get("second", 0)
    func = job_data.get("func")
    job = job_data.get("job")
    time_passer = job_data.get("time_passer")
    object_name = job_data.get("object_name", "tb_job_fuction")
    receive_job = job_data.get("receive_job", False)
    save = job_data.get("save", False)
    max_live = job_data.get("max_live", True)
    serializer = job_data.get("serializer", serializer_default)
    args = job_data.get("args", ())
    kwargs = job_data.get("kwargs", {})

    return self.register_job(
        job_id=job_id,
        second=second,
        func=func,
        job=job,
        time_passer=time_passer,
        object_name=object_name,
        receive_job=receive_job,
        save=save,
        max_live=max_live,
        serializer=serializer,
        args=args,
        kwargs=kwargs
    )
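
A hedged sketch of registering a job through this exported 'add' endpoint, assuming `tools` is the loaded SchedulerManager Tools instance and `my_job` is a callable you define (both names illustrative):

    def my_job():
        print("running scheduled work")

    tools.register_instance({
        "job_id": "job0",
        "second": 60,        # run every 60 seconds
        "func": my_job,
        "max_live": False,   # keep running until cancelled
    })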

SocketManager

The SocketManager supports two types of connections:

1. Client/Server
2. Peer-to-Peer

TruthSeeker

arXivCrawler

ArXiv Crawler for TruthSeeker. Main module for processing research queries.

ArXivPDFProcessor

Main processor for research queries. This is a wrapper around the new ResearchProcessor for backward compatibility.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
class ArXivPDFProcessor:
    """
    Main processor for research queries.
    This is a wrapper around the new ResearchProcessor for backward compatibility.
    """
    def __init__(self,
                 query: str,
                 tools,
                 chunk_size: int = 1_000_000,
                 overlap: int = 2_000,
                 max_workers=None,
                 num_search_result_per_query=6,
                 max_search=6,
                 download_dir="pdfs",
                 callback=None,
                 num_workers=None):
        """Initialize the ArXiv PDF processor.

        Args:
            query: Research query
            tools: Tools module
            chunk_size: Size of text chunks for processing
            overlap: Overlap between chunks
            max_workers: Maximum number of worker threads
            num_search_result_per_query: Number of search results per query
            max_search: Maximum number of search queries
            download_dir: Directory to save downloaded files
            callback: Callback function for status updates
            num_workers: Number of worker threads
        """
        # Create the new research processor
        self.processor = ResearchProcessor(
            query=query,
            tools=tools,
            chunk_size=chunk_size,
            overlap=overlap,
            max_workers=max_workers,
            num_search_result_per_query=num_search_result_per_query,
            max_search=max_search,
            download_dir=download_dir,
            callback=callback,
            num_workers=num_workers
        )

        # Copy attributes for backward compatibility
        self.insights_generated = False
        self.queries_generated = False
        self.query = query
        self.tools = tools
        self.mem = tools.get_memory()
        self.chunk_size = chunk_size
        self.overlap = overlap
        self.max_workers = max_workers
        self.nsrpq = num_search_result_per_query
        self.max_search = max_search
        self.download_dir = download_dir
        self.parser = RobustPDFDownloader(download_dir=download_dir)
        self.callback = callback if callback is not None else lambda status: None
        self.mem_name = None
        self.current_session = None
        self.all_ref_papers = 0
        self.last_insights_list = None
        self.all_texts_len = 0
        self.f_texts_len = 0
        self.s_id = str(uuid.uuid4())
        self.semantic_model = self.processor.semantic_model
        self._query_progress = {}
        self._progress_lock = threading.Lock()
        self.num_workers = self.processor.num_workers

    def _update_global_progress(self) -> float:
        """Calculate overall progress considering all processing phases."""
        return self.processor._update_global_progress()

    async def search_and_process_papers(self, queries: list[str]) -> list[Paper]:
        """Search for and process papers based on queries.

        Args:
            queries: List of search queries

        Returns:
            List of processed papers
        """
        # Use the new processor to search and process papers
        unified_papers = await self.processor.search_and_process_papers(queries)

        # Convert UnifiedPaper objects to Paper objects for backward compatibility
        papers = []
        for paper in unified_papers:
            if paper.source == "arxiv":
                # Convert to the old Paper format
                arxiv_paper = Paper(
                    title=paper.title,
                    authors=paper.authors,
                    summary=paper.summary,
                    url=paper.url,
                    pdf_url=paper.pdf_url,
                    published=paper.published,
                    updated=paper.source_specific_data.get("updated", ""),
                    categories=paper.source_specific_data.get("categories", []),
                    paper_id=paper.paper_id
                )
                papers.append(arxiv_paper)

        # Update attributes for backward compatibility
        self.all_ref_papers = self.processor.all_ref_papers
        self.all_texts_len = self.processor.all_texts_len
        self.f_texts_len = self.processor.f_texts_len

        return papers

    def send_status(self, step: str, progress: float = None, additional_info: str = ""):
        """Send status update via callback."""
        if progress is None:
            progress = self._update_global_progress()
        self.callback({
            "step": step,
            "progress": progress,
            "info": additional_info
        })

    def generate_queries(self) -> list[str]:
        self.send_status("Generating search queries")
        self.queries_generated = False

        class ArXivQueries(BaseModel):
            queries: list[str] = Field(..., description="List of ArXiv search queries (en)")

        try:
            query_generator: ArXivQueries = self.tools.format_class(
                ArXivQueries,
                f"Generate a list of precise ArXiv search queries to comprehensively address: {self.query}"
            )
            queries = [self.query] + query_generator["queries"]
        except Exception:
            self.send_status("Error generating queries", additional_info="Using default query.")
            queries = [self.query]

        if len(queries[:self.max_search]) > 0:
            self.queries_generated = True
        return queries[:self.max_search]

    def init_process_papers(self):
        self.mem.create_memory(self.mem_name, model_config={"model_name": "anthropic/claude-3-5-haiku-20241022"})
        self.send_status("Memory initialized")


    async def generate_insights(self, queries) -> dict:
        self.send_status("Generating insights")
        query = self.query
        results = await self.mem.query(query=query, memory_names=self.mem_name, unified_retrieve=True,
                                       query_params={"max_sentences": 25})

        self.insights_generated = True
        self.send_status("Insights generated", progress=1.0)
        return results

    async def extra_query(self, query, query_params=None, unified_retrieve=True):
        self.send_status("Processing follow-up query", progress=0.5)
        results = await self.mem.query(query=query, memory_names=self.mem_name,
                                       query_params=query_params, unified_retrieve=unified_retrieve)
        self.send_status("Processing follow-up query done", progress=1)
        return results

    def generate_mem_name(self):
        class UniqueMemoryName(BaseModel):
            """unique memory name based on the user query"""
            name: str
        return self.tools.get_agent("thinkm").format_class(UniqueMemoryName, self.query).get('name', '_'.join(self.query.split(" ")[:3]))

    def initialize(self, session_id, second=False):
        self.current_session = session_id
        self.insights_generated = False
        self.queries_generated = False
        if second:
            return
        self.mem_name = self.generate_mem_name().strip().replace("\n", '') + '_' + session_id
        self.init_process_papers()

    async def process(self, query=None) -> tuple[list[Paper], dict]:
        if query is not None:
            self.query = query
        self.send_status("Starting research process")
        t0 = time.perf_counter()
        self.initialize(self.s_id, query is not None)

        queries = self.generate_queries()

        papers = await self.search_and_process_papers(queries)

        if len(papers) == 0:
            class UserQuery(BaseModel):
                """Fix all typos and clear the original user query"""
                new_query: str
            self.query = self.tools.format_class(
                UserQuery,
                self.query
            )["new_query"]
            queries = self.generate_queries()
            papers = await self.search_and_process_papers(queries)

        insights = await self.generate_insights(queries)

        elapsed_time = time.perf_counter() - t0
        self.send_status("Process complete", progress=1.0,
                         additional_info=f"Total time: {elapsed_time:.2f}s, Papers analyzed: {len(papers)}/{self.all_ref_papers}")

        return papers, insights

    @staticmethod
    def estimate_processing_metrics(query_length: int, **config) -> (float, float):
        """Return estimated time (seconds) and price for processing."""
        total_papers = config['max_search'] * config['num_search_result_per_query']
        median_text_length = 100000  # 10 pages * 10000 characters

        # Estimated chunks to process
        total_chunks = total_papers * (median_text_length / config['chunk_size']) + 1 / config['overlap']
        processed_chunks = total_chunks * 0.45
        total_chars = TextSplitter(config['chunk_size'],
                     config['overlap']
                     ).approximate(config['chunk_size'] * processed_chunks)
        # Time estimation (seconds)
        # .75 / config['chunk_size']  # Hypothetical time per chunk in seconds (unused)
        w = (config.get('num_workers', 16) if config.get('num_workers', 16) is not None else 16 / 10)
        # Processing time - Insights generation - Insights query - Indexing time - Download time - workers - Query generation time - UI - Init DB
        estimated_time = ((8+total_papers*0.012)+(total_chunks/20000) * .005 + (total_chunks/2) * .0003 + total_papers * 2.8 ) / w + (0.25 * config['max_search']) + 6 + 4

        price_per_char = 0.0000012525
        price_per_t_chunk =  total_chars * price_per_char
        estimated_price = price_per_t_chunk ** 1.7

        # estimated_price = 0 if query_length < 420 and estimated_price < 5 else estimated_price
        if estimated_time < 10:
            estimated_time = 10
        if estimated_price < .04:
            estimated_price = .04
        return round(estimated_time, 2), round(estimated_price, 4)
__init__(query, tools, chunk_size=1000000, overlap=2000, max_workers=None, num_search_result_per_query=6, max_search=6, download_dir='pdfs', callback=None, num_workers=None)

Initialize the ArXiv PDF processor.

Parameters:

    query (str): Research query. Required.
    tools: Tools module. Required.
    chunk_size (int): Size of text chunks for processing. Default: 1000000
    overlap (int): Overlap between chunks. Default: 2000
    max_workers: Maximum number of worker threads. Default: None
    num_search_result_per_query: Number of search results per query. Default: 6
    max_search: Maximum number of search queries. Default: 6
    download_dir: Directory to save downloaded files. Default: 'pdfs'
    callback: Callback function for status updates. Default: None
    num_workers: Number of worker threads. Default: None
Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
def __init__(self,
             query: str,
             tools,
             chunk_size: int = 1_000_000,
             overlap: int = 2_000,
             max_workers=None,
             num_search_result_per_query=6,
             max_search=6,
             download_dir="pdfs",
             callback=None,
             num_workers=None):
    """Initialize the ArXiv PDF processor.

    Args:
        query: Research query
        tools: Tools module
        chunk_size: Size of text chunks for processing
        overlap: Overlap between chunks
        max_workers: Maximum number of worker threads
        num_search_result_per_query: Number of search results per query
        max_search: Maximum number of search queries
        download_dir: Directory to save downloaded files
        callback: Callback function for status updates
        num_workers: Number of worker threads
    """
    # Create the new research processor
    self.processor = ResearchProcessor(
        query=query,
        tools=tools,
        chunk_size=chunk_size,
        overlap=overlap,
        max_workers=max_workers,
        num_search_result_per_query=num_search_result_per_query,
        max_search=max_search,
        download_dir=download_dir,
        callback=callback,
        num_workers=num_workers
    )

    # Copy attributes for backward compatibility
    self.insights_generated = False
    self.queries_generated = False
    self.query = query
    self.tools = tools
    self.mem = tools.get_memory()
    self.chunk_size = chunk_size
    self.overlap = overlap
    self.max_workers = max_workers
    self.nsrpq = num_search_result_per_query
    self.max_search = max_search
    self.download_dir = download_dir
    self.parser = RobustPDFDownloader(download_dir=download_dir)
    self.callback = callback if callback is not None else lambda status: None
    self.mem_name = None
    self.current_session = None
    self.all_ref_papers = 0
    self.last_insights_list = None
    self.all_texts_len = 0
    self.f_texts_len = 0
    self.s_id = str(uuid.uuid4())
    self.semantic_model = self.processor.semantic_model
    self._query_progress = {}
    self._progress_lock = threading.Lock()
    self.num_workers = self.processor.num_workers
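
A minimal construction sketch (illustrative, not canonical): it assumes the isaa module is available and exposes the tools object the same way main() below obtains it; the query string and parameter values are made up.

from toolboxv2 import get_app
from toolboxv2.mods.TruthSeeker.arXivCrawler import ArXivPDFProcessor

tools = get_app("ArXivPDFProcessor", name=None).get_mod("isaa")
tools.init_isaa(build=True)

# The callback receives dicts shaped like {"step": str, "progress": float, "info": str}
processor = ArXivPDFProcessor(
    "transformer architectures for time-series forecasting",
    tools=tools,
    max_search=3,
    num_search_result_per_query=2,
    callback=print,
)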
estimate_processing_metrics(query_length, **config) staticmethod

Return estimated time (seconds) and price for processing.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
@staticmethod
def estimate_processing_metrics(query_length: int, **config) -> (float, float):
    """Return estimated time (seconds) and price for processing."""
    total_papers = config['max_search'] * config['num_search_result_per_query']
    median_text_length = 100000  # 10 pages * 10000 characters

    # Estimated chunks to process
    total_chunks = total_papers * (median_text_length / config['chunk_size']) + 1 / config['overlap']
    processed_chunks = total_chunks * 0.45
    total_chars = TextSplitter(config['chunk_size'],
                 config['overlap']
                 ).approximate(config['chunk_size'] * processed_chunks)
    # Time estimation (seconds)
    # .75 / config['chunk_size']  # Hypothetical time per chunk in seconds (unused)
    w = (config.get('num_workers', 16) if config.get('num_workers', 16) is not None else 16 / 10)
    # Processing time - Insights generation - Insights query - Indexing time - Download time - workers - Query generation time - UI - Init DB
    estimated_time = ((8+total_papers*0.012)+(total_chunks/20000) * .005 + (total_chunks/2) * .0003 + total_papers * 2.8 ) / w + (0.25 * config['max_search']) + 6 + 4

    price_per_char = 0.0000012525
    price_per_t_chunk =  total_chars * price_per_char
    estimated_price = price_per_t_chunk ** 1.7

    # estimated_price = 0 if query_length < 420 and estimated_price < 5 else estimated_price
    if estimated_time < 10:
        estimated_time = 10
    if estimated_price < .04:
        estimated_price = .04
    return round(estimated_time, 2), round(estimated_price, 4)
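
A short usage sketch of the estimator, mirroring how update_estimates in the nGui module calls it below; the config keys match the GUI defaults, and the query string is illustrative only.

config = {
    'chunk_size': 21000,
    'overlap': 600,
    'num_search_result_per_query': 3,
    'max_search': 3,
    'num_workers': None,
}
est_time, est_price = ArXivPDFProcessor.estimate_processing_metrics(len("my research query"), **config)
print(f"~{est_time}s, ~{est_price}€")  # both values are clamped to minimums of 10s / 0.04€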
search_and_process_papers(queries) async

Search for and process papers based on queries.

Parameters:

    queries (list[str]): List of search queries. Required.

Returns:

    list[Paper]: List of processed papers
Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
async def search_and_process_papers(self, queries: list[str]) -> list[Paper]:
    """Search for and process papers based on queries.

    Args:
        queries: List of search queries

    Returns:
        List of processed papers
    """
    # Use the new processor to search and process papers
    unified_papers = await self.processor.search_and_process_papers(queries)

    # Convert UnifiedPaper objects to Paper objects for backward compatibility
    papers = []
    for paper in unified_papers:
        if paper.source == "arxiv":
            # Convert to the old Paper format
            arxiv_paper = Paper(
                title=paper.title,
                authors=paper.authors,
                summary=paper.summary,
                url=paper.url,
                pdf_url=paper.pdf_url,
                published=paper.published,
                updated=paper.source_specific_data.get("updated", ""),
                categories=paper.source_specific_data.get("categories", []),
                paper_id=paper.paper_id
            )
            papers.append(arxiv_paper)

    # Update attributes for backward compatibility
    self.all_ref_papers = self.processor.all_ref_papers
    self.all_texts_len = self.processor.all_texts_len
    self.f_texts_len = self.processor.f_texts_len

    return papers
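
Since this is a coroutine, it must be awaited; a hedged sketch, assuming a processor constructed as in the __init__ example above and an initialized memory:

papers = await processor.search_and_process_papers(["quantum error correction"])
print(len(papers), "of", processor.all_ref_papers, "referenced papers processed")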
send_status(step, progress=None, additional_info='')

Send status update via callback.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
def send_status(self, step: str, progress: float = None, additional_info: str = ""):
    """Send status update via callback."""
    if progress is None:
        progress = self._update_global_progress()
    self.callback({
        "step": step,
        "progress": progress,
        "info": additional_info
    })
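
The callback contract is a plain dict; a minimal consumer might look like this (illustrative only, reusing the processor from the example above):

def on_status(status: dict):
    # status = {"step": str, "progress": float in [0, 1], "info": str}
    print(f"[{status['progress']:.0%}] {status['step']} {status['info']}")

processor.callback = on_status
processor.send_status("Downloading PDFs", progress=0.3)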
main(query='Beste strategien in bretspielen sitler von katar') async

Main execution function

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
async def main(query: str = "Beste strategien in bretspielen sitler von katar"):
    """Main execution function"""
    with Spinner("Init Isaa"):
        tools = get_app("ArXivPDFProcessor", name=None).get_mod("isaa")
        tools.init_isaa(build=True)
    processor = ArXivPDFProcessor(query, tools=tools)
    papers, insights = await processor.process()

    print("Generated Insights:", insights)
    print("Generated Insights_list:", processor.last_insights_list)
    kb = tools.get_memory(processor.mem_name)
    print(await kb.query_concepts("AI"))
    print(await kb.retrieve("Evaluation metrics for assessing AI Agent performance"))
    print(kb.concept_extractor.concept_graph.concepts.keys())
    kb.vis(output_file="insights_graph.html")
    kb.save("mem.plk")
    # await get_app("ArXivPDFProcessor", name=None).a_idle()
    return insights
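
Because main is a coroutine, one way to run it from a script (a sketch; the query string is illustrative):

import asyncio

if __name__ == "__main__":
    insights = asyncio.run(main("Evaluation metrics for AI agents"))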

nGui

import asyncio
import colorsys
import json
import os
import random
import time
from dataclasses import asdict
from datetime import datetime, timedelta
from pathlib import Path
from queue import Queue
from threading import Thread, Event
from typing import Dict, Union, List, Any

import networkx as nx
import stripe
from fastapi import Request
from nicegui import ui

from toolboxv2 import get_app
from toolboxv2.mods.FastApi.fast_nice import register_nicegui
from toolboxv2.mods.TruthSeeker.arXivCrawler import Paper
from toolboxv2.mods.isaa.base.AgentUtils import anything_from_str_to_dict

# Set your secret key (use environment variables in production!)

stripe.api_key = os.getenv('STRIPE_SECRET_KEY', 'sk_test_YourSecretKey')

def create_landing_page():
    # Set up dynamic background
    ui.query("body").style("background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%)")

# Main container with enhanced responsive design
with ui.column().classes(
"w-full max-w-md p-8 rounded-3xl shadow-2xl "
"items-center self-center mx-auto my-8"
):
    # Advanced styling for glass-morphism effect
    ui.query(".nicegui-column").style("""
    background: rgba(255, 255, 255, 0.05);
    backdrop-filter: blur(12px);
    border: 1px solid rgba(255, 255, 255, 0.1);
    transition: all 0.3s ease-in-out;
    """)

    # Animated logo/brand icon
    with ui.element("div").classes("animate-fadeIn"):
        ui.icon("science").classes(
        "text-7xl mb-6 text-primary "
        "transform hover:scale-110 transition-transform"
        )

    # Enhanced typography for title
    ui.label("TruthSeeker").classes(
    "text-5xl font-black text-center "
    "text-primary mb-2 animate-slideDown"
    )

    # Stylized subtitle with brand message
    ui.label("Precision. Discovery. Insights.").classes(
    "text-xl font-medium text-center "
    "mb-10 animate-fadeIn"
    )

    # Button container for consistent spacing
    ui.button(
    "Start Research",
    on_click=lambda: ui.navigate.to("/open-Seeker.seek")
    ).classes(
    "w-full px-6 py-4 text-lg font-bold "
    "bg-primary hover:bg-primary-dark "
    "transform hover:-translate-y-0.5 "
    "transition-all duration-300 ease-in-out "
    "rounded-xl shadow-lg animate-slideUp"
    )

    # Navigation links container
    with ui.element("div").classes("mt-8 space-y-3 text-center"):
        ui.link(
        "Demo video",
        ).classes(
        "block text-lg text-gray-200 hover:text-primary "
        "transition-colors duration-300 animate-fadeIn"
        ).on("click", lambda: ui.navigate.to("/open-Seeker.demo"))

        ui.link(
        "About Us",
        ).classes(
        "block text-lg text-gray-400 hover:text-primary "
        "transition-colors duration-300 animate-fadeIn"
        ).on("click", lambda: ui.navigate.to("/open-Seeker.about"))

def create_video_demo():
    with ui.card().classes('w-full max-w-3xl mx-auto').style(
        'background: var(--background-color); color: var(--text-color)'):
        # Video container with responsive aspect ratio
        with ui.element('div').classes('relative w-full aspect-video'):
            video = ui.video('../api/TruthSeeker/video').classes('w-full h-full object-cover')

        # Custom controls overlay
        with ui.element('div').classes('absolute bottom-0 left-0 right-0 bg-black/50 p-2'):
            with ui.row().classes('items-center gap-2'):
                #play_btn = ui.button(icon='play_arrow', on_click=lambda: video.props('playing=true'))
                #pause_btn = ui.button(icon='pause', on_click=lambda: video.props('playing=false'))
                ui.slider(min=0, max=100, value=0).classes('w-full').bind_value(video, 'time')
                #mute_btn = ui.button(icon='volume_up', on_click=lambda: video.props('muted=!muted'))
                #fullscreen_btn = ui.button(icon='fullscreen', on_click=lambda: video.props('fullscreen=true'))


    # Video description
    ui.markdown('Walkthrough of TruthSeeker features and capabilities.')
    # Back to Home Button
    ui.button('Back to Home', on_click=lambda: ui.navigate.to('/open-Seeker')).classes(
        'mt-6 w-full bg-primary text-white hover:opacity-90'
    )

return video

def create_about_page():
    """Create a comprehensive About page for TruthSeeker"""
    with ui.column().classes('w-full max-w-4xl mx-auto p-6'):
        # Page Header
        ui.label('About TruthSeeker').classes('text-4xl font-bold text-primary mb-6')

    # Mission Statement
    with ui.card().classes('w-full mb-6').style(
        'background: var(--background-color); color: var(--text-color); padding: 20px; border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
    ):
        ui.label('Our Mission').classes('text-2xl font-semibold text-primary mb-4')
        ui.markdown("""
            TruthSeeker aims to democratize access to scientific knowledge,
            transforming complex academic research into comprehensible insights.
            We bridge the gap between raw data and meaningful understanding.
        """).classes('text-lg').style('color: var(--text-color);')

    # Core Technologies
    with ui.card().classes('w-full mb-6').style(
        'background: var(--background-color); color: var(--text-color); padding: 20px; border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
    ):
        ui.label('Core Technologies').classes('text-2xl font-semibold text-primary mb-4')
        with ui.row().classes('gap-4 w-full'):
            with ui.column().classes('flex-1 text-center'):
                ui.icon('search').classes('text-4xl text-primary mb-2')
                ui.label('Advanced Query Processing').classes('font-bold')
                ui.markdown('Intelligent algorithms that extract nuanced research insights.').style(
                    'color: var(--text-color);')
            with ui.column().classes('flex-1 text-center'):
                ui.icon('analytics').classes('text-4xl text-primary mb-2')
                ui.label('Semantic Analysis').classes('font-bold')
                ui.markdown('Deep learning models for comprehensive research verification.').style(
                    'color: var(--text-color);')
            with ui.column().classes('flex-1 text-center'):
                ui.icon('verified').classes('text-4xl text-primary mb-2')
                ui.label('Research Validation').classes('font-bold')
                ui.markdown('Multi-layered verification of academic sources.').style('color: var(--text-color);')
    # Research Process
    with ui.card().classes('w-full').style('background: var(--background-color);color: var(--text-color);'):
        ui.label('Research Discovery Process').classes('text-2xl font-semibold text-primary mb-4')
        with ui.card().classes('q-pa-md q-mx-auto').style(
            'max-width: 800px; background: var(--background-color); border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
        ) as card:
            ui.markdown("# Research Workflow").style(
                "color: var(--primary-color); text-align: center; margin-bottom: 20px;")
            ui.markdown(
                """
                Welcome to TruthSeeker’s interactive research assistant. Follow the steps below to transform your initial inquiry into a refined, actionable insight.
                """
            ).style("color: var(--text-color); text-align: center; margin-bottom: 30px;")

            # The stepper component
            with ui.stepper().style('background: var(--background-color); color: var(--text-color);') as stepper:
                # Step 1: Query Initialization
                with ui.step('Query Initialization'):
                    ui.markdown("### Step 1: Query Initialization").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Begin by entering your research question or selecting from popular academic domains.
                        This sets the direction for our semantic analysis engine.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 2: Semantic Search
                with ui.step('Semantic Search'):
                    ui.markdown("### Step 2: Semantic Search").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Our advanced algorithms now process your input to generate context-rich queries.
                        This stage refines the search context by understanding the deeper intent behind your question.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 3: Document Analysis
                with ui.step('Document Analysis'):
                    ui.markdown("### Step 3: Document Analysis").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        The system then dives into a detailed analysis of academic papers, parsing content to extract key insights and connections.
                        This ensures that even subtle but crucial information is captured.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 4: Insight Generation
                with ui.step('Insight Generation'):
                    ui.markdown("### Step 4: Insight Generation").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Finally, we synthesize the analyzed data into clear, actionable research summaries.
                        These insights empower you with concise guidance to drive further inquiry or practical application.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')

    # Back to Home Button
    ui.button('Back to Home', on_click=lambda: ui.navigate.to('/open-Seeker')).classes(
        'mt-6 w-full bg-primary text-white hover:opacity-90'
    )
Dummy implementation for get_tools()

def get_tools():
    """
    Return your real tools object here. This example assumes
    that a function like get_app is available.
    """
    return get_app("ArXivPDFProcessor", name=None).get_mod("isaa")

def create_graph_tab(processor_instance: Dict, graph_ui: ui.element, main_ui: ui.element):
    """Create and update the graph visualization"""

# Get HTML graph from processor
_html_content = processor_instance["instance"].tools.get_memory(processor_instance["instance"].mem_name)
html_content = "" if isinstance(_html_content, list) else _html_content.vis(get_output_html=True)

# Ensure static directory exists
static_dir = Path('dist/static')
static_dir.mkdir(exist_ok=True)

# Save HTML to static file
graph_file = static_dir / f'graph{processor_instance["instance"].mem_name}.html'
# Save HTML to static file with added fullscreen functionality

# Add fullscreen JavaScript
graph_file.write_text(html_content, encoding='utf-8')

with main_ui:
    # Clear existing content except fullscreen button
    graph_ui.clear()

    with graph_ui:
        ui.html(f"""

            <iframe
                 src="/static/graph{processor_instance["instance"].mem_name}.html"
                style="width: 100%; height: 800px; border: none; background: #1a1a1a;"
                >
            </iframe>
        """).classes('w-full h-full')

is_init = [False]

--- Database Setup ---

def get_db():
    db = get_app().get_mod("DB")
    if not is_init[0]:
        is_init[0] = True
        db.edit_cli("LD")
        db.initialize_database()
    return db

import pickle

--- Session State Management ---

def get_user_state(session_id: str, is_new=False) -> dict:
    db = get_db()
    state_ = {
        'balance': .5,
        'last_reset': datetime.utcnow().isoformat(),
        'research_history': [],
        'payment_id': '',
    }
    if session_id is None:
        state_['balance'] *= -1
        if is_new:
            return state_, True
        return state_
    state = db.get(f"TruthSeeker::session:{session_id}")
    if state.get() is None:
        state = state_
        if is_new:
            return state_, True
    else:
        try:
            state = pickle.loads(state.get())
        except Exception as e:
            print(e)
            state = {
                'balance': 0.04,
                'last_reset': datetime.utcnow().isoformat(),
                'research_history': ["Sorry we had an error recreating your state"],
                'payment_id': '',
            }
            if is_new:
                return state, True
    if is_new:
        return state, False
    return state

def save_user_state(session_id: str, state: dict):
    db = get_db()
    print("Saving state")
    db.set(f"TruthSeeker::session:{session_id}", pickle.dumps(state)).print()

def delete_user_state(session_id: str):
    db = get_db()
    print("Deleting state")
    db.delete(f"TruthSeeker::session:{session_id}").print()

def reset_daily_balance(state: dict, valid=False) -> dict:
    now = datetime.utcnow()
    last_reset = datetime.fromisoformat(state.get('last_reset', now.isoformat()))
    if now - last_reset > timedelta(hours=24):
        state['balance'] = max(state.get('balance', 1.6 if valid else 0.5), 1.6 if valid else 0.5)
        state['last_reset'] = now.isoformat()
    return state
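
A minimal round-trip sketch of the session-state helpers above, assuming the DB module is configured; the balance values follow the defaults in get_user_state:

import uuid

session_id = str(uuid.uuid4())
state, is_new = get_user_state(session_id, is_new=True)  # fresh state starts at balance 0.5
state['balance'] -= 0.04                                 # charge a query
save_user_state(session_id, state)
state = reset_daily_balance(get_user_state(session_id), valid=True)  # daily top-up after 24h
save_user_state(session_id, state)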

class MemoryResultsDisplay

def __init__(self, results: List[Dict[str, Any]], main_ui: ui.element):
    self.results = results
    self.main_ui = main_ui
    self.setup_ui()

def setup_ui(self):
    """Set up the main UI for displaying memory results"""
    with self.main_ui:
        self.main_ui.clear()
        with ui.column().classes('w-full'):
            for mem_result in self.results:
                self.create_memory_card(mem_result)

def create_memory_card(self, mem_result: Dict[str, Any]):
    """Create a card for each memory result"""
    result = mem_result.get("result", {})
    with self.main_ui:
        if isinstance(result, dict):
            self.display_dict_result(result)
        elif hasattr(result, 'overview'):  # Assuming RetrievalResult type
            self.display_retrieval_result(result)
        else:
            ui.label("Unsupported result type").classes('--text-color:error')

def display_dict_result(self, result: Dict[str, Any]):
    """Display dictionary-based result with collapsible sections"""
    # Summary Section
    summary = result.get("summary", {})
    if isinstance(summary, str):
        try:
            summary = json.loads(summary[:-1])
        except json.JSONDecodeError:
            summary = {"error": "Could not parse summary"}

    # Raw Results Section
    raw_results = result.get("raw_results", {})
    if isinstance(raw_results, str):
        try:
            raw_results = json.loads(raw_results[:-1])
        except json.JSONDecodeError:
            raw_results = {"error": "Could not parse raw results"}

    # Metadata Section
    metadata = result.get("metadata", {})
    with self.main_ui:
        # Collapsible Sections
        with ui.column().classes('w-full space-y-2').style("max-width: 100%;"):
            # Summary Section
            with ui.expansion('Summary', icon='description').classes('w-full') as se:
                self.display_nested_data(summary, main_ui=se)

            # Raw Results Section
            with ui.expansion('Raw Results', icon='work').classes('w-full') as re:
                self.display_nested_data(raw_results, main_ui=re)

            # Metadata Section
            if metadata:
                with ui.expansion('Metadata', icon='info').classes('w-full'):
                    ui.markdown(f"```json\n{json.dumps(metadata, indent=2)}\n```").style("max-width: 100%;")

def display_retrieval_result(self, result):
    """Display retrieval result with detailed sections"""
    with self.main_ui:
        with ui.column().classes('w-full space-y-4').style("max-width: 100%;"):
            # Overview Section
            with ui.expansion('Overview', icon='visibility').classes('w-full') as ov:
                for overview_item in result.overview:
                    if isinstance(overview_item, str):
                        overview_item = json.loads(overview_item)
                    self.display_nested_data(overview_item, main_ui=ov)

            # Details Section
            with ui.expansion('Details', icon='article').classes('w-full'):
                for chunk in result.details:
                    with ui.card().classes('w-full p-3 mb-2').style("background: var(--background-color)"):
                        ui.label(chunk.text).classes('font-medium mb-2 --text-color:secondary')

                        with ui.row().classes('w-full justify-between').style("background: var(--background-color)"):
                            ui.label(f"Embedding Shape: {chunk.embedding.shape}").classes('text-sm')
                            ui.label(f"Content Hash: {chunk.content_hash}").classes('text-sm')

                        if chunk.cluster_id is not None:
                            ui.label(f"Cluster ID: {chunk.cluster_id}").classes('text-sm')

            # Cross References Section
            with ui.expansion('Cross References', icon='link').classes('w-full'):
                for topic, chunks in result.cross_references.items():
                    with ui.card().classes('w-full p-3 mb-2').style("background: var(--background-color)"):
                        ui.label(topic).classes('font-semibold mb-2 --text-color:secondary')
                        for chunk in chunks:
                            ui.label(chunk.text).classes('text-sm mb-1')

def display_nested_data(self, data: Union[Dict, List], indent: int = 0, main_ui=None):
    """Recursively display nested dictionary or list data"""
    with (self.main_ui if main_ui is None else main_ui):
        if isinstance(data, dict):
            with ui.column().classes(f'ml-{indent * 2}').style("max-width: 100%;"):
                for key, value in data.items():
                    with ui.row().classes('items-center'):
                        ui.label(f"{key}:").classes('font-bold mr-2 --text-color:primary')
                        if isinstance(value, list):
                            if key == "main_chunks":
                                continue
                            self.display_nested_data(value, indent + 1, main_ui=main_ui)
                        if isinstance(value, dict):
                            ui.markdown(f"```json

{json.dumps(value, indent=2)} ").classes("break-words w-full").style("max-width: 100%;") else: ui.label(str(value)).classes('--text-color:secondary') elif isinstance(data, list): with ui.column().classes(f'ml-{indent * 2}').style("max-width: 100%;"): for item in data: if isinstance(item, str): item = json.loads(item) if isinstance(item, list): self.display_nested_data(item, indent + 1, main_ui=main_ui) if isinstance(item, dict): ui.markdown(f"json {json.dumps(item, indent=2)} ```").classes("break-words w-full").style("max-width: 100%;") else: ui.label(str(item)).classes('--text-color:secondary')

def create_followup_section(processor_instance: Dict, main_ui: ui.element, session_id, balance):
    main_ui.clear()
    with main_ui:
        ui.label("Query Interface (1ct)").classes("text-xl font-semibold mb-4")

    # Container for query inputs
    query_container = ui.column().classes("w-full gap-4")
    query = ""  # Store references to query inputs
    # Query parameters section
    with ui.expansion("Query Parameters", icon="settings").classes("w-full") as query_e:
        with ui.grid(columns=2).classes("w-full gap-4"):
            k_input = ui.number("Results Count (k)", value=2, min=1, max=20)
            min_sim = ui.number("Min Similarity", value=.3, min=0, max=1, step=0.1)
            cross_depth = ui.number("Cross Reference Depth", value=2, min=1, max=5)
            max_cross = ui.number("Max Cross References", value=10, min=1, max=20)
            max_sent = ui.number("Max Sentences", value=10, min=1, max=50)
            unified = ui.switch("Unified Retrieve (+3ct)", value=True)

    # Results display
    with ui.element("div").classes("w-full mt-4") as results_display:
        pass
    results_display = results_display
    with query_container:
        query_input = ui.input("Query", placeholder="Enter your query...").classes("w-full")
    # Control buttons
    with ui.row().classes("w-full gap-4 mt-4"):
        ui.button("Execute Query", on_click=lambda: asyncio.create_task(execute_query()))                 .classes("bg-green-600 hover:bg-green-700")
        ui.button("Clear Results", on_click=lambda: results_display.clear())                 .classes("bg-red-600 hover:bg-red-700")
query_input = query_input

async def execute_query():
    """Execute a single query with parameters"""
    nonlocal query_input, results_display, main_ui
    try:
        query_text = query_input.value
        if not query_text.strip():
            with main_ui:
                ui.notify("No Input", type="warning")
            return ""

        if not processor_instance.get("instance"):
            with main_ui:
                ui.notify("No active processor instance", type="warning")
            return
        # Collect parameters
        params = {
            "k": int(k_input.value),
            "min_similarity": min_sim.value,
            "cross_ref_depth": int(cross_depth.value),
            "max_cross_refs": int(max_cross.value),
            "max_sentences": int(max_sent.value),
            "unified": unified.value
        }
        # Construct query parameters
        query_params = {
            "k": params["k"],
            "min_similarity": params["min_similarity"],
            "cross_ref_depth": params["cross_ref_depth"],
            "max_cross_refs": params["max_cross_refs"],
            "max_sentences": params["max_sentences"]
        }

        # Execute query
        results = await processor_instance["instance"].extra_query(
            query=query_text,
            query_params=query_params,
            unified_retrieve=params["unified"]
        )
        print("results",results)
        s = get_user_state(session_id)
        s['balance'] -= .04 if unified.value else .01
        save_user_state(session_id, s)
        with main_ui:
            balance.set_text(f"Balance: {s['balance']:.2f}€")
        # Format results
        with main_ui:
            with results_display:
                MemoryResultsDisplay(results, results_display)

    except Exception as e:
        return f"Error executing query: {str(e)}

"

# Add initial query input

online_states = [0]

def create_research_interface(Processor):

def helpr(request, session: dict):

    state = {'balance':0, 'research_history': []}
    main_ui = None
    with ui.column().classes("w-full max-w-6xl mx-auto p-6 space-y-6") as loading:
        ui.spinner(size='lg')
        ui.label('Initializing...').classes('ml-2')

    # Container for main content (initially hidden)
    content = ui.column().classes('hidden')

    # Extract session data before spawning thread
    session_id = session.get('ID')
    session_id_h = session.get('IDh')
    session_rid = request.row.query_params.get('session_id') if hasattr(request, 'row') else request.query_params.get('session_id')
    session_valid = session.get('valid')

    # Thread communication
    result_queue = Queue()
    ready_event = Event()

    def init_background():
        nonlocal session_id, session_id_h, session_rid, session_valid
        try:
            # Original initialization logic
            _state, is_new = get_user_state(session_id, is_new=True)

            if is_new and session_id_h != "#0":
                _state = get_user_state(session_id_h)
                save_user_state(session_id, _state)
                delete_user_state(session_id_h)
            if session_rid:
                state_: dict
                state_, is_new_ = get_user_state(session_rid, is_new=True)
                if not is_new_:
                    _state = state_.copy()
                    state_['payment_id'] = ''
                    state_['last_reset'] = datetime.utcnow().isoformat()
                    state_['research_history'] = state_['research_history'][:3]
                    state_['balance'] = 0
                    save_user_state(session_id, _state)
            _state = reset_daily_balance(_state, session_valid)
            save_user_state(session_id, _state)

            # Send result back to main thread
            result_queue.put(_state)
            ready_event.set()
        except Exception as e:
            result_queue.put(e)
            ready_event.set()

        # Start background initialization

    Thread(target=init_background).start()

    def check_ready():
        nonlocal state
        if ready_event.is_set():
            result = result_queue.get()

            # Check if initialization failed
            if isinstance(result, Exception):
                loading.clear()
                with loading:
                    ui.label(f"Error during initialization: {str(result)}").classes('text-red-500')
                return

            # Get state and build main UI
            state = result
            loading.classes('hidden')
            content.classes(remove='hidden')
            main_ui.visible = True
            with main_ui:
                balance.set_text(f"Balance: {state['balance']:.2f}€")
                show_history()
            return  # Stop the timer

        # Check again in 100ms
        ui.timer(0.1, check_ready, once=True)

    # Start checking for completion
    check_ready()

    # We keep the active instance so follow-up questions can be asked
    processor_instance = {"instance": None}

    # UI elements as placeholders; we define them later in the UI and make them
    # available to the callback functions via "nonlocal".
    overall_progress = None
    status_label = None
    results_card = None
    summary_content = None
    analysis_content = None
    references_content = None
    followup_card = None
    research_card = None
    config_cart = None
    progress_card = None
    balance = None
    graph_ui = None

    sr_button = None
    r_button = None
    r_text = None


    # Global config storage with default values
    config = {
        'chunk_size': 21000,
        'overlap': 600,
        'num_search_result_per_query': 3,
        'max_search': 3,
        'num_workers': None
    }

    def update_estimates():
        """
        Dummy estimation based on query length and configuration.
        (Replace with your own non-linear formula if needed.)
        """
        query_text = query.value or ""
        query_length = len(query_text)
        # For example: estimated time scales with chunk size and query length.
        estimated_time, estimated_price = Processor.estimate_processing_metrics(query_length, **config)
        estimated_time *= max(1, online_states[0] * 6)
        if processor_instance["instance"] is not None:
            estimated_price += .25
        if estimated_time < 60:
            time_str = f"~{int(estimated_time)}s"
        elif estimated_time < 3600:
            minutes = estimated_time // 60
            seconds = estimated_time % 60
            time_str = f"~{int(minutes)}m {int(seconds)}s"
        else:
            hours = estimated_time // 3600
            minutes = (estimated_time % 3600) // 60
            time_str = f"~{int(hours)}h {int(minutes)}m"
        with main_ui:
            query_length_label.set_text(f"Total Papers: {config['max_search']*config['num_search_result_per_query']}")
            time_label.set_text(f"Processing Time: {time_str}")
            price_label.set_text(f"Price: {estimated_price:.2f}€")

        return estimated_price

    def on_config_change(event):
        """
        Update the global config based on input changes and recalc estimates.
        """
        try:
            config['chunk_size'] = int(chunk_size_input.value)
        except ValueError:
            pass
        try:
            config['overlap'] = int(overlap_input.value)
            if config['overlap'] > config['chunk_size'] / 4:
                config['overlap'] = int(config['chunk_size'] / 4)
                with main_ui:
                    overlap_input.value = config['overlap']
        except ValueError:
            pass
        try:
            config['num_search_result_per_query'] = int(num_search_result_input.value)
        except ValueError:
            pass
        try:
            config['max_search'] = int(max_search_input.value)
        except ValueError:
            pass
        try:
            config['num_workers'] = int(num_workers_input.value) if num_workers_input.value != 0 else None
        except ValueError:
            config['num_workers'] = None

        update_estimates()

    def on_query_change():
        update_estimates()

    # Callback invoked by the processor (via processor_instance.callback).
    def update_status(data: dict):
        nonlocal overall_progress, status_label
        if not data:
            return
        # Update the progress bar and the current step (if present)
        with main_ui:
            if isinstance(data, dict):
                progress = data.get("progress", 0)
                step = data.get("step", "Processing...")
                overall_progress.value = round(progress, 2)  # nicegui.linear_progress expects a value between 0 and 1
                status_label.set_text(f"{step} {data.get('info','')}")
            else:
                status_label.set_text(f"{data}")

    def start_search():
        nonlocal balance

        async def helper():
            nonlocal processor_instance, overall_progress, status_label, results_card, summary_content, analysis_content, config, references_content, followup_card, sr_button, r_button, r_text

            try:
                if not validate_inputs():
                    with main_ui:
                        state['balance'] += est_price
                        save_user_state(session_id, state)
                        balance.set_text(f"Balance: {state['balance']:.2f}€")
                    return
                reset_interface()
                show_progress_indicators()

                query_text = query.value.strip()
                # Create the "tools" object (depends on your concrete implementation)
                tools = get_tools()
                with main_ui:
                    research_card.visible = False
                    config_cart.visible = False
                    config_section.visible = False
                    query.set_value("")
                # Instantiate directly: a new ArXivPDFProcessor instance
                if processor_instance["instance"] is not None:
                    processor = processor_instance["instance"]
                    processor.chunk_size = config['chunk_size']
                    processor.overlap = config['overlap']
                    processor.num_search_result_per_query = config['num_search_result_per_query']
                    processor.max_search = config['max_search']
                    processor.num_workers = config['num_workers']
                    papers, insights = await processor.process(query_text)
                else:
                    processor = Processor(query_text, tools=tools, **config)
                    # Set the callback so updates are shown in the GUI
                    processor.callback = update_status
                    processor_instance["instance"] = processor
                    papers, insights = await processor.process()

                update_results({
                    "papers": papers,
                    "insights": insights
                })
                with main_ui:
                    research_card.visible = True
                    config_cart.visible = True
                    show_history()

            except Exception as e:
                import traceback

                with main_ui:
                    update_status({"progress": 0, "step": "Error", "info": str(e)})
                    state['balance'] += est_price
                    save_user_state(session_id, state)
                    balance.set_text(f"Balance: {state['balance']:.2f}€")
                    ui.notify(f"Error {str(e)})", type="negative")
                    research_card.visible = True
                    config_cart.visible = True
                    config_section.visible = True
                print(traceback.format_exc())

        def target():
            get_app().run_a_from_sync(helper, )

        est_price = update_estimates()
        if est_price > state['balance']:
            with main_ui:
                ui.notify(f"Insufficient balance. Need €{est_price:.2f}", type='negative')
        else:
            state['balance'] -= est_price
            save_user_state(session_id, state)
            with main_ui:
                online_states[0] += 1
                balance.set_text(f"Balance: {state['balance']:.2f}€ Running Queries: {online_states[0]}")

            Thread(target=target, daemon=True).start()
            with main_ui:
                online_states[0] -= 1
                balance.set_text(f"Balance: {get_user_state(session_id)['balance']:.2f}€")


    def show_history():
        with config_cart:
            for idx, entry in enumerate(state['research_history']):
                with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4"):
                    ui.label(entry['query']).classes('text-sm')
                    ui.button("Open").on_click(lambda _, i=idx: load_history(i))

    def reset():
        nonlocal processor_instance, results_card, followup_card, sr_button, r_button, r_text
        processor_instance["instance"] = None
        show_progress_indicators()
        with main_ui:
            config_cart.visible = False
            config_section.visible = False
            followup_card.visible = False
            results_card.visible = False
            r_button.visible = False
            r_text.set_text("Research Interface")
            sr_button.set_text("Start Research")
        start_search()
    # UI construction

    with ui.column().classes("w-full max-w-6xl mx-auto p-6 space-y-6") as main_ui:
        balance = ui.label(f"Balance: {state['balance']:.2f}€").classes("text-s font-semibold")

        config_cart = config_cart

        # --- Research Input UI Card ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as research_card:
            r_text = ui.label("Research Interface").classes("text-3xl font-bold mb-4")

            # Query input section with auto-updating estimates
            query = ui.input("Research Query",
                                placeholder="Gib hier deine Forschungsfrage ein...",
                                value="")                     .classes("w-full min-h-[100px]")                     .on('change', lambda e: on_query_change()).style("color: var(--text-color)")

            # --- Action Buttons ---
            with ui.row().classes("mt-4"):
                sr_button = ui.button("Start Research", on_click=start_search).classes("bg-blue-600 hover:bg-blue-700 py-3 rounded-lg")
                ui.button("toggle config",
                          on_click=lambda: setattr(config_section, 'visible', not config_section.visible) or show_progress_indicators()).style(
                    "color: var(--text-color)")
                r_button = ui.button("Start new Research",
                          on_click=reset).style(
                    "color: var(--text-color)")
        sr_button = sr_button
        r_button = r_button
        r_button.visible = False
        research_card = research_card

        # --- Options Cart / Configurations ---
        with ui.card_section().classes("w-full backdrop-blur-lg bg-white/10 hidden") as config_section:
            ui.separator()
            ui.label("Configuration Options").classes("text-xl font-semibold mt-4 mb-2")
            with ui.row():
                chunk_size_input = ui.number(label="Chunk Size",
                                             value=config['chunk_size'], format='%.0f', max=64_000, min=1000,
                                             step=100).on('change', on_config_change).style("color: var(--text-color)")
                overlap_input = ui.number(label="Overlap",
                                          value=config['overlap'], format='%.0f', max=6400, min=100, step=50).on('change', on_config_change).style("color: var(--text-color)")

            with ui.row():
                num_search_result_input = ui.number(label="Results per Query",
                                                    value=config['num_search_result_per_query'], format='%.0f',
                                                    min=1, max=100, step=1).on('change', on_config_change).style("color: var(--text-color)")
                max_search_input = ui.number(label="Max Search Queries",
                                             value=config['max_search'], format='%.0f', min=1, max=100, step=1).on('change', on_config_change).style("color: var(--text-color)")
                num_workers_input = ui.number(label="Number of Workers (leave empty for default)",
                                              value=0, format='%.0f', min=0, max=32, step=1).on('change', on_config_change).style("color: var(--text-color)")
        config_section = config_section
        config_section.visible = False
        # --- Display results ---
        with ui.card().classes("w-full backdrop-blur-lg p-4 bg-white/10") as results_card:
            ui.label("Research Results").classes("text-xl font-semibold mb-4")
            with ui.tabs() as tabs:
                ui.tab("Summary")
                ui.tab("References")
                ui.tab("SystemStates")
            with ui.tab_panels(tabs, value="Summary").classes("w-full").style("background-color: var(--background-color)"):
                with ui.tab_panel("Summary"):
                    summary_content = ui.markdown("").style("color : var(--text-color)")
                with ui.tab_panel("References"):
                    references_content = ui.markdown("").style("color : var(--text-color)")
                with ui.tab_panel("SystemStates"):
                    analysis_content = ui.markdown("").style("color : var(--text-color)")


        # Make results visible once they are available.
        results_card = results_card
        results_card.visible = False

        # --- Follow-up area with multiple follow-up questions and search parameters ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4 hidden") as followup_card:
            pass

        # Keep a reference to followup_card (if needed later)
        followup_card = followup_card
        followup_card.visible = False

        # --- Progress display ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as progress_card:
            with ui.row():
                ui.label("Research Progress").classes("text-xl font-semibold mb-4")
                query_length_label = ui.label("").classes("mt-6 hover:text-primary transition-colors duration-300")
                time_label = ui.label("Time: ...").classes("mt-6 hover:text-primary transition-colors duration-300")
                price_label = ui.label("Price: ...").classes(
                    "mt-6 hover:text-primary transition-colors duration-300")

            overall_progress = ui.linear_progress(0).classes("w-full mb-4")
            status_label = ui.label("Warte auf Start...").classes("text-base")
        # Remember progress_card in case we want to reset it.
        progress_card = progress_card

        query_length_label = query_length_label
        time_label = time_label
        price_label = price_label

        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as config_cart:
            # --- Process Code Section ---
            # --- Estimated Time and Price ---
            # ui.label("History").classes("text-xl font-semibold mt-4 mb-2")
            ui.label('Research History').classes('text-xl p-4')
            show_history()

        ui.button('Add Credits', on_click=lambda: balance_overlay(session_id)).props('icon=paid')
        ui.label('About TruthSeeker').classes(
            'mt-6 text-gray-500 hover:text-primary '
            'transition-colors duration-300'
        ).on('click', lambda: ui.navigate.to('/open-Seeker.about', new_tab=True))

        with ui.element('div').classes("w-full").style("white:100%; height:100%") as graph_ui:
            pass

        with ui.card().classes("w-full p-4").style("background-color: var(--background-color)"):
            ui.label("Private Session link (restore the session on a different device)")
            base_url = f'https://{os.getenv("HOSTNAME")}/gui/open-Seeker.seek' if not 'localhost' in os.getenv("HOSTNAME") else 'http://localhost:5000/gui/open-Seeker.seek'
            ui.label(f"{base_url}?session_id={session_id}").style("white:100%")
            ui.label("Changes each time!")

        graph_ui = graph_ui
        graph_ui.visible = False
    main_ui = main_ui
    main_ui.visible = False

    # --- Helper functions ---
    def validate_inputs() -> bool:
        if not query.value.strip():
            with main_ui:
                ui.notify("Bitte gib eine Forschungsfrage ein.", type="warning")
            return False
        return True

    def reset_interface():
        nonlocal overall_progress, status_label, results_card, followup_card
        overall_progress.value = 0
        with main_ui:
            status_label.set_text("Research startet...")
        # Hide results and follow-up area
        results_card.visible = False
        followup_card.visible = False
        graph_ui.visible = False

    def show_progress_indicators():
        nonlocal progress_card
        progress_card.visible = True

    def update_results(data: dict, save=True):
        nonlocal summary_content, analysis_content, references_content, results_card, followup_card, graph_ui, r_button, r_text, sr_button
        with main_ui:
            r_button.visible = True
            r_text.set_text("Add to current Results or press 'Start new Research'")
            sr_button.set_text("Add to current Results")
        # Handle papers (1-to-1 case)
        papers = data.get("papers", [])
        if not isinstance(papers, list):
            papers = [papers]

        # Get insights
        insights = data.get("insights", [])

        if save:
            history_entry = data.copy()
            history_entry['papers'] = [paper.model_dump_json() for paper in papers]
            if processor_instance is not None and processor_instance['instance'] is not None:
                history_entry["mam_name"] = processor_instance['instance'].mem_name
                history_entry["query"] = processor_instance['instance'].query

                history_entry["processor_memory"] = processor_instance['instance'].tools.get_memory(

                ).save_memory(history_entry["mam_name"], None)
            state['research_history'].append(history_entry)
            save_user_state(session_id, state)
        else:
            papers = [Paper(**json.loads(paper)) for paper in papers]
        create_followup_section(processor_instance, followup_card, session_id, balance)
        with main_ui:
            progress_card.visible = False
            # Build summary from insights
            summaries = []
            for insight in insights:
                if 'result' in insight and 'summary' in insight['result']:
                    if isinstance(insight['result']['summary'], str):
                        # print(insight['result']['summary'], "NEXT", json.loads(insight['result']['summary'][:-1]),"NEXT22",  type(json.loads(insight['result']['summary'][:-1])))
                        insight['result']['summary'] = json.loads(insight['result']['summary'][:-1])
                    main_summary = insight['result']['summary'].get('main_summary', '')
                    if main_summary:
                        summaries.append(main_summary)
            summary_text = "

".join(summaries) if summaries else "No summary available." summary_content.set_content(f"# Research Summary

{summary_text}")

1
2
3
4
5
            # Analysis section (unchanged if processor details haven't changed)
            if processor_instance["instance"] is not None:
                inst = processor_instance["instance"]
                analysis_md = (
                    f"# Analysis

" f"- query: {inst.query} " f"- chunk_size: {inst.chunk_size} " f"- overlap: {inst.overlap} " f"- max_workers: {inst.max_workers} " f"- num_search_result_per_query: {inst.nsrpq} " f"- max_search: {inst.max_search} " f"- download_dir: {inst.download_dir} " f"- mem_name: {inst.mem_name} " f"- current_session: {inst.current_session} " f"- all_ref_papers: {inst.all_ref_papers} " f"- all_texts_len: {inst.all_texts_len} " f"- final_texts_len: {inst.f_texts_len} " f"- num_workers: {inst.num_workers}" ) analysis_content.set_content(analysis_md)

1
2
            # References and Insights section
            references_md = "# References

" # Add papers references_md += " ".join( f"- ({i}) {getattr(paper, 'title', 'Unknown Title')}})" for i, paper in enumerate(papers) )

1
2
            # Add detailed insights
            references_md += "
Insights

" for i, insight in enumerate(insights): print(insight) result = insight.get('result', {}) summary = result.get('summary', {})

1
2
3
4
5
                if isinstance(summary, str):
                    summary = json.loads(summary)

                # Main summary
                references_md += f"
Insight

" references_md += f"### Main Summary {summary.get('main_summary', 'No summary available.')} "

1
2
3
4
                # Concept Analysis
                concept_analysis = summary.get('concept_analysis', {})
                if concept_analysis:
                    references_md += "
Concept Analysis

" references_md += "#### Key Concepts - " + " - ".join( concept_analysis.get('key_concepts', [])) + " " references_md += "

Relationships
  • " + "
  • ".join( concept_analysis.get('relationships', [])) + " " references_md += "
Importance Hierarchy
  • " + "
  • ".join( concept_analysis.get('importance_hierarchy', [])) + " "

    1
    2
    3
    4
                # Topic Insights
                topic_insights = summary.get('topic_insights', {})
                if topic_insights:
                    references_md += "
    
    Topic Insights

    " references_md += "#### Primary Topics - " + " - ".join( topic_insights.get('primary_topics', [])) + " " references_md += "

    Cross References
    • " + "
    • ".join( topic_insights.get('cross_references', [])) + " " references_md += "
    Knowledge Gaps
    • " + "
    • ".join( topic_insights.get('knowledge_gaps', [])) + " "

      1
      2
      3
      4
              # Relevance Assessment
              relevance = summary.get('relevance_assessment', {})
              if relevance:
                  references_md += "
      
      Relevance Assessment

      " references_md += f"- Query Alignment: {relevance.get('query_alignment', 'N/A')} " references_md += f"- Confidence Score: {relevance.get('confidence_score', 'N/A')} " references_md += f"- Coverage Analysis: {relevance.get('coverage_analysis', 'N/A')} "

       1
       2
       3
       4
       5
       6
       7
       8
       9
      10
      11
      12
      13
          references_content.set_content(references_md)
      
          # nx concpts graph
          if processor_instance["instance"] is not None:
              create_graph_tab(
                  processor_instance,
                  graph_ui,main_ui
              )
      
          # Show results and followup cards
          results_card.visible = True
          followup_card.visible = True
          graph_ui.visible = True
      

    def load_history(index: int):
        entry = state['research_history'][index]
        if processor_instance is not None and processor_instance['instance'] is not None:
            processor_instance["instance"].mem_name = entry["mam_name"]
            processor_instance['instance'].query = entry["query"]
        else:
            processor = Processor(entry["query"], tools=get_tools(), **config)
            # Set the callback so that updates are shown in the GUI
            processor.callback = update_status
            processor.mem_name = entry["mam_name"]
            processor_instance["instance"] = processor

        processor_instance["instance"].tools.get_memory().load_memory(entry["mam_name"], entry["processor_memory"])
        processor_instance["instance"].mem_name = entry["mam_name"]
        update_results(entry, save=False)

    return helpr

--- Stripe Integration ---

def regiser_stripe_integration(is_scc=True):
    def stripe_callback(request: Request):
        sid = request.row.query_params.get('session_id') if hasattr(request, 'row') else request.query_params.get('session_id')
        state = get_user_state(sid)

        if state['payment_id'] == '':
            with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
                ui.label("No payment id!").classes("text-lg font-bold")
                ui.button(
                    "Start Research",
                    on_click=lambda: ui.navigate.to("/open-Seeker.seek?session_id=" + sid)
                ).classes(
                    "w-full px-6 py-4 text-lg font-bold "
                    "bg-primary hover:bg-primary-dark "
                    "transform hover:-translate-y-0.5 "
                    "transition-all duration-300 ease-in-out "
                    "rounded-xl shadow-lg animate-slideUp"
                )
            return

        try:
            session_data = stripe.checkout.Session.retrieve(state['payment_id'])
        except Exception as e:
            with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
                ui.label(f"No transaction details! {e}").classes("text-lg font-bold")
                ui.button(
                    "Start Research",
                    on_click=lambda: ui.navigate.to("/open-Seeker.seek")
                ).classes(
                    "w-full px-6 py-4 text-lg font-bold "
                    "bg-primary hover:bg-primary-dark "
                    "transform hover:-translate-y-0.5 "
                    "transition-all duration-300 ease-in-out "
                    "rounded-xl shadow-lg animate-slideUp"
                )
                return
        with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
            if is_scc and state['payment_id'] != '' and session_data.payment_status == 'paid':
                state = get_user_state(sid)
                amount = session_data.amount_total / 100  # Convert cents to euros
                state['balance'] += amount
                state['payment_id'] = ''
                save_user_state(sid, state)

                # ui.navigate.to(f'/session?session={session}')
                ui.label(f"Transaction Complete - New balance: {state['balance']}").classes("text-lg font-bold")
                with ui.card().classes("w-full p-4").style("background-color: var(--background-color)"):
                    ui.label("Private Session link (restore the session on a different device)")
                    base_url = f'https://{os.getenv("HOSTNAME")}/gui/open-Seeker.seek' if 'localhost' not in os.getenv("HOSTNAME") else 'http://localhost:5000/gui/open-Seeker.seek'
                    ui.label(f"{base_url}?session_id={sid}").style("width:100%")
                    ui.label("Changes each time!")
            else:
                ui.label(f"Transaction Error! {session_data}, {dir(session_data)}").classes("text-lg font-bold")
            ui.button(
                "Start Research",
                on_click=lambda: ui.navigate.to("/open-Seeker.seek")
            ).classes(
                "w-full px-6 py-4 text-lg font-bold "
                "bg-primary hover:bg-primary-dark "
                "transform hover:-translate-y-0.5 "
                "transition-all duration-300 ease-in-out "
                "rounded-xl shadow-lg animate-slideUp"
            )

    return stripe_callback

def handle_stripe_payment(amount: float, session_id):
    base_url = f'https://{os.getenv("HOSTNAME")}/gui/open-Seeker.stripe' if 'localhost' not in os.getenv("HOSTNAME") else 'http://localhost:5000/gui/open-Seeker.stripe'
    session = stripe.checkout.Session.create(
        payment_method_types=['card', 'link'],
        line_items=[{
            'price_data': {
                'currency': 'eur',
                'product_data': {'name': 'Research Credits'},
                'unit_amount': int(amount * 100),
            },
            'quantity': 1,
        }],
        automatic_tax={"enabled": True},
        mode='payment',
        success_url=f'{base_url}?session_id={session_id}',
        cancel_url=f'{base_url}.error'
    )
    state = get_user_state(session_id)
    state['payment_id'] = session.id
    save_user_state(session_id, state)
    ui.navigate.to(session.url, new_tab=True)

--- UI Components ---

def balance_overlay(session_id):
    with ui.dialog().classes('w-full max-w-md bg-white/20 backdrop-blur-lg rounded-xl') as dialog:
        with ui.card().classes('w-full p-6 space-y-4').style("background-color: var(--background-color)"):
            ui.label('Add Research Credits').classes('text-2xl font-bold')
            amount = ui.number('Amount (€) min 2', value=5, format='%.2f', min=2, max=9999, step=1).classes('w-full')
            with ui.row().classes('w-full justify-between'):
                ui.button('Cancel', on_click=dialog.close).props('flat')
                ui.button('Purchase', on_click=lambda: handle_stripe_payment(amount.value, session_id))
    return dialog
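
In practice the dialog is created once and then opened from a click handler. A minimal usage sketch (NiceGUI dialogs are shown with .open(); session_id is assumed to be in scope):

dialog = balance_overlay(session_id)
ui.button('Add Credits', on_click=dialog.open).props('icon=paid')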

def create_ui(processor):
    # ui_instance = register_nicegui("open-Seeker", create_landing_page, additional=""" """, show=False)
    register_nicegui("open-Seeker.demo", create_video_demo, additional=""" """, show=False)

newui

cleanup_module(app)

Cleanup resources when the module is unloaded

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, version=version, exit_f=True)
def cleanup_module(app: App):
    """Cleanup resources when the module is unloaded"""
    # Clean up any temp files or resources
    import glob
    import shutil

    # Remove temporary PDF directories
    for pdf_dir in glob.glob("pdfs_*"):
        try:
            shutil.rmtree(pdf_dir)
        except Exception as e:
            print(f"Error removing directory {pdf_dir}: {str(e)}")

    # Clear any SSE queues
    if hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    if hasattr(app, 'payment_queues'):
        app.payment_queues = {}

    return Result.ok(info="ArXivPDFProcessor UI cleaned up")
create_payment(app, data) async

Create a Stripe payment session

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def create_payment(app: App, data):
    """Create a Stripe payment session"""
    amount = data.get("amount")
    session_id = data.get("session_id")

    if amount < 2:
        return Result.default_user_error(info="Minimum donation amount is €2")

    try:
        # Create a Stripe Checkout Session
        base_url = f"https://{os.getenv('HOSTNAME', 'localhost:5000')}"
        success_url = f"{base_url}/api/{MOD_NAME}/payment_success?session_id={session_id}"
        cancel_url = f"{base_url}/api/{MOD_NAME}/payment_cancel?session_id={session_id}"

        stripe_session = stripe.checkout.Session.create(
            payment_method_types=['card', 'link'],
            line_items=[{
                'price_data': {
                    'currency': 'eur',
                    'product_data': {'name': 'Research Credits'},
                    'unit_amount': int(amount * 100),
                },
                'quantity': 1,
            }],
            automatic_tax={"enabled": True},
            mode='payment',
            success_url=success_url,
            cancel_url=cancel_url
        )

        # Store the payment info
        if not hasattr(app, 'payment_info'):
            app.payment_info = {}

        # Initialize payment_queues if not already done
        if not hasattr(app, 'payment_queues'):
            app.payment_queues = {}

        # Create a queue for this payment
        app.payment_queues[session_id] = asyncio.Queue()

        app.payment_info[session_id] = {
            'payment_id': stripe_session.id,
            'amount': amount,
            'status': 'pending'
        }

        return Result.ok(data={"url": stripe_session.url})
    except Exception as e:
        return Result.default_sys_error(info=f"Error creating payment: {str(e)}")
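
Because create_payment is exported with api=True, it should be reachable over the module's HTTP API. A minimal client sketch, assuming the route follows the /api/{MOD_NAME}/... pattern used by the other endpoints here and that MOD_NAME resolves to TruthSeeker (both are assumptions about the deployment):

import requests

# Hypothetical endpoint URL; adjust host and module name for your deployment.
resp = requests.post(
    "http://localhost:5000/api/TruthSeeker/create_payment",
    json={"amount": 5.0, "session_id": "my-session-id"},
)
# On success the wrapped Result data contains the Stripe Checkout URL
# ({"url": ...}) that the client should redirect the user to.
print(resp.json())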
estimate_processing(data) async

Estimate processing time and cost for a given query

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def estimate_processing(data):
    """Estimate processing time and cost for a given query"""
    # Use the static method to estimate metrics
    query, max_search, num_search_result_per_query = data.get("query", ""), data.get("max_search", 4), data.get("num_search_result_per_query", 6)
    estimated_time, estimated_price = ArXivPDFProcessor.estimate_processing_metrics(
        query_length=len(query),
        max_search=max_search,
        num_search_result_per_query=num_search_result_per_query,
        chunk_size=1_000_000,
        overlap=2_000,
        num_workers=None
    )

    return Result.ok(data={
        "time": estimated_time,
        "price": estimated_price
    })
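
A minimal in-process sketch of calling the estimator; the function is async, so it must be awaited, and the Result data carries the two numbers computed above:

import asyncio

async def demo():
    # Keys mirror the ones read by estimate_processing; defaults are 4 and 6.
    result = await estimate_processing({
        "query": "graph neural networks for molecules",
        "max_search": 4,
        "num_search_result_per_query": 6,
    })
    print(result)  # Result.ok wrapping {"time": ..., "price": ...}

asyncio.run(demo())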
follow_up_query(app, data) async

Ask a follow-up question about the research

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def follow_up_query(app: App, data):
    """Ask a follow-up question about the research"""
    research_id = data.get("research_id")
    query = data.get("query")

    if not hasattr(app, 'research_processes') or research_id not in app.research_processes:
        return Result.default_user_error(info="Research process not found")

    research_process = app.research_processes[research_id]

    if research_process['status'] != 'complete':
        return Result.default_user_error(info="Research is not complete")

    processor = research_process['processor']
    if not processor:
        return Result.default_user_error(info="Processor not available")

    try:
        # Use the extra_query method to ask follow-up questions
        result = await processor.extra_query(query)

        return Result.ok(data={"answer": result['response'] if result and 'response' in result else "No response"})
    except Exception as e:
        return Result.default_sys_error(info=f"Error processing follow-up query: {str(e)}")
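
The expected payload is a dict carrying the research_id returned by start_research plus the follow-up question. A minimal sketch (the research process must already have status 'complete'; the research_id value is hypothetical):

import asyncio

async def ask_followup(app, research_id: str):
    # research_id comes from a prior start_research call.
    result = await follow_up_query(app, {
        "research_id": research_id,
        "query": "Which of these papers report negative results?",
    })
    print(result)  # Result.ok wrapping {"answer": ...} on success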
initialize_module(app)

Initialize the module and register UI with CloudM

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, version=version, initial=True)
def initialize_module(app: App):
    """Initialize the module and register UI with CloudM"""
    # Register the UI with CloudM
    app.run_any(("CloudM", "add_ui"),
                name="TruthSeeker",
                title="TruthSeeker Research",
                path=f"/api/{MOD_NAME}/get_main_ui",
                description="AI Research Assistant"
                )

    # Initialize SSE message queues
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}
    print("TruthSeeker online")
    return Result.ok(info="ArXivPDFProcessor UI initialized")
payment_cancel(app, session_id, request_as_kwarg=True, request=None) async

Handle cancelled payment

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_cancel(app: App, session_id: str, request_as_kwarg=True, request=None):
    """Handle cancelled payment"""
    if hasattr(app, 'payment_info') and session_id in app.payment_info:
        app.payment_info[session_id]['status'] = 'cancelled'

        # Notify SSE clients about payment cancellation
        if hasattr(app, 'payment_queues') and session_id in app.payment_queues:
            await app.payment_queues[session_id].put({
                "status": "cancelled"
            })

    return Result.html(app.web_context() + """
    <div style="text-align: center; padding: 50px;">
        <h2>Payment Cancelled</h2>
        <p>Your payment was cancelled.</p>
        <script>
            setTimeout(function() {
                window.close();
            }, 3000);
        </script>
    </div>
    """)
payment_stream(app, session_id) async

SSE stream endpoint for payment status updates

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_stream(app: App, session_id: str):
    """SSE stream endpoint for payment status updates"""
    if not hasattr(app, 'payment_queues'):
        app.payment_queues = {}

    # Create a message queue for this session_id if it doesn't exist
    if session_id not in app.payment_queues:
        app.payment_queues[session_id] = asyncio.Queue()

    async def generate():
        try:
            # Stream payment updates
            while True:
                try:
                    # Wait for a payment update with a timeout
                    payment_data = await asyncio.wait_for(app.payment_queues[session_id].get(), timeout=30)
                    yield f"event: payment_update\ndata: {json.dumps(payment_data)}\n\n"

                    # If the payment is complete or cancelled, exit the loop
                    if payment_data.get('status') in ['completed', 'cancelled']:
                        break
                except TimeoutError:
                    # Send a keep-alive comment to prevent connection timeout
                    yield ":\n\n"
        finally:
            # Clean up resources when the client disconnects
            if session_id in app.payment_queues:
                # Keep the queue for other potential clients
                pass

    return Result.stream(generate())
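
A minimal client sketch for consuming this SSE stream with the requests library (the endpoint URL is an assumption based on the module's /api/{MOD_NAME}/... routing; the same pattern applies to status_stream below):

import json
import requests

url = "http://localhost:5000/api/TruthSeeker/payment_stream"  # hypothetical URL
with requests.get(url, params={"session_id": "my-session-id"}, stream=True) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        # SSE data frames start with "data:"; bare ":" lines are keep-alives.
        if raw and raw.startswith("data:"):
            update = json.loads(raw[len("data:"):].strip())
            print(update)
            if update.get("status") in ("completed", "cancelled"):
                break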
payment_success(app, session_id, request_as_kwarg=True, request=None) async

Handle successful payment

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_success(app: App, session_id: str, request_as_kwarg=True, request=None):
    """Handle successful payment"""
    if not hasattr(app, 'payment_info') or session_id not in app.payment_info:
        return Result.html(app.web_context() + """
        <div style="text-align: center; padding: 50px;">
            <h2>Payment Session Not Found</h2>
            <p>Return to the main page to continue.</p>
            <a href="/" style="display: inline-block; margin-top: 20px; padding: 10px 20px; background-color: #4F46E5; color: white; text-decoration: none; border-radius: 5px;">Return to Home</a>
        </div>
        """)

    payment_info = app.payment_info[session_id]

    try:
        # Verify the payment with Stripe
        stripe_session = stripe.checkout.Session.retrieve(payment_info['payment_id'])

        if stripe_session.payment_status == 'paid':
            payment_info['status'] = 'completed'

            # Notify SSE clients about payment completion
            if hasattr(app, 'payment_queues') and session_id in app.payment_queues:
                await app.payment_queues[session_id].put({
                    "status": "completed",
                    "amount": payment_info['amount']
                })

            return Result.html(app.web_context() + """
            <div style="text-align: center; padding: 50px;">
                <h2>Thank You for Your Support!</h2>
                <p>Your payment was successful. You can now close this window and continue with your research.</p>
                <script>
                    setTimeout(function() {
                        window.close();
                    }, 5000);
                </script>
            </div>
            """)
        else:
            return Result.html(app.web_context() + """
            <div style="text-align: center; padding: 50px;">
                <h2>Payment Not Completed</h2>
                <p>Your payment has not been completed. Please try again.</p>
                <button onclick="window.close()">Close Window</button>
            </div>
            """)
    except Exception as e:
        return Result.html(app.web_context() + f"""
        <div style="text-align: center; padding: 50px;">
            <h2>Error Processing Payment</h2>
            <p>There was an error processing your payment: {str(e)}</p>
            <button onclick="window.close()">Close Window</button>
        </div>
        """)
research_results(app, research_id) async

Get the results of a completed research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def research_results(app: App, research_id: str):
    """Get the results of a completed research process"""
    if not hasattr(app, 'research_processes') or research_id not in app.research_processes:
        return Result.default_user_error(info="Research process not found")

    research_process = app.research_processes[research_id]

    if research_process['status'] != 'complete':
        return Result.default_user_error(info="Research is not complete")

    return Result.ok(data=research_process['results'])
research_status(app, research_id) async

Get the status of a research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def research_status(app: App, research_id: str):
    """Get the status of a research process"""
    if not hasattr(app, 'research_processes') or research_id not in app.research_processes:
        return Result.default_user_error(info="Research process not found")

    research_process = app.research_processes[research_id]

    return Result.ok(data={
        "status": research_process['status'],
        "progress": research_process['progress'],
        "step": research_process['step'],
        "info": research_process['info']
    })
start_research(app, data) async

Start a new research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def start_research(app: App, data):
    """Start a new research process"""
    # Get data from the request
    query = data.get("query")
    session_id = data.get("session_id")
    max_search = data.get("max_search", 4)
    num_search_result_per_query = data.get("num_search_result_per_query", 4)

    # Get the tools module
    tools = get_app("ArXivPDFProcessor").get_mod("isaa")
    if not hasattr(tools, 'initialized') or not tools.initialized:
        tools.init_isaa(build=True)

    # Generate a unique research_id
    research_id = str(uuid.uuid4())

    # Store the research information in a global dictionary
    if not hasattr(app, 'research_processes'):
        app.research_processes = {}

    # Initialize SSE queues if not already done
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    # Create a queue for this research process
    app.sse_queues[research_id] = asyncio.Queue()

    # Create a processor with callback for status updates
    app.research_processes[research_id] = {
        'status': 'initializing',
        'progress': 0.0,
        'step': 'Initializing',
        'info': '',
        'query': query,
        'session_id': session_id,
        'processor': None,
        'results': None,
        'stop_requested': False
    }

    # Define the callback function that sends updates to the SSE queue
    def status_callback(status_data):
        if research_id in app.research_processes:
            process = app.research_processes[research_id]
            process['status'] = 'processing'
            process['progress'] = status_data.get('progress', 0.0)
            process['step'] = status_data.get('step', '')
            process['info'] = status_data.get('info', '')

            # Put the status update in the SSE queue
            status_update = {
                "status": process['status'],
                "progress": process['progress'],
                "step": process['step'],
                "info": process['info']
            }

            if research_id in app.sse_queues:
                asyncio.create_task(app.sse_queues[research_id].put(status_update))

    # Create the processor
    processor = ArXivPDFProcessor(
        query=query,
        tools=tools,
        chunk_size=1_000_000,
        overlap=2_000,
        max_search=max_search,
        num_search_result_per_query=num_search_result_per_query,
        download_dir=f"pdfs_{research_id}",
        callback=status_callback
    )

    app.research_processes[research_id]['processor'] = processor

    # Process in the background
    async def process_in_background():
        try:
            # Check if stop was requested before starting
            if app.research_processes[research_id]['stop_requested']:
                app.research_processes[research_id]['status'] = 'stopped'
                if research_id in app.sse_queues:
                    await app.sse_queues[research_id].put({
                        "status": "stopped",
                        "progress": 0,
                        "step": "Research stopped",
                        "info": ""
                    })
                return

            # Start processing
            papers, insights = await processor.process()

            # Check if stop was requested during processing
            if app.research_processes[research_id]['stop_requested']:
                app.research_processes[research_id]['status'] = 'stopped'
                if research_id in app.sse_queues:
                    await app.sse_queues[research_id].put({
                        "status": "stopped",
                        "progress": 1,
                        "step": "Research stopped",
                        "info": ""
                    })
                return

            # Store results
            app.research_processes[research_id]['results'] = {
                'papers': papers,
                'insights': insights['response'] if insights and 'response' in insights else None
            }
            app.research_processes[research_id]['status'] = 'complete'

            # Send final status update
            if research_id in app.sse_queues:
                await app.sse_queues[research_id].put({
                    "status": "complete",
                    "progress": 1,
                    "step": "Research complete",
                    "info": f"Found {len(papers)} papers"
                })

        except Exception as e:
            app.research_processes[research_id]['status'] = 'error'
            app.research_processes[research_id]['info'] = str(e)

            # Send error status
            if research_id in app.sse_queues:
                await app.sse_queues[research_id].put({
                    "status": "error",
                    "progress": 0,
                    "step": "Error",
                    "info": str(e)
                })

            print(f"Error in research process {research_id}: {str(e)}")

    # Start the background task
    asyncio.create_task(process_in_background())

    return Result.ok(data={"research_id": research_id})
status_stream(app, research_id) async

SSE stream endpoint for research status updates

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def status_stream(app: App, research_id: str):
    """SSE stream endpoint for research status updates"""
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    # Create a message queue for this research_id if it doesn't exist
    if research_id not in app.sse_queues:
        app.sse_queues[research_id] = asyncio.Queue()

    async def generate():
        # Send initial status
        if hasattr(app, 'research_processes') and research_id in app.research_processes:
            process = app.research_processes[research_id]
            initial_status = {
                "status": process['status'],
                "progress": process['progress'],
                "step": process['step'],
                "info": process['info']
            }
            yield f"event: status_update\ndata: {json.dumps(initial_status)}\n\n"

        try:
            # Stream status updates
            while True:
                try:
                    # Wait for a new status update with a timeout
                    status_data = await asyncio.wait_for(app.sse_queues[research_id].get(), timeout=30)
                    yield f"event: status_update\ndata: {json.dumps(status_data)}\n\n"

                    # If the research is complete or there was an error, exit the loop
                    if status_data.get('status') in ['complete', 'error', 'stopped']:
                        break
                except TimeoutError:
                    # Send a keep-alive comment to prevent connection timeout
                    yield ":\n\n"
        finally:
            # Clean up resources when the client disconnects
            if research_id in app.sse_queues:
                # Keep the queue for other potential clients
                pass

    return Result.stream(generate())
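
On the wire each update is a standard Server-Sent Events frame, so a single status message looks roughly like this (field values illustrative):

event: status_update
data: {"status": "processing", "progress": 0.42, "step": "Downloading PDFs", "info": ""}

Browser clients can therefore subscribe with a plain EventSource and listen for the status_update event; the ":" keep-alive lines are ignored by SSE parsers automatically.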
stop_research(app, data) async

Stop a research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
@export(mod_name=MOD_NAME, api=True, version=version)
async def stop_research(app: App, data):
    """Stop a research process"""
    research_id = data.get("research_id")
    if not hasattr(app, 'research_processes') or research_id not in app.research_processes:
        return Result.default_user_error(info="Research process not found")

    app.research_processes[research_id]['stop_requested'] = True

    # Send stopped status to SSE clients
    if hasattr(app, 'sse_queues') and research_id in app.sse_queues:
        await app.sse_queues[research_id].put({
            "status": "stopped",
            "progress": app.research_processes[research_id]['progress'],
            "step": "Stopping research",
            "info": ""
        })

    return Result.ok(data={"status": "stop_requested"})

one

IntelligenceRingEmbeddings
Source code in toolboxv2/mods/TruthSeeker/one.py
class IntelligenceRingEmbeddings:
    name: str = "sentence-transformers/all-MiniLM-L6-v2"
    clip_name: str = "openai/clip-vit-base-patch32"
    wav2vec_name: str = "facebook/wav2vec2-base-960h"
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    vector_size: int = 768
    tokenizer: Any | None = None
    text_model: Any | None = None

    clip_processor: Any | None = None
    clip_model: Any | None = None

    audio_processor: Any | None = None
    audio_model: Any | None = None

    text_projection: Any | None = None
    image_projection: Any | None = None
    audio_projection: Any | None = None

    def __init__(self, **kwargs):

        super().__init__(**kwargs)
        self._ndims = self.vector_size

        # Text embedding model
        self.tokenizer = AutoTokenizer.from_pretrained(self.name)
        self.text_model = AutoModel.from_pretrained(self.name).to(self.device)

        # Image embedding model (CLIP)
        self.clip_processor = CLIPProcessor.from_pretrained(self.clip_name)
        self.clip_model = CLIPModel.from_pretrained(self.clip_name).to(self.device)

        # Audio embedding model (Wav2Vec2)
        self.audio_processor = Wav2Vec2Processor.from_pretrained(self.wav2vec_name)
        self.audio_model = Wav2Vec2Model.from_pretrained(self.wav2vec_name).to(self.device)

        # Projection layers to align dimensions
        self.text_projection = torch.nn.Linear(
            self.text_model.config.hidden_size,
            self.vector_size
        ).to(self.device)
        self.image_projection = torch.nn.Linear(
            self.clip_model.config.vision_config.hidden_size,
            self.vector_size
        ).to(self.device)
        self.audio_projection = torch.nn.Linear(
            self.audio_model.config.hidden_size,
            self.vector_size
        ).to(self.device)

    def _process_text(self, text: str) -> torch.Tensor:
        encoded_input = self.tokenizer(
            text,
            padding=True,
            truncation=True,
            max_length=self.vector_size,
            return_tensors='pt'
        ).to(self.device)

        with torch.no_grad():
            outputs = self.text_model(**encoded_input)
            embeddings = self._mean_pooling(outputs, encoded_input['attention_mask'])
            projected = self.text_projection(embeddings)
            return torch.nn.functional.normalize(projected, p=2, dim=1)

    def _process_image(self, image_data: bytes | str) -> torch.Tensor:
        # Handle different image input types
        if isinstance(image_data, str):
            if image_data.startswith('data:image'):
                # Handle base64 encoded images
                image_data = base64.b64decode(image_data.split(',')[1])
            else:
                # Handle file paths
                with open(image_data, 'rb') as f:
                    image_data = f.read()

        # Convert bytes to PIL Image
        image = Image.open(io.BytesIO(image_data))

        # Process image with CLIP
        inputs = self.clip_processor(images=image, return_tensors="pt").to(self.device)

        with torch.no_grad():
            outputs = self.clip_model.get_image_features(**inputs)
            projected = self.image_projection(outputs)
            return torch.nn.functional.normalize(projected, p=2, dim=1)

    def _process_audio(self, audio_data: bytes | str | np.ndarray) -> torch.Tensor:
        try:
            import torchaudio
        except ImportError:
            raise ValueError("Couldn't load audio; install torchaudio")
        # Handle different audio input types
        if isinstance(audio_data, str):
            if audio_data.startswith('data:audio'):
                # Handle base64 encoded audio
                audio_data = base64.b64decode(audio_data.split(',')[1])
                waveform, sample_rate = torchaudio.load(io.BytesIO(audio_data))
            else:
                # Handle file paths
                waveform, sample_rate = torchaudio.load(audio_data)
        elif isinstance(audio_data, bytes):
            waveform, sample_rate = torchaudio.load(io.BytesIO(audio_data))
        else:
            # Assume numpy array with sample rate in metadata
            waveform = torch.from_numpy(audio_data)
            sample_rate = 16000  # Default sample rate

        # Resample if necessary
        if sample_rate != 16000:
            resampler = torchaudio.transforms.Resample(sample_rate, 16000)
            waveform = resampler(waveform)

        # Process audio with Wav2Vec2
        inputs = self.audio_processor(waveform, sampling_rate=16000, return_tensors="pt").to(self.device)

        with torch.no_grad():
            outputs = self.audio_model(**inputs)
            # Mean pooling over time dimension
            embeddings = outputs.last_hidden_state.mean(dim=1)
            projected = self.audio_projection(embeddings)
            return torch.nn.functional.normalize(projected, p=2, dim=1)

    def _mean_pooling(self, model_output: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        token_embeddings = model_output[0]
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    def process_input(self, input_data: InputData) -> np.ndarray:
        if input_data.modality == "text":
            embeddings = self._process_text(input_data.content)
        elif input_data.modality == "image":
            embeddings = self._process_image(input_data.content)
        elif input_data.modality == "audio":
            embeddings = self._process_audio(input_data.content)
        else:
            raise ValueError(f"Unsupported modality: {input_data.modality}")

        return embeddings.cpu().numpy()

    def compute_query_embeddings(self, query: str | bytes | np.ndarray, modality: str = "text") -> list[
        np.ndarray]:
        """Compute embeddings for query input"""
        input_data = InputData(query, modality)
        embedding = self.process_input(input_data)
        return [embedding.squeeze()]

    def compute_source_embeddings(self, sources: list[str | bytes | np.ndarray], modalities: list[str]) -> list[
        np.ndarray]:
        """Compute embeddings for source inputs"""
        embeddings = []
        for source, modality in zip(sources, modalities, strict=False):
            input_data = InputData(source, modality)
            embedding = self.process_input(input_data)
            embeddings.append(embedding.squeeze())
        return embeddings

    def ndims(self) -> int:
        return self._ndims
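
A minimal usage sketch, assuming the torch/transformers/PIL dependencies are installed and that InputData takes (content, modality) as used by compute_query_embeddings above:

import numpy as np

embedder = IntelligenceRingEmbeddings()  # downloads the three models on first use

query_vec = embedder.compute_query_embeddings("multimodal retrieval")[0]
source_vecs = embedder.compute_source_embeddings(
    ["paper abstract one", "paper abstract two"],
    ["text", "text"],
)

# The projection step L2-normalizes each embedding, so a dot product
# gives the cosine similarity between query and source.
scores = [float(np.dot(query_vec, v)) for v in source_vecs]
print(scores)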
compute_query_embeddings(query, modality='text')

Compute embeddings for query input

Source code in toolboxv2/mods/TruthSeeker/one.py
def compute_query_embeddings(self, query: str | bytes | np.ndarray, modality: str = "text") -> list[
    np.ndarray]:
    """Compute embeddings for query input"""
    input_data = InputData(query, modality)
    embedding = self.process_input(input_data)
    return [embedding.squeeze()]
compute_source_embeddings(sources, modalities)

Compute embeddings for source inputs

Source code in toolboxv2/mods/TruthSeeker/one.py
def compute_source_embeddings(self, sources: list[str | bytes | np.ndarray], modalities: list[str]) -> list[
    np.ndarray]:
    """Compute embeddings for source inputs"""
    embeddings = []
    for source, modality in zip(sources, modalities, strict=False):
        input_data = InputData(source, modality)
        embedding = self.process_input(input_data)
        embeddings.append(embedding.squeeze())
    return embeddings

tests

TestTruthSeeker

Bases: TestCase

Source code in toolboxv2/mods/TruthSeeker/tests.py
class TestTruthSeeker(unittest.TestCase):
    def setUp(self):
        # Mock the App class
        self.mock_app = Mock()
        self.mock_app.get_mod.return_value = Mock()

        # Setup mock for run_any that returns iterable dict
        self.mock_app.run_any.return_value = {
            "1": {"name": "template1"},
            "2": {"name": "template2"}
        }

        # Mock RequestSession
        self.mock_request = Mock()
        self.mock_request.json = AsyncMock()

    @patch('os.path.join')
    @patch('builtins.open', create=True)
    def test_start_initialization(self, mock_open, mock_join):
        """Test the start function initializes correctly"""
        # Setup mock file handling
        mock_file = Mock()
        mock_file.read.return_value = "test content"
        mock_open.return_value.__enter__.return_value = mock_file

        # Call start function
        start(self.mock_app)

        # Verify app initialization calls
        self.mock_app.get_mod.assert_called_with("CodeVerification")
        self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker")
        self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker-promo")

    @async_test
    async def test_codes_valid_request(self):
        """Test the codes function with valid input"""
        # Mock request data
        test_data = {
            "query": "test query",
            "depth": "Q",
            "promoCode": "PROMO15",
            "ontimeCode": "TEST123"
        }
        self.mock_request.json.return_value = test_data

        # Mock code verification
        self.mock_app.run_any.return_value = {
            "template_name": "Promo15",
            "usage_type": "one_time"
        }

        result = await codes(self.mock_app, self.mock_request)

        self.assertTrue(result['valid'])
        self.assertIn('ontimeKey', result)
        self.assertIn('ppc', result)

    @async_test
    async def test_codes_invalid_promo(self):
        """Test the codes function with invalid promo code"""
        test_data = {
            "query": "test query",
            "depth": "I",
            "promoCode": "INVALID",
            "ontimeCode": "TEST123"
        }
        self.mock_request.json.return_value = test_data

        # Mock invalid promo code verification
        self.mock_app.run_any.return_value = None

        result = await codes(self.mock_app, self.mock_request)

        self.assertIn('ppc', result)
        self.assertTrue(result['ppc']['price'] > 0)

    @async_test
    async def test_process_valid_request(self):
        """Test the process function with valid input"""
        test_data = {
            "query": "test query",
            "depth": "Q",
            "ontimeKey": "VALID_KEY",
            "email": "test@example.com"
        }
        self.mock_request.json.return_value = test_data

        # Mock valid key verification
        self.mock_app.run_any.return_value = {
            "template_name": "PROCESS",
            "usage_type": "timed",
            "uses_count": 1
        }

        # Mock ArXivPDFProcessor
        with patch('toolboxv2.mods.TruthSeeker.module.ArXivPDFProcessor') as mock_processor:
            mock_insights = MagicMock()
            mock_insights.is_true = "True"
            mock_insights.summary = "Test summary"
            mock_insights.key_point = "Point1>\n\n<Point2"

            mock_processor.return_value.process.return_value = ([], mock_insights)

            result = await process(self.mock_app, self.mock_request)

            self.assertEqual(result['is_true'], "True")
            self.assertEqual(result['summary'], "Test summary")

    @async_test
    async def test_process_invalid_key(self):
        """Test the process function with invalid key"""
        test_data = {
            "query": "test query",
            "depth": "Q",
            "ontimeKey": "INVALID_KEY",
            "email": "test@example.com"
        }
        self.mock_request.json.return_value = test_data

        # Mock invalid key verification
        self.mock_app.run_any.return_value = None

        result = await process(self.mock_app, self.mock_request)

        self.assertEqual(result['summary'], "INVALID QUERY")
        self.assertEqual(result['insights'], [])
        self.assertEqual(result['papers'], [])

    def test_byCode_functionality(self):
        """Test the byCode function"""
        test_request = Mock()
        test_request.json.return_value = ["payKey", "codeClass", "ontimeKey"]

        result = byCode(self.mock_app, test_request)

        self.assertEqual(result, {'code': 'code'})
test_byCode_functionality()

Test the byCode function

Source code in toolboxv2/mods/TruthSeeker/tests.py
def test_byCode_functionality(self):
    """Test the byCode function"""
    test_request = Mock()
    test_request.json.return_value = ["payKey", "codeClass", "ontimeKey"]

    result = byCode(self.mock_app, test_request)

    self.assertEqual(result, {'code': 'code'})
test_codes_invalid_promo() async

Test the codes function with invalid promo code

Source code in toolboxv2/mods/TruthSeeker/tests.py
@async_test
async def test_codes_invalid_promo(self):
    """Test the codes function with invalid promo code"""
    test_data = {
        "query": "test query",
        "depth": "I",
        "promoCode": "INVALID",
        "ontimeCode": "TEST123"
    }
    self.mock_request.json.return_value = test_data

    # Mock invalid promo code verification
    self.mock_app.run_any.return_value = None

    result = await codes(self.mock_app, self.mock_request)

    self.assertIn('ppc', result)
    self.assertTrue(result['ppc']['price'] > 0)
test_codes_valid_request() async

Test the codes function with valid input

Source code in toolboxv2/mods/TruthSeeker/tests.py
@async_test
async def test_codes_valid_request(self):
    """Test the codes function with valid input"""
    # Mock request data
    test_data = {
        "query": "test query",
        "depth": "Q",
        "promoCode": "PROMO15",
        "ontimeCode": "TEST123"
    }
    self.mock_request.json.return_value = test_data

    # Mock code verification
    self.mock_app.run_any.return_value = {
        "template_name": "Promo15",
        "usage_type": "one_time"
    }

    result = await codes(self.mock_app, self.mock_request)

    self.assertTrue(result['valid'])
    self.assertIn('ontimeKey', result)
    self.assertIn('ppc', result)
test_process_invalid_key() async

Test the process function with invalid key

Source code in toolboxv2/mods/TruthSeeker/tests.py
@async_test
async def test_process_invalid_key(self):
    """Test the process function with invalid key"""
    test_data = {
        "query": "test query",
        "depth": "Q",
        "ontimeKey": "INVALID_KEY",
        "email": "test@example.com"
    }
    self.mock_request.json.return_value = test_data

    # Mock invalid key verification
    self.mock_app.run_any.return_value = None

    result = await process(self.mock_app, self.mock_request)

    self.assertEqual(result['summary'], "INVALID QUERY")
    self.assertEqual(result['insights'], [])
    self.assertEqual(result['papers'], [])
test_process_valid_request() async

Test the process function with valid input

Source code in toolboxv2/mods/TruthSeeker/tests.py
@async_test
async def test_process_valid_request(self):
    """Test the process function with valid input"""
    test_data = {
        "query": "test query",
        "depth": "Q",
        "ontimeKey": "VALID_KEY",
        "email": "test@example.com"
    }
    self.mock_request.json.return_value = test_data

    # Mock valid key verification
    self.mock_app.run_any.return_value = {
        "template_name": "PROCESS",
        "usage_type": "timed",
        "uses_count": 1
    }

    # Mock ArXivPDFProcessor
    with patch('toolboxv2.mods.TruthSeeker.module.ArXivPDFProcessor') as mock_processor:
        mock_insights = MagicMock()
        mock_insights.is_true = "True"
        mock_insights.summary = "Test summary"
        mock_insights.key_point = "Point1>\n\n<Point2"

        mock_processor.return_value.process.return_value = ([], mock_insights)

        result = await process(self.mock_app, self.mock_request)

        self.assertEqual(result['is_true'], "True")
        self.assertEqual(result['summary'], "Test summary")
test_start_initialization(mock_open, mock_join)

Test the start function initializes correctly

Source code in toolboxv2/mods/TruthSeeker/tests.py
@patch('os.path.join')
@patch('builtins.open', create=True)
def test_start_initialization(self, mock_open, mock_join):
    """Test the start function initializes correctly"""
    # Setup mock file handling
    mock_file = Mock()
    mock_file.read.return_value = "test content"
    mock_open.return_value.__enter__.return_value = mock_file

    # Call start function
    start(self.mock_app)

    # Verify app initialization calls
    self.mock_app.get_mod.assert_called_with("CodeVerification")
    self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker")
    self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker-promo")
run_all_tests()

Run all test classes

Source code in toolboxv2/mods/TruthSeeker/tests.py
@default_test
def run_all_tests():
    """Run all test classes"""
    return run_test_suite()
run_arxiv_processor_tests(test_name=None)

Run TestArXivPDFProcessor tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
def run_arxiv_processor_tests(test_name=None):
    """Run TestArXivPDFProcessor tests"""
    return run_test_suite(TestArXivPDFProcessor, test_name)
run_pdf_downloader_tests(test_name=None)

Run TestRobustPDFDownloader tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
def run_pdf_downloader_tests(test_name=None):
    """Run TestRobustPDFDownloader tests"""
    return run_test_suite(TestRobustPDFDownloader, test_name)
run_specific_test(test_class, test_name)

Run a specific test from a test class

Source code in toolboxv2/mods/TruthSeeker/tests.py
def run_specific_test(test_class, test_name):
    """Run a specific test from a test class"""
    return run_test_suite(test_class, test_name)
run_test_suite(test_class=None, test_name=None, verbosity=2)

Run specific test class or test case.

Parameters:

Name Type Description Default
test_class

The test class to run (optional)

None
test_name

Specific test method name to run (optional)

None
verbosity

Output detail level (default=2)

2

Returns:

Type Description

TestResult object

Source code in toolboxv2/mods/TruthSeeker/tests.py
def run_test_suite(test_class=None, test_name=None, verbosity=2):
    """
    Run specific test class or test case.

    Args:
        test_class: The test class to run (optional)
        test_name: Specific test method name to run (optional)
        verbosity: Output detail level (default=2)

    Returns:
        TestResult object
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()

    if test_class and test_name:
        # Run specific test method
        suite.addTest(test_class(test_name))
    elif test_class:
        # Run all tests in the class
        suite.addTests(loader.loadTestsFromTestCase(test_class))
    else:
        # Run all tests
        suite.addTests(loader.loadTestsFromModule(sys.modules[__name__]))

    runner = unittest.TextTestRunner(verbosity=verbosity)
    return runner.run(suite)
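
For example:

# Run a single test method from one class.
run_test_suite(TestTruthSeeker, 'test_byCode_functionality')

# Run every test in a class.
run_test_suite(TestTruthSeeker)

# Run all tests discovered in this module.
run_test_suite()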
run_truth_seeker_tests(test_name=None)

Run TestTruthSeeker tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
def run_truth_seeker_tests(test_name=None):
    """Run TestTruthSeeker tests"""
    return run_test_suite(TestTruthSeeker, test_name)

test_arxiv_search()

Run only ArXiv search tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
@default_test
def test_arxiv_search():
    """Run only ArXiv search tests"""
    return run_specific_test(
        TestArXivPDFProcessor,
        'test_search_and_process_papers'
    )
test_pdf_download()

Run only PDF download tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
@default_test
def test_pdf_download():
    """Run only PDF download tests"""
    return run_specific_test(
        TestRobustPDFDownloader,
        'test_download_pdf_success'
    )
test_truth_seeker()

Run only TruthSeeker tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
@default_test
def test_truth_seeker():
    """Run only PDF download tests"""
    return run_specific_test(
        TestTruthSeeker,
        'test_truth_seeker_success'
    )

UltimateTTT

UltimateTTTGameEngine

Source code in toolboxv2/mods/UltimateTTT.py, lines 178–556
class UltimateTTTGameEngine:  # Renamed for clarity
    def __init__(self, game_state: GameState):
        self.gs = game_state
        self.size = game_state.config.grid_size

    def _check_line_for_win(self, line: List[Union[CellState, BoardWinner]],
                            symbol_to_check: Union[CellState, BoardWinner]) -> bool:
        if not line or line[0] == CellState.EMPTY or line[0] == BoardWinner.NONE:
            return False
        return all(cell == symbol_to_check for cell in line)

    def _get_board_winner_symbol(self, board: List[List[Union[CellState, BoardWinner]]],
                                 symbol_class: Union[type[CellState], type[BoardWinner]]) -> Optional[
        Union[CellState, BoardWinner]]:
        symbols_to_try = [symbol_class.X, symbol_class.O]
        for symbol in symbols_to_try:
            # Rows
            for r in range(self.size):
                if self._check_line_for_win([board[r][c] for c in range(self.size)], symbol): return symbol
            # Columns
            for c in range(self.size):
                if self._check_line_for_win([board[r][c] for r in range(self.size)], symbol): return symbol
            # Diagonals
            if self._check_line_for_win([board[i][i] for i in range(self.size)], symbol): return symbol
            if self._check_line_for_win([board[i][self.size - 1 - i] for i in range(self.size)], symbol): return symbol
        return None  # No winner

    def _is_board_full(self, board: List[List[Union[CellState, BoardWinner]]],
                       empty_value: Union[CellState, BoardWinner]) -> bool:
        return all(cell != empty_value for row in board for cell in row)

    def _determine_local_board_result(self, global_r: int, global_c: int) -> BoardWinner:
        if self.gs.global_board_winners[global_r][global_c] != BoardWinner.NONE:
            return self.gs.global_board_winners[global_r][global_c]

        local_board_cells = self.gs.local_boards_state[global_r][global_c]
        winner_symbol = self._get_board_winner_symbol(local_board_cells, CellState)
        if winner_symbol:
            return BoardWinner(winner_symbol.value)  # Convert CellState.X to BoardWinner.X
        if self._is_board_full(local_board_cells, CellState.EMPTY):
            return BoardWinner.DRAW
        return BoardWinner.NONE

    def _update_local_winner_and_check_global(self, global_r: int, global_c: int):
        new_local_winner = self._determine_local_board_result(global_r, global_c)
        if new_local_winner != BoardWinner.NONE and self.gs.global_board_winners[global_r][
            global_c] == BoardWinner.NONE:
            self.gs.global_board_winners[global_r][global_c] = new_local_winner
            self._check_for_overall_game_end()

    def _check_for_overall_game_end(self):
        if self.gs.status == GameStatus.FINISHED: return

        winner_board_symbol = self._get_board_winner_symbol(self.gs.global_board_winners, BoardWinner)
        if winner_board_symbol:  # This is BoardWinner.X or BoardWinner.O
            self.gs.overall_winner_symbol = PlayerSymbol(winner_board_symbol.value)  # Convert to PlayerSymbol
            self.gs.status = GameStatus.FINISHED
            return

        if self._is_board_full(self.gs.global_board_winners, BoardWinner.NONE):
            self.gs.is_draw = True
            self.gs.status = GameStatus.FINISHED

    def _determine_next_forced_board(self, last_move_local_r: int, last_move_local_c: int) -> Optional[Tuple[int, int]]:
        target_gr, target_gc = last_move_local_r, last_move_local_c

        if self.gs.global_board_winners[target_gr][target_gc] == BoardWinner.NONE and \
            not self._is_local_board_full(self.gs.local_boards_state[target_gr][target_gc], CellState.EMPTY):
            return (target_gr, target_gc)
        return None  # Play anywhere valid

    def _is_local_board_full(self, local_board_cells: List[List[CellState]], cell_type=CellState.EMPTY) -> bool:
        """Checks if a specific local board (passed as a 2D list of CellState) is full."""
        for r in range(self.size):
            for c in range(self.size):
                if local_board_cells[r][c] == cell_type:
                    return False
        return True

    def add_player(self, player_id: str, player_name: str,
                   is_npc: bool = False, npc_difficulty: Optional[NPCDifficulty] = None) -> bool:
        if len(self.gs.players) >= 2:
            self.gs.last_error_message = "Game is already full (2 players max)."
            return False

        # Reconnect logic for existing player (human or NPC if that makes sense)
        existing_player = self.gs.get_player_info(player_id)
        if existing_player:
            if not existing_player.is_connected:
                existing_player.is_connected = True
                # If NPC "reconnects", ensure its properties are correct (though unlikely scenario for NPC)
                if is_npc:
                    existing_player.is_npc = True
                    existing_player.npc_difficulty = npc_difficulty
                    existing_player.name = player_name  # Update name if it changed for NPC

                self.gs.last_error_message = None
                self.gs.updated_at = datetime.now(timezone.utc)

                if len(self.gs.players) == 2 and all(p.is_connected for p in self.gs.players) and \
                    self.gs.status == GameStatus.WAITING_FOR_OPPONENT:  # Should not be waiting if NPC is P2
                    self.gs.status = GameStatus.IN_PROGRESS
                    player_x_info = next(p for p in self.gs.players if p.symbol == PlayerSymbol.X)
                    self.gs.current_player_id = player_x_info.id
                    self.gs.waiting_since = None
                return True
            else:  # Player ID exists and is already connected
                self.gs.last_error_message = f"Player with ID {player_id} is already in the game and connected."
                return False

        # Adding a new player
        symbol = PlayerSymbol.X if not self.gs.players else PlayerSymbol.O

        # Construct PlayerInfo with NPC details if applicable
        player_info_data = {
            "id": player_id,
            "symbol": symbol,
            "name": player_name,
            "is_connected": True,  # NPCs are always "connected"
            "is_npc": is_npc
        }
        if is_npc and npc_difficulty:
            player_info_data["npc_difficulty"] = npc_difficulty

        new_player = PlayerInfo(**player_info_data)
        self.gs.players.append(new_player)
        self.gs.last_error_message = None

        if len(self.gs.players) == 1:  # First player added
            if self.gs.mode == GameMode.ONLINE:
                self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                self.gs.current_player_id = player_id
                self.gs.waiting_since = datetime.now(timezone.utc)
            # For local mode with P1, we wait for P2 (human or NPC) to be added
            # No status change yet, current_player_id not set until P2 joins

        elif len(self.gs.players) == 2:  # Both players now present
            self.gs.status = GameStatus.IN_PROGRESS
            player_x_info = next(p for p in self.gs.players if p.symbol == PlayerSymbol.X)
            self.gs.current_player_id = player_x_info.id  # X always starts
            self.gs.next_forced_global_board = None
            self.gs.waiting_since = None

            # If the second player added is an NPC and it's their turn (e.g. P1 is human, P2 is NPC, P1 made a move)
            # This specific logic is more for when make_move hands over to an NPC.
            # Here, we just set up the game. X (P1) will make the first move.

        self.gs.updated_at = datetime.now(timezone.utc)
        return True

    def make_move(self, move: Move) -> bool:
        self.gs.last_error_message = None

        if self.gs.status != GameStatus.IN_PROGRESS:
            self.gs.last_error_message = "Game is not in progress."
            return False

        player_info = self.gs.get_player_info(move.player_id)
        if not player_info or move.player_id != self.gs.current_player_id:
            self.gs.last_error_message = "Not your turn or invalid player."
            return False

        s = self.size
        if not (0 <= move.global_row < s and 0 <= move.global_col < s and \
                0 <= move.local_row < s and 0 <= move.local_col < s):
            self.gs.last_error_message = f"Coordinates out of bounds for {s}x{s} grid."
            return False

        gr, gc, lr, lc = move.global_row, move.global_col, move.local_row, move.local_col

        if self.gs.next_forced_global_board and (gr, gc) != self.gs.next_forced_global_board:
            self.gs.last_error_message = f"Must play in global board {self.gs.next_forced_global_board}."
            return False

        if self.gs.global_board_winners[gr][gc] != BoardWinner.NONE:
            self.gs.last_error_message = f"Local board ({gr},{gc}) is already decided."
            return False
        if self.gs.local_boards_state[gr][gc][lr][lc] != CellState.EMPTY:
            self.gs.last_error_message = f"Cell ({gr},{gc})-({lr},{lc}) is already empty."  # Should be 'not empty' or 'occupied'
            # Correction:
            self.gs.last_error_message = f"Cell ({gr},{gc})-({lr},{lc}) is already occupied."
            return False

        self.gs.local_boards_state[gr][gc][lr][lc] = CellState(player_info.symbol.value)
        self.gs.moves_history.append(move)

        self._update_local_winner_and_check_global(gr, gc)

        if self.gs.status == GameStatus.FINISHED:
            self.gs.next_forced_global_board = None
        else:
            opponent_info = self.gs.get_opponent_info(self.gs.current_player_id)
            self.gs.current_player_id = opponent_info.id
            self.gs.next_forced_global_board = self._determine_next_forced_board(lr, lc)

            if self.gs.next_forced_global_board is None:
                is_any_move_possible = any(
                    self.gs.global_board_winners[r_idx][c_idx] == BoardWinner.NONE and \
                    not self._is_local_board_full(self.gs.local_boards_state[r_idx][c_idx], CellState.EMPTY)
                    for r_idx in range(s) for c_idx in range(s)
                )
                if not is_any_move_possible:
                    self._check_for_overall_game_end()
                    if self.gs.status != GameStatus.FINISHED:
                        self.gs.is_draw = True
                        self.gs.status = GameStatus.FINISHED

        self.gs.updated_at = datetime.now(timezone.utc)
        self.gs.last_made_move_coords = (move.global_row, move.global_col, move.local_row, move.local_col)

        return True

    def handle_player_disconnect(self, player_id: str):
        player = self.gs.get_player_info(player_id)
        app = get_app(GAME_NAME)  # Get the app instance
        if player:
            if not player.is_connected:  # Already marked as disconnected
                app.logger.info(f"Player {player_id} was already marked as disconnected from game {self.gs.game_id}.")
                return

            player.is_connected = False
            self.gs.updated_at = datetime.now(timezone.utc)
            app.logger.info(f"Player {player_id} disconnected from game {self.gs.game_id}. Name: {player.name}")

            if self.gs.mode == GameMode.ONLINE:
                if self.gs.status == GameStatus.IN_PROGRESS:
                    opponent = self.gs.get_opponent_info(player_id)
                    if opponent and opponent.is_connected:
                        self.gs.status = GameStatus.ABORTED  # Use ABORTED as "paused"
                        self.gs.player_who_paused = player_id  # Store who disconnected
                        # This message is for the game state, will be seen by the other player via SSE
                        self.gs.last_error_message = f"Player {player.name} disconnected. Waiting for them to rejoin."
                        app.logger.info(
                            f"Game {self.gs.game_id} PAUSED, waiting for {player.name} ({player_id}) to reconnect.")
                    else:
                        # Opponent also disconnected or was already gone
                        self.gs.status = GameStatus.ABORTED
                        self.gs.last_error_message = "Both players disconnected. Game aborted."
                        self.gs.player_who_paused = None  # No specific player to wait for
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, both players (or last active player) disconnected.")
                elif self.gs.status == GameStatus.WAITING_FOR_OPPONENT:
                    # If the creator (P1) disconnects while waiting for P2
                    if len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                        self.gs.status = GameStatus.ABORTED
                        self.gs.last_error_message = "Game creator disconnected before opponent joined. Game aborted."
                        self.gs.player_who_paused = None
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, creator {player.name} ({player_id}) disconnected while WAITING_FOR_OPPONENT.")
                elif self.gs.status == GameStatus.ABORTED and self.gs.player_who_paused:
                    # Game was already paused (e.g. P1 disconnected), and now P2 (the waiting one) disconnects
                    if self.gs.player_who_paused != player_id:  # Ensure it's the other player
                        self.gs.last_error_message = "Other player also disconnected during pause. Game aborted."
                        self.gs.player_who_paused = None  # No one specific to wait for now
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, waiting player {player.name} ({player_id}) disconnected.")

    def handle_player_reconnect(self, player_id: str) -> bool:
        player = self.gs.get_player_info(player_id)
        app = get_app(GAME_NAME)
        if not player:
            app.logger.warning(f"Reconnect attempt for unknown player {player_id} in game {self.gs.game_id}.")
            return False

        if player.is_connected:
            app.logger.info(
                f"Player {player.name} ({player_id}) attempted reconnect but was already marked as connected to game {self.gs.game_id}.")
            if self.gs.status == GameStatus.ABORTED and self.gs.player_who_paused == player_id:
                opponent = self.gs.get_opponent_info(player_id)
                if opponent and opponent.is_connected:
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = f"Connection for {player.name} re-established. Game resumed."
                    self.gs.player_who_paused = None
                    self.gs.updated_at = datetime.now(timezone.utc)
                    app.logger.info(
                        f"Game {self.gs.game_id} resumed as already-connected pauser {player.name} re-interacted.")
                else:
                    self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent is still not connected."
            return True

        player.is_connected = True
        self.gs.updated_at = datetime.now(timezone.utc)
        app.logger.info(
            f"Player {player.name} ({player_id}) reconnected to game {self.gs.game_id}. Previous status: {self.gs.status}, Paused by: {self.gs.player_who_paused}")

        if self.gs.status == GameStatus.ABORTED:
            if self.gs.player_who_paused == player_id:  # The player who caused the pause has reconnected
                opponent = self.gs.get_opponent_info(player_id)
                if opponent and opponent.is_connected:
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = f"Player {player.name} reconnected. Game resumed!"
                    self.gs.player_who_paused = None
                    app.logger.info(
                        f"Game {self.gs.game_id} RESUMED. Pauser {player.name} reconnected, opponent {opponent.name} is present.")
                else:  # Pauser reconnected, opponent (still) gone or never joined (if P1 disconnected from WAITING)
                    if not opponent and len(
                        self.gs.players) == 1:  # P1 reconnected to a game they created but no P2 yet
                        self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                        self.gs.player_who_paused = None
                        self.gs.current_player_id = player_id
                        self.gs.last_error_message = f"Creator {player.name} reconnected. Waiting for opponent."
                        self.gs.waiting_since = datetime.now(timezone.utc)  # Reset waiting timer
                    elif opponent:  # Opponent was there but is now disconnected
                        self.gs.player_who_paused = opponent.id  # Now waiting for the other person
                        self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent ({opponent.name}) is not connected. Game remains paused."
                        app.logger.info(
                            f"Game {self.gs.game_id} still PAUSED. {player.name} reconnected, but opponent {opponent.name} is NOT. Waiting for {opponent.name}.")
                    else:  # Should be rare: 2 players in list, but opponent object not found for P1
                        self.gs.last_error_message = f"Welcome back, {player.name}! Opponent details unclear. Game remains paused."


            elif self.gs.player_who_paused and self.gs.player_who_paused != player_id:
                # The *other* player reconnected, while game was paused for initial pauser.
                initial_pauser_info = self.gs.get_player_info(self.gs.player_who_paused)
                if initial_pauser_info and initial_pauser_info.is_connected:  # This implies both are now connected.
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = f"Both players are now connected. Game resumed!"
                    self.gs.player_who_paused = None
                    app.logger.info(
                        f"Game {self.gs.game_id} RESUMED. Waiting player {player.name} reconnected, initial pauser {initial_pauser_info.name} also present.")
                else:
                    self.gs.last_error_message = f"Welcome back, {player.name}! Still waiting for {initial_pauser_info.name if initial_pauser_info else 'the other player'} to reconnect."
                    app.logger.info(
                        f"Game {self.gs.game_id} still PAUSED. Player {player.name} reconnected, but still waiting for original pauser {self.gs.player_who_paused}.")

            else:  # game is ABORTED but no specific player_who_paused (hard abort by timeout or both disconnected)
                if len(self.gs.players) == 2:  # Was a two-player game
                    opponent = self.gs.get_opponent_info(player_id)
                    if opponent:
                        # Revive the game to a paused state, waiting for the other player
                        self.gs.player_who_paused = opponent.id
                        self.gs.status = GameStatus.ABORTED  # Remains aborted, but now specifically for opponent
                        self.gs.last_error_message = f"Welcome back, {player.name}! Game was fully aborted. Now waiting for {opponent.name} to rejoin."
                        app.logger.info(
                            f"Game {self.gs.game_id} REVIVED from HARD ABORT by {player.name}. Now paused, waiting for {opponent.name} ({opponent.id}).")
                    else:  # Should not happen if two players were in game and player_id is one of them
                        self.gs.last_error_message = f"Player {player.name} reconnected, but game state is inconsistent (opponent not found)."
                        app.logger.warning(
                            f"Game {self.gs.game_id} HARD ABORT revival by {player.name} failed, opponent info missing.")
                elif len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                    # P1 created, P1 disconnected, game WAITING_FOR_OPPONENT timed out & hard aborted. P1 tries to rejoin.
                    self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                    self.gs.player_who_paused = None
                    self.gs.current_player_id = player_id
                    self.gs.last_error_message = f"Creator {player.name} reconnected. Waiting for opponent."
                    self.gs.waiting_since = datetime.now(timezone.utc)  # Reset waiting timer
                    app.logger.info(
                        f"Game {self.gs.game_id} (previously hard aborted while waiting) revived by creator {player.name}. Now WAITING_FOR_OPPONENT.")
                else:
                    self.gs.last_error_message = f"Player {player.name} reconnected, but the game was aborted and cannot be revived in its current player configuration."
                    app.logger.info(
                        f"Game {self.gs.game_id} HARD ABORTED. Player {player.name} reconnected, but game cannot resume in current configuration.")


        elif self.gs.status == GameStatus.IN_PROGRESS:
            opponent = self.gs.get_opponent_info(player_id)
            if not opponent or not opponent.is_connected:
                self.gs.status = GameStatus.ABORTED
                self.gs.player_who_paused = opponent.id if opponent else None
                self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent disconnected while you were away. Waiting for them."
                app.logger.info(
                    f"Game {self.gs.game_id} transitions to PAUSED. {player.name} reconnected to IN_PROGRESS, but opponent {opponent.id if opponent else 'N/A'} is gone.")
            else:
                self.gs.last_error_message = f"Player {player.name} re-established connection during active game."
                app.logger.info(
                    f"Player {player.name} ({player_id}) re-established connection to IN_PROGRESS game {self.gs.game_id}.")

        elif self.gs.status == GameStatus.WAITING_FOR_OPPONENT:
            if len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                self.gs.last_error_message = f"Creator {player.name} reconnected. Still waiting for opponent."
                self.gs.current_player_id = player_id
                self.gs.waiting_since = datetime.now(timezone.utc)  # Reset waiting timer
                app.logger.info(
                    f"Creator {player.name} ({player_id}) reconnected to WAITING_FOR_OPPONENT game {self.gs.game_id}.")
            else:
                app.logger.warning(
                    f"Non-creator {player.name} or unexpected player count for reconnect to WAITING_FOR_OPPONENT game {self.gs.game_id}.")

        return True
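
A hedged end-to-end sketch of driving the engine follows. The constructors for GameState, GameConfig, and Move, and the member names GameMode.LOCAL and NPCDifficulty.EASY, are assumptions inferred from the attribute accesses above; check the surrounding module for the actual signatures.

# Hypothetical sketch; GameState/GameConfig/Move shapes are inferred, not confirmed.
gs = GameState(config=GameConfig(grid_size=3), mode=GameMode.LOCAL)
engine = UltimateTTTGameEngine(gs)

engine.add_player("p1", "Alice")                      # first player becomes X
engine.add_player("npc", "Bot", is_npc=True,
                  npc_difficulty=NPCDifficulty.EASY)  # second player becomes O, game starts

# X plays in global board (1,1), local cell (0,2). The local coordinates of
# this move force O into global board (0,2), unless that board is already
# decided or full, in which case O may play anywhere valid.
move = Move(player_id="p1", global_row=1, global_col=1, local_row=0, local_col=2)
if not engine.make_move(move):
    print(gs.last_error_message)
print(gs.next_forced_global_board)  # (0, 2) in the common case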

WebSocketManager

WebSocketPoolManager

Source code in toolboxv2/mods/WebSocketManager.py, lines 529–651
class WebSocketPoolManager:
    def __init__(self):
        self.pools: dict[str, dict[str, Any]] = {}
        self.logger = logging.getLogger(__name__)

    async def create_pool(self, pool_id: str) -> None:
        """Create a new WebSocket pool."""
        if pool_id not in self.pools:
            self.pools[pool_id] = {
                'connections': {},
                'actions': {},
                'global_actions': {}
            }
            self.logger.info(f"Created new pool: {pool_id}")
        else:
            self.logger.warning(f"Pool {pool_id} already exists")

    async def add_connection(self, pool_id: str, connection_id: str, websocket) -> None:
        """Add a WebSocket connection to a pool."""
        if pool_id not in self.pools:
            await self.create_pool(pool_id)

        self.pools[pool_id]['connections'][connection_id] = websocket
        self.logger.info(f"Added connection {connection_id} to pool {pool_id}")

    async def remove_connection(self, pool_id: str, connection_id: str) -> None:
        """Remove a WebSocket connection from a pool."""
        if pool_id in self.pools and connection_id in self.pools[pool_id]['connections']:
            del self.pools[pool_id]['connections'][connection_id]
            self.logger.info(f"Removed connection {connection_id} from pool {pool_id}")
        else:
            self.logger.warning(f"Connection {connection_id} not found in pool {pool_id}")

    def register_action(self, pool_id: str, action_name: str, handler: Callable,
                        connection_ids: list[str] = None) -> None:
        """Register an action for specific connections or the entire pool."""
        if pool_id not in self.pools:
            self.logger.error(f"Pool {pool_id} does not exist")
            return

        if connection_ids is None:
            self.pools[pool_id]['global_actions'][action_name] = handler
            self.logger.info(f"Registered global action {action_name} for pool {pool_id}")
        else:
            for conn_id in connection_ids:
                if conn_id not in self.pools[pool_id]['actions']:
                    self.pools[pool_id]['actions'][conn_id] = {}
                self.pools[pool_id]['actions'][conn_id][action_name] = handler
            self.logger.info(f"Registered action {action_name} for connections {connection_ids} in pool {pool_id}")

    async def handle_message(self, pool_id: str, connection_id: str, message: str) -> None:
        """Handle incoming messages and route them to the appropriate action handler."""
        if pool_id not in self.pools or connection_id not in self.pools[pool_id]['connections']:
            self.logger.error(f"Invalid pool_id or connection_id: {pool_id}, {connection_id}")
            return

        try:
            data = json.loads(message)
            action = data.get('action')

            if action:
                if action in self.pools[pool_id]['global_actions']:
                    await self.pools[pool_id]['global_actions'][action](pool_id, connection_id, data)
                elif connection_id in self.pools[pool_id]['actions'] and action in self.pools[pool_id]['actions'][
                    connection_id]:
                    await self.pools[pool_id]['actions'][connection_id][action](pool_id, connection_id, data)
                else:
                    self.logger.warning(f"No handler found for action {action} in pool {pool_id}")
            else:
                self.logger.warning(f"No action specified in message from {connection_id} in pool {pool_id}")
        except json.JSONDecodeError:
            self.logger.error(f"Invalid JSON received from {connection_id} in pool {pool_id}")

    async def broadcast(self, pool_id: str, message: str, exclude_connection_id: str = None) -> None:
        """Broadcast a message to all connections in a pool, optionally excluding one connection."""
        if pool_id not in self.pools:
            self.logger.error(f"Pool {pool_id} does not exist")
            return

        for conn_id, websocket in self.pools[pool_id]['connections'].items():
            if conn_id != exclude_connection_id:
                try:
                    await websocket.send_text(message)
                except Exception as e:
                    self.logger.error(f"Error sending message to {conn_id} in pool {pool_id}: {str(e)}")

    async def send_to_connection(self, pool_id: str, connection_id: str, message: str) -> None:
        """Send a message to a specific connection in a pool."""
        if pool_id in self.pools and connection_id in self.pools[pool_id]['connections']:
            try:
                await self.pools[pool_id]['connections'][connection_id].send_text(message)
            except Exception as e:
                self.logger.error(f"Error sending message to {connection_id} in pool {pool_id}: {str(e)}")
        else:
            self.logger.error(f"Connection {connection_id} not found in pool {pool_id}")

    def get_pool_connections(self, pool_id: str) -> list[str]:
        """Get a list of all connection IDs in a pool."""
        if pool_id in self.pools:
            return list(self.pools[pool_id]['connections'].keys())
        else:
            self.logger.error(f"Pool {pool_id} does not exist")
            return []

    def get_all_pools(self) -> list[str]:
        """Get a list of all pool IDs."""
        return list(self.pools.keys())

    async def close_pool(self, pool_id: str) -> None:
        """Close all connections in a pool and remove the pool."""
        if pool_id in self.pools:
            for websocket in self.pools[pool_id]['connections'].values():
                await websocket.close()
            del self.pools[pool_id]
            self.logger.info(f"Closed and removed pool {pool_id}")
        else:
            self.logger.warning(f"Pool {pool_id} does not exist")

    async def close_all_pools(self) -> None:
        """Close all connections in all pools and remove all pools."""
        for pool_id in list(self.pools.keys()):
            await self.close_pool(pool_id)
        self.logger.info("Closed all pools")
add_connection(pool_id, connection_id, websocket) async

Add a WebSocket connection to a pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 546–552
async def add_connection(self, pool_id: str, connection_id: str, websocket) -> None:
    """Add a WebSocket connection to a pool."""
    if pool_id not in self.pools:
        await self.create_pool(pool_id)

    self.pools[pool_id]['connections'][connection_id] = websocket
    self.logger.info(f"Added connection {connection_id} to pool {pool_id}")
broadcast(pool_id, message, exclude_connection_id=None) async

Broadcast a message to all connections in a pool, optionally excluding one connection.

Source code in toolboxv2/mods/WebSocketManager.py, lines 602–613
async def broadcast(self, pool_id: str, message: str, exclude_connection_id: str = None) -> None:
    """Broadcast a message to all connections in a pool, optionally excluding one connection."""
    if pool_id not in self.pools:
        self.logger.error(f"Pool {pool_id} does not exist")
        return

    for conn_id, websocket in self.pools[pool_id]['connections'].items():
        if conn_id != exclude_connection_id:
            try:
                await websocket.send_text(message)
            except Exception as e:
                self.logger.error(f"Error sending message to {conn_id} in pool {pool_id}: {str(e)}")
close_all_pools() async

Close all connections in all pools and remove all pools.

Source code in toolboxv2/mods/WebSocketManager.py, lines 647–651
async def close_all_pools(self) -> None:
    """Close all connections in all pools and remove all pools."""
    for pool_id in list(self.pools.keys()):
        await self.close_pool(pool_id)
    self.logger.info("Closed all pools")
close_pool(pool_id) async

Close all connections in a pool and remove the pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 637–645
async def close_pool(self, pool_id: str) -> None:
    """Close all connections in a pool and remove the pool."""
    if pool_id in self.pools:
        for websocket in self.pools[pool_id]['connections'].values():
            await websocket.close()
        del self.pools[pool_id]
        self.logger.info(f"Closed and removed pool {pool_id}")
    else:
        self.logger.warning(f"Pool {pool_id} does not exist")
create_pool(pool_id) async

Create a new WebSocket pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 534–544
async def create_pool(self, pool_id: str) -> None:
    """Create a new WebSocket pool."""
    if pool_id not in self.pools:
        self.pools[pool_id] = {
            'connections': {},
            'actions': {},
            'global_actions': {}
        }
        self.logger.info(f"Created new pool: {pool_id}")
    else:
        self.logger.warning(f"Pool {pool_id} already exists")
get_all_pools()

Get a list of all pool IDs.

Source code in toolboxv2/mods/WebSocketManager.py, lines 633–635
def get_all_pools(self) -> list[str]:
    """Get a list of all pool IDs."""
    return list(self.pools.keys())
get_pool_connections(pool_id)

Get a list of all connection IDs in a pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 625–631
def get_pool_connections(self, pool_id: str) -> list[str]:
    """Get a list of all connection IDs in a pool."""
    if pool_id in self.pools:
        return list(self.pools[pool_id]['connections'].keys())
    else:
        self.logger.error(f"Pool {pool_id} does not exist")
        return []
handle_message(pool_id, connection_id, message) async

Handle incoming messages and route them to the appropriate action handler.

Source code in toolboxv2/mods/WebSocketManager.py, lines 579–600
async def handle_message(self, pool_id: str, connection_id: str, message: str) -> None:
    """Handle incoming messages and route them to the appropriate action handler."""
    if pool_id not in self.pools or connection_id not in self.pools[pool_id]['connections']:
        self.logger.error(f"Invalid pool_id or connection_id: {pool_id}, {connection_id}")
        return

    try:
        data = json.loads(message)
        action = data.get('action')

        if action:
            if action in self.pools[pool_id]['global_actions']:
                await self.pools[pool_id]['global_actions'][action](pool_id, connection_id, data)
            elif connection_id in self.pools[pool_id]['actions'] and action in self.pools[pool_id]['actions'][
                connection_id]:
                await self.pools[pool_id]['actions'][connection_id][action](pool_id, connection_id, data)
            else:
                self.logger.warning(f"No handler found for action {action} in pool {pool_id}")
        else:
            self.logger.warning(f"No action specified in message from {connection_id} in pool {pool_id}")
    except json.JSONDecodeError:
        self.logger.error(f"Invalid JSON received from {connection_id} in pool {pool_id}")
register_action(pool_id, action_name, handler, connection_ids=None)

Register an action for specific connections or the entire pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 562–577
def register_action(self, pool_id: str, action_name: str, handler: Callable,
                    connection_ids: list[str] = None) -> None:
    """Register an action for specific connections or the entire pool."""
    if pool_id not in self.pools:
        self.logger.error(f"Pool {pool_id} does not exist")
        return

    if connection_ids is None:
        self.pools[pool_id]['global_actions'][action_name] = handler
        self.logger.info(f"Registered global action {action_name} for pool {pool_id}")
    else:
        for conn_id in connection_ids:
            if conn_id not in self.pools[pool_id]['actions']:
                self.pools[pool_id]['actions'][conn_id] = {}
            self.pools[pool_id]['actions'][conn_id][action_name] = handler
        self.logger.info(f"Registered action {action_name} for connections {connection_ids} in pool {pool_id}")
remove_connection(pool_id, connection_id) async

Remove a WebSocket connection from a pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 554–560
async def remove_connection(self, pool_id: str, connection_id: str) -> None:
    """Remove a WebSocket connection from a pool."""
    if pool_id in self.pools and connection_id in self.pools[pool_id]['connections']:
        del self.pools[pool_id]['connections'][connection_id]
        self.logger.info(f"Removed connection {connection_id} from pool {pool_id}")
    else:
        self.logger.warning(f"Connection {connection_id} not found in pool {pool_id}")
send_to_connection(pool_id, connection_id, message) async

Send a message to a specific connection in a pool.

Source code in toolboxv2/mods/WebSocketManager.py, lines 615–623
async def send_to_connection(self, pool_id: str, connection_id: str, message: str) -> None:
    """Send a message to a specific connection in a pool."""
    if pool_id in self.pools and connection_id in self.pools[pool_id]['connections']:
        try:
            await self.pools[pool_id]['connections'][connection_id].send_text(message)
        except Exception as e:
            self.logger.error(f"Error sending message to {connection_id} in pool {pool_id}: {str(e)}")
    else:
        self.logger.error(f"Connection {connection_id} not found in pool {pool_id}")

WhatsAppTb

client

DocumentSystem
Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 63–139
class DocumentSystem:
    def __init__(self, storage: BlobStorage):
        self.storage = storage
        self.media_types = {
            'document': ['pdf', 'doc', 'docx', 'txt'],
            'image': ['jpg', 'jpeg', 'png', 'gif'],
            'video': ['mp4', 'mov', 'avi']
        }

    def list_documents(self, filter_type: str = None) -> list[dict]:
        """List all documents with metadata"""
        docs = []
        for blob_id in self.storage._get_all_blob_ids():
            with BlobFile(blob_id, 'r', self.storage) as f:
                metadata = f.read_json()
                if metadata:
                    docs.append({
                        'id': blob_id,
                        'name': metadata.get('filename', blob_id),
                        'type': metadata.get('type', 'document'),
                        'size': metadata.get('size', 0),
                        'modified': metadata.get('timestamp', ''),
                        'preview': metadata.get('preview', '')
                    })
        if filter_type:
            return [d for d in docs if d['type'] == filter_type]
        return docs

    def save_document(self, file_data: bytes, filename: str, file_type: str) -> str:
        """Save a document with metadata"""
        blob_id = self.storage._generate_blob_id()
        metadata = {
            'filename': filename,
            'type': file_type,
            'size': len(file_data),
            'timestamp': datetime.now().isoformat(),
            'preview': self._generate_preview(file_data, file_type)
        }

        with BlobFile(blob_id, 'w', self.storage) as f:
            f.write_json(metadata)
            f.write(file_data)
        return blob_id

    def delete_document(self, blob_id: str) -> bool:
        """Delete a document"""
        try:
            self.storage.delete_blob(blob_id)
            return True
        except Exception as e:
            logging.error(f"Delete failed: {str(e)}")
            return False

    def search_documents(self, query: str) -> list[dict]:
        """Search documents by filename or content"""
        results = []
        for doc in self.list_documents():
            if query.lower() in doc['name'].lower() or self._search_in_content(doc['id'], query):
                results.append(doc)
        return results

    def _generate_preview(self, data: bytes, file_type: str) -> str:
        """Generate preview based on file type"""
        if file_type in self.media_types['image']:
            return f"Image preview: {data[:100].hex()}"
        elif file_type in self.media_types['video']:
            return "Video preview unavailable"
        return data[:100].decode('utf-8', errors='ignore')

    def _search_in_content(self, blob_id: str, query: str) -> bool:
        """Search content within documents"""
        try:
            with BlobFile(blob_id, 'r', self.storage) as f:
                content = f.read().decode('utf-8', errors='ignore')
                return query.lower() in content.lower()
        except:
            return False
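
A hedged usage sketch follows. BlobStorage and BlobFile come from the toolboxv2 blob subsystem, and DocumentSystem relies on private helpers of the storage you pass in (_get_all_blob_ids, _generate_blob_id), so the constructor call below is an assumption.

# Hypothetical sketch; file contents and names are illustrative.
docs = DocumentSystem(BlobStorage())

blob_id = docs.save_document(b"quarterly report text", "report.txt", "txt")

for doc in docs.list_documents(filter_type="txt"):
    print(doc["name"], doc["size"], doc["modified"])

hits = docs.search_documents("quarterly")  # matches filename or content
docs.delete_document(blob_id)              # returns False and logs on failure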
delete_document(blob_id)

Delete a document

Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 107–114
def delete_document(self, blob_id: str) -> bool:
    """Delete a document"""
    try:
        self.storage.delete_blob(blob_id)
        return True
    except Exception as e:
        logging.error(f"Delete failed: {str(e)}")
        return False
list_documents(filter_type=None)

List all documents with metadata

Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 72–89
def list_documents(self, filter_type: str = None) -> list[dict]:
    """List all documents with metadata"""
    docs = []
    for blob_id in self.storage._get_all_blob_ids():
        with BlobFile(blob_id, 'r', self.storage) as f:
            metadata = f.read_json()
            if metadata:
                docs.append({
                    'id': blob_id,
                    'name': metadata.get('filename', blob_id),
                    'type': metadata.get('type', 'document'),
                    'size': metadata.get('size', 0),
                    'modified': metadata.get('timestamp', ''),
                    'preview': metadata.get('preview', '')
                })
    if filter_type:
        return [d for d in docs if d['type'] == filter_type]
    return docs
save_document(file_data, filename, file_type)

Save a document with metadata

Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 91–105
def save_document(self, file_data: bytes, filename: str, file_type: str) -> str:
    """Save a document with metadata"""
    blob_id = self.storage._generate_blob_id()
    metadata = {
        'filename': filename,
        'type': file_type,
        'size': len(file_data),
        'timestamp': datetime.now().isoformat(),
        'preview': self._generate_preview(file_data, file_type)
    }

    with BlobFile(blob_id, 'w', self.storage) as f:
        f.write_json(metadata)
        f.write(file_data)
    return blob_id
search_documents(query)

Search documents by filename or content

Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 116–122
def search_documents(self, query: str) -> list[dict]:
    """Search documents by filename or content"""
    results = []
    for doc in self.list_documents():
        if query.lower() in doc['name'].lower() or self._search_in_content(doc['id'], query):
            results.append(doc)
    return results
WhatsAppAssistant dataclass
Source code in toolboxv2/mods/WhatsAppTb/client.py, lines 141–1570
@dataclass
class WhatsAppAssistant:
    whc: WhClient
    isaa: 'Tools'
    agent: Optional['Agent'] = None
    credentials: Credentials | None = None
    state: AssistantState = AssistantState.OFFLINE

    # Service clients
    gmail_service: Any = None
    calendar_service: Any = None

    start_time: Any = None

    blob_docs_system: Any = None
    duration_minutes: int = 20
    credentials_path: str = "/root/Toolboxv2/credentials.json"
    # Progress messengers
    progress_messengers: dict[str, 'ProgressMessenger'] = field(default_factory=dict)
    buttons: dict[str, dict] = field(default_factory=dict)
    history: FileCache = field(default_factory=FileCache)

    pending_actions: dict[str, dict] = field(default_factory=dict)


    def __post_init__(self):

        self.start_time = datetime.now()
        self.processed_messages = set()
        self.message_lock = threading.Lock()
        self.audio_processor = None
        self.blob_docs_system = DocumentSystem(BlobStorage())
        self.stt = get_app().run_any(TBEF.AUDIO.STT_GENERATE,
                                     model="openai/whisper-small",
                                     row=False, device=1)

        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}

        self.load_credentials()
        self.setup_progress_messengers()
        self.setup_interaction_buttons()
        self.history = FileCache(folder=".data/WhatsAppAssistant")
        self.state = AssistantState.ONLINE

    async def generate_authorization_url(self, *a):
        """
        Generate an authorization URL for user consent

        :return: Authorization URL for the user to click and authorize access
        """
        from google_auth_oauthlib.flow import Flow
        # Define the scopes required for Gmail and Calendar
        SCOPES = [
            'https://www.googleapis.com/auth/gmail.modify',
            'https://www.googleapis.com/auth/calendar'
        ]

        # Create a flow instance to manage the OAuth 2.0 authorization process
        flow = Flow.from_client_secrets_file(
            self.credentials_path,
            scopes=SCOPES,
            redirect_uri='urn:ietf:wg:oauth:2.0:oob'  # Use 'urn:ietf:wg:oauth:2.0:oob' for desktop apps
        )

        # Generate the authorization URL
        authorization_url, _ = flow.authorization_url(
            access_type='offline',  # Allows obtaining refresh token
            prompt='consent'  # Ensures user is always prompted for consent
        )
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'auth',
                                                                              'step': 'awaiting_key'}
        return {
            'type': 'quick_reply',
            'text': f'Url to log in {authorization_url}',
            'options': {'cancel': '❌ Cancel Upload'}
        }

    def complete_authorization(self, message: Message):
        """
        Complete the authorization process using the authorization code

        :param authorization_code: Authorization code received from Google
        """
        from google_auth_oauthlib.flow import Flow
        authorization_code = message.content
        # Define the scopes required for Gmail and Calendar
        SCOPES = [
            'https://www.googleapis.com/auth/gmail.modify',
            'https://www.googleapis.com/auth/calendar'
        ]

        # Create a flow instance to manage the OAuth 2.0 authorization process
        flow = Flow.from_client_secrets_file(
            self.credentials_path,
            scopes=SCOPES,
            redirect_uri='urn:ietf:wg:oauth:2.0:oob'
        )

        # Exchange the authorization code for credentials
        flow.fetch_token(code=authorization_code)
        self.credentials = flow.credentials

        # Save the credentials for future use
        self.save_credentials()

        # Initialize services
        self.init_services()
        return "Done"


    def save_credentials(self):
        """
        Save the obtained credentials to a file for future use
        """
        if not os.path.exists('token'):
            os.makedirs('token')

        with open('token/google_token.json', 'w') as token_file:
            token_file.write(self.credentials.to_json())


    def load_credentials(self):
        """
        Load previously saved credentials if available

        :return: Whether credentials were successfully loaded
        """
        try:
            self.credentials = Credentials.from_authorized_user_file('token/google_token.json')
            self.init_services()
            return True
        except FileNotFoundError:
            return False


    def init_services(self):
        """
        Initialize Gmail and Calendar services
        """
        from googleapiclient.discovery import build

        self.gmail_service = build('gmail', 'v1', credentials=self.credentials)
        self.calendar_service = build('calendar', 'v3', credentials=self.credentials)
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}

    def setup_progress_messengers(self):
        """Initialize progress messengers for different types of tasks"""
        self.progress_messengers = {
            'task': self.whc.progress_messenger0,
            'email': self.whc.progress_messenger1,
            'calendar': self.whc.progress_messenger2
        }

    def setup_interaction_buttons(self):
        """Define WhatsApp interaction buttons for different functionalities"""
        self.buttons = {
            'menu': {
                'header': 'Digital Assistant',
                'body': 'Please select an option:',
                'footer': '-- + --',
                'action': {
                    'button': 'Menu',
                    'sections': [
                        {
                            'title': 'Main Functions',
                            'rows': [
                                {'id': 'agent', 'title': 'Agent Controls', 'description': 'Manage your AI assistant'},
                                {'id': 'email', 'title': 'Email Management', 'description': 'Handle your emails'},
                                {'id': 'calendar', 'title': 'Calendar', 'description': 'Manage your schedule'},
                                {'id': 'docs', 'title': 'Documents', 'description': 'Handle documents'},
                                {'id': 'system', 'title': 'System', 'description': 'System controls and metrics'}
                            ]
                        }
                    ]
                }
            },
            'agent': self._create_agent_controls_buttons(),
            'email': self._create_email_controls_buttons(),
            'calendar': self._create_calendar_controls_buttons(),
            'docs': self._create_docs_controls_buttons(),
            'system': self._create_system_controls_buttons()
        }

    @staticmethod
    def _create_agent_controls_buttons():
        return {
            'header': 'Agent Controls',
            'body': 'Manage your AI assistant:',
            'action': {
                'button': 'Select',
                'sections': [
                    {
                        'title': 'Basic Actions',
                        'rows': [
                            {'id': 'agent-task', 'title': 'Agent Task', 'description': 'Run the agent'},
                            {'id': 'start', 'title': 'Start Agent', 'description': 'Run taskstack in background'},
                            {'id': 'stop', 'title': 'Stop Agent', 'description': 'Stop taskstack execution'}
                        ]
                    },
                    {
                        'title': 'Advanced Actions',
                        'rows': [
                            {'id': 'system-task', 'title': 'System Task',
                             'description': 'Run the Isaa Reasoning Agent system'},
                            {'id': 'tasks', 'title': 'Task Stack', 'description': 'View and manage tasks'},
                            {'id': 'memory', 'title': 'Clear Memory', 'description': 'Reset agent memory'}
                        ]
                    }
                ]
            }
        }

    @staticmethod
    def _create_email_controls_buttons():
        return {
            'header': 'Email Management',
            'body': 'Handle your emails:',
            'action': {
                'button': 'Select',
                'sections': [
                    {
                        'title': 'Basic Actions',
                        'rows': [
                            {'id': 'check', 'title': 'Check Emails', 'description': 'View recent emails'},
                            {'id': 'send', 'title': 'Send Email', 'description': 'Compose new email'},
                            {'id': 'summary', 'title': 'Get Summary', 'description': 'Summarize emails'}
                        ]
                    },
                    {
                        'title': 'Advanced Actions',
                        'rows': [
                            {'id': 'search', 'title': 'Search', 'description': 'Search emails'}
                        ]
                    }
                ]
            }
        }

    @staticmethod
    def _create_calendar_controls_buttons():
        return {
            'header': 'Calendar Management',
            'body': 'Manage your schedule:',
            'action': {
                'button': 'Select',
                'sections': [
                    {
                        'title': 'Basic Actions',
                        'rows': [
                            {'id': 'today', 'title': 'Today\'s Events', 'description': 'View today\'s schedule'},
                            {'id': 'add', 'title': 'Add Event', 'description': 'Create new event'},
                            {'id': 'upcoming', 'title': 'Upcoming', 'description': 'View upcoming events'}
                        ]
                    },
                    {
                        'title': 'Advanced Actions',
                        'rows': [
                            {'id': 'find_slot', 'title': 'Find Time Slot', 'description': 'Find available time'}
                        ]
                    }
                ]
            }
        }

    @staticmethod
    def _create_docs_controls_buttons():
        return {
            'header': 'Document Management',
            'body': 'Handle your documents:',
            'action': {
                'button': 'Select',
                'sections': [
                    {
                        'title': 'Basic Actions',
                        'rows': [
                            {'id': 'upload', 'title': 'Upload', 'description': 'Add new document'},
                            {'id': 'list', 'title': 'List Documents', 'description': 'View all documents'},
                            {'id': 'search', 'title': 'Search', 'description': 'Search documents'}
                        ]
                    },
                    {
                        'title': 'Advanced Actions',
                        'rows': [
                            {'id': 'delete', 'title': 'Delete', 'description': 'Remove document'}
                        ]
                    }
                ]
            }
        }

    @staticmethod
    def _create_system_controls_buttons():
        return {
            'header': 'System Controls',
            'body': 'System management:',
            'action': {
                'button': 'Select',
                'sections': [
                    {
                        'title': 'Basic Actions',
                        'rows': [
                            {'id': 'status', 'title': 'System Status', 'description': 'View current status'},
                            {'id': 'restart', 'title': 'Restart', 'description': 'Restart system'},
                            {'id': 'connect', 'title': 'Connect', 'description': 'Connect to Google Calendar and Email'}
                        ]
                    }
                ]
            }
        }

    async def handle_message(self, message: 'Message'):
        """Main message handler for incoming WhatsApp messages"""

        # Deduplication check
        with self.message_lock:
            if any(m_id == message.id for m_id, _ in self.processed_messages):
                return
            # Drop events older than two minutes relative to the newest processed message
            last_ts = max((ts for _, ts in self.processed_messages), default=time.time())
            msg_ts = float(message.data.get('entry', [{}])[0].get('changes', [{}])[0]
                           .get('value', {}).get('messages', [{}])[0].get('timestamp', 0))
            if msg_ts < last_ts - 120:
                return
            self.processed_messages.add((message.id, time.time()))

        # Mark message as read
        message.mark_as_read()

        # Extract content and type
        content_type = message.type
        content = message.content

        print(f"message.content {content=} {content_type=} {message.data=}")

        try:
            if content_type == 'interactive':
                await self.handle_interactive(message)
            elif content_type == 'audio':
                await self.handle_audio_message(message)
            elif content_type in ['document', 'image', 'video']:
                response = await self.handle_media_message(message)
                self.save_reply(message, response)
            elif content_type == 'text':
                if content.lower() == "menu":
                    self.whc.messenger.send_button(
                        recipient_id=self.whc.progress_messenger0.recipient_phone,
                        button=self.buttons[content.lower()]
                    )
                else:
                    await self.helper_text(message)
            else:
                message.reply("Unsupported message type")
        except Exception as e:
            logging.error(f"Message handling error: {str(e)}")
            message.reply("❌ Error processing request")
        finally:
            # Cleanup old messages (keep 1 hour history)
            with self.message_lock:
                self._clean_processed_messages()

    async def helper_text(self, message: 'Message', return_text=False):
        if not isinstance(message.content, str) or len(message.content) == 0:
            content = self.whc.messenger.get_message(message.data)
            print(f"contents {content=}, {message.content=}")
            message.content = content
        self.history.set(message.id, message.content)
        if self.pending_actions[self.whc.progress_messenger0.recipient_phone]:
            message.reply(
                f"Open Interaction : {json.dumps(self.pending_actions[self.whc.progress_messenger0.recipient_phone], indent=2)}")
            if self.pending_actions[self.whc.progress_messenger0.recipient_phone].get('type') == 'auth':
                res = self.complete_authorization(message)
                self.save_reply(message, res)
                return
            res = await self.handle_calendar_actions(message)
            if res:
                self.save_reply(message, res)
                return
            res2 = await self.handle_email_actions(message)
            if res2:
                self.save_reply(message, res2)
                return
            await self.handle_agent_actions(message)
            return
        await self.handle_agent_actions(message)

    async def handle_interactive(self, message: Message):
        """Handle all interactive messages"""
        content = self.whc.messenger.get_interactive_response(message.data)
        if content.get("type") == "list_reply":
            await self.handle_button_interaction(content.get("list_reply"), message)
        elif content.get("type") == "button_reply":
            print(content)

    async def handle_audio_message(self, message: 'Message'):
        """Process audio messages with STT and TTS"""
        # Download audio
        progress = self.progress_messengers['task']
        stop_flag = threading.Event()
        # message_id = progress.send_initial_message(mode="loading")
        progress.message_id = message.id
        progress.start_loading_in_background(stop_flag)

        content = self.whc.messenger.get_audio(message.data)
        audio_file_name = self.whc.messenger.download_media(media_url=self.whc.messenger.query_media_url(media_id=content.get('id')), mime_type='audio/opus', file_path=".data/temp")
        print(f"audio_file_name {audio_file_name}")
        if audio_file_name is None:
            message.reply("Could not process audio file")
            stop_flag.set()
            return

        text = self.stt(audio_file_name)['text']
        if not text:
            message.reply("Could not process audio")
            stop_flag.set()
            return

        message.reply("Transcription :\n "+ text)
        message.content = text
        agent_res = await self.helper_text(message, return_text=True)

        if agent_res is not None:
            pass

        stop_flag.set()
        # Process text and get response
        # response = await self.process_input(text, message)

        # Convert response to audio
        #audio_file = self.audio_processor.tts(response)
        #audio_file = None # TODO
        #self.whc.messenger.send_audio(
        #    audio=audio_file,
        #    recipient_id=self.whc.progress_messenger0.recipient_phone,
        #)

    async def confirm(self, message: Message):
        status = self.pending_actions[self.whc.progress_messenger0.recipient_phone]
        if status.get('type') == "create_event":
            if status.get('step') == "confirm_envet":
                event = self._create_calendar_event(status.get('event_data'))
                self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                return f"✅ Event created!\n{event.get('htmlLink')}"
            return "❌"
        elif status.get('type') == "compose_email":
            if status.get('step') == "confirm_email":
                # Send email
                result = self.gmail_service.users().messages().send(
                    userId='me',
                    body=self._build_email_draft(status['draft'])
                ).execute()
                self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                return f"✅ Email sent! Message ID: {result['id']}"
            return "❌"
        return "❌ Done"

    async def cancel(self, *a):
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
        return "✅ cancel Done"

    async def handle_button_interaction(self, content: dict, message: Message):
        """Handle button click interactions"""
        button_id = content['id']

        # First check if it's a main menu button
        if button_id in self.buttons:
            self.whc.messenger.send_button(
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                button=self.buttons[button_id]
            )
            return

        # Handle action buttons
        action_handlers = {
            # Agent controls
            'start': self.start_agent,
            'stop': self.stop_agent,
            'tasks': self.show_task_stack,
            'memory': self.clear_memory,
            'system-task': self.system_task,
            'agent-task': self.agent_task,

            # Email controls
            'check': self.check_emails,
            'send': self.start_email_compose,
            'summary': self.email_summary,
            'search': self.email_search,

            # Calendar controls
            'today': self.show_today_events,
            'add': self.start_event_create,
            'upcoming': self.show_upcoming_events,
            'find_slot': self.find_time_slot,

            # Document controls
            'upload': self.start_document_upload,
            'list': self.list_documents,
            'search_docs': self.search_documents,
            'delete': self.delete_document,

            # System controls
            'status': self.system_status,
            'restart': self.restart_system,
            'connect': self.generate_authorization_url,

            'cancel': self.cancel,
            'confirm': self.confirm,
        }
        if button_id in action_handlers:
            try:
                # Start progress indicator
                progress = self.progress_messengers['task']
                stop_flag = threading.Event()
                # message_id = progress.send_initial_message(mode="loading")
                progress.message_id = message.id
                progress.start_loading_in_background(stop_flag)

                # Execute handler
                result = await action_handlers[button_id](message)

                # Send result
                if isinstance(result, str):
                    self.save_reply(message, result)
                elif isinstance(result, dict):  # For structured responses
                    self.send_structured_response(result)
            except Exception as e:
                message.reply(f"❌ Error processing {button_id}: {str(e)}")
            finally:
                stop_flag.set()
        elif 'event_' in button_id:
            res = await self.get_event_details(button_id.replace("event_", ''))
            if isinstance(res, str):
                self.save_reply(message, res)
                return
            for r in res:
                if isinstance(r, str):
                    self.save_reply(message, r)
                else:
                    self.whc.messenger.send_location(**r)

        elif 'email_' in button_id:
            res = await self.get_email_details(button_id.replace("email_", ''))
            self.save_reply(message, res)
        else:
            message.reply("⚠️ Unknown command")

    def send_structured_response(self, result: dict):
        """Send complex responses using appropriate WhatsApp features"""
        if result['type'] == 'list':
            self.whc.messenger.send_button(
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                button={
                    'header': result.get('header', ''),
                    'body': result.get('body', ''),
                    'footer': result.get('footer', ''),
                    'action': {
                        'button': 'Action',
                        'sections': result['sections']
                    }
                }
            )
        elif result['type'] == 'quick_reply':
            self.whc.messenger.send_button(
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                button={
                    'header': "Quick reply",
                    'body': result['text'],
                    'footer': '',
                    'action': {'button': 'Action', 'sections': [{
                        'title': 'View',
                        'rows': [{'id': k, 'title': v[:23]} for k, v in result['options'].items()]
                    }]}
                }
            )

        elif result['type'] == 'media':
            if result['media_type'] == 'image':
                self.whc.messenger.send_image(
                    image=result['url'],
                    recipient_id=self.whc.progress_messenger0.recipient_phone,
                    caption=result.get('caption', '')
                )
            elif result['media_type'] == 'document':
                self.whc.messenger.send_document(
                    document=result['url'],
                    recipient_id=self.whc.progress_messenger0.recipient_phone,
                    caption=result.get('caption', '')
                )

    async def clear_memory(self, message):
        self.agent.reset_context()
        self.agent.taskstack.tasks = []
        return "🧠 Memory cleared successfully"

    async def system_task(self, message):
        """Initiate email search workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
            'type': 'system',
            'step': 'await_query'
        }
        return {
            'type': 'quick_reply',
            'text': "Now prompt the 🧠ISAA-System 📝",
            'options': {'cancel': '❌ Cancel Search'}
        }

    async def agent_task(self, message):
        """Initiate email search workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
            'type': 'self-agent',
            'step': 'await_query'
        }
        return {
            'type': 'quick_reply',
            'text': "Now prompt the self-agent 📝",
            'options': {'cancel': '❌ Cancel Search'}
        }

    async def check_emails(self, message, query=""):
        """Improved email checking with WhatsApp API formatting"""
        if not self.gmail_service:
            return "⚠️ Gmail service not configured"

        try:
            results = self.gmail_service.users().messages().list(
                userId='me',
                maxResults=10,
                labelIds=['INBOX'],
                q=query
            ).execute()

            emails = []
            for msg in results.get('messages', [])[:10]:
                email_data = self.gmail_service.users().messages().get(
                    userId='me',
                    id=msg['id'],
                    format='metadata'
                ).execute()

                headers = {h['name']: h['value'] for h in email_data['payload']['headers']}
                emails.append({
                    'id': msg['id'],
                    'from': headers.get('From', 'Unknown'),
                    'subject': headers.get('Subject', 'No Subject'),
                    'date': headers.get('Date', 'Unknown'),
                    'snippet': email_data.get('snippet', ''),
                    'unread': 'UNREAD' in email_data.get('labelIds', [])
                })

            return {
                'type': 'list',
                'header': '📨 Recent Emails',
                'body': 'Tap to view full email',
                'footer': 'Email Manager',
                'sections': [{
                    'title': f"Inbox ({len(emails)} emails)",
                    'rows': [{
                        'id': f"email_{email['id']}",
                        'title': f"{'📬' if email['unread'] else '📭'} {email['subject']}"[:23],
                        'description': f"From: {email['from']}\n{email['snippet']}"[:45]
                    } for email in emails]
                }]
            }
        except Exception as e:
            return f"⚠️ Error fetching emails: {str(e)}"

    async def get_email_details(self, email_id):
        """Retrieve and format full email details"""
        if not self.gmail_service:
            return "⚠️ Gmail service not configured"

        try:
            email_data = self.gmail_service.users().messages().get(
                userId='me',
                id=email_id,
                format='full'
            ).execute()

            headers = {h['name']: h['value'] for h in email_data['payload']['headers']}
            body = ""
            for part in email_data.get('payload', {}).get('parts', []):
                if part['mimeType'] == 'text/plain':
                    body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
                    break

            formatted_text = (
                f"📧 *Email Details*\n\n"
                f"From: {headers.get('From', 'Unknown')}\n"
                f"Subject: {headers.get('Subject', 'No Subject')}\n"
                f"Date: {headers.get('Date', 'Unknown')}\n\n"
                f"{body[:15000]}{'...' if len(body) > 15000 else ''}"
            )
            return self.agent.mini_task(
                formatted_text, "system", "Summarize the email in bullet points with key details"
            )
        except Exception as e:
            return f"⚠️ Error fetching email: {str(e)}"

    async def email_summary(self, message):
        """Generate AI-powered email summaries"""
        try:
            messages = self.gmail_service.users().messages().list(
                userId='me',
                maxResults=3,
                labelIds=['INBOX']
            ).execute().get('messages', [])

            email_contents = []
            for msg in messages[:3]:
                email_data = self.gmail_service.users().messages().get(
                    userId='me',
                    id=msg['id'],
                    format='full'
                ).execute()
                email_contents.append(self._parse_email_content(email_data))

            summary = self.agent.mini_task(
                "\n\n".join(email_contents) , "system", "Summarize these emails in bullet points with key details:"
            )

            return f"📋 Email Summary:\n{summary}\n\n*Powered by AI*"
        except Exception as e:
            logging.error(f"Summary failed: {str(e)}")
            return f"❌ Could not generate summary: {str(e)}"

    async def email_search(self, message):
        """Initiate email search workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
            'type': 'email_search',
            'step': 'await_query'
        }
        return {
            'type': 'quick_reply',
            'text': "🔍 What would you like to search for?",
            'options': {'cancel': '❌ Cancel Search'}
        }

    async def start_email_compose(self, message):
        """Enhanced email composition workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
            'type': 'compose_email',
            'step': 'subject',
            'draft': {'attachments': []}
        }
        return {
            'type': 'quick_reply',
            'text': "📝 Let's compose an email\n\nSubject:",
            'options': {'cancel': '❌ Cancel Composition'}
        }

    async def handle_email_actions(self, message):
        """Handle multi-step email workflows"""
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('type') == 'compose_email':
            return await self._handle_email_composition(message, user_state)
        if user_state.get('type') == 'email_search':
            return await self.check_emails(message, self.agent.mini_task("""Conventire Pezise zu einer googel str only query using : Gmail Suchoperatoren!

Basis-Operatoren:
- from: Absender
- to: Empfänger
- subject: Betreff
- label: Gmail Label
- has:attachment Anhänge
- newer_than:7d Zeitfilter
- before: Datum vor
- after: Datum nach

Erweiterte Operatoren:
- in:inbox
- in:sent
- in:spam
- cc: Kopie
- bcc: Blindkopie
- is:unread
- is:read
- larger:10M Größenfilter
- smaller:5M
- filename:pdf Dateityp

Profi-Tipps:
- Kombinierbar mit UND/ODER
- Anführungszeichen für exakte Suche
- Negation mit -
 beispeile : 'Ungelesene Mails letzte Woche': -> 'is:unread newer_than:7d'

""", "user",message.content))


        return None

    async def _handle_email_composition(self, message, state):
        if state['step'] == 'subject':
            state['draft']['subject'] = message.content
            state['step'] = 'body'
            return {
                'type': 'quick_reply',
                'text': "✍️ Email body:",
                'options': {'attach': '📎 Add Attachment', 'send': '📤 Send Now'}
            }

        elif state['step'] == 'body':
            if message.content == 'attach':
                state['step'] = 'attachment'
                return "📎 Please send the file you want to attach"

            state['draft']['body'] = message.content
            state['step'] = 'confirm_email'
            return {
                'type': 'quick_reply',
                'text': f"📧 Ready to send?\n\nSubject: {state['draft']['subject']}\n\n{state['draft']['body']}",
                'options': {'confirm': '✅ Send', 'cancel': '❌ cancel'}
            }

        elif state['step'] == 'attachment':
            # Handle attachment upload
            file_type = message.type
            if file_type not in ['document', 'image']:
                return "❌ Unsupported file type"

            content = (self.whc.messenger.get_document(message.data)
                       if file_type == 'document'
                       else self.whc.messenger.get_image(message.data))
            media_data = self.whc.messenger.download_media(
                media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
                mime_type=content.get('mime_type'),
                file_path=".data/temp")
            state['draft']['attachments'].append(media_data)
            state['step'] = 'body'
            return "📎 Attachment added! Add more or send the email"


    def _parse_email_content(self, email_data):
        """Extract readable content from email payload"""
        parts = email_data.get('payload', {}).get('parts', [])
        body = ""
        for part in parts:
            if part['mimeType'] == 'text/plain':
                body += base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
        return f"Subject: {email_data.get('subject', '')}\nFrom: {email_data.get('from', '')}\n\n{body}"

    def _build_email_draft(self, draft):
        """Create MIME message from draft data"""
        message = MIMEMultipart()
        message['to'] = draft.get('to', '')
        message['subject'] = draft['subject']
        message.attach(MIMEText(draft['body']))

        for attachment in draft['attachments']:
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(attachment)
            encoders.encode_base64(part)
            part.add_header('Content-Disposition', 'attachment')
            message.attach(part)

        return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

    def _get_email_subject(self, msg):
        headers = msg.get('payload', {}).get('headers', [])
        return next((h['value'] for h in headers if h['name'] == 'Subject'), 'No Subject')

    def _get_email_sender(self, msg):
        headers = msg.get('payload', {}).get('headers', [])
        return next((h['value'] for h in headers if h['name'] == 'From'), 'Unknown Sender')

    def _get_email_snippet(self, msg):
        return msg.get('snippet', '')[:100] + '...'
    # Calendar Handlers

    # Calendar Functions
    def _format_event_time(self, event):
        """Improved time formatting for calendar events"""
        start = event['start'].get('dateTime', event['start'].get('date'))
        end = event['end'].get('dateTime', event['end'].get('date'))

        try:
            start_dt = parser.parse(start)
            end_dt = parser.parse(end)
            if 'T' in start:
                return f"{start_dt.strftime('%a %d %b %H:%M')} - {end_dt.strftime('%H:%M')}"
            return f"{start_dt.strftime('%d %b %Y')} (All Day)"
        except (ValueError, TypeError):
            return "Time not specified"

    async def get_event_details(self, event_id):
        """Retrieve and format calendar event details with location support"""
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            event = self.calendar_service.events().get(
                calendarId='primary',
                eventId=event_id
            ).execute()

            response = [ (
                    f"📅 *Event Details*\n\n"
                    f"Title: {event.get('summary', 'No title')}\n"
                    f"Time: {self._format_event_time(event)}\n"
                    f"Location: {event.get('location', 'Not specified')}\n\n"
                    f"{event.get('description', 'No description')[:1000]}"
                )]

            if 'geo' in event:
                response.append({
                    'lat': float(event['geo']['latitude']),
                    'long': float(event['geo']['longitude']),
                    'name': event.get('location', 'Event Location'),
                    'address': event.get('location', ''),
                    'recipient_id': self.whc.progress_messenger0.recipient_phone
                })
            return response
        except Exception as e:
            return f"⚠️ Error fetching event: {str(e)}"

    async def show_today_events(self, message):
        """Show today's calendar events"""
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        now = datetime.utcnow().isoformat() + 'Z'
        end_of_day = (datetime.now() + timedelta(days=1)).replace(
            hour=0, minute=0, second=0).isoformat() + 'Z'

        events_result = self.calendar_service.events().list(
            calendarId='primary',
            timeMin=now,
            timeMax=end_of_day,
            singleEvents=True,
            orderBy='startTime'
        ).execute()

        events = events_result.get('items', [])
        return self._format_calendar_response(events, "Today's Events")

    # Updated Calendar List Handlers
    async def show_upcoming_events(self, message):
        """Show upcoming events with interactive support"""
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            now = datetime.utcnow().isoformat() + 'Z'
            next_week = (datetime.now() + timedelta(days=7)).isoformat() + 'Z'

            events_result = self.calendar_service.events().list(
                calendarId='primary',
                timeMin=now,
                timeMax=next_week,
                singleEvents=True,
                orderBy='startTime',
                maxResults=10
            ).execute()

            events = events_result.get('items', [])
            return self._format_calendar_response(events, "Upcoming Events")
        except Exception as e:
            return f"⚠️ Error fetching events: {str(e)}"

    async def start_event_create(self, message):
        """Initiate event creation workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
            'type': 'create_event',
            'step': 'title',
            'event_data': {}
        }
        return {
            'type': 'quick_reply',
            'text': "Let's create an event! What's the title?",
            'options': {'cancel': '❌ Cancel'}
        }

    async def find_time_slot(self, message):
        """Find and display the next 5 available time slots with dynamic durations"""
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            # Define the time range for the search (next 24 hours)
            now = datetime.now(UTC)
            end_time = now + timedelta(days=1)

            # FreeBusy Request
            freebusy_request = {
                "timeMin": now.isoformat(),
                "timeMax": end_time.isoformat(),
                "items": [{"id": 'primary'}]
            }

            freebusy_response = self.calendar_service.freebusy().query(body=freebusy_request).execute()
            busy_slots = freebusy_response['calendars']['primary']['busy']

            # Slot calculation
            available_slots = self._calculate_efficient_slots(
                busy_slots,
                self.duration_minutes
            )

            # Format the response for WhatsApp
            return {
                'type': 'list',
                'header': "⏰ Available Time Slots",
                'body': "Tap to select a time slot",
                'footer': "Time Slot Finder",
                'sections': [{
                    'title': "Next 5 Available Slots",
                    'rows': [{
                        'id': f"slot_{slot['start'].timestamp()}",
                        'title': f"🕒 {slot['start'].strftime('%H:%M')} - {slot['end'].strftime('%H:%M')}",
                        'description': f"Duration: {slot['duration']}"
                    } for slot in available_slots[:5]]
                }]
            }
        except Exception as e:
            return f"⚠️ Error finding time slots: {str(e)}"

    def _calculate_efficient_slots(self, busy_slots, duration_minutes):
        """Effiziente Slot-Berechnung"""
        available_slots = []
        current = datetime.now(UTC)
        end_time = current + timedelta(days=1)

        while current < end_time:
            slot_end = current + timedelta(minutes=duration_minutes)

            if slot_end > end_time:
                break

            is_available = all(
                slot_end <= parser.parse(busy['start']) or
                current >= parser.parse(busy['end'])
                for busy in busy_slots
            )

            if is_available:
                available_slots.append({
                    'start': current,
                    'end': slot_end,
                    'duration': f"{duration_minutes} min"
                })
                current = slot_end
            else:
                current += timedelta(minutes=15)

        return available_slots

    async def handle_calendar_actions(self, message):
        """Handle calendar-related pending actions"""
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('type') == 'create_event':
            return await self._handle_event_creation(message, user_state)

        return None

    async def _handle_event_creation(self, message, state):
        step = state['step']
        event_data = state['event_data']

        if step == 'title':
            event_data['summary'] = message.content
            state['step'] = 'start_time'
            return "📅 When should it start? (e.g., 'tomorrow 2pm' or '2024-03-20 14:30')"

        elif step == 'start_time':
            event_data['start'] = self._parse_time(message.content).isoformat()
            state['step'] = 'end_time'
            return "⏰ When should it end? (e.g., '3pm' or '2024-03-20 15:30')"

        elif step == 'end_time':
            start_dt = datetime.fromisoformat(event_data['start'])
            event_data['end'] = self._parse_time(message.content, reference=start_dt).isoformat()
            state['step'] = 'description'
            return "📝 Add a description (or type 'skip')"

        elif step == 'description':
            if message.content.lower() != 'skip':
                event_data['description'] = message.content
            state['step'] = 'confirm_event'
            return self._create_confirmation_message(event_data)

    def _format_calendar_response(self, events, title):
        """Enhanced calendar formatting with interactive support"""
        if not events:
            return f"📅 No {title.lower()} found"

        return {
            'type': 'list',
            'header': title,
            'body': "Tap to view event details",
            "footer": "-- Calendar --",
            'sections': [{
                'title': f"{len(events)} Events",
                'rows': [{
                    'id': f"event_{event['id']}",
                    'title': f"📅 {event['summary']}"[:23],
                    'description': self._format_event_time(event)[:45]
                } for event in events[:5]]
            }]
        }

    def _parse_iso_to_readable(self, iso_str):
        """Convert ISO datetime to readable format"""
        dt = datetime.fromisoformat(iso_str.replace('Z', '+00:00'))
        return dt.strftime("%a %d %b %Y %H:%M")

    def _parse_time(self, time_str, reference=None):
        """
        Konvertiert natürliche Sprache zu präziser Datetime

        Unterstützt:
        - 'heute'
        - 'morgen'
        - 'in einer woche'
        - '10 uhr'
        - '10pm'
        - 'nächsten montag'
        """
        if reference is None:
            reference = datetime.now()

        try:
            import dateparser

            # dateparser for flexible time parsing
            parsed_time = dateparser.parse(
                time_str,
                settings={
                    'PREFER_DATES_FROM': 'future',
                    'RELATIVE_BASE': reference,
                    'TIMEZONE': 'Europe/Berlin'
                }
            )

            if parsed_time is None:
                # Fall back to dateutil if dateparser fails
                parsed_time = parser.parse(time_str, fuzzy=True, default=reference)

            return parsed_time

        except Exception as e:
            print(f"Zeitparsing-Fehler: {e}")
            return reference

    def _calculate_free_slots(self, start, end, busy_slots):
        """Calculate free time slots between busy periods"""
        # Implementation would calculate available windows
        return [{
            'start': "09:00",
            'end': "11:00",
            'duration': "2 hours"
        }]

    def _create_confirmation_message(self, event_data):
        """Create event confirmation message"""
        details = [
            f"📌 Title: {event_data['summary']}",
            f"🕒 Start: {self._parse_iso_to_readable(event_data['start'])}",
            f"⏰ End: {self._parse_iso_to_readable(event_data['end'])}",
            f"📝 Description: {event_data.get('description', 'None')}"
        ]
        return {
            'type': 'quick_reply',
            'text': "\n".join(details),
            'options': {'confirm': '✅ Confirm', 'cancel': '❌ Cancel'}
        }

    def _create_calendar_event(self, event_data):
        """Create event through Calendar API"""
        event = {
            'summary': event_data['summary'],
            'start': {'dateTime': event_data['start']},
            'end': {'dateTime': event_data['end']},
        }
        if 'description' in event_data:
            event['description'] = event_data['description']

        return self.calendar_service.events().insert(
            calendarId='primary',
            body=event
        ).execute()

    async def system_status(self, message):
        uptime = datetime.now() - self.start_time
        uptime = timedelta(seconds=int(uptime.total_seconds()))  # timedelta fields are read-only; rebuild without microseconds
        status = {
            "🤖 Agent": "Online" if self.agent else "Offline",
            "📧 Email": "Connected" if self.gmail_service else "Disconnected",
            "📅 Calendar": "Connected" if self.calendar_service else "Disconnected",
            "📄 Documents": "Connected" if self.blob_docs_system else "Disconnected",
            "⏳ Uptime": str(uptime)
        }
        }
        return "\n".join([f"{k}: {v}" for k, v in status.items()])

    async def restart_system(self, message):
        message.reply("🔄 System restart initiated...")
        time.sleep(1)
        await self.clear_memory(message)
        time.sleep(1)
        return  "✅ System restarted"

    # Updated document handlers
    async def list_documents(self, message, filter_type=None):
        docs = self.blob_docs_system.list_documents(filter_type)
        if len(docs) == 0:
            return "No docs found"
        return {
            'type': 'list',
            'header': 'Documents',
            'body': 'Stored Documents',
            'footer': '-- Documents --',
            'sections': [{
                'title': 'Your Documents',
                'rows': [{
                    'id': doc['id'],
                    'title': f"{self._get_icon(doc['type'])} {doc['name']}"[:23],
                    'description': f"{doc['type'].title()} | {self._format_size(doc['size'])} | {doc['modified']}"[:29]
                } for doc in docs[:10]]
            }]
        }

    async def start_document_upload(self, message):
        """Initiate document upload workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'document', 'step': 'awaiting_file'}
        return {
            'type': 'quick_reply',
            'text': '📤 Send me the file you want to upload',
            'options': {'cancel': '❌ Cancel Upload'}
        }

    async def search_documents(self, message):
        """Initiate document search workflow"""
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'search', 'step': 'awaiting_query'}
        return {
            'type': 'quick_reply',
            'text': '🔍 What are you looking for?',
            'options': {'cancel': '❌ Cancel Search'}
        }

    async def handle_media_message(self, message: 'Message'):
        """Handle document/image/video uploads"""
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('step') == 'awaiting_file':
            file_type = message.type
            if file_type not in ['document', 'image', 'video']:
                return "Unsupported file type"

            try:
                # Download media
                if file_type == 'video':
                    content = self.whc.messenger.get_video(message.data)
                elif file_type == 'image':
                    content = self.whc.messenger.get_image(message.data)
                else:
                    content = self.whc.messenger.get_document(message.data)
                media_data = self.whc.messenger.download_media(
                    media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
                    mime_type=content.get('mime_type'),
                    file_path='.data/temp')
                # Save to blob storage
                filename = f"file_{file_type}_{datetime.now().isoformat()}_{content.get('sha256', '')}"
                with open(media_data, 'rb') as f:
                    blob_id = self.blob_docs_system.save_document(
                        f.read(),
                        filename=filename,
                        file_type=file_type
                    )

                self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                return f"✅ File uploaded successfully!\nID: {blob_id}"

            except Exception as e:
                logging.error(f"Upload failed: {str(e)}")
                return f"❌ Failed to upload file Error : {str(e)}"

        return "No pending uploads"

    async def delete_document(self, message):
        """Delete document workflow"""
        docs = self.blob_docs_system.list_documents()
        return {
            'type': 'quick_reply',
            'text': 'Select document to delete:',
            'options': {doc['id']: doc['name'] for doc in docs[:5]},
            'handler': self._confirm_delete
        }

    async def _confirm_delete(self, doc_id, message):
        """Confirm deletion workflow"""
        doc = next((d for d in self.blob_docs_system.list_documents() if d['id'] == doc_id), None)
        if not doc:
            return "Document not found"

        if self.blob_docs_system.delete_document(doc_id):
            return f"✅ {doc['name']} deleted successfully"
        return "❌ Failed to delete document"

    # Helper methods
    def _get_icon(self, file_type: str) -> str:
        icons = {
            'document': '📄',
            'image': '🖼️',
            'video': '🎥'
        }
        return icons.get(file_type, '📁')

    def _format_size(self, size: int) -> str:
        if size < 1024:
            return f"{size}B"
        elif size < 1024 ** 2:
            return f"{size / 1024:.1f}KB"
        elif size < 1024 ** 3:
            return f"{size / (1024 ** 2):.1f}MB"
        return f"{size / (1024 ** 3):.1f}GB"

    # Utility Methods

    def _clean_processed_messages(self):
        """Clean old messages from processed cache"""
        now = time.time()
        self.processed_messages = {
            (msg_id, timestamp) for msg_id, timestamp in self.processed_messages
            if now - timestamp < 3600  # 1 hour retention
        }

    def send_email(self, to, subject, body):
        """Actual email sending function to be called by agent"""
        if not self.gmail_service:
            return False

        message = MIMEText(body)
        message['to'] = to
        message['subject'] = subject

        encoded_message = base64.urlsafe_b64encode(message.as_bytes()).decode()
        self.gmail_service.users().messages().send(
            userId='me',
            body={'raw': encoded_message}
        ).execute()
        return True

    async def start_agent(self, *a):
        """Start the agent in background mode"""
        if self.agent:
            self.agent.run_in_background()
            return True
        return False

    async def stop_agent(self, *b):
        """Stop the currently running agent"""
        if self.agent:
            self.agent.stop()
            return True
        return False

    async def show_task_stack(self, *a):
        """Display current task stack"""
        if self.agent and len(self.agent.taskstack.tasks) > 0:
            tasks = self.agent.taskstack.tasks
            return self.agent.mini_task("\n".join([f"Task {t.id}: {t.description}" for t in tasks]), "system", "Format to nice and clean whatsapp format")
        return "No tasks in stack"

    def run(self):
        """Start the WhatsApp assistant"""
        try:
            self.state = AssistantState.ONLINE
            # Send welcome message

            mas = self.whc.messenger.create_message(
                content="Digital Assistant is online! Send /help for available commands.",
                to=self.whc.progress_messenger0.recipient_phone,
            ).send(sender=0)
            mas_id = mas.get("messages", [{}])[0].get("id")
            print(mas_id)

        except Exception as e:
            logging.error(f"Assistant error: {str(e)}")
            self.state = AssistantState.OFFLINE
            raise

    async def handle_agent_actions(self, message):
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})
        def helper():

            stop_flag = threading.Event()
            try:
                progress = self.progress_messengers['task']
                # message_id = progress.send_initial_message(mode="loading")
                progress.message_id = message.id
                progress.start_loading_in_background(stop_flag)
                res = message.content
                context = (message.data.get('entry', [{}])[0].get('changes', [{}])[0]
                           .get('value', {}).get('messages', [{}])[0].get('context'))
                if context:
                    source = 'USER' if context.get('from') in self.whc.progress_messenger0.recipient_phone else 'AGENT'
                    context_str = f"Context : source {source}"
                    cd = self.history.get(context.get('id'))
                    context_str += "\n" + (cd if cd is not None else "The referenced message is not in the history")
                    res += "\n" + context_str
                if user_state.get('type') == 'system':
                    res = self.isaa.run(res)
                    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                elif user_state.get('type') == 'self-agent':
                    res = self.agent.run(res)
                    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                self.agent.mode = LLMMode(
                    name="Chatter",
                    description="whatsapp Chat LLM",
                    system_msg="Response precise and short style using whatsapp syntax!",
                    post_msg=None
                )
                response = self.agent.mini_task(res, "user", persist=True)
                self.save_reply(message, response)
            except Exception as e:
                stop_flag.set()
                message.reply("❌ Error in agent "+str(e))
            finally:
                self.agent.mode = None
                stop_flag.set()
        threading.Thread(target=helper, daemon=True).start()

    def save_reply(self, message, content):
        res = message.reply(content)
        res_id = res.get("messages", [{}])[0].get("id")
        if res_id is not None:
            self.history.set(res_id, content)
        else:
            print(f"No ID to add to history: {res}")
agent_task(message) async

Initiate a self-agent task workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def agent_task(self, message):
    """Initiate email search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'self-agent',
        'step': 'await_query'
    }
    return {
        'type': 'quick_reply',
        'text': "Now prompt the self-agent 📝",
        'options': {'cancel': '❌ Cancel Search'}
    }
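
Handlers like this one return plain dicts; send_structured_response turns a 'quick_reply' payload into a WhatsApp button message, mapping each entry of 'options' to a list row and truncating titles to 23 characters. A small standalone sketch of that mapping (no messenger needed):

payload = {
    'type': 'quick_reply',
    'text': "Now prompt the self-agent 📝",
    'options': {'cancel': '❌ Cancel Search'}
}

# The same row construction send_structured_response performs
rows = [{'id': k, 'title': v[:23]} for k, v in payload['options'].items()]
print(rows)  # [{'id': 'cancel', 'title': '❌ Cancel Search'}]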
check_emails(message, query='') async

Improved email checking with WhatsApp API formatting

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def check_emails(self, message, query=""):
    """Improved email checking with WhatsApp API formatting"""
    if not self.gmail_service:
        return "⚠️ Gmail service not configured"

    try:
        results = self.gmail_service.users().messages().list(
            userId='me',
            maxResults=10,
            labelIds=['INBOX'],
            q=query
        ).execute()

        emails = []
        for msg in results.get('messages', [])[:10]:
            email_data = self.gmail_service.users().messages().get(
                userId='me',
                id=msg['id'],
                format='metadata'
            ).execute()

            headers = {h['name']: h['value'] for h in email_data['payload']['headers']}
            emails.append({
                'id': msg['id'],
                'from': headers.get('From', 'Unknown'),
                'subject': headers.get('Subject', 'No Subject'),
                'date': headers.get('Date', 'Unknown'),
                'snippet': email_data.get('snippet', ''),
                'unread': 'UNREAD' in email_data.get('labelIds', [])
            })

        return {
            'type': 'list',
            'header': '📨 Recent Emails',
            'body': 'Tap to view full email',
            'footer': 'Email Manager',
            'sections': [{
                'title': f"Inbox ({len(emails)} emails)",
                'rows': [{
                    'id': f"email_{email['id']}",
                    'title': f"{'📬' if email['unread'] else '📭'} {email['subject']}"[:23],
                    'description': f"From: {email['from']}\n{email['snippet']}"[:45]
                } for email in emails]
            }]
        }
    except Exception as e:
        return f"⚠️ Error fetching emails: {str(e)}"
complete_authorization(message)

Complete the authorization process using the authorization code

:param message: Message whose content is the authorization code received from Google

Source code in toolboxv2/mods/WhatsAppTb/client.py
def complete_authorization(self, message: Message):
    """
    Complete the authorization process using the authorization code

    :param message: Message whose content is the authorization code received from Google
    """
    from google_auth_oauthlib.flow import Flow
    authorization_code = message.content
    # Define the scopes required for Gmail and Calendar
    SCOPES = [
        'https://www.googleapis.com/auth/gmail.modify',
        'https://www.googleapis.com/auth/calendar'
    ]

    # Create a flow instance to manage the OAuth 2.0 authorization process
    flow = Flow.from_client_secrets_file(
        self.credentials_path,
        scopes=SCOPES,
        redirect_uri='urn:ietf:wg:oauth:2.0:oob'
    )

    # Exchange the authorization code for credentials
    flow.fetch_token(code=authorization_code)
    self.credentials = flow.credentials

    # Save the credentials for future use
    self.save_credentials()

    # Initialize services
    self.init_services()
    return "Done"
delete_document(message) async

Delete document workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def delete_document(self, message):
    """Delete document workflow"""
    docs = self.blob_docs_system.list_documents()
    return {
        'type': 'quick_reply',
        'text': 'Select document to delete:',
        'options': {doc['id']: doc['name'] for doc in docs[:5]},
        'handler': self._confirm_delete
    }

email_search(message) async

Initiate email search workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def email_search(self, message):
    """Initiate email search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'email_search',
        'step': 'await_query'
    }
    return {
        'type': 'quick_reply',
        'text': "🔍 What would you like to search for?",
        'options': {'cancel': '❌ Cancel Search'}
    }
email_summary(message) async

Generate AI-powered email summaries

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def email_summary(self, message):
    """Generate AI-powered email summaries"""
    try:
        messages = self.gmail_service.users().messages().list(
            userId='me',
            maxResults=3,
            labelIds=['INBOX']
        ).execute().get('messages', [])

        email_contents = []
        for msg in messages[:3]:
            email_data = self.gmail_service.users().messages().get(
                userId='me',
                id=msg['id'],
                format='full'
            ).execute()
            email_contents.append(self._parse_email_content(email_data))

        summary = self.agent.mini_task(
            "\n\n".join(email_contents) , "system", "Summarize these emails in bullet points with key details:"
        )

        return f"📋 Email Summary:\n{summary}\n\n*Powered by AI*"
    except Exception as e:
        logging.error(f"Summary failed: {str(e)}")
        return f"❌ Could not generate summary: {str(e)}"
find_time_slot(message) async

Find and display the next 5 available time slots with dynamic durations

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def find_time_slot(self, message):
    """Find and display the next 5 available time slots with dynamic durations"""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        # Define the time range for the search (next 24 hours)
        now = datetime.now(UTC)
        end_time = now + timedelta(days=1)

        # FreeBusy Request
        freebusy_request = {
            "timeMin": now.isoformat(),
            "timeMax": end_time.isoformat(),
            "items": [{"id": 'primary'}]
        }

        freebusy_response = self.calendar_service.freebusy().query(body=freebusy_request).execute()
        busy_slots = freebusy_response['calendars']['primary']['busy']

        # Slot calculation
        available_slots = self._calculate_efficient_slots(
            busy_slots,
            self.duration_minutes
        )

        # Format the response for WhatsApp
        return {
            'type': 'list',
            'header': "⏰ Available Time Slots",
            'body': "Tap to select a time slot",
            'footer': "Time Slot Finder",
            'sections': [{
                'title': "Next 5 Available Slots",
                'rows': [{
                    'id': f"slot_{slot['start'].timestamp()}",
                    'title': f"🕒 {slot['start'].strftime('%H:%M')} - {slot['end'].strftime('%H:%M')}",
                    'description': f"Duration: {slot['duration']}"
                } for slot in available_slots[:5]]
            }]
        }
    except Exception as e:
        return f"⚠️ Error finding time slots: {str(e)}"
generate_authorization_url(*a) async

Generate an authorization URL for user consent

:return: Authorization URL for the user to click and authorize access

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def generate_authorization_url(self, *a):
    """
    Generate an authorization URL for user consent

    :return: Authorization URL for the user to click and authorize access
    """
    from google_auth_oauthlib.flow import Flow
    # Define the scopes required for Gmail and Calendar
    SCOPES = [
        'https://www.googleapis.com/auth/gmail.modify',
        'https://www.googleapis.com/auth/calendar'
    ]

    # Create a flow instance to manage the OAuth 2.0 authorization process
    flow = Flow.from_client_secrets_file(
        self.credentials_path,
        scopes=SCOPES,
        redirect_uri='urn:ietf:wg:oauth:2.0:oob'  # Use 'urn:ietf:wg:oauth:2.0:oob' for desktop apps
    )

    # Generate the authorization URL
    authorization_url, _ = flow.authorization_url(
        access_type='offline',  # Allows obtaining refresh token
        prompt='consent'  # Ensures user is always prompted for consent
    )
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'auth',
                                                                          'step': 'awaiting_key'}
    return {
        'type': 'quick_reply',
        'text': f'Url to log in {authorization_url}',
        'options': {'cancel': '❌ Cancel Upload'}
    }
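
Together with complete_authorization above, this implements the standard two-step installed-app OAuth flow: send the user a consent URL, then exchange the code they paste back for credentials. A hedged standalone sketch of the same round trip ('client_secret.json' stands in for self.credentials_path):

from google_auth_oauthlib.flow import Flow

SCOPES = [
    'https://www.googleapis.com/auth/gmail.modify',
    'https://www.googleapis.com/auth/calendar'
]

flow = Flow.from_client_secrets_file(
    'client_secret.json', scopes=SCOPES,
    redirect_uri='urn:ietf:wg:oauth:2.0:oob'
)
auth_url, _ = flow.authorization_url(access_type='offline', prompt='consent')
print('Open this URL and send the code back:', auth_url)

code = input('Code: ')          # in the assistant this arrives as message.content
flow.fetch_token(code=code)
credentials = flow.credentials  # what save_credentials() persists
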
get_email_details(email_id) async

Retrieve and format full email details

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def get_email_details(self, email_id):
    """Retrieve and format full email details"""
    if not self.gmail_service:
        return "⚠️ Gmail service not configured"

    try:
        email_data = self.gmail_service.users().messages().get(
            userId='me',
            id=email_id,
            format='full'
        ).execute()

        headers = {h['name']: h['value'] for h in email_data['payload']['headers']}
        body = ""
        for part in email_data.get('payload', {}).get('parts', []):
            if part['mimeType'] == 'text/plain':
                body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
                break

        formatted_text = (
            f"📧 *Email Details*\n\n"
            f"From: {headers.get('From', 'Unknown')}\n"
            f"Subject: {headers.get('Subject', 'No Subject')}\n"
            f"Date: {headers.get('Date', 'Unknown')}\n\n"
            f"{body[:15000]}{'...' if len(body) > 15000 else ''}"
        )
        return self.agent.mini_task(
            formatted_text, "system", "Summarize the email in bullet points with key details"
        )
    except Exception as e:
        return f"⚠️ Error fetching email: {str(e)}"
get_event_details(event_id) async

Retrieve and format calendar event details with location support

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def get_event_details(self, event_id):
    """Retrieve and format calendar event details with location support"""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        event = self.calendar_service.events().get(
            calendarId='primary',
            eventId=event_id
        ).execute()

        response = [ (
                f"📅 *Event Details*\n\n"
                f"Title: {event.get('summary', 'No title')}\n"
                f"Time: {self._format_event_time(event)}\n"
                f"Location: {event.get('location', 'Not specified')}\n\n"
                f"{event.get('description', 'No description')[:1000]}"
            )]

        if 'geo' in event:
            response.append({
                'lat': float(event['geo']['latitude']),
                'long': float(event['geo']['longitude']),
                'name': event.get('location', 'Event Location'),
                'address': event.get('location', ''),
                'recipient_id': self.whc.progress_messenger0.recipient_phone
            })
        return response
    except Exception as e:
        return f"⚠️ Error fetching event: {str(e)}"
handle_audio_message(message) async

Process audio messages with STT and TTS

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_audio_message(self, message: 'Message'):
    """Process audio messages with STT and TTS"""
    # Download audio
    progress = self.progress_messengers['task']
    stop_flag = threading.Event()
    # message_id = progress.send_initial_message(mode="loading")
    progress.message_id = message.id
    progress.start_loading_in_background(stop_flag)

    content = self.whc.messenger.get_audio(message.data)
    audio_file_name = self.whc.messenger.download_media(media_url=self.whc.messenger.query_media_url(media_id=content.get('id')), mime_type='audio/opus', file_path=".data/temp")
    print(f"audio_file_name {audio_file_name}")
    if audio_file_name is None:
        message.reply("Could not process audio file")
        stop_flag.set()
        return

    text = self.stt(audio_file_name)['text']
    if not text:
        message.reply("Could not process audio")
        stop_flag.set()
        return

    message.reply("Transcription :\n "+ text)
    message.content = text
    agent_res = await self.helper_text(message, return_text=True)

    if agent_res is not None:
        pass

    stop_flag.set()
handle_button_interaction(content, message) async

Handle button click interactions

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_button_interaction(self, content: dict, message: Message):
    """Handle button click interactions"""
    button_id = content['id']

    # First check if it's a main menu button
    if button_id in self.buttons:
        self.whc.messenger.send_button(
            recipient_id=self.whc.progress_messenger0.recipient_phone,
            button=self.buttons[button_id]
        )
        return

    # Handle action buttons
    action_handlers = {
        # Agent controls
        'start': self.start_agent,
        'stop': self.stop_agent,
        'tasks': self.show_task_stack,
        'memory': self.clear_memory,
        'system-task': self.system_task,
        'agent-task': self.agent_task,

        # Email controls
        'check': self.check_emails,
        'send': self.start_email_compose,
        'summary': self.email_summary,
        'search': self.email_search,

        # Calendar controls
        'today': self.show_today_events,
        'add': self.start_event_create,
        'upcoming': self.show_upcoming_events,
        'find_slot': self.find_time_slot,

        # Document controls
        'upload': self.start_document_upload,
        'list': self.list_documents,
        'search_docs': self.search_documents,
        'delete': self.delete_document,

        # System controls
        'status': self.system_status,
        'restart': self.restart_system,
        'connect': self.generate_authorization_url,

        'cancel': self.cancel,
        'confirm': self.confirm,
    }
    if button_id in action_handlers:
        try:
            # Start progress indicator
            progress = self.progress_messengers['task']
            stop_flag = threading.Event()
            # message_id = progress.send_initial_message(mode="loading")
            progress.message_id = message.id
            progress.start_loading_in_background(stop_flag)

            # Execute handler

            result = await action_handlers[button_id](message)


            # Send result
            if isinstance(result, str):
                self.save_reply(message, result)
            elif isinstance(result, dict):  # For structured responses
                self.send_structured_response(result)

            stop_flag.set()
        finally:
            # Ensure the progress indicator stops even if the handler raised
            stop_flag.set()
    elif 'event_' in button_id:
        res = await self.get_event_details(button_id.replace("event_", ''))
        if isinstance(res, str):
            self.save_reply(message, res)
            return
        for r in res:
            if isinstance(r, str):
                self.save_reply(message, r)
            else:
                self.whc.messenger.send_location(**r)

    elif 'email_' in button_id:
        res = await self.get_email_details(button_id.replace("email_", ''))
        self.save_reply(message, res)
    else:
        message.reply("⚠️ Unknown command")
handle_calendar_actions(message) async

Handle calendar-related pending actions

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_calendar_actions(self, message):
    """Handle calendar-related pending actions"""
    user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

    if user_state.get('type') == 'create_event':
        return await self._handle_event_creation(message, user_state)

    return None
handle_email_actions(message) async

Handle multi-step email workflows

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_email_actions(self, message):
    """Handle multi-step email workflows"""
    user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

    if user_state.get('type') == 'compose_email':
        return await self._handle_email_composition(message, user_state)
    if user_state.get('type') == 'email_search':
        return await self.check_emails(message, self.agent.mini_task("""Convert precisely to a Google string-only query using Gmail search operators!

Basic operators:
- from: sender
- to: recipient
- subject: subject line
- label: Gmail label
- has:attachment attachments
- newer_than:7d time filter
- before: date before
- after: date after

Advanced operators:
- in:inbox
- in:sent
- in:spam
- cc: carbon copy
- bcc: blind carbon copy
- is:unread
- is:read
- larger:10M size filter
- smaller:5M
- filename:pdf file type

Pro tips:
- Combinable with AND/OR
- Quotation marks for exact matches
- Negation with -
Example: 'unread mails from the last week' -> 'is:unread newer_than:7d'

""", "user", message.content))

    return None
handle_interactive(message) async

Handle all interactive messages

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_interactive(self, message: Message):
    """Handle all interactive messages"""
    content = self.whc.messenger.get_interactive_response(message.data)
    if content.get("type") == "list_reply":
        await self.handle_button_interaction(content.get("list_reply"), message)
    elif content.get("type") == "button_reply":
        print(content)
handle_media_message(message) async

Handle document/image/video uploads

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_media_message(self, message: 'Message'):
    """Handle document/image/video uploads"""
    user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

    if user_state.get('step') == 'awaiting_file':
        file_type = message.type
        if file_type not in ['document', 'image', 'video']:
            return "Unsupported file type"

        try:
            # Download media
            #media_url = message.document.url if hasattr(message, 'document') else \
            #    message.image.url if hasattr(message, 'image') else \
            #        message.video.url
            if file_type == 'video':
                content = self.whc.messenger.get_video(message.data)
            elif file_type == 'image':
                content = self.whc.messenger.get_image(message.data)
            else:  # 'document'
                content = self.whc.messenger.get_document(message.data)
            print("Media content:", content)
            media_data = self.whc.messenger.download_media(
                media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
                mime_type=content.get('mime_type'),
                file_path='.data/temp'
            )
            print("Media media_data:", media_data)
            # Save to blob storage
            filename = f"file_{file_type}_{datetime.now().isoformat()}_{content.get('sha256', '')}"
            blob_id = self.blob_docs_system.save_document(
                open(media_data, 'rb').read(),
                filename=filename,
                file_type=file_type
            )

            self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
            return f"✅ File uploaded successfully!\nID: {blob_id}"

        except Exception as e:
            logging.error(f"Upload failed: {str(e)}")
            return f"❌ Failed to upload file Error : {str(e)}"

    return "No pending uploads"
handle_message(message) async

Main message handler for incoming WhatsApp messages

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def handle_message(self, message: 'Message'):
    """Main message handler for incoming WhatsApp messages"""

    # Deduplication check
    with self.message_lock:
        if any(m_id == message.id for m_id, _ in self.processed_messages):
            return
        last_ts = time.time()
        if len(self.processed_messages) > 0:
            m_id, last_ts = self.processed_messages.pop()
            self.processed_messages.add((m_id, last_ts))

        # Discard messages older than two minutes relative to the last processed one
        if float(message.data.get('entry', [{}])[0].get('changes', [{}])[0].get('value', {}).get('messages', [{}])[0].get('timestamp', 0)) < last_ts - 120:
            return
        self.processed_messages.add((message.id, time.time()))

    # Mark message as read
    message.mark_as_read()

    # Extract content and type
    content_type = message.type
    content = message.content

    print(f"message.content {content=} {content_type=} {message.data=}")

    try:
        if content_type == 'interactive':
            await self.handle_interactive(message)
        elif content_type == 'audio':
            await self.handle_audio_message(message)
        elif content_type in ['document', 'image', 'video']:
            response = await self.handle_media_message(message)
            self.save_reply(message, response)
        elif content_type == 'text':
            if content.lower() == "menu":
                self.whc.messenger.send_button(
                    recipient_id=self.whc.progress_messenger0.recipient_phone,
                    button=self.buttons[content.lower()]
                )
            else:
                await self.helper_text(message)
        else:
            message.reply("Unsupported message type")
    #except Exception as e:
    #    logging.error(f"Message handling error: {str(e)}")
    #   message.reply("❌ Error processing request")
    finally:
        # Cleanup old messages (keep 1 hour history)
        with self.message_lock:
            self._clean_processed_messages()
init_services()

Initialize Gmail and Calendar services

Source code in toolboxv2/mods/WhatsAppTb/client.py
def init_services(self):
    """
    Initialize Gmail and Calendar services
    """
    from googleapiclient.discovery import build

    self.gmail_service = build('gmail', 'v1', credentials=self.credentials)
    self.calendar_service = build('calendar', 'v3', credentials=self.credentials)
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
load_credentials()

Load previously saved credentials if available

:return: Whether credentials were successfully loaded

Source code in toolboxv2/mods/WhatsAppTb/client.py
def load_credentials(self):
    """
    Load previously saved credentials if available

    :return: Whether credentials were successfully loaded
    """
    try:
        self.credentials = Credentials.from_authorized_user_file('token/google_token.json')
        self.init_services()
        return True
    except FileNotFoundError:
        return False
run()

Start the WhatsApp assistant

Source code in toolboxv2/mods/WhatsAppTb/client.py
def run(self):
    """Start the WhatsApp assistant"""
    try:
        self.state = AssistantState.ONLINE
        # Send welcome message

        mas = self.whc.messenger.create_message(
            content="Digital Assistant is online! Send /help for available commands.",
            to=self.whc.progress_messenger0.recipient_phone,
        ).send(sender=0)
        mas_id = mas.get("messages", [{}])[0].get("id")
        print(mas_id)

    except Exception as e:
        logging.error(f"Assistant error: {str(e)}")
        self.state = AssistantState.OFFLINE
        raise
save_credentials()

Save the obtained credentials to a file for future use

Source code in toolboxv2/mods/WhatsAppTb/client.py
def save_credentials(self):
    """
    Save the obtained credentials to a file for future use
    """
    if not os.path.exists('token'):
        os.makedirs('token')

    with open('token/google_token.json', 'w') as token_file:
        token_file.write(self.credentials.to_json())
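
save_credentials and load_credentials form a simple persistence round trip: serialize the credentials to token/google_token.json after authorization, then rebuild them on the next start. A minimal sketch of the same round trip, assuming the google-auth library:

import os
from google.oauth2.credentials import Credentials

def save(creds: Credentials, path: str = 'token/google_token.json') -> None:
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write(creds.to_json())

def load(path: str = 'token/google_token.json') -> Credentials | None:
    try:
        return Credentials.from_authorized_user_file(path)
    except FileNotFoundError:
        return None
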
search_documents(message) async

Initiate document search workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def search_documents(self, message):
    """Initiate document search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'search', 'step': 'awaiting_query'}
    return {
        'type': 'quick_reply',
        'text': '🔍 What are you looking for?',
        'options': {'cancel': '❌ Cancel Search'}
    }
send_email(to, subject, body)

Actual email sending function to be called by agent

Source code in toolboxv2/mods/WhatsAppTb/client.py
def send_email(self, to, subject, body):
    """Actual email sending function to be called by agent"""
    if not self.gmail_service:
        return False

    message = MIMEText(body)
    message['to'] = to
    message['subject'] = subject

    encoded_message = base64.urlsafe_b64encode(message.as_bytes()).decode()
    self.gmail_service.users().messages().send(
        userId='me',
        body={'raw': encoded_message}
    ).execute()
    return True
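
The Gmail API expects the raw payload to be the base64url-encoded RFC 2822 message. A standalone sketch of that encoding step (the final send call is shown commented; gmail_service is assumed to be built as in init_services):

import base64
from email.mime.text import MIMEText

msg = MIMEText('Meeting moved to 15:00.')
msg['to'] = 'someone@example.com'   # placeholder recipient
msg['subject'] = 'Schedule change'
raw = base64.urlsafe_b64encode(msg.as_bytes()).decode()

# gmail_service.users().messages().send(userId='me', body={'raw': raw}).execute()
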
send_structured_response(result)

Send complex responses using appropriate WhatsApp features

Source code in toolboxv2/mods/WhatsAppTb/client.py
def send_structured_response(self, result: dict):
    """Send complex responses using appropriate WhatsApp features"""
    if result['type'] == 'list':
        self.whc.messenger.send_button(
            recipient_id=self.whc.progress_messenger0.recipient_phone,
            button={
                'header': result.get('header', ''),
                'body': result.get('body', ''),
                'footer': result.get('footer', ''),
                'action': {
                    'button': 'Action',
                    'sections': result['sections']
                }
            }
        )
    elif result['type'] == 'quick_reply':
        self.whc.messenger.send_button(
            recipient_id=self.whc.progress_messenger0.recipient_phone,
            button={
                'header': "Quick reply",
                'body': result['text'],
                'footer': '',
                'action': {'button': 'Action', 'sections': [{
                    'title': 'View',
                    'rows': [{'id': k, 'title': v[:23]} for k, v in result['options'].items()]
                }]}
            }
        )

    elif result['type'] == 'media':
        if result['media_type'] == 'image':
            self.whc.messenger.send_image(
                image=result['url'],
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                caption=result.get('caption', '')
            )
        elif result['media_type'] == 'document':
            self.whc.messenger.send_document(
                document=result['url'],
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                caption=result.get('caption', '')
            )
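
Handlers therefore return plain dicts and let send_structured_response choose the WhatsApp primitive. Illustrative payloads for the three supported types (all values are examples only):

list_result = {
    'type': 'list',
    'header': '📨 Recent Emails',
    'body': 'Tap to view full email',
    'footer': 'Email Manager',
    'sections': [{'title': 'Inbox (2 emails)',
                  'rows': [{'id': 'email_abc', 'title': '📬 Hello'},
                           {'id': 'email_def', 'title': '📭 Invoice'}]}],
}

quick_reply_result = {
    'type': 'quick_reply',
    'text': '🔍 What would you like to search for?',
    'options': {'cancel': '❌ Cancel Search'},
}

media_result = {
    'type': 'media',
    'media_type': 'image',
    'url': 'https://example.com/chart.png',
    'caption': 'Weekly summary',
}
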
setup_interaction_buttons()

Define WhatsApp interaction buttons for different functionalities

Source code in toolboxv2/mods/WhatsAppTb/client.py
def setup_interaction_buttons(self):
    """Define WhatsApp interaction buttons for different functionalities"""
    self.buttons = {
        'menu': {
            'header': 'Digital Assistant',
            'body': 'Please select an option:',
            'footer': '-- + --',
            'action': {
                'button': 'Menu',
                'sections': [
                    {
                        'title': 'Main Functions',
                        'rows': [
                            {'id': 'agent', 'title': 'Agent Controls', 'description': 'Manage your AI assistant'},
                            {'id': 'email', 'title': 'Email Management', 'description': 'Handle your emails'},
                            {'id': 'calendar', 'title': 'Calendar', 'description': 'Manage your schedule'},
                            {'id': 'docs', 'title': 'Documents', 'description': 'Handle documents'},
                            {'id': 'system', 'title': 'System', 'description': 'System controls and metrics'}
                        ]
                    }
                ]
            }
        },
        'agent': self._create_agent_controls_buttons(),
        'email': self._create_email_controls_buttons(),
        'calendar': self._create_calendar_controls_buttons(),
        'docs': self._create_docs_controls_buttons(),
        'system': self._create_system_controls_buttons()
    }
setup_progress_messengers()

Initialize progress messengers for different types of tasks

Source code in toolboxv2/mods/WhatsAppTb/client.py
def setup_progress_messengers(self):
    """Initialize progress messengers for different types of tasks"""
    self.progress_messengers = {
        'task': self.whc.progress_messenger0,
        'email': self.whc.progress_messenger1,
        'calendar': self.whc.progress_messenger2
    }
show_task_stack(*a) async

Display current task stack

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def show_task_stack(self, *a):
    """Display current task stack"""
    if self.agent and len(self.agent.taskstack.tasks) > 0:
        tasks = self.agent.taskstack.tasks
        return self.agent.mini_task("\n".join([f"Task {t.id}: {t.description}" for t in tasks]), "system", "Format to nice and clean whatsapp format")
    return "No tasks in stack"
show_today_events(message) async

Show today's calendar events

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def show_today_events(self, message):
    """Show today's calendar events"""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    now = datetime.utcnow().isoformat() + 'Z'
    end_of_day = (datetime.now() + timedelta(days=1)).replace(
        hour=0, minute=0, second=0).isoformat() + 'Z'

    events_result = self.calendar_service.events().list(
        calendarId='primary',
        timeMin=now,
        timeMax=end_of_day,
        singleEvents=True,
        orderBy='startTime'
    ).execute()

    events = events_result.get('items', [])
    return self._format_calendar_response(events, "Today's Events")
show_upcoming_events(message) async

Show upcoming events with interactive support

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def show_upcoming_events(self, message):
    """Show upcoming events with interactive support"""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        now = datetime.utcnow().isoformat() + 'Z'
        next_week = (datetime.now() + timedelta(days=7)).isoformat() + 'Z'

        events_result = self.calendar_service.events().list(
            calendarId='primary',
            timeMin=now,
            timeMax=next_week,
            singleEvents=True,
            orderBy='startTime',
            maxResults=10
        ).execute()

        events = events_result.get('items', [])
        return self._format_calendar_response(events, "Upcoming Events")
    except Exception as e:
        return f"⚠️ Error fetching events: {str(e)}"
start_agent(*a) async

Start the agent in background mode

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def start_agent(self, *a):
    """Start the agent in background mode"""
    if self.agent:
        self.agent.run_in_background()
        return True
    return False
start_document_upload(message) async

Initiate document upload workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def start_document_upload(self, message):
    """Initiate document upload workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {'type': 'document', 'step': 'awaiting_file'}
    return {
        'type': 'quick_reply',
        'text': '📤 Send me the file you want to upload',
        'options': {'cancel': '❌ Cancel Upload'}
    }
start_email_compose(message) async

Enhanced email composition workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def start_email_compose(self, message):
    """Enhanced email composition workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'compose_email',
        'step': 'subject',
        'draft': {'attachments': []}
    }
    return {
        'type': 'quick_reply',
        'text': "📝 Let's compose an email\n\nSubject:",
        'options': {'cancel': '❌ Cancel Composition'}
    }
start_event_create(message) async

Initiate event creation workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def start_event_create(self, message):
    """Initiate event creation workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'create_event',
        'step': 'title',
        'event_data': {}
    }
    return {
        'type': 'quick_reply',
        'text': "Let's create an event! What's the title?",
        'options': {'cancel': '❌ Cancel'}
    }
stop_agent(*b) async

Stop the currently running agent

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def stop_agent(self, *b):
    """Stop the currently running agent"""
    if self.agent:
        self.agent.stop()
        return True
    return False
system_task(message) async

Initiate a system task workflow for prompting the ISAA system

Source code in toolboxv2/mods/WhatsAppTb/client.py
async def system_task(self, message):
    """Initiate email search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'system',
        'step': 'await_query'
    }
    return {
        'type': 'quick_reply',
        'text': "Now prompt the 🧠ISAA-System 📝",
        'options': {'cancel': '❌ Cancel Search'}
    }

server

AppManager

Source code in toolboxv2/mods/WhatsAppTb/server.py
class AppManager(metaclass=Singleton):
    pepper = "pepper0"

    def __init__(self, start_port: int = 8000, port_range: int = 10, em=None):
        self.instances: dict[str, dict] = {}
        self.start_port = start_port
        self.port_range = port_range
        self.threads: dict[str, Thread] = {}
        self.stop_events: dict[str, Event] = {}
        self.message_queue: asyncio.Queue = asyncio.Queue()
        self.last_messages: dict[str, datetime] = {}
        self.keys: dict[str, str] = {}
        self.forwarders: dict[str, dict] = {}
        self.runner = lambda: None

        if em is None:
            from toolboxv2 import get_app
            em = get_app().get_mod("EventManager")
        from toolboxv2.mods import EventManager
        self.event_manager: EventManager = em.get_manager()

        # Set up signal handlers for graceful shutdown
        try:
            if threading.current_thread() is threading.main_thread():
                signal.signal(signal.SIGINT, self.signal_handler)
                signal.signal(signal.SIGTERM, self.signal_handler)
        except Exception:
            pass

    def offline(self, instance_id):

        def mark_as_offline():
            self.forwarders[instance_id]['send'] = None
            return 'done'

        return mark_as_offline

    def online(self, instance_id):

        def mark_as_online():
            return self.instances[instance_id]['app']

        def set_callbacks(callback, e_callback=None):
            if callback is not None:
                self.forwarders[instance_id]['send'] = callback
            if e_callback is not None:
                self.forwarders[instance_id]['sende'] = e_callback

        return mark_as_online(), set_callbacks

    def get_next_available_port(self) -> int:
        """Find the next available port in the range."""
        used_ports = {instance['port'] for instance in self.instances.values()}
        for port in range(self.start_port, self.start_port + self.port_range):
            if port not in used_ports:
                return port
        raise RuntimeError("No available ports in range")

    def add_instance(self, instance_id: str, **kwargs):
        """
        Add a new app instance to the manager with automatic port assignment.
        """
        if instance_id in self.instances:
            raise ValueError(f"Instance {instance_id} already exists")

        port = self.get_next_available_port()
        app_instance = WhatsApp(**kwargs)

        self.instances[instance_id] = {
            'app': app_instance,
            'port': port,
            'kwargs': kwargs,
            'phone_number_id': kwargs.get("phone_number_id", {}),
            'retry_count': 0,
            'max_retries': 3,
            'retry_delay': 5
        }
        self.keys[instance_id] = Code.one_way_hash(kwargs.get("phone_number_id", {}).get("key"), "WhatsappAppManager",
                                                   self.pepper)
        self.forwarders[instance_id] = {}

        # Set up message handlers
        @app_instance.on_message
        async def message_handler(message):
            await self.on_message(instance_id, message)

        @app_instance.on_event
        async def event_handler(event):
            await self.on_event(instance_id, event)

        @app_instance.on_verification
        async def verification_handler(verification):
            await self.on_verification(instance_id, verification)

        # Create stop event for this instance
        self.stop_events[instance_id] = Event()

    def run_instance(self, instance_id: str):
        """Run a single instance in a separate thread with error handling and automatic restart."""
        instance_data = self.instances[instance_id]
        stop_event = self.stop_events[instance_id]

        while not stop_event.is_set():
            try:
                logger.info(f"Starting instance {instance_id} on port {instance_data['port']}")
                instance_data['app'].run(host='0.0.0.0', port=instance_data['port'])

            except Exception as e:
                logger.error(f"Error in instance {instance_id}: {str(e)}")
                instance_data['retry_count'] += 1

                if instance_data['retry_count'] > instance_data['max_retries']:
                    logger.error(f"Max retries exceeded for instance {instance_id}")
                    break

                logger.info(f"Restarting instance {instance_id} in {instance_data['retry_delay']} seconds...")
                time.sleep(instance_data['retry_delay'])

                # Recreate the instance
                instance_data['app'] = WhatsApp(**instance_data['kwargs'])
                continue

    async def on_message(self, instance_id: str, message: Message):
        """Handle and forward incoming messages."""
        logger.info(f"Message from instance {instance_id}: {message}")
        if instance_id in self.forwarders and 'send' in self.forwarders[instance_id]:
            await self.forwarders[instance_id]['send'](message)

    async def on_event(self, instance_id: str, event):
        """Handle events."""
        logger.info(f"Event from instance {instance_id}: {event}")
        if instance_id in self.forwarders and 'sende' in self.forwarders[instance_id] and self.forwarders[instance_id]['sende'] is not None:
            self.forwarders[instance_id]['sende'](event)

    async def on_verification(self, instance_id: str, verification):
        """Handle verification events."""
        logger.info(f"Verification from instance {instance_id}: {verification}")

    def run_all_instances(self):
        """Start all instances in separate daemon threads."""
        # Start message forwarder

        # Start all instances
        for instance_id in self.instances:
            thread = Thread(
                target=self.run_instance,
                args=(instance_id,),
                daemon=True,
                name=f"WhatsApp-{instance_id}"
            )
            self.threads[instance_id] = thread
            thread.start()

    def signal_handler(self, signum, frame):
        """Handle shutdown signals gracefully."""
        logger.info("Shutdown signal received, stopping all instances...")
        self.stop_all_instances()
        sys.exit(0)

    def stop_all_instances(self):
        """Stop all running instances gracefully."""
        for instance_id in self.stop_events:
            self.stop_events[instance_id].set()

        for thread in self.threads.values():
            thread.join(timeout=5)

    def create_manager_ui(self, start_assistant):
        """Enhanced WhatsApp Manager UI with instance configuration controls"""
        self.runner = start_assistant
        def ui_manager():
            # Track instance states and messages
            original_on_message = self.on_message

            async def enhanced_on_message(instance_id: str, message):
                self.last_messages[instance_id] = datetime.now()
                await original_on_message(instance_id, message)

            self.on_message = enhanced_on_message

            def create_instance_card(instance_id: str):
                """Interactive instance control card"""
                config = self.instances[instance_id]
                with ui.card().classes('w-full p-4 mb-4 bg-gray-50 dark:bg-gray-800').style("background-color: var(--background-color) !important"):
                    # Header Section
                    with ui.row().classes('w-full justify-between items-center'):
                        ui.label(f'📱 {instance_id}').classes('text-xl font-bold')

                        # Status Indicator
                        ui.label().bind_text_from(
                            self.threads, instance_id,
                            lambda x: 'Running' if x and x.is_alive() else 'Stopped'
                        )

                    # Configuration Display
                    with ui.grid(columns=2).classes('w-full mt-4 gap-2'):

                        ui.label('port:').classes('font-bold')
                        ui.label(config['port'])

                        ui.label('Last Activity:').classes('font-bold')
                        ui.label().bind_text_from(
                            self.last_messages, instance_id,
                            lambda x: x.strftime("%Y-%m-%d %H:%M:%S") if x else 'Never'
                        )

                    # Action Controls
                    with ui.row().classes('w-full mt-4 gap-2'):
                        with ui.button(icon='settings', on_click=lambda: edit_dialog.open()).props('flat'):
                            ui.tooltip('Configure')

                        with ui.button(icon='refresh', color='orange',
                                       on_click=lambda: self.restart_instance(instance_id)):
                            ui.tooltip('Restart')

                        with ui.button(icon='stop', color='red',
                                       on_click=lambda: self.stop_instance(instance_id)):
                            ui.tooltip('Stop')

                    # Edit Configuration Dialog
                    with ui.dialog() as edit_dialog, ui.card().classes('p-4 gap-4'):
                        new_key = ui.input('API Key', value=config['phone_number_id'].get('key', ''))
                        new_number = ui.input('Phone Number', value=config['phone_number_id'].get('number', ''))

                        with ui.row().classes('w-full justify-end'):
                            ui.button('Cancel', on_click=edit_dialog.close)
                            ui.button('Save', color='primary', on_click=lambda: (
                                self.update_instance_config(
                                    instance_id,
                                    new_key.value,
                                    new_number.value
                                ),
                                edit_dialog.close()
                            ))

            # Main UI Layout
            with ui.column().classes('w-full max-w-4xl mx-auto p-4'):
                ui.label('WhatsApp Instance Manager').classes('text-2xl font-bold mb-6')

                # Add Instance Section
                with ui.expansion('➕ Add New Instance', icon='add').classes('w-full'):
                    with ui.card().classes('w-full p-4 mt-2'):
                        instance_id = ui.input('Instance ID').classes('w-full')
                        token = ui.input('API Token').classes('w-full')
                        phone_key = ui.input('Phone Number Key').classes('w-full')
                        phone_number = ui.input('Phone Number').classes('w-full')

                        with ui.row().classes('w-full justify-end gap-2'):
                            ui.button('Clear', on_click=lambda: (
                                instance_id.set_value(''),
                                token.set_value(''),
                                phone_key.set_value(''),
                                phone_number.set_value('')
                            ))
                            ui.button('Create', color='positive', on_click=lambda: (
                                self.add_update_instance(
                                    instance_id.value,
                                    token.value,
                                    phone_key.value,
                                    phone_number.value
                                ),
                                instances_container.refresh()
                            ))

                # Instances Display
                instances_container = ui.column().classes('w-full')
                with instances_container:
                    for instance_id in self.instances:
                        create_instance_card(instance_id)

        return ui_manager

    # Add to manager class
    def add_update_instance(self, instance_id, token, phone_key, phone_number):
        """Add or update instance configuration"""
        if instance_id in self.instances:
            self.stop_instance(instance_id)
            del self.instances[instance_id]

        self.add_instance(
            instance_id,
            token=token,
            phone_number_id={
                'key': phone_key,
                'number': phone_number
            },
            verify_token=os.getenv("WHATSAPP_VERIFY_TOKEN")
        )
        self.start_instance(instance_id)

    def update_instance_config(self, instance_id, new_key, new_number):
        """Update existing instance configuration"""
        if instance_id in self.instances:
            self.instances[instance_id]['phone_number_id'] = {
                'key': new_key,
                'number': new_number
            }
            self.restart_instance(instance_id)

    def restart_instance(self, instance_id):
        """Safe restart of instance"""
        self.stop_instance(instance_id)
        self.start_instance(instance_id)

    def stop_instance(self, instance_id):
        """Graceful stop of instance"""
        if instance_id in self.threads:
            self.stop_events[instance_id].set()
            self.threads[instance_id].join(timeout=5)
            del self.threads[instance_id]

    def start_instance(self, instance_id):
        """Start instance thread"""
        print("Starting Istance")

        self.stop_events[instance_id] = threading.Event()
        self.threads[instance_id] = threading.Thread(
            target=self.run_instance,
            args=(instance_id,),
            daemon=True
        )
        self.threads[instance_id].start()
        print("Running starter", self.runner())
add_instance(instance_id, **kwargs)

Add a new app instance to the manager with automatic port assignment.

Source code in toolboxv2/mods/WhatsAppTb/server.py
def add_instance(self, instance_id: str, **kwargs):
    """
    Add a new app instance to the manager with automatic port assignment.
    """
    if instance_id in self.instances:
        raise ValueError(f"Instance {instance_id} already exists")

    port = self.get_next_available_port()
    app_instance = WhatsApp(**kwargs)

    self.instances[instance_id] = {
        'app': app_instance,
        'port': port,
        'kwargs': kwargs,
        'phone_number_id': kwargs.get("phone_number_id", {}),
        'retry_count': 0,
        'max_retries': 3,
        'retry_delay': 5
    }
    self.keys[instance_id] = Code.one_way_hash(kwargs.get("phone_number_id", {}).get("key"), "WhatsappAppManager",
                                               self.pepper)
    self.forwarders[instance_id] = {}

    # Set up message handlers
    @app_instance.on_message
    async def message_handler(message):
        await self.on_message(instance_id, message)

    @app_instance.on_event
    async def event_handler(event):
        await self.on_event(instance_id, event)

    @app_instance.on_verification
    async def verification_handler(verification):
        await self.on_verification(instance_id, verification)

    # Create stop event for this instance
    self.stop_events[instance_id] = Event()
add_update_instance(instance_id, token, phone_key, phone_number)

Add or update instance configuration

Source code in toolboxv2/mods/WhatsAppTb/server.py
def add_update_instance(self, instance_id, token, phone_key, phone_number):
    """Add or update instance configuration"""
    if instance_id in self.instances:
        self.stop_instance(instance_id)
        del self.instances[instance_id]

    self.add_instance(
        instance_id,
        token=token,
        phone_number_id={
            'key': phone_key,
            'number': phone_number
        },
        verify_token=os.getenv("WHATSAPP_VERIFY_TOKEN")
    )
    self.start_instance(instance_id)
create_manager_ui(start_assistant)

Enhanced WhatsApp Manager UI with instance configuration controls

Source code in toolboxv2/mods/WhatsAppTb/server.py
def create_manager_ui(self, start_assistant):
    """Enhanced WhatsApp Manager UI with instance configuration controls"""
    self.runner = start_assistant
    def ui_manager():
        # Track instance states and messages
        original_on_message = self.on_message

        async def enhanced_on_message(instance_id: str, message):
            self.last_messages[instance_id] = datetime.now()
            await original_on_message(instance_id, message)

        self.on_message = enhanced_on_message

        def create_instance_card(instance_id: str):
            """Interactive instance control card"""
            config = self.instances[instance_id]
            with ui.card().classes('w-full p-4 mb-4 bg-gray-50 dark:bg-gray-800').style("background-color: var(--background-color) !important"):
                # Header Section
                with ui.row().classes('w-full justify-between items-center'):
                    ui.label(f'📱 {instance_id}').classes('text-xl font-bold')

                    # Status Indicator
                    ui.label().bind_text_from(
                        self.threads, instance_id,
                        lambda x: 'Running' if x and x.is_alive() else 'Stopped'
                    )

                # Configuration Display
                with ui.grid(columns=2).classes('w-full mt-4 gap-2'):

                    ui.label('port:').classes('font-bold')
                    ui.label(config['port'])

                    ui.label('Last Activity:').classes('font-bold')
                    ui.label().bind_text_from(
                        self.last_messages, instance_id,
                        lambda x: x.strftime("%Y-%m-%d %H:%M:%S") if x else 'Never'
                    )

                # Action Controls
                with ui.row().classes('w-full mt-4 gap-2'):
                    with ui.button(icon='settings', on_click=lambda: edit_dialog.open()).props('flat'):
                        ui.tooltip('Configure')

                    with ui.button(icon='refresh', color='orange',
                                   on_click=lambda: self.restart_instance(instance_id)):
                        ui.tooltip('Restart')

                    with ui.button(icon='stop', color='red',
                                   on_click=lambda: self.stop_instance(instance_id)):
                        ui.tooltip('Stop')

                # Edit Configuration Dialog
                with ui.dialog() as edit_dialog, ui.card().classes('p-4 gap-4'):
                    new_key = ui.input('API Key', value=config['phone_number_id'].get('key', ''))
                    new_number = ui.input('Phone Number', value=config['phone_number_id'].get('number', ''))

                    with ui.row().classes('w-full justify-end'):
                        ui.button('Cancel', on_click=edit_dialog.close)
                        ui.button('Save', color='primary', on_click=lambda: (
                            self.update_instance_config(
                                instance_id,
                                new_key.value,
                                new_number.value
                            ),
                            edit_dialog.close()
                        ))

        # Main UI Layout
        with ui.column().classes('w-full max-w-4xl mx-auto p-4'):
            ui.label('WhatsApp Instance Manager').classes('text-2xl font-bold mb-6')

            # Add Instance Section
            with ui.expansion('➕ Add New Instance', icon='add').classes('w-full'):
                with ui.card().classes('w-full p-4 mt-2'):
                    instance_id = ui.input('Instance ID').classes('w-full')
                    token = ui.input('API Token').classes('w-full')
                    phone_key = ui.input('Phone Number Key').classes('w-full')
                    phone_number = ui.input('Phone Number').classes('w-full')

                    with ui.row().classes('w-full justify-end gap-2'):
                        ui.button('Clear', on_click=lambda: (
                            instance_id.set_value(''),
                            token.set_value(''),
                            phone_key.set_value(''),
                            phone_number.set_value('')
                        ))
                        ui.button('Create', color='positive', on_click=lambda: (
                            self.add_update_instance(
                                instance_id.value,
                                token.value,
                                phone_key.value,
                                phone_number.value
                            ),
                            instances_container.refresh()
                        ))

            # Instances Display
            instances_container = ui.column().classes('w-full')
            with instances_container:
                for instance_id in self.instances:
                    create_instance_card(instance_id)

    return ui_manager
get_next_available_port()

Find the next available port in the range.

Source code in toolboxv2/mods/WhatsAppTb/server.py
def get_next_available_port(self) -> int:
    """Find the next available port in the range."""
    used_ports = {instance['port'] for instance in self.instances.values()}
    for port in range(self.start_port, self.start_port + self.port_range):
        if port not in used_ports:
            return port
    raise RuntimeError("No available ports in range")
on_event(instance_id, event) async

Handle events.

Source code in toolboxv2/mods/WhatsAppTb/server.py
async def on_event(self, instance_id: str, event):
    """Handle events."""
    logger.info(f"Event from instance {instance_id}: {event}")
    if instance_id in self.forwarders and 'sende' in self.forwarders[instance_id] and self.forwarders[instance_id]['sende'] is not None:
        self.forwarders[instance_id]['sende'](event)
on_message(instance_id, message) async

Handle and forward incoming messages.

Source code in toolboxv2/mods/WhatsAppTb/server.py
async def on_message(self, instance_id: str, message: Message):
    """Handle and forward incoming messages."""
    logger.info(f"Message from instance {instance_id}: {message}")
    if instance_id in self.forwarders and 'send' in self.forwarders[instance_id]:
        await self.forwarders[instance_id]['send'](message)
on_verification(instance_id, verification) async

Handle verification events.

Source code in toolboxv2/mods/WhatsAppTb/server.py
async def on_verification(self, instance_id: str, verification):
    """Handle verification events."""
    logger.info(f"Verification from instance {instance_id}: {verification}")
restart_instance(instance_id)

Safe restart of instance

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 327-330
def restart_instance(self, instance_id):
    """Safe restart of instance"""
    self.stop_instance(instance_id)
    self.start_instance(instance_id)
run_all_instances()

Start all instances in separate daemon threads.

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 166-179
def run_all_instances(self):
    """Start all instances in separate daemon threads."""
    # Start message forwarder

    # Start all instances
    for instance_id in self.instances:
        thread = Thread(
            target=self.run_instance,
            args=(instance_id,),
            daemon=True,
            name=f"WhatsApp-{instance_id}"
        )
        self.threads[instance_id] = thread
        thread.start()
run_instance(instance_id)

Run a single instance in a separate thread with error handling and automatic restart.

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 125-148
def run_instance(self, instance_id: str):
    """Run a single instance in a separate thread with error handling and automatic restart."""
    instance_data = self.instances[instance_id]
    stop_event = self.stop_events[instance_id]

    while not stop_event.is_set():
        try:
            logger.info(f"Starting instance {instance_id} on port {instance_data['port']}")
            instance_data['app'].run(host='0.0.0.0', port=instance_data['port'])

        except Exception as e:
            logger.error(f"Error in instance {instance_id}: {str(e)}")
            instance_data['retry_count'] += 1

            if instance_data['retry_count'] > instance_data['max_retries']:
                logger.error(f"Max retries exceeded for instance {instance_id}")
                break

            logger.info(f"Restarting instance {instance_id} in {instance_data['retry_delay']} seconds...")
            time.sleep(instance_data['retry_delay'])

            # Recreate the instance
            instance_data['app'] = WhatsApp(**instance_data['kwargs'])
            continue
signal_handler(signum, frame)

Handle shutdown signals gracefully.

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 181-185
def signal_handler(self, signum, frame):
    """Handle shutdown signals gracefully."""
    logger.info("Shutdown signal received, stopping all instances...")
    self.stop_all_instances()
    sys.exit(0)
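
For graceful shutdown, the handler can be registered with the standard signal module. A minimal sketch; `manager` is a hypothetical, already-constructed instance manager exposing the method above:

import signal

# Wire OS shutdown signals to the manager's handler so Ctrl+C or a
# service stop triggers stop_all_instances() before exiting.
signal.signal(signal.SIGINT, manager.signal_handler)
signal.signal(signal.SIGTERM, manager.signal_handler)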
start_instance(instance_id)

Start instance thread

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 339-350
def start_instance(self, instance_id):
    """Start instance thread"""
    print("Starting Istance")

    self.stop_events[instance_id] = threading.Event()
    self.threads[instance_id] = threading.Thread(
        target=self.run_instance,
        args=(instance_id,),
        daemon=True
    )
    self.threads[instance_id].start()
    print("Running starter", self.runner())
stop_all_instances()

Stop all running instances gracefully.

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 187-193
def stop_all_instances(self):
    """Stop all running instances gracefully."""
    for instance_id in self.stop_events:
        self.stop_events[instance_id].set()

    for thread in self.threads.values():
        thread.join(timeout=5)
stop_instance(instance_id)

Graceful stop of instance

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 332-337
def stop_instance(self, instance_id):
    """Graceful stop of instance"""
    if instance_id in self.threads:
        self.stop_events[instance_id].set()
        self.threads[instance_id].join(timeout=5)
        del self.threads[instance_id]
update_instance_config(instance_id, new_key, new_number)

Update existing instance configuration

Source code in toolboxv2/mods/WhatsAppTb/server.py, lines 318-325
def update_instance_config(self, instance_id, new_key, new_number):
    """Update existing instance configuration"""
    if instance_id in self.instances:
        self.instances[instance_id]['phone_number_id'] = {
            'key': new_key,
            'number': new_number
        }
        self.restart_instance(instance_id)
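
Taken together, a typical lifecycle looks like the following sketch. `manager` and the keyword names are illustrative, not part of the documented API; the positional argument order follows the UI handler shown above:

# Hypothetical usage sketch for the instance manager.
manager.add_update_instance("support-bot", "API_TOKEN", "PHONE_KEY", "+15550001234")
manager.start_instance("support-bot")

# Swap credentials later; update_instance_config restarts the instance.
manager.update_instance_config("support-bot", new_key="NEW_KEY", new_number="+15550005678")
manager.stop_instance("support-bot")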

utils

ProgressMessenger
Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 15-101
class ProgressMessenger:
    def __init__(self, messenger, recipient_phone: str, max_steps: int = 5, emoji_set: list[str] = None, content=None):
        self.messenger = messenger
        self.recipient_phone = recipient_phone
        self.max_steps = max_steps
        self.emoji_set = emoji_set or ["⬜", "⬛", "🟩", "🟨", "🟦"]
        self.message_id = None
        self.content = content

    def send_initial_message(self, mode: str = "progress"):
        """
        Sends the initial message. Modes can be 'progress' or 'loading'.
        """
        if mode == "progress":
            emoji_legend = "\n".join(
                f"{emoji} - Step {i + 1}" for i, emoji in enumerate(self.emoji_set)
            )
            content = (
                "Progress is being updated in real-time!\n\n"
                "Legend:\n"
                f"{emoji_legend}\n\n"
                "Stay tuned for updates!"
            )
        elif mode == "loading":
            content = (
                "Loading in progress! 🌀\n"
                "The indicator will loop until work is done."
            )
        else:
            raise ValueError("Invalid mode. Use 'progress' or 'loading'.")

        if self.content is not None:
            content += '\n'+self.content
        message = self.messenger.create_message(content=content, to=self.recipient_phone)
        response = message.send(sender=0)
        self.message_id = response.get("messages", [{}])[0].get("id")
        logging.info(f"Initial message sent: {content}")
        return self.message_id

    def update_progress(self, step_flag: threading.Event):
        """
        Updates the reaction on the message to represent progress.
        """
        if not self.message_id:
            raise ValueError("Message ID not found. Ensure the initial message is sent first.")
        message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
        for step in range(self.max_steps):
            emoji = self.emoji_set[step % len(self.emoji_set)]
            message.react(emoji)
            logging.info(f"Progress updated: Step {step + 1}/{self.max_steps} with emoji {emoji}")
            while not step_flag.is_set():
                time.sleep(0.5)
            step_flag.clear()
        # Final acknowledgment
        message.react("👍")
        logging.info("Progress completed with final acknowledgment.")

    def update_loading(self, stop_flag: threading.Event):
        """
        Continuously updates the reaction to represent a looping 'loading' indicator.
        """
        if not self.message_id:
            raise ValueError("Message ID not found. Ensure the initial message is sent first.")
        message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
        step = 0
        while not stop_flag.is_set():
            emoji = self.emoji_set[step % len(self.emoji_set)]
            message.react(emoji)
            logging.info(f"Loading update: {emoji}")
            time.sleep(1)  # Faster updates for loading
            step += 1
        # Final acknowledgment
        message.react("✅")
        logging.info("Loading completed with final acknowledgment.")
        message.reply("✅Done✅")

    def start_progress_in_background(self, step_flag):
        """
        Starts the progress update in a separate thread.
        """
        threading.Thread(target=self.update_progress, args=(step_flag, ), daemon=True).start()

    def start_loading_in_background(self, stop_flag: threading.Event):
        """
        Starts the loading update in a separate thread.
        """
        threading.Thread(target=self.update_loading, args=(stop_flag,), daemon=True).start()
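
A minimal usage sketch for the progress mode; `messenger` is assumed to be a WhatsApp messenger object providing the create_message interface used above:

import threading
import time

progress = ProgressMessenger(messenger, recipient_phone="+15550001234", max_steps=3)
progress.send_initial_message(mode="progress")

step_done = threading.Event()
progress.start_progress_in_background(step_done)

for _ in range(3):
    time.sleep(2)      # ... do one unit of work ...
    step_done.set()    # signal the background thread to advance one step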
send_initial_message(mode='progress')

Sends the initial message. Modes can be 'progress' or 'loading'.

Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 24-52
def send_initial_message(self, mode: str = "progress"):
    """
    Sends the initial message. Modes can be 'progress' or 'loading'.
    """
    if mode == "progress":
        emoji_legend = "\n".join(
            f"{emoji} - Step {i + 1}" for i, emoji in enumerate(self.emoji_set)
        )
        content = (
            "Progress is being updated in real-time!\n\n"
            "Legend:\n"
            f"{emoji_legend}\n\n"
            "Stay tuned for updates!"
        )
    elif mode == "loading":
        content = (
            "Loading in progress! 🌀\n"
            "The indicator will loop until work is done."
        )
    else:
        raise ValueError("Invalid mode. Use 'progress' or 'loading'.")

    if self.content is not None:
        content += '\n'+self.content
    message = self.messenger.create_message(content=content, to=self.recipient_phone)
    response = message.send(sender=0)
    self.message_id = response.get("messages", [{}])[0].get("id")
    logging.info(f"Initial message sent: {content}")
    return self.message_id
start_loading_in_background(stop_flag)

Starts the loading update in a separate thread.

Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 97-101
def start_loading_in_background(self, stop_flag: threading.Event):
    """
    Starts the loading update in a separate thread.
    """
    threading.Thread(target=self.update_loading, args=(stop_flag,), daemon=True).start()
start_progress_in_background(step_flag)

Starts the progress update in a separate thread.

Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 91-95
def start_progress_in_background(self, step_flag):
    """
    Starts the progress update in a separate thread.
    """
    threading.Thread(target=self.update_progress, args=(step_flag, ), daemon=True).start()
update_loading(stop_flag)

Continuously updates the reaction to represent a looping 'loading' indicator.

Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 72-89
def update_loading(self, stop_flag: threading.Event):
    """
    Continuously updates the reaction to represent a looping 'loading' indicator.
    """
    if not self.message_id:
        raise ValueError("Message ID not found. Ensure the initial message is sent first.")
    message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
    step = 0
    while not stop_flag.is_set():
        emoji = self.emoji_set[step % len(self.emoji_set)]
        message.react(emoji)
        logging.info(f"Loading update: {emoji}")
        time.sleep(1)  # Faster updates for loading
        step += 1
    # Final acknowledgment
    message.react("✅")
    logging.info("Loading completed with final acknowledgment.")
    message.reply("✅Done✅")
update_progress(step_flag)

Updates the reaction on the message to represent progress.

Source code in toolboxv2/mods/WhatsAppTb/utils.py, lines 54-70
def update_progress(self, step_flag: threading.Event):
    """
    Updates the reaction on the message to represent progress.
    """
    if not self.message_id:
        raise ValueError("Message ID not found. Ensure the initial message is sent first.")
    message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
    for step in range(self.max_steps):
        emoji = self.emoji_set[step % len(self.emoji_set)]
        message.react(emoji)
        logging.info(f"Progress updated: Step {step + 1}/{self.max_steps} with emoji {emoji}")
        while not step_flag.is_set():
            time.sleep(0.5)
        step_flag.clear()
    # Final acknowledgment
    message.react("👍")
    logging.info("Progress completed with final acknowledgment.")

cli_functions

replace_bracketed_content(text, replacements, inlist=False)

Replaces bracketed content with the corresponding values from a dictionary.

:param text: The text to process, as a string.
:param replacements: A dictionary of key-value pairs for the substitution.
:param inlist: If True, substitute positionally into the space-split text and return the resulting list instead of a string.
:return: The modified text.

Source code in toolboxv2/mods/cli_functions.py, lines 180-203
def replace_bracketed_content(text, replacements, inlist=False):
    """
    Replaces bracketed content with the corresponding values from a dictionary.

    :param text: The text to process, as a string.
    :param replacements: A dictionary of key-value pairs for the substitution.
    :param inlist: If True, substitute positionally into the space-split text
        and return the resulting list instead of a string.
    :return: The modified text (or list, if inlist is True).
    """
    # Find all occurrences of text in square brackets
    matches = re.findall(r'\[([^\]]+)\]', text)

    # Replace each match with the corresponding value from the dictionary.
    # In list mode the i-th match is assumed to line up with the i-th
    # space-separated token of the input.
    as_list = text.split(' ')
    i = 0
    for key in matches:
        if key in replacements:
            if not inlist:
                text = text.replace(f'[{key}]', str(replacements[key]))
            else:
                as_list[i] = replacements[key]
        i += 1
    if not inlist:
        return text
    return as_list
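
For example:

replacements = {"name": "Alice", "city": "Berlin"}

replace_bracketed_content("Hello [name] from [city]", replacements)
# -> 'Hello Alice from Berlin'

# inlist=True substitutes positionally into the space-split text and
# returns a list; it assumes each token is one bracketed placeholder.
replace_bracketed_content("[name] [city]", replacements, inlist=True)
# -> ['Alice', 'Berlin']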

isaa

CodingAgent

live
AsyncCodeDetector

Bases: NodeVisitor

Detect async code and top-level await

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 695-718
class AsyncCodeDetector(ast.NodeVisitor):
    """Detect async code and top-level await"""
    def __init__(self):
        self.has_async = False
        self.has_top_level_await = False
        self.await_nodes = []

    def visit_AsyncFunctionDef(self, node):
        self.has_async = True
        self.generic_visit(node)

    def visit_Await(self, node):
        self.has_async = True
        # Track all await nodes
        self.await_nodes.append(node)
        # Check if this await is at top level
        parent = node
        while hasattr(parent, 'parent'):
            parent = parent.parent
            if isinstance(parent, ast.AsyncFunctionDef | ast.FunctionDef):
                break
        else:
            self.has_top_level_await = True
        self.generic_visit(node)
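
The detector climbs `.parent` links, which the standard ast module does not set, so a caller has to attach them first. A sketch (PyCF_ALLOW_TOP_LEVEL_AWAIT lets the parser accept module-level await):

import ast

source = "data = await fetch_data()"
tree = compile(source, "<string>", "exec",
               flags=ast.PyCF_ONLY_AST | ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)

# Attach parent links so visit_Await can climb out of the node.
for node in ast.walk(tree):
    for child in ast.iter_child_nodes(node):
        child.parent = node

detector = AsyncCodeDetector()
detector.visit(tree)
print(detector.has_async, detector.has_top_level_await)  # True True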
BrowserWrapper

A wrapper for browser agent functionality that allows seamless interaction with web browsers.

This class provides a system-agnostic interface to control browsers through the browser_use library, supporting both local and remote browser connections.

Attributes:

    browser: The Browser instance for web automation
    agent: The BrowserAgent instance for intelligent browsing
    is_initialized (bool): Whether the browser has been initialized
    config (Dict): Configuration for the browser
    remote_url (Optional[str]): URL for remote browser connection if applicable

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1547-2094
class BrowserWrapper:
    """
    A wrapper for browser agent functionality that allows seamless interaction with web browsers.

    This class provides a system-agnostic interface to control browsers through the browser_use
    library, supporting both local and remote browser connections.

    Attributes:
        browser: The Browser instance for web automation
        agent: The BrowserAgent instance for intelligent browsing
        is_initialized (bool): Whether the browser has been initialized
        config (Dict): Configuration for the browser
        remote_url (Optional[str]): URL for remote browser connection if applicable
    """

    def __init__(self,
                 llm: Any = None,
                 headless: bool = False,
                 chrome_path: str | None = None,
                 remote_url: str | None = None,
                 api_key: str | None=None,
                 config: dict[str, Any] | None = None):
        """
        Initialize the browser wrapper.

        Args:
            llm: Language model to use for the browser agent
            headless: Whether to run the browser in headless mode
            chrome_path: Path to local Chrome executable
            remote_url: URL for remote browser connection (wss or cdp)
            api_key: API key for the chosen provider; falls back to the matching environment variable
            config: Additional browser configuration
        """
        self.is_initialized = False
        self.agent = None
        self.browser = None
        self.context = None
        import os

        from pydantic import SecretStr
        def pars(x):
            return x.split('/')[-1] if '/' in x else x
        if llm is None:
            llm = 'google/gemini-2.0-flash-exp'
        if not isinstance(llm, str):
            llm = llm
        elif 'deepseek' in llm:
            from langchain_openai import ChatOpenAI
            llm = ChatOpenAI(base_url='https://api.deepseek.com/v1', model=pars(llm), api_key=SecretStr(api_key or os.getenv('DEEPSEEK_API_KEY')))
        elif 'google' in llm:
            from langchain_google_genai import ChatGoogleGenerativeAI
            llm = ChatGoogleGenerativeAI(model=pars(llm), api_key=SecretStr(api_key or os.getenv('GEMINI_API_KEY')))
        elif 'claude' in llm:
            from langchain_anthropic import ChatAnthropic
            llm = ChatAnthropic(
                model_name=pars(llm),
                temperature=0.0,
                timeout=400,  # Increase for complex tasks
                api_key=SecretStr(api_key or os.getenv('ANTHROPIC_API_KEY')))
        elif isinstance(llm, str):
            from langchain_openai import ChatOpenAI
            llm = ChatOpenAI(
                model=pars(llm),
                temperature=0.0,api_key=SecretStr(api_key or os.getenv('OPENAI_API_KEY'))
            )



        self.llm = ChatLiteLLM(model=llm) if isinstance(llm,str) else llm
        self.parser = None

        browser_config = {
            'headless': headless,
            'disable_security': True
        }

        if config:
            browser_config.update(config)

        self.config = browser_config

        # Set up remote connection if specified
        if remote_url:
            if remote_url.startswith('wss://'):
                self.config['wss_url'] = remote_url
            elif remote_url.startswith('http'):
                self.config['cdp_url'] = remote_url
            self.remote_url = remote_url
        else:
            self.remote_url = None

        # Set up local Chrome path if specified
        if not headless and remote_url is None and chrome_path is None:
            import os
            import platform

            def get_chrome_path():
                """
                Returns the correct path to the Chrome executable based on the OS.
                If Chrome is not found, returns None.
                """
                chrome_paths = {
                    "Darwin": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",  # macOS
                    "Windows": "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe",  # Windows
                    "Linux": "/usr/bin/google-chrome"  # Linux
                }

                system = platform.system()
                chrome_path_ = chrome_paths.get(system)

                if chrome_path_ and os.path.isfile(chrome_path_):
                    return chrome_path_

                return None

            chrome_path = get_chrome_path()
        if chrome_path:
            self.config['chrome_instance_path'] = chrome_path


    async def initialize(self):
        """Initialize the browser and context"""
        if self.is_initialized:
            return

        try:
            # Create browser instance
            self.browser = Browser(
                config=BrowserConfig(**self.config)
            )

            # Create context configuration with better settings for scraping
            context_config = BrowserContextConfig(
                wait_for_network_idle_page_load_time=3.0,
                highlight_elements=True,
                viewport_expansion=500,
                wait_between_actions=0.5  # Add a small delay between actions
            )

            # Initialize context
            self.context = await self.browser.new_context(config=context_config)

            # Create an initial page
            browser_state = await self.context.get_state()
            if not browser_state or not browser_state.tabs:
                # If no tabs exist, create a new page
                await self.browser.get_playwright_browser()
                browser_context = await self.context.get_playwright_context()
                self.page = await browser_context.new_page()
            else:
                # Use the existing active tab
                self.page = await self.context.get_current_page()

            self.is_initialized = True

        except Exception as e:
            # Clean up resources in case of initialization error
            if self.context:
                await self.context.close()
            if self.browser:
                await self.browser.close()
            raise Exception(f"Failed to initialize browser: {str(e)}")

    async def create_agent(self, task: str, initial_actions=None):
        """Create a browser agent with the specified task"""
        #if not self.is_initialized:
        #    await self.initialize()

        self.agent = BrowserAgent(
            task=task,
            llm=self.llm,
            #browser_context=self.context,
            initial_actions=initial_actions,
            #browser=self.browser,
        )
        return self.agent

    async def run(self, task: str):
        """Run the browser agent with the specified task"""
        agent = await self.create_agent(task)
        result = await agent.run()
        return result

    async def navigate(self, url: str):
        """Navigate to a URL"""
        if not self.is_initialized:
            await self.initialize()

        # Get the current active page or create a new one if needed
        try:
            page = await self.context.get_current_page()
            if not page:
                browser_context = await self.context.get_playwright_context()
                page = await browser_context.new_page()

            # Navigate to the URL
            await page.goto(url)
            self.page = page
            return page
        except Exception as e:
            raise Exception(f"Failed to navigate to {url}: {str(e)}")

    async def get_tabs(self):
        """Get all open tabs/pages"""
        if not self.is_initialized:
            await self.initialize()

        browser_state = await self.context.get_state()
        return browser_state.tabs if browser_state else []

    async def switch_to_tab(self, tab_index: int):
        """Switch to a specific tab by index"""
        if not self.is_initialized:
            await self.initialize()

        browser_state = await self.context.get_state()
        if not browser_state or not browser_state.tabs or tab_index >= len(browser_state.tabs):
            raise ValueError(f"Tab index {tab_index} is out of range")

        tab_id = browser_state.tabs[tab_index].id
        await self.context.switch_to_tab(tab_id)
        self.page = await self.context.get_current_page()
        return self.page

    async def create_new_tab(self):
        """Create a new tab/page"""
        if not self.is_initialized:
            await self.initialize()

        browser_context = await self.context.get_playwright_context()
        new_page = await browser_context.new_page()
        self.page = new_page
        return new_page

    async def close_current_tab(self):
        """Close the current tab/page"""
        if not self.is_initialized:
            return

        page = await self.context.get_current_page()
        if page:
            await page.close()

        # Update the current page reference
        browser_state = await self.context.get_state()
        if browser_state and browser_state.tabs:
            await self.switch_to_tab(0)

    async def execute_js(self, code: str, page=None):
        """Execute JavaScript code in the browser context"""
        if not self.is_initialized:
            await self.initialize()

        if page is None:
            pages = await self.context.pages()
            if not pages:
                page = await self.context.new_page()
            else:
                page = pages[0]

        result = await page.evaluate(code)
        return result

    async def save_context(self):
        """Save browser context state"""
        if not self.is_initialized:
            return None

        return await self.browser.export_context(self.context)

    async def restore_context(self, context_data):
        """Restore browser context from saved state"""
        if not self.is_initialized:
            await self.initialize()

        await self.browser.import_context(context_data)

    async def close(self):
        """Close the browser"""
        if self.is_initialized and self.browser:
            await self.browser.close()
            self.is_initialized = False

    # Add these methods to the BrowserWrapper class

    def get_parser(self):
        """Get a content parser for the browser"""
        if self.parser is None:
            self.parser = WebContentParser(self)
        return self.parser

    async def extract_markdown(self, page=None, selector="body", include_images=True):
        """
        Extract content from a webpage and convert it to markdown.
        """
        if not self.is_initialized:
            await self.initialize()

        if page is None:
            pages = await self.context.pages()
            if not pages:
                page = await self.context.new_page()
            else:
                page = pages[0]

        # JavaScript to convert HTML to markdown
        script = """
        (selector, includeImages) => {
            const element = document.querySelector(selector);
            if (!element) return '';

            // Simple HTML to Markdown conversion function
            const htmlToMarkdown = (node) => {
                let result = '';

                // Process text nodes
                if (node.nodeType === Node.TEXT_NODE) {
                    return node.textContent;
                }

                // Process element nodes
                if (node.nodeType === Node.ELEMENT_NODE) {
                    const tagName = node.tagName.toLowerCase();

                    // Process by tag type
                    switch(tagName) {
                        case 'h1': return '# ' + getInnerText(node) + '\\n\\n';
                        case 'h2': return '## ' + getInnerText(node) + '\\n\\n';
                        case 'h3': return '### ' + getInnerText(node) + '\\n\\n';
                        case 'h4': return '#### ' + getInnerText(node) + '\\n\\n';
                        case 'h5': return '##### ' + getInnerText(node) + '\\n\\n';
                        case 'h6': return '###### ' + getInnerText(node) + '\\n\\n';
                        case 'p': return getInnerText(node) + '\\n\\n';
                        case 'br': return '\\n';
                        case 'hr': return '---\\n\\n';
                        case 'b':
                        case 'strong': return '**' + getInnerText(node) + '**';
                        case 'i':
                        case 'em': return '*' + getInnerText(node) + '*';
                        case 'a': {
                            const href = node.getAttribute('href');
                            return '[' + getInnerText(node) + '](' + href + ')';
                        }
                        case 'img': {
                            if (!includeImages) return '';
                            const src = node.getAttribute('src');
                            const alt = node.getAttribute('alt') || 'image';
                            return '![' + alt + '](' + src + ')\\n\\n';
                        }
                        case 'code':
                        case 'pre': return '`' + getInnerText(node) + '`';
                        case 'ul': {
                            let listResult = '\\n';
                            Array.from(node.children).forEach(li => {
                                if (li.tagName.toLowerCase() === 'li') {
                                    listResult += '- ' + getInnerText(li) + '\\n';
                                }
                            });
                            return listResult + '\\n';
                        }
                        case 'ol': {
                            let listResult = '\\n';
                            Array.from(node.children).forEach((li, index) => {
                                if (li.tagName.toLowerCase() === 'li') {
                                    listResult += (index + 1) + '. ' + getInnerText(li) + '\\n';
                                }
                            });
                            return listResult + '\\n';
                        }
                        case 'blockquote': return '> ' + getInnerText(node) + '\\n\\n';
                        default: {
                            // Process child nodes for other elements
                            for (const child of node.childNodes) {
                                result += htmlToMarkdown(child);
                            }
                            return result;
                        }
                    }
                }

                return '';
            };

            // Helper function to get inner text with special handling
            const getInnerText = (node) => {
                let text = '';
                for (const child of node.childNodes) {
                    text += htmlToMarkdown(child);
                }
                return text;
            };

            return htmlToMarkdown(element);
        }
        """

        try:
            # Try to convert to markdown using our script
            markdown = await page.evaluate(script, selector, include_images)

            # Add a title if we have one
            title = await page.title()
            if title and not markdown.startswith("# "):
                markdown = f"# {title}\n\n{markdown}"

            return markdown
        except Exception:
            # Fallback to basic extraction if script fails
            content = await self.extract_text(page, selector)
            title = await page.title()
            return f"# {title}\n\n{content}"

    async def take_scrolling_screenshot(self, page=None, full_page=True, path=None,
                                        initial_delay=1000, scroll_delay=500, format='png'):
        """
        Take a screenshot with scrolling functionality and delay.
        """
        if not self.is_initialized:
            await self.initialize()

        if page is None:
            pages = await self.context.pages()
            if not pages:
                page = await self.context.new_page()
            else:
                page = pages[0]

        # Wait for the initial delay to let content load
        if initial_delay > 0:
            await page.wait_for_timeout(initial_delay)

        if full_page and scroll_delay > 0:
            # Get page dimensions
            dimensions = await page.evaluate("""
                () => {
                    return {
                        width: document.documentElement.scrollWidth,
                        height: document.documentElement.scrollHeight,
                        windowHeight: window.innerHeight
                    }
                }
            """)

            # Scroll down the page gradually to trigger lazy loading
            current_position = 0
            while current_position < dimensions['height']:
                await page.evaluate(f"window.scrollTo(0, {current_position})")
                await page.wait_for_timeout(scroll_delay)
                current_position += dimensions['windowHeight'] // 2  # Scroll by half viewport

        # Reset scroll position to top
        await page.evaluate("window.scrollTo(0, 0)")

        # Take the screenshot
        screenshot_params = {
            'full_page': full_page,
            'type': format
        }

        if path:
            screenshot_params['path'] = path

        return await page.screenshot(**screenshot_params)

    async def extract_text(self, page=None, selector="body"):
        """
        Extract plain text from a webpage.
        """
        if not self.is_initialized:
            await self.initialize()

        if page is None:
            pages = await self.context.pages()
            if not pages:
                page = await self.context.new_page()
            else:
                page = pages[0]

        text = await page.evaluate("""
            (selector) => {
                const element = document.querySelector(selector);
                return element ? element.innerText : '';
            }
        """, selector)

        return text

    async def extract_structured_content(self, page=None, config=None):
        """
        Extract structured content from a webpage based on a configuration.
        """
        if not self.is_initialized:
            await self.initialize()

        if page is None:
            pages = await self.context.pages()
            if not pages:
                page = await self.context.new_page()
            else:
                page = pages[0]

        if not config:
            # Default configuration if none provided
            config = {
                'title': 'h1',
                'headings': 'h2, h3, h4, h5, h6',
                'paragraphs': 'p',
                'links': 'a',
                'images': 'img'
            }

        result = {}

        for key, selector in config.items():
            if key == 'links':
                # Extract links with their href and text
                result[key] = await page.evaluate("""
                    (selector) => {
                        return Array.from(document.querySelectorAll(selector))
                            .map(el => ({
                                text: el.innerText.trim(),
                                href: el.href
                            }))
                            .filter(item => item.text && item.href);
                    }
                """, selector)
            elif key == 'images':
                # Extract images with their src and alt
                result[key] = await page.evaluate("""
                    (selector) => {
                        return Array.from(document.querySelectorAll(selector))
                            .map(el => ({
                                src: el.src,
                                alt: el.alt || ''
                            }))
                            .filter(item => item.src);
                    }
                """, selector)
            else:
                # Extract text content for other elements
                result[key] = await page.evaluate("""
                    (selector) => {
                        return Array.from(document.querySelectorAll(selector))
                            .map(el => el.innerText.trim())
                            .filter(text => text);
                    }
                """, selector)

        return result
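
A minimal end-to-end sketch, assuming the matching provider API key (for the default model, GEMINI_API_KEY) is set in the environment:

import asyncio

async def main():
    browser = BrowserWrapper(headless=True)
    try:
        # navigate() initializes the browser lazily on first use.
        page = await browser.navigate("https://example.com")
        print(await browser.extract_markdown(page=page))
    finally:
        await browser.close()

asyncio.run(main())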
__init__(llm=None, headless=False, chrome_path=None, remote_url=None, api_key=None, config=None)

Initialize the browser wrapper.

Parameters:

    llm (Any, default None): Language model to use for the browser agent
    headless (bool, default False): Whether to run the browser in headless mode
    chrome_path (str | None, default None): Path to local Chrome executable
    remote_url (str | None, default None): URL for remote browser connection (wss or cdp)
    api_key (str | None, default None): API key for the chosen provider; falls back to the matching environment variable
    config (dict[str, Any] | None, default None): Additional browser configuration
Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1562-1663
def __init__(self,
             llm: Any = None,
             headless: bool = False,
             chrome_path: str | None = None,
             remote_url: str | None = None,
             api_key: str | None=None,
             config: dict[str, Any] | None = None):
    """
    Initialize the browser wrapper.

    Args:
        llm: Language model to use for the browser agent
        headless: Whether to run the browser in headless mode
        chrome_path: Path to local Chrome executable
        remote_url: URL for remote browser connection (wss or cdp)
        api_key: API key for the chosen provider; falls back to the matching environment variable
        config: Additional browser configuration
    """
    self.is_initialized = False
    self.agent = None
    self.browser = None
    self.context = None
    import os

    from pydantic import SecretStr
    def pars(x):
        return x.split('/')[-1] if '/' in x else x
    if llm is None:
        llm = 'google/gemini-2.0-flash-exp'
    if not isinstance(llm, str):
        llm = llm
    elif 'deepseek' in llm:
        from langchain_openai import ChatOpenAI
        llm = ChatOpenAI(base_url='https://api.deepseek.com/v1', model=pars(llm), api_key=SecretStr(api_key or os.getenv('DEEPSEEK_API_KEY')))
    elif 'google' in llm:
        from langchain_google_genai import ChatGoogleGenerativeAI
        llm = ChatGoogleGenerativeAI(model=pars(llm), api_key=SecretStr(api_key or os.getenv('GEMINI_API_KEY')))
    elif 'claude' in llm:
        from langchain_anthropic import ChatAnthropic
        llm = ChatAnthropic(
            model_name=pars(llm),
            temperature=0.0,
            timeout=400,  # Increase for complex tasks
            api_key=SecretStr(api_key or os.getenv('ANTHROPIC_API_KEY')))
    elif isinstance(llm, str):
        from langchain_openai import ChatOpenAI
        llm = ChatOpenAI(
            model=pars(llm),
            temperature=0.0,api_key=SecretStr(api_key or os.getenv('OPENAI_API_KEY'))
        )



    self.llm = ChatLiteLLM(model=llm) if isinstance(llm,str) else llm
    self.parser = None

    browser_config = {
        'headless': headless,
        'disable_security': True
    }

    if config:
        browser_config.update(config)

    self.config = browser_config

    # Set up remote connection if specified
    if remote_url:
        if remote_url.startswith('wss://'):
            self.config['wss_url'] = remote_url
        elif remote_url.startswith('http'):
            self.config['cdp_url'] = remote_url
        self.remote_url = remote_url
    else:
        self.remote_url = None

    # Set up local Chrome path if specified
    if not headless and remote_url is None and chrome_path is None:
        import os
        import platform

        def get_chrome_path():
            """
            Returns the correct path to the Chrome executable based on the OS.
            If Chrome is not found, returns None.
            """
            chrome_paths = {
                "Darwin": "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",  # macOS
                "Windows": "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe",  # Windows
                "Linux": "/usr/bin/google-chrome"  # Linux
            }

            system = platform.system()
            chrome_path_ = chrome_paths.get(system)

            if chrome_path_ and os.path.isfile(chrome_path_):
                return chrome_path_

            return None

        chrome_path = get_chrome_path()
    if chrome_path:
        self.config['chrome_instance_path'] = chrome_path
close() async

Close the browser

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1823-1827
async def close(self):
    """Close the browser"""
    if self.is_initialized and self.browser:
        await self.browser.close()
        self.is_initialized = False
close_current_tab() async

Close the current tab/page

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1780-1792
async def close_current_tab(self):
    """Close the current tab/page"""
    if not self.is_initialized:
        return

    page = await self.context.get_current_page()
    if page:
        await page.close()

    # Update the current page reference
    browser_state = await self.context.get_state()
    if browser_state and browser_state.tabs:
        await self.switch_to_tab(0)
create_agent(task, initial_actions=None) async

Create a browser agent with the specified task

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1709-1721
async def create_agent(self, task: str, initial_actions=None):
    """Create a browser agent with the specified task"""
    #if not self.is_initialized:
    #    await self.initialize()

    self.agent = BrowserAgent(
        task=task,
        llm=self.llm,
        #browser_context=self.context,
        initial_actions=initial_actions,
        #browser=self.browser,
    )
    return self.agent
create_new_tab() async

Create a new tab/page

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1770-1778
async def create_new_tab(self):
    """Create a new tab/page"""
    if not self.is_initialized:
        await self.initialize()

    browser_context = await self.context.get_playwright_context()
    new_page = await browser_context.new_page()
    self.page = new_page
    return new_page
execute_js(code, page=None) async

Execute JavaScript code in the browser context

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1794-1807
async def execute_js(self, code: str, page=None):
    """Execute JavaScript code in the browser context"""
    if not self.is_initialized:
        await self.initialize()

    if page is None:
        pages = await self.context.pages()
        if not pages:
            page = await self.context.new_page()
        else:
            page = pages[0]

    result = await page.evaluate(code)
    return result
extract_markdown(page=None, selector='body', include_images=True) async

Extract content from a webpage and convert it to markdown.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1837-1956
async def extract_markdown(self, page=None, selector="body", include_images=True):
    """
    Extract content from a webpage and convert it to markdown.
    """
    if not self.is_initialized:
        await self.initialize()

    if page is None:
        pages = await self.context.pages()
        if not pages:
            page = await self.context.new_page()
        else:
            page = pages[0]

    # JavaScript to convert HTML to markdown
    script = """
    (selector, includeImages) => {
        const element = document.querySelector(selector);
        if (!element) return '';

        // Simple HTML to Markdown conversion function
        const htmlToMarkdown = (node) => {
            let result = '';

            // Process text nodes
            if (node.nodeType === Node.TEXT_NODE) {
                return node.textContent;
            }

            // Process element nodes
            if (node.nodeType === Node.ELEMENT_NODE) {
                const tagName = node.tagName.toLowerCase();

                // Process by tag type
                switch(tagName) {
                    case 'h1': return '# ' + getInnerText(node) + '\\n\\n';
                    case 'h2': return '## ' + getInnerText(node) + '\\n\\n';
                    case 'h3': return '### ' + getInnerText(node) + '\\n\\n';
                    case 'h4': return '#### ' + getInnerText(node) + '\\n\\n';
                    case 'h5': return '##### ' + getInnerText(node) + '\\n\\n';
                    case 'h6': return '###### ' + getInnerText(node) + '\\n\\n';
                    case 'p': return getInnerText(node) + '\\n\\n';
                    case 'br': return '\\n';
                    case 'hr': return '---\\n\\n';
                    case 'b':
                    case 'strong': return '**' + getInnerText(node) + '**';
                    case 'i':
                    case 'em': return '*' + getInnerText(node) + '*';
                    case 'a': {
                        const href = node.getAttribute('href');
                        return '[' + getInnerText(node) + '](' + href + ')';
                    }
                    case 'img': {
                        if (!includeImages) return '';
                        const src = node.getAttribute('src');
                        const alt = node.getAttribute('alt') || 'image';
                        return '![' + alt + '](' + src + ')\\n\\n';
                    }
                    case 'code':
                    case 'pre': return '`' + getInnerText(node) + '`';
                    case 'ul': {
                        let listResult = '\\n';
                        Array.from(node.children).forEach(li => {
                            if (li.tagName.toLowerCase() === 'li') {
                                listResult += '- ' + getInnerText(li) + '\\n';
                            }
                        });
                        return listResult + '\\n';
                    }
                    case 'ol': {
                        let listResult = '\\n';
                        Array.from(node.children).forEach((li, index) => {
                            if (li.tagName.toLowerCase() === 'li') {
                                listResult += (index + 1) + '. ' + getInnerText(li) + '\\n';
                            }
                        });
                        return listResult + '\\n';
                    }
                    case 'blockquote': return '> ' + getInnerText(node) + '\\n\\n';
                    default: {
                        // Process child nodes for other elements
                        for (const child of node.childNodes) {
                            result += htmlToMarkdown(child);
                        }
                        return result;
                    }
                }
            }

            return '';
        };

        // Helper function to get inner text with special handling
        const getInnerText = (node) => {
            let text = '';
            for (const child of node.childNodes) {
                text += htmlToMarkdown(child);
            }
            return text;
        };

        return htmlToMarkdown(element);
    }
    """

    try:
        # Try to convert to markdown using our script
        markdown = await page.evaluate(script, selector, include_images)

        # Add a title if we have one
        title = await page.title()
        if title and not markdown.startswith("# "):
            markdown = f"# {title}\n\n{markdown}"

        return markdown
    except Exception:
        # Fallback to basic extraction if script fails
        content = await self.extract_text(page, selector)
        title = await page.title()
        return f"# {title}\n\n{content}"
extract_structured_content(page=None, config=None) async

Extract structured content from a webpage based on a configuration.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 2033-2094
async def extract_structured_content(self, page=None, config=None):
    """
    Extract structured content from a webpage based on a configuration.
    """
    if not self.is_initialized:
        await self.initialize()

    if page is None:
        pages = await self.context.pages()
        if not pages:
            page = await self.context.new_page()
        else:
            page = pages[0]

    if not config:
        # Default configuration if none provided
        config = {
            'title': 'h1',
            'headings': 'h2, h3, h4, h5, h6',
            'paragraphs': 'p',
            'links': 'a',
            'images': 'img'
        }

    result = {}

    for key, selector in config.items():
        if key == 'links':
            # Extract links with their href and text
            result[key] = await page.evaluate("""
                (selector) => {
                    return Array.from(document.querySelectorAll(selector))
                        .map(el => ({
                            text: el.innerText.trim(),
                            href: el.href
                        }))
                        .filter(item => item.text && item.href);
                }
            """, selector)
        elif key == 'images':
            # Extract images with their src and alt
            result[key] = await page.evaluate("""
                (selector) => {
                    return Array.from(document.querySelectorAll(selector))
                        .map(el => ({
                            src: el.src,
                            alt: el.alt || ''
                        }))
                        .filter(item => item.src);
                }
            """, selector)
        else:
            # Extract text content for other elements
            result[key] = await page.evaluate("""
                (selector) => {
                    return Array.from(document.querySelectorAll(selector))
                        .map(el => el.innerText.trim())
                        .filter(text => text);
                }
            """, selector)

    return result
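
A sketch of a custom extraction config, to be run inside an async function with an initialized `browser` wrapper; the selectors are illustrative:

config = {
    "title": "h1",
    "prices": ".price",          # any CSS selector works
    "links": "a.product-link",   # 'links' and 'images' get special handling
}
data = await browser.extract_structured_content(config=config)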
extract_text(page=None, selector='body') async

Extract plain text from a webpage.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 2010-2031
async def extract_text(self, page=None, selector="body"):
    """
    Extract plain text from a webpage.
    """
    if not self.is_initialized:
        await self.initialize()

    if page is None:
        pages = await self.context.pages()
        if not pages:
            page = await self.context.new_page()
        else:
            page = pages[0]

    text = await page.evaluate("""
        (selector) => {
            const element = document.querySelector(selector);
            return element ? element.innerText : '';
        }
    """, selector)

    return text
get_parser()

Get a content parser for the browser

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1831-1835
def get_parser(self):
    """Get a content parser for the browser"""
    if self.parser is None:
        self.parser = WebContentParser(self)
    return self.parser
get_tabs() async

Get all open tabs/pages

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1748-1754
async def get_tabs(self):
    """Get all open tabs/pages"""
    if not self.is_initialized:
        await self.initialize()

    browser_state = await self.context.get_state()
    return browser_state.tabs if browser_state else []
initialize() async

Initialize the browser and context

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1666-1707
async def initialize(self):
    """Initialize the browser and context"""
    if self.is_initialized:
        return

    try:
        # Create browser instance
        self.browser = Browser(
            config=BrowserConfig(**self.config)
        )

        # Create context configuration with better settings for scraping
        context_config = BrowserContextConfig(
            wait_for_network_idle_page_load_time=3.0,
            highlight_elements=True,
            viewport_expansion=500,
            wait_between_actions=0.5  # Add a small delay between actions
        )

        # Initialize context
        self.context = await self.browser.new_context(config=context_config)

        # Create an initial page
        browser_state = await self.context.get_state()
        if not browser_state or not browser_state.tabs:
            # If no tabs exist, create a new page
            await self.browser.get_playwright_browser()
            browser_context = await self.context.get_playwright_context()
            self.page = await browser_context.new_page()
        else:
            # Use the existing active tab
            self.page = await self.context.get_current_page()

        self.is_initialized = True

    except Exception as e:
        # Clean up resources in case of initialization error
        if self.context:
            await self.context.close()
        if self.browser:
            await self.browser.close()
        raise Exception(f"Failed to initialize browser: {str(e)}")
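
Because every public method calls `initialize()` on demand, explicit startup is optional; it is mainly useful for failing fast at application start. A sketch (assuming `browser` is an instance of this wrapper):

async def ensure_started(browser):
    # Idempotent: returns immediately once is_initialized is set.
    await browser.initialize()
    assert browser.is_initialized
    return browser.page  # the page selected or created during startup
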
navigate(url) async

Navigate to a URL

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def navigate(self, url: str):
    """Navigate to a URL"""
    if not self.is_initialized:
        await self.initialize()

    # Get the current active page or create a new one if needed
    try:
        page = await self.context.get_current_page()
        if not page:
            browser_context = await self.context.get_playwright_context()
            page = await browser_context.new_page()

        # Navigate to the URL
        await page.goto(url)
        self.page = page
        return page
    except Exception as e:
        raise Exception(f"Failed to navigate to {url}: {str(e)}")
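
`navigate()` returns the active page, so follow-up calls can use Playwright's page API directly. A sketch (URL and instance name are illustrative):

async def open_and_title(browser):
    page = await browser.navigate("https://example.com")
    return await page.evaluate("() => document.title")
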
restore_context(context_data) async

Restore browser context from saved state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def restore_context(self, context_data):
    """Restore browser context from saved state"""
    if not self.is_initialized:
        await self.initialize()

    await self.browser.import_context(context_data)
run(task) async

Run the browser agent with the specified task

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def run(self, task: str):
    """Run the browser agent with the specified task"""
    agent = await self.create_agent(task)
    result = await agent.run()
    return result
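
A hedged sketch of driving the agent (the task string is free-form natural language; `create_agent` is defined elsewhere in this class):

async def do_task(browser):
    return await browser.run("Open example.com and report the page title")
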
save_context() async

Save browser context state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def save_context(self):
    """Save browser context state"""
    if not self.is_initialized:
        return None

    return await self.browser.export_context(self.context)
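
`save_context()` and `restore_context()` pair into a checkpoint/restore cycle; the exported state is an opaque blob produced by the underlying browser library. A sketch:

async def checkpoint(browser):
    state = await browser.save_context()       # None if never initialized
    # ... perform work that may mutate cookies or session storage ...
    if state is not None:
        await browser.restore_context(state)   # re-import the saved state
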
switch_to_tab(tab_index) async

Switch to a specific tab by index

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def switch_to_tab(self, tab_index: int):
    """Switch to a specific tab by index"""
    if not self.is_initialized:
        await self.initialize()

    browser_state = await self.context.get_state()
    if not browser_state or not browser_state.tabs or tab_index >= len(browser_state.tabs):
        raise ValueError(f"Tab index {tab_index} is out of range")

    tab_id = browser_state.tabs[tab_index].id
    await self.context.switch_to_tab(tab_id)
    self.page = await self.context.get_current_page()
    return self.page
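
`get_tabs()` and `switch_to_tab()` combine naturally for iterating over open pages; out-of-range indices raise ValueError. A sketch:

async def print_tab_titles(browser):
    tabs = await browser.get_tabs()
    for index in range(len(tabs)):
        page = await browser.switch_to_tab(index)
        print(index, await page.evaluate("() => document.title"))
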
take_scrolling_screenshot(page=None, full_page=True, path=None, initial_delay=1000, scroll_delay=500, format='png') async

Take a screenshot with scrolling functionality and delay.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def take_scrolling_screenshot(self, page=None, full_page=True, path=None,
                                    initial_delay=1000, scroll_delay=500, format='png'):
    """
    Take a screenshot with scrolling functionality and delay.
    """
    if not self.is_initialized:
        await self.initialize()

    if page is None:
        pages = await self.context.pages()
        if not pages:
            page = await self.context.new_page()
        else:
            page = pages[0]

    # Wait for the initial delay to let content load
    if initial_delay > 0:
        await page.wait_for_timeout(initial_delay)

    if full_page and scroll_delay > 0:
        # Get page dimensions
        dimensions = await page.evaluate("""
            () => {
                return {
                    width: document.documentElement.scrollWidth,
                    height: document.documentElement.scrollHeight,
                    windowHeight: window.innerHeight
                }
            }
        """)

        # Scroll down the page gradually to trigger lazy loading
        current_position = 0
        while current_position < dimensions['height']:
            await page.evaluate(f"window.scrollTo(0, {current_position})")
            await page.wait_for_timeout(scroll_delay)
            current_position += dimensions['windowHeight'] // 2  # Scroll by half viewport

    # Reset scroll position to top
    await page.evaluate("window.scrollTo(0, 0)")

    # Take the screenshot
    screenshot_params = {
        'full_page': full_page,
        'type': format
    }

    if path:
        screenshot_params['path'] = path

    return await page.screenshot(**screenshot_params)
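
A sketch of a full-page capture (file name and delays are illustrative; the call returns the raw image bytes and, when `path` is set, also writes the file):

async def capture_full_page(browser):
    return await browser.take_scrolling_screenshot(
        full_page=True,
        path="page.png",
        initial_delay=2000,   # ms to let the page settle before scrolling
        scroll_delay=250,     # ms between scroll steps, for lazy loading
        format="png",
    )
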
CargoRustInterface

Usage:

# Create interface
cargo_interface = CargoRustInterface()

# Set up new project
await cargo_interface.setup_project("hello_rust")

# Add a dependency
await cargo_interface.add_dependency("serde", "1.0")

# Write and run some code
code = """
fn main() {
    println!("Hello, Rust!");
}
"""
result = await cargo_interface.run_code(code)

# Modify code
new_function = """
fn main() {
    println!("Modified Hello, Rust!");
}
"""
await cargo_interface.modify_code(new_function, "main()")

# Build and test
await cargo_interface.build()
await cargo_interface.test()

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class CargoRustInterface:
    '''Usage :
# Create interface
cargo_interface = CargoRustInterface()

# Set up new project
await cargo_interface.setup_project("hello_rust")

# Add a dependency
await cargo_interface.add_dependency("serde", "1.0")

# Write and run some code
code = """
fn main() {
    println!("Hello, Rust!");
}
"""
result = await cargo_interface.run_code(code)

# Modify code
new_function = """
fn main() {
    println!("Modified Hello, Rust!");
}
"""
await cargo_interface.modify_code(new_function, "main()")

# Build and test
await cargo_interface.build()
await cargo_interface.test()

    '''
    def __init__(self, session_dir=None, auto_remove=True):
        """Initialize the Rust/Cargo interface"""
        self.auto_remove = auto_remove
        self._session_dir = session_dir or Path.home() / '.cargo_sessions'
        self._session_dir.mkdir(exist_ok=True)
        self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
        self.output_history = {}
        self._execution_count = 0
        self.current_project = None

    def reset(self):
        """Reset the interface state"""
        if self.auto_remove and self.current_project:
            shutil.rmtree(self.current_project, ignore_errors=True)
        self.output_history.clear()
        self._execution_count = 0
        self.current_project = None

    async def setup_project(self, name: str) -> str:
        """Set up a new Cargo project"""
        try:
            project_path = self.vfs.base_dir / name
            if project_path.exists():
                shutil.rmtree(project_path)

            result = subprocess.run(
                ['cargo', 'new', str(project_path)],
                capture_output=True,
                text=True, check=True
            )

            if result.returncode != 0:
                return f"Error creating project: {result.stderr}"

            self.current_project = project_path
            return f"Created new project at {project_path}"

        except Exception as e:
            return f"Failed to create project: {str(e)}"

    async def add_dependency(self, name: str, version: str | None = None) -> str:
        """Add a dependency to Cargo.toml"""
        if not self.current_project:
            return "No active project"

        try:
            cargo_toml = self.current_project / "Cargo.toml"
            if not cargo_toml.exists():
                return "Cargo.toml not found"

            cmd = ['cargo', 'add', name]
            if version:
                cmd.extend(['--vers', version])

            result = subprocess.run(
                cmd,
                cwd=self.current_project,
                capture_output=True,
                text=True,check=True
            )

            return result.stdout if result.returncode == 0 else f"Error: {result.stderr}"

        except Exception as e:
            return f"Failed to add dependency: {str(e)}"

    async def build(self, release: bool = False) -> str:
        """Build the project"""
        if not self.current_project:
            return "No active project"

        try:
            cmd = ['cargo', 'build']
            if release:
                cmd.append('--release')

            result = subprocess.run(
                cmd,
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            return result.stdout if result.returncode == 0 else f"Build error: {result.stderr}"

        except Exception as e:
            return f"Build failed: {str(e)}"

    async def test(self) -> str:
        """Run project tests"""
        if not self.current_project:
            return "No active project"

        try:
            result = subprocess.run(
                ['cargo', 'test'],
                cwd=self.current_project,
                capture_output=True,
                text=True, check=True
            )

            return result.stdout if result.returncode == 0 else f"Test error: {result.stderr}"

        except Exception as e:
            return f"Tests failed: {str(e)}"

    async def run_code(self, code: str) -> str:
        """Run Rust code"""
        if not self.current_project:
            return "No active project"

        try:
            # Write code to main.rs
            main_rs = self.current_project / "src" / "main.rs"
            with open(main_rs, 'w') as f:
                f.write(code)

            # Build and run
            build_result = subprocess.run(
                ['cargo', 'build'],
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            if build_result.returncode != 0:
                return f"Compilation error: {build_result.stderr}"

            run_result = subprocess.run(
                ['cargo', 'run'],
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            self._execution_count += 1
            output = {
                'code': code,
                'stdout': run_result.stdout,
                'stderr': run_result.stderr,
                'result': run_result.returncode == 0
            }
            self.output_history[self._execution_count] = output

            return run_result.stdout if run_result.returncode == 0 else f"Runtime error: {run_result.stderr}"

        except Exception as e:
            return f"Execution failed: {str(e)}"

    async def modify_code(self, code: str, object_name: str, file: str = "src/main.rs") -> str:
        """Modify existing Rust code"""
        if not self.current_project:
            return "No active project"

        try:
            file_path = self.current_project / file
            if not file_path.exists():
                return f"File {file} not found"

            with open(file_path) as f:
                content = f.read()

            # Handle function modification
            if object_name.endswith("()"):
                func_name = object_name[:-2]
                # Find and replace function definition
                pattern = f"fn {func_name}.*?}}(?=\n|$)"
                updated_content = re.sub(pattern, code.strip(), content, flags=re.DOTALL)
            else:
                # Handle other modifications (structs, constants, etc.)
                pattern = f"{object_name}.*?(?=\n|$)"
                updated_content = re.sub(pattern, code.strip(), content)

            with open(file_path, 'w') as f:
                f.write(updated_content)

            return f"Modified {object_name} in {file}"

        except Exception as e:
            return f"Modification failed: {str(e)}"

    def save_session(self, name: str):
        """Save current session state"""
        session_file = self._session_dir / f"{name}.json"
        state = {
            'output_history': self.output_history,
            'current_project': str(self.current_project) if self.current_project else None
        }

        with open(session_file, 'w') as f:
            json.dump(state, f)

    def load_session(self, name: str):
        """Load saved session state"""
        session_file = self._session_dir / f"{name}.json"
        if session_file.exists():
            with open(session_file) as f:
                state = json.load(f)
                self.output_history = state['output_history']
                self.current_project = Path(state['current_project']) if state['current_project'] else None
__init__(session_dir=None, auto_remove=True)

Initialize the Rust/Cargo interface

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def __init__(self, session_dir=None, auto_remove=True):
    """Initialize the Rust/Cargo interface"""
    self.auto_remove = auto_remove
    self._session_dir = session_dir or Path.home() / '.cargo_sessions'
    self._session_dir.mkdir(exist_ok=True)
    self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
    self.output_history = {}
    self._execution_count = 0
    self.current_project = None
add_dependency(name, version=None) async

Add a dependency to Cargo.toml

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def add_dependency(self, name: str, version: str | None = None) -> str:
    """Add a dependency to Cargo.toml"""
    if not self.current_project:
        return "No active project"

    try:
        cargo_toml = self.current_project / "Cargo.toml"
        if not cargo_toml.exists():
            return "Cargo.toml not found"

        cmd = ['cargo', 'add', name]
        if version:
            cmd.extend(['--vers', version])

        result = subprocess.run(
            cmd,
            cwd=self.current_project,
            capture_output=True,
            text=True,check=True
        )

        return result.stdout if result.returncode == 0 else f"Error: {result.stderr}"

    except Exception as e:
        return f"Failed to add dependency: {str(e)}"
build(release=False) async

Build the project

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def build(self, release: bool = False) -> str:
    """Build the project"""
    if not self.current_project:
        return "No active project"

    try:
        cmd = ['cargo', 'build']
        if release:
            cmd.append('--release')

        result = subprocess.run(
            cmd,
            cwd=self.current_project,
            capture_output=True,
            text=True
        )

        return result.stdout if result.returncode == 0 else f"Build error: {result.stderr}"

    except Exception as e:
        return f"Build failed: {str(e)}"
load_session(name)

Load saved session state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def load_session(self, name: str):
    """Load saved session state"""
    session_file = self._session_dir / f"{name}.json"
    if session_file.exists():
        with open(session_file) as f:
            state = json.load(f)
            self.output_history = state['output_history']
            self.current_project = Path(state['current_project']) if state['current_project'] else None
modify_code(code, object_name, file='src/main.rs') async

Modify existing Rust code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def modify_code(self, code: str, object_name: str, file: str = "src/main.rs") -> str:
    """Modify existing Rust code"""
    if not self.current_project:
        return "No active project"

    try:
        file_path = self.current_project / file
        if not file_path.exists():
            return f"File {file} not found"

        with open(file_path) as f:
            content = f.read()

        # Handle function modification
        if object_name.endswith("()"):
            func_name = object_name[:-2]
            # Find and replace function definition
            pattern = f"fn {func_name}.*?}}(?=\n|$)"
            updated_content = re.sub(pattern, code.strip(), content, flags=re.DOTALL)
        else:
            # Handle other modifications (structs, constants, etc.)
            pattern = f"{object_name}.*?(?=\n|$)"
            updated_content = re.sub(pattern, code.strip(), content)

        with open(file_path, 'w') as f:
            f.write(updated_content)

        return f"Modified {object_name} in {file}"

    except Exception as e:
        return f"Modification failed: {str(e)}"
reset()

Reset the interface state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def reset(self):
    """Reset the interface state"""
    if self.auto_remove and self.current_project:
        shutil.rmtree(self.current_project, ignore_errors=True)
    self.output_history.clear()
    self._execution_count = 0
    self.current_project = None
run_code(code) async

Run Rust code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def run_code(self, code: str) -> str:
    """Run Rust code"""
    if not self.current_project:
        return "No active project"

    try:
        # Write code to main.rs
        main_rs = self.current_project / "src" / "main.rs"
        with open(main_rs, 'w') as f:
            f.write(code)

        # Build and run
        build_result = subprocess.run(
            ['cargo', 'build'],
            cwd=self.current_project,
            capture_output=True,
            text=True
        )

        if build_result.returncode != 0:
            return f"Compilation error: {build_result.stderr}"

        run_result = subprocess.run(
            ['cargo', 'run'],
            cwd=self.current_project,
            capture_output=True,
            text=True
        )

        self._execution_count += 1
        output = {
            'code': code,
            'stdout': run_result.stdout,
            'stderr': run_result.stderr,
            'result': run_result.returncode == 0
        }
        self.output_history[self._execution_count] = output

        return run_result.stdout if run_result.returncode == 0 else f"Runtime error: {run_result.stderr}"

    except Exception as e:
        return f"Execution failed: {str(e)}"
save_session(name)

Save current session state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def save_session(self, name: str):
    """Save current session state"""
    session_file = self._session_dir / f"{name}.json"
    state = {
        'output_history': self.output_history,
        'current_project': str(self.current_project) if self.current_project else None
    }

    with open(session_file, 'w') as f:
        json.dump(state, f)
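
Only the output history and the project path are persisted, not build artifacts, so the project directory must still exist when a session is reloaded. A sketch:

def roundtrip_session(cargo_interface):
    cargo_interface.save_session("rust_demo")
    # Later, possibly from a fresh interface sharing the same session_dir:
    cargo_interface.load_session("rust_demo")
    return cargo_interface.current_project
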
setup_project(name) async

Set up a new Cargo project

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def setup_project(self, name: str) -> str:
    """Set up a new Cargo project"""
    try:
        project_path = self.vfs.base_dir / name
        if project_path.exists():
            shutil.rmtree(project_path)

        result = subprocess.run(
            ['cargo', 'new', str(project_path)],
            capture_output=True,
            text=True, check=True
        )

        if result.returncode != 0:
            return f"Error creating project: {result.stderr}"

        self.current_project = project_path
        return f"Created new project at {project_path}"

    except Exception as e:
        return f"Failed to create project: {str(e)}"
test() async

Run project tests

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def test(self) -> str:
    """Run project tests"""
    if not self.current_project:
        return "No active project"

    try:
        result = subprocess.run(
            ['cargo', 'test'],
            cwd=self.current_project,
            capture_output=True,
            text=True, check=True
        )

        return result.stdout if result.returncode == 0 else f"Test error: {result.stderr}"

    except Exception as e:
        return f"Tests failed: {str(e)}"
EnhancedVerboseOutput
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class EnhancedVerboseOutput:
    def __init__(self, verbose: bool = True,print_f=None):
        self.verbose = verbose
        self.print = print_f or print
        self.formatter = VerboseFormatter(self.print)


    async def log_message(self, role: str, content: str):
        """Log chat messages with role-based formatting"""
        if not self.verbose:
            return

        role_formats = {
            'user': (self.formatter.style.GREEN, "👤"),
            'assistant': (self.formatter.style.BLUE, "🤖"),
            'system': (self.formatter.style.YELLOW, "⚙️")
        }

        color_func, icon = role_formats.get(role, (self.formatter.style.WHITE, "•"))
        self.print(f"\n{icon} {color_func(f'[{role}]')}")
        self.print(f"{self.formatter.style.GREY('└─')} {content}\n")

    async def log_think_result(self, result: dict[str, Any]):
        """Log thinking results with structured formatting"""
        if not self.verbose:
            return

        self.formatter.print_section(
            "Action Result",
            f"Action: {result.get('action', 'N/A')}\n"
            f"context: {result.get('context', 'N/A')}\n"
            f"Content:\n{result.get('content', '')}"
        )

    async def log_process_result(self, result: dict[str, Any]):
        """Log processing results with structured formatting"""
        if not self.verbose:
            return

        self.formatter.print_section(
            "Process Result",
            f"Completed: {result.get('is_completed', False)}\n"
            f"Effectiveness: {result.get('effectiveness', 'N/A')}\n"
            f"Recommendations: \n{result.get('recommendations', 'None')}\n"
            f"workflow: \n{result.get('workflow', 'None')}\n"
            f"errors: {result.get('errors', 'None')}\n"
            f"text: {result.get('text', 'None')}"
        )

    def log_header(self, text: str):
        """Log method update with structured formatting"""
        if not self.verbose:
            return

        self.formatter.print_header(text)

    def log_state(self, state: str, user_ns:dict, override=False):
        """Log method update with structured formatting"""
        if not self.verbose and override:
            return

        return self.formatter.print_state(state, user_ns)

    async def process(self, message: str, coroutine):
        if not self.verbose:
            return await coroutine
        if message == "code":
            return await coroutine
        return await self.formatter.process_with_spinner(message, coroutine)
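
A minimal sketch of the logging surface (hedged: VerboseFormatter comes from the same module and is not shown here):

import asyncio

async def demo_logging():
    out = EnhancedVerboseOutput(verbose=True)
    out.log_header("Build phase")
    await out.log_message("user", "Run the build")
    # process() wraps the awaited coroutine in a spinner, except when
    # verbose is off or the message is exactly "code".
    await out.process("Compiling", asyncio.sleep(0.1))
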
log_header(text)

Log method update with structured formatting

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def log_header(self, text: str):
    """Log method update with structured formatting"""
    if not self.verbose:
        return

    self.formatter.print_header(text)
log_message(role, content) async

Log chat messages with role-based formatting

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def log_message(self, role: str, content: str):
    """Log chat messages with role-based formatting"""
    if not self.verbose:
        return

    role_formats = {
        'user': (self.formatter.style.GREEN, "👤"),
        'assistant': (self.formatter.style.BLUE, "🤖"),
        'system': (self.formatter.style.YELLOW, "⚙️")
    }

    color_func, icon = role_formats.get(role, (self.formatter.style.WHITE, "•"))
    self.print(f"\n{icon} {color_func(f'[{role}]')}")
    self.print(f"{self.formatter.style.GREY('└─')} {content}\n")
log_process_result(result) async

Log processing results with structured formatting

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def log_process_result(self, result: dict[str, Any]):
    """Log processing results with structured formatting"""
    if not self.verbose:
        return

    self.formatter.print_section(
        "Process Result",
        f"Completed: {result.get('is_completed', False)}\n"
        f"Effectiveness: {result.get('effectiveness', 'N/A')}\n"
        f"Recommendations: \n{result.get('recommendations', 'None')}\n"
        f"workflow: \n{result.get('workflow', 'None')}\n"
        f"errors: {result.get('errors', 'None')}\n"
        f"text: {result.get('text', 'None')}"
    )
log_state(state, user_ns, override=False)

Log method update with structured formatting

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def log_state(self, state: str, user_ns:dict, override=False):
    """Log method update with structured formatting"""
    if not self.verbose and override:
        return

    return self.formatter.print_state(state, user_ns)
log_think_result(result) async

Log thinking results with structured formatting

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def log_think_result(self, result: dict[str, Any]):
    """Log thinking results with structured formatting"""
    if not self.verbose:
        return

    self.formatter.print_section(
        "Action Result",
        f"Action: {result.get('action', 'N/A')}\n"
        f"context: {result.get('context', 'N/A')}\n"
        f"Content:\n{result.get('content', '')}"
    )
JSExecutionRecord dataclass

Records JavaScript execution details

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
@dataclass
class JSExecutionRecord:
    """Records JavaScript execution details"""
    code: str
    result: Any
    error: str | None = None
    page_state: dict | None = None
    extracted_data: dict | None = None
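
Only `code` and `result` are required; the remaining fields default to None. A construction sketch with illustrative values:

record = JSExecutionRecord(
    code="document.title",
    result="Example Domain",
    page_state={"url": "https://example.com"},
)
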
MockIPython
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class MockIPython:
    def __init__(self, _session_dir=None, auto_remove=True):
        self.auto_remove = auto_remove
        self.output_history = {}
        self._execution_count = 0
        self._session_dir = _session_dir or Path(get_app().appdata) / '.pipeline_sessions'
        self._session_dir.mkdir(exist_ok=True)
        self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
        self._venv_path = self._session_dir / 'venv'
        self.user_ns: dict[str, Any] = {}
        nest_asyncio.apply()
        # Set up virtual environment if it doesn't exist
        with Spinner("Starting virtual environment"):
            self._setup_venv()
        self.reset()

    def _setup_venv(self):
        """Create virtual environment if it doesn't exist"""
        if not self._venv_path.exists():
            try:
                subprocess.run([sys.executable, "-m", "venv", str(self._venv_path)], check=True)
            except subprocess.CalledProcessError as e:
                raise RuntimeError(f"Failed to create virtual environment: {str(e)}")

    def _virtual_open(self, filepath, mode='r', *args, **kwargs):
        """Custom open function that uses virtual filesystem"""
        abs_path = self.vfs._resolve_path(filepath)

        if 'w' in mode or 'a' in mode:
            # Ensure parent directory exists
            abs_path.parent.mkdir(parents=True, exist_ok=True)

        # Use actual filesystem but track in virtual fs
        real_file = open(abs_path, mode, *args, **kwargs)

        if 'r' in mode:
            # Track file content in virtual filesystem when reading
            rel_path = str(abs_path.relative_to(self.vfs.base_dir))
            if rel_path not in self.vfs.virtual_files:
                try:
                    self.vfs.virtual_files[rel_path] = real_file.read()
                    real_file.seek(0)
                except UnicodeDecodeError:
                    # Handle binary files
                    pass

        return real_file

    def reset(self):
        """Reset the interpreter state"""
        self.user_ns = {
            '__name__': '__main__',
            '__builtins__': __builtins__,
            'toolboxv2': toolboxv2,
            '__file__': None,
            '__path__': [str(self.vfs.current_dir)],
            'auto_install': auto_install,
            'modify_code': self.modify_code,
        }
        self.output_history.clear()
        self._execution_count = 0
        if self.auto_remove:
            shutil.rmtree(self.vfs.base_dir, ignore_errors=True)

    def get_namespace(self) -> dict[str, Any]:
        """Get current namespace"""
        return self.user_ns.copy()

    def update_namespace(self, variables: dict[str, Any]):
        """Update namespace with new variables"""
        self.user_ns.update(variables)

    @staticmethod
    def _parse_code(code: str) -> tuple[Any, Any | None, bool, bool]:
        """Parse code and handle top-level await"""
        code_ = ""
        for line in code.split('\n'):
            if line.strip().startswith('#'):
                continue
            if line.strip().startswith('asyncio.run('):
                line = (' ' *(len(line) - len(line.strip()))) + 'await ' + line.strip()[len('asyncio.run('):-1]
            code_ += line + '\n'
        try:
            tree = ast.parse(code)
            # Add parent references
            ParentNodeTransformer().visit(tree)

            # Detect async features
            detector = AsyncCodeDetector()
            detector.visit(tree)

            if detector.has_top_level_await:
                # Wrap code in async function
                wrapped_code = "async def __wrapper():\n"
                wrapped_code += "    global result\n"  # Allow writing to global scope
                wrapped_code += "    result = None\n"
                # add try:
                wrapped_code +="    try:\n"
                # Indent the original code
                wrapped_code += "\n".join(f"        {line}" for line in code.splitlines())
                # Add return statement for last expression
                wrapped_code +="\n    except Exception as e:\n"
                wrapped_code +="        import traceback\n"
                wrapped_code +="        print(traceback.format_exc())\n"
                wrapped_code +="        raise e\n"
                if isinstance(tree.body[-1], ast.Expr):
                    wrapped_code += "\n    return result"

                # Parse and compile wrapped code
                wrapped_tree = ast.parse(wrapped_code)
                return (
                    compile(wrapped_tree, '<exec>', 'exec'),
                    None,
                    True,
                    True
                )

            # Handle regular code
            if isinstance(tree.body[-1], ast.Expr):
                exec_code = ast.Module(
                    body=tree.body[:-1],
                    type_ignores=[]
                )
                eval_code = ast.Expression(
                    body=tree.body[-1].value
                )
                return (
                    compile(exec_code, '<exec>', 'exec'),
                    compile(eval_code, '<eval>', 'eval'),
                    detector.has_async,
                    False
                )

            return (
                compile(tree, '<exec>', 'exec'),
                None,
                detector.has_async,
                False
            )

        except SyntaxError as e:
            lines = code.splitlines()
            if e.lineno and e.lineno <= len(lines):
                line = lines[e.lineno - 1]
                arrow = ' ' * (e.offset - 1) + '^' if e.offset else ''
                error_msg = (
                    f"Syntax error at line {e.lineno}:\n"
                    f"{line}\n"
                    f"{arrow}\n"
                    f"{e.msg}"
                )
            else:
                error_msg = str(e)

            error_msg += traceback.format_exc()

            raise SyntaxError(error_msg) from e

    async def run_cell(self, code: str, live_output: bool = True) -> Any:
        """Async version of run_cell that handles both sync and async code"""
        result = None
        error = None
        tb = None
        original_dir = os.getcwd()

        if live_output:
            stdout_buffer = io.StringIO()
            stderr_buffer = io.StringIO()
            stdout = TeeStream(sys.__stdout__, stdout_buffer)
            stderr = TeeStream(sys.__stderr__, stderr_buffer)
        else:
            stdout = io.StringIO()
            stderr = io.StringIO()

        try:
            # Check if a file is already specified
            original_file = self.user_ns.get('__file__')
            if original_file is None:
                # Create temp file if no file specified
                temp_file = self.vfs.write_file(
                    f'src/temp/_temp_{self._execution_count}.py',
                    code
                )
                # work_ns = self.user_ns.copy()
                self.user_ns['__file__'] = str(temp_file)
            else:
                # Use existing file
                temp_file = Path(original_file)
                # Write code to the existing file
                self.vfs.write_file(temp_file, code)
                #work_ns = self.user_ns.copy()

            self.user_ns['__builtins__'] = __builtins__
            with VirtualEnvContext(self._venv_path) as python_exec:
                try:
                    exec_code, eval_code, is_async, has_top_level_await = self._parse_code(
                        code.encode('utf-8', errors='replace').decode('utf-8')
                    )
                    if exec_code is None:
                        return "No executable code"
                    os.makedirs(str(temp_file.parent.absolute()), exist_ok=True)
                    os.chdir(str(temp_file.parent.absolute()))
                    self.user_ns['PYTHON_EXEC'] = python_exec

                    with redirect_stdout(stdout), redirect_stderr(stderr):
                        if has_top_level_await:
                            try:
                                # Execute wrapped code and await it
                                exec(exec_code, self.user_ns)
                                result = self.user_ns['__wrapper']()
                                if asyncio.iscoroutine(result):
                                    result = await result
                            finally:
                                self.user_ns.pop('__wrapper', None)
                        elif is_async:
                            # Execute async code
                            exec(exec_code, self.user_ns)
                            if eval_code:
                                result = eval(eval_code, self.user_ns)
                                if asyncio.iscoroutine(result):
                                    result = await result
                        else:
                            # Execute sync code
                            exec(exec_code, self.user_ns)
                            if eval_code:
                                result = eval(eval_code, self.user_ns)

                        if result is not None:
                            self.user_ns['_'] = result
                except KeyboardInterrupt:
                    print("Stop execution manuel!")

                except Exception as e:
                    error = str(e)
                    tb = traceback.format_exc()
                    if live_output:
                        sys.__stderr__.write(f"{error}\n{tb}")
                    stderr.write(f"{error}\n{tb}")

                finally:
                    os.chdir(original_dir)
                    self._execution_count += 1
                    # self.user_ns = work_ns.copy()
                    if live_output:
                        stdout_value = stdout_buffer.getvalue()
                        stderr_value = stderr_buffer.getvalue()
                    else:
                        stdout_value = stdout.getvalue()
                        stderr_value = stderr.getvalue()

                    output = {
                        'code': code,
                        'stdout': stdout_value,
                        'stderr': stderr_value,
                        'result': result if result else "stdout"
                    }
                    self.output_history[self._execution_count] = output

                    if not result:
                        result = ""
                    if output['stdout']:
                        result = f"{result}\nstdout:{output['stdout']}"
                    if output['stderr']:
                        result = f"{result}\nstderr:{output['stderr']}"

                    if self.auto_remove and original_file is None:
                        # Only remove temp files, not user-specified files
                        self.vfs.delete_file(temp_file)

                    return result

        except Exception as e:
            error_msg = f"Error executing code: {str(e)}\n{traceback.format_exc()}"
            if live_output:
                sys.__stderr__.write(error_msg)
            return error_msg

    async def modify_code(self, code: str = None, object_name: str = None, file: str = None) -> str:
        '''
        Modify existing code in memory (user namespace) and optionally in the corresponding file.

        This method updates variables, functions, or methods in the current Python session and can
        also update the corresponding source file if specified.

        Args:
            code: New value or implementation for the object
            object_name: Name of the object to modify (variable, function, or method)
            file: Path to the file to update (if None, only updates in memory)

        Returns:
            String describing the modification result

        Examples:

        # 1. Update a variable in memory
        await ipython.modify_code(code="5", object_name="x")

    # 2. Change a method implementation
    await ipython.modify_code(
        code='"""def sound(self):\n        return "Woof""""',
        object_name="Dog.sound"
    )

    # 3. Modify a function
    await ipython.modify_code(
        code='"""def calculate_age():\n    return 25"""',
        object_name="calculate_age"
    )

    # 4. Update variable in memory and file
    await ipython.modify_code(
        code="100",
        object_name="MAX_SIZE",
        file="config.py"
    )

    # 5. Modifying an attribute in __init__
    await ipython.modify_code(
        code='"""def __init__(self):\n        self.name = "Buddy""""',
        object_name="Dog.__init__"
    )
        '''
        try:
            if not object_name:
                raise ValueError("Object name must be specified")
            if code is None:
                raise ValueError("New code or value must be provided")

            # Process object name (handle methods with parentheses)
            clean_object_name = object_name.replace("()", "")

            # Step 1: Update in memory (user namespace)
            result_message = []

            # Handle different types of objects
            if "." in clean_object_name:
                # For methods or class attributes
                parts = clean_object_name.split(".")
                base_obj_name = parts[0]
                attr_name = parts[1]

                if base_obj_name not in self.user_ns:
                    raise ValueError(f"Object '{base_obj_name}' not found in namespace")

                base_obj = self.user_ns[base_obj_name]

                # Handle method definitions which are passed as docstrings
                if code.split('\n'):
                    method_code = code

                    # Parse the method code to extract its body
                    method_ast = ast.parse(method_code).body[0]
                    method_name = method_ast.name

                    # Create a new function object from the code
                    method_locals = {}
                    exec(
                        f"def _temp_func{signature(getattr(base_obj.__class__, attr_name, None))}: {method_ast.body[0].value.s}",
                        globals(), method_locals)
                    new_method = method_locals['_temp_func']

                    # Set the method on the class
                    setattr(base_obj.__class__, attr_name, new_method)
                    result_message.append(f"Updated method '{clean_object_name}' in memory")
                else:
                    # For simple attributes
                    setattr(base_obj, attr_name, eval(code, self.user_ns))
                    result_message.append(f"Updated attribute '{clean_object_name}' in memory")
            else:
                # For variables and functions
                if code.startswith('"""') and code.endswith('"""'):
                    # Handle function definitions
                    func_code = code.strip('"""')
                    func_ast = ast.parse(func_code).body[0]
                    func_name = func_ast.name

                    # Create a new function object from the code
                    func_locals = {}
                    exec(f"{func_code}", globals(), func_locals)
                    self.user_ns[clean_object_name] = func_locals[func_name]
                    result_message.append(f"Updated function '{clean_object_name}' in memory")
                else:
                    # Simple variable assignment
                    self.user_ns[clean_object_name] = eval(code, self.user_ns)
                    result_message.append(f"Updated variable '{clean_object_name}' in memory")

            # Step 2: Update in file if specified
            if file is not None:
                file_path = self.vfs._resolve_path(file)

                if not file_path.exists():
                    self.user_ns['__file__'] = str(file_path)
                    return await self.run_cell(code)

                # Read original content
                original_content = self.vfs.read_file(file_path)
                updated_content = original_content

                # Handle different object types for file updates
                if "." in clean_object_name:
                    # For methods
                    parts = clean_object_name.split(".")
                    class_name = parts[0]
                    method_name = parts[1]

                    if code.startswith('"""') and code.endswith('"""'):
                        method_code = code.strip('"""')

                        # Use ast to parse the file and find the method to replace
                        file_ast = ast.parse(original_content)
                        for node in ast.walk(file_ast):
                            if isinstance(node, ast.ClassDef) and node.name == class_name:
                                for method in node.body:
                                    if isinstance(method, ast.FunctionDef) and method.name == method_name:
                                        # Find the method in the source code
                                        method_pattern = fr"def {method_name}.*?:(.*?)(?=\n    \w|\n\w|\Z)"
                                        method_match = re.search(method_pattern, original_content, re.DOTALL)

                                        if method_match:
                                            indentation = re.match(r"^(\s*)", method_match.group(0)).group(1)
                                            method_indented = textwrap.indent(method_code, indentation)
                                            updated_content = original_content.replace(
                                                method_match.group(0),
                                                method_indented
                                            )
                                            self.vfs.write_file(file_path, updated_content)
                                            result_message.append(
                                                f"Updated method '{clean_object_name}' in file '{file}'")
                else:
                    # For variables and functions
                    if code.startswith('"""') and code.endswith('"""'):
                        # Handle function updates
                        func_code = code.strip('"""')
                        func_pattern = fr"def {clean_object_name}.*?:(.*?)(?=\n\w|\Z)"
                        func_match = re.search(func_pattern, original_content, re.DOTALL)

                        if func_match:
                            indentation = re.match(r"^(\s*)", func_match.group(0)).group(1)
                            func_indented = textwrap.indent(func_code, indentation)
                            updated_content = original_content.replace(
                                func_match.group(0),
                                func_indented
                            )
                            self.vfs.write_file(file_path, updated_content)
                            result_message.append(f"Updated function '{clean_object_name}' in file '{file}'")
                    else:
                        # Handle variable updates
                        var_pattern = fr"{clean_object_name}\s*=.*"
                        var_replacement = f"{clean_object_name} = {code}"
                        updated_content = re.sub(var_pattern, var_replacement, original_content)

                        if updated_content != original_content:
                            self.vfs.write_file(file_path, updated_content)
                            result_message.append(f"Updated variable '{clean_object_name}' in file '{file}'")
                        else:
                            result_message.append(f"Could not find variable '{clean_object_name}' in file '{file}'")

            return "\n".join(result_message)

        except Exception as e:
            return f"Error during code modification: {str(e)}\n{traceback.format_exc()}"


    def save_session(self, name: str):
        """Save session with UTF-8 encoding"""
        session_file = self._session_dir / f"{name}.pkl"
        user_ns = self.user_ns.copy()
        output_history = self.output_history.copy()

        # Ensure all strings are properly encoded
        for key, value in user_ns.items():
            try:
                if isinstance(value, str):
                    value = value.encode('utf-8').decode('utf-8')
                pickle.dumps(value)
            except Exception:
                user_ns[key] = f"not serializable: {str(value)}"

        for key, value in output_history.items():
            try:
                if isinstance(value, dict):
                    for k, v in value.items():
                        if isinstance(v, str):
                            value[k] = v.encode('utf-8').decode('utf-8')
                pickle.dumps(value)
            except Exception:
                output_history[key] = f"not serializable: {str(value)}"


        session_data = {
            'user_ns': user_ns,
            'output_history': output_history,

        }

        with open(session_file, 'wb') as f:
            pickle.dump(session_data, f)

        # Save VFS state with UTF-8 encoding
        vfs_state_file = self._session_dir / f"{name}_vfs.json"
        with open(vfs_state_file, 'w', encoding='utf-8') as f:
            json.dump(self.vfs.virtual_files, f, ensure_ascii=False)

    def load_session(self, name: str):
        """Load session with UTF-8 encoding"""
        session_file = self._session_dir / f"{name}.pkl"
        if session_file.exists():
            with open(session_file, 'rb') as f:
                session_data = pickle.load(f)
                # self.user_ns.update(session_data['user_ns'])
                self.output_history.update(session_data['output_history'])

        # Load VFS state with UTF-8 encoding
        vfs_state_file = self._session_dir / f"{name}_vfs.json"
        if vfs_state_file.exists():
            with open(vfs_state_file, encoding='utf-8') as f:
                self.vfs.virtual_files = json.load(f)

    def __str__(self):
        """String representation of current session"""
        output = []
        for count, data in self.output_history.items():
            output.append(f"In [{count}]: {data['code']}")
            if data['stdout']:
                output.append(data['stdout'])
            if data['stderr']:
                output.append(f"Error: {data['stderr']}")
            if data['result'] is not None:
                output.append(f"Out[{count}]: {data['result']}")
        return "\n".join(output)
__str__()

String representation of current session

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def __str__(self):
    """String representation of current session"""
    output = []
    for count, data in self.output_history.items():
        output.append(f"In [{count}]: {data['code']}")
        if data['stdout']:
            output.append(data['stdout'])
        if data['stderr']:
            output.append(f"Error: {data['stderr']}")
        if data['result'] is not None:
            output.append(f"Out[{count}]: {data['result']}")
    return "\n".join(output)
get_namespace()

Get current namespace

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def get_namespace(self) -> dict[str, Any]:
    """Get current namespace"""
    return self.user_ns.copy()
load_session(name)

Load session with UTF-8 encoding

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def load_session(self, name: str):
    """Load session with UTF-8 encoding"""
    session_file = self._session_dir / f"{name}.pkl"
    if session_file.exists():
        with open(session_file, 'rb') as f:
            session_data = pickle.load(f)
            # self.user_ns.update(session_data['user_ns'])
            self.output_history.update(session_data['output_history'])

    # Load VFS state with UTF-8 encoding
    vfs_state_file = self._session_dir / f"{name}_vfs.json"
    if vfs_state_file.exists():
        with open(vfs_state_file, encoding='utf-8') as f:
            self.vfs.virtual_files = json.load(f)
modify_code(code=None, object_name=None, file=None) async
Modify existing code in memory (user namespace) and optionally in the corresponding file.

This method updates variables, functions, or methods in the current Python session and can
also update the corresponding source file if specified.

Args:
    code: New value or implementation for the object
    object_name: Name of the object to modify (variable, function, or method)
    file: Path to the file to update (if None, only updates in memory)

Returns:
    String describing the modification result

Examples:

# 1. Update a variable in memory
await ipython.modify_code(code="5", object_name="x")
# 2. Change a method implementation
await ipython.modify_code(
    code='"""def sound(self):\n        return "Woof""""',
    object_name="Dog.sound"
)

# 3. Modify a function
await ipython.modify_code(
    code='"""def calculate_age():\n    return 25"""',
    object_name="calculate_age"
)

# 4. Update variable in memory and file
await ipython.modify_code(
    code="100",
    object_name="MAX_SIZE",
    file="config.py"
)

# 5. Modify an attribute in __init__
await ipython.modify_code(
    code='"""def __init__(self):\n        self.name = "Buddy""""',
    object_name="Dog.__init__"
)

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def modify_code(self, code: str = None, object_name: str = None, file: str = None) -> str:
    '''
    Modify existing code in memory (user namespace) and optionally in the corresponding file.

    This method updates variables, functions, or methods in the current Python session and can
    also update the corresponding source file if specified.

    Args:
        code: New value or implementation for the object
        object_name: Name of the object to modify (variable, function, or method)
        file: Path to the file to update (if None, only updates in memory)

    Returns:
        String describing the modification result

    Examples:

    # 1. Update a variable in memory
    await ipython.modify_code(code="5", object_name="x")

# 2. Change a method implementation
await ipython.modify_code(
    code='"""def sound(self):\n        return "Woof""""',
    object_name="Dog.sound"
)

# 3. Modify a function
await ipython.modify_code(
    code='"""def calculate_age():\n    return 25"""',
    object_name="calculate_age"
)

# 4. Update variable in memory and file
await ipython.modify_code(
    code="100",
    object_name="MAX_SIZE",
    file="config.py"
)

# 5. Modifying an attribute in __init__
await ipython.modify_code(
    code='"""def __init__(self):\n        self.name = "Buddy""""',
    object_name="Dog.__init__"
)
    '''
    try:
        if not object_name:
            raise ValueError("Object name must be specified")
        if code is None:
            raise ValueError("New code or value must be provided")

        # Process object name (handle methods with parentheses)
        clean_object_name = object_name.replace("()", "")

        # Step 1: Update in memory (user namespace)
        result_message = []

        # Handle different types of objects
        if "." in clean_object_name:
            # For methods or class attributes
            parts = clean_object_name.split(".")
            base_obj_name = parts[0]
            attr_name = parts[1]

            if base_obj_name not in self.user_ns:
                raise ValueError(f"Object '{base_obj_name}' not found in namespace")

            base_obj = self.user_ns[base_obj_name]

            # Handle method definitions which are passed as docstrings
            if code.startswith('"""') and code.endswith('"""'):
                method_code = code

                # Parse the method code to extract its body
                method_ast = ast.parse(method_code).body[0]
                method_name = method_ast.name

                # Create a new function object from the code
                method_locals = {}
                exec(
                    f"def _temp_func{signature(getattr(base_obj.__class__, attr_name, None))}: {method_ast.body[0].value.s}",
                    globals(), method_locals)
                new_method = method_locals['_temp_func']

                # Set the method on the class
                setattr(base_obj.__class__, attr_name, new_method)
                result_message.append(f"Updated method '{clean_object_name}' in memory")
            else:
                # For simple attributes
                setattr(base_obj, attr_name, eval(code, self.user_ns))
                result_message.append(f"Updated attribute '{clean_object_name}' in memory")
        else:
            # For variables and functions
            if code.startswith('"""') and code.endswith('"""'):
                # Handle function definitions
                func_code = code.strip('"""')
                func_ast = ast.parse(func_code).body[0]
                func_name = func_ast.name

                # Create a new function object from the code
                func_locals = {}
                exec(f"{func_code}", globals(), func_locals)
                self.user_ns[clean_object_name] = func_locals[func_name]
                result_message.append(f"Updated function '{clean_object_name}' in memory")
            else:
                # Simple variable assignment
                self.user_ns[clean_object_name] = eval(code, self.user_ns)
                result_message.append(f"Updated variable '{clean_object_name}' in memory")

        # Step 2: Update in file if specified
        if file is not None:
            file_path = self.vfs._resolve_path(file)

            if not file_path.exists():
                self.user_ns['__file__'] = str(file_path)
                return await self.run_cell(code)

            # Read original content
            original_content = self.vfs.read_file(file_path)
            updated_content = original_content

            # Handle different object types for file updates
            if "." in clean_object_name:
                # For methods
                parts = clean_object_name.split(".")
                class_name = parts[0]
                method_name = parts[1]

                if code.startswith('"""') and code.endswith('"""'):
                    method_code = code.strip('"""')

                    # Use ast to parse the file and find the method to replace
                    file_ast = ast.parse(original_content)
                    for node in ast.walk(file_ast):
                        if isinstance(node, ast.ClassDef) and node.name == class_name:
                            for method in node.body:
                                if isinstance(method, ast.FunctionDef) and method.name == method_name:
                                    # Find the method in the source code
                                    method_pattern = fr"def {method_name}.*?:(.*?)(?=\n    \w|\n\w|\Z)"
                                    method_match = re.search(method_pattern, original_content, re.DOTALL)

                                    if method_match:
                                        indentation = re.match(r"^(\s*)", method_match.group(0)).group(1)
                                        method_indented = textwrap.indent(method_code, indentation)
                                        updated_content = original_content.replace(
                                            method_match.group(0),
                                            method_indented
                                        )
                                        self.vfs.write_file(file_path, updated_content)
                                        result_message.append(
                                            f"Updated method '{clean_object_name}' in file '{file}'")
            else:
                # For variables and functions
                if code.startswith('"""') and code.endswith('"""'):
                    # Handle function updates
                    func_code = code.strip('"""')
                    func_pattern = fr"def {clean_object_name}.*?:(.*?)(?=\n\w|\Z)"
                    func_match = re.search(func_pattern, original_content, re.DOTALL)

                    if func_match:
                        indentation = re.match(r"^(\s*)", func_match.group(0)).group(1)
                        func_indented = textwrap.indent(func_code, indentation)
                        updated_content = original_content.replace(
                            func_match.group(0),
                            func_indented
                        )
                        self.vfs.write_file(file_path, updated_content)
                        result_message.append(f"Updated function '{clean_object_name}' in file '{file}'")
                else:
                    # Handle variable updates
                    var_pattern = fr"{clean_object_name}\s*=.*"
                    var_replacement = f"{clean_object_name} = {code}"
                    updated_content = re.sub(var_pattern, var_replacement, original_content)

                    if updated_content != original_content:
                        self.vfs.write_file(file_path, updated_content)
                        result_message.append(f"Updated variable '{clean_object_name}' in file '{file}'")
                    else:
                        result_message.append(f"Could not find variable '{clean_object_name}' in file '{file}'")

        return "\n".join(result_message)

    except Exception as e:
        return f"Error during code modification: {str(e)}\n{traceback.format_exc()}"
reset()

Reset the interpreter state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def reset(self):
    """Reset the interpreter state"""
    self.user_ns = {
        '__name__': '__main__',
        '__builtins__': __builtins__,
        'toolboxv2': toolboxv2,
        '__file__': None,
        '__path__': [str(self.vfs.current_dir)],
        'auto_install': auto_install,
        'modify_code': self.modify_code,
    }
    self.output_history.clear()
    self._execution_count = 0
    if self.auto_remove:
        shutil.rmtree(self.vfs.base_dir, ignore_errors=True)
run_cell(code, live_output=True) async

Async version of run_cell that handles both sync and async code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def run_cell(self, code: str, live_output: bool = True) -> Any:
    """Async version of run_cell that handles both sync and async code"""
    result = None
    error = None
    tb = None
    original_dir = os.getcwd()

    if live_output:
        stdout_buffer = io.StringIO()
        stderr_buffer = io.StringIO()
        stdout = TeeStream(sys.__stdout__, stdout_buffer)
        stderr = TeeStream(sys.__stderr__, stderr_buffer)
    else:
        stdout = io.StringIO()
        stderr = io.StringIO()

    try:
        # Check if a file is already specified
        original_file = self.user_ns.get('__file__')
        if original_file is None:
            # Create temp file if no file specified
            temp_file = self.vfs.write_file(
                f'src/temp/_temp_{self._execution_count}.py',
                code
            )
            # work_ns = self.user_ns.copy()
            self.user_ns['__file__'] = str(temp_file)
        else:
            # Use existing file
            temp_file = Path(original_file)
            # Write code to the existing file
            self.vfs.write_file(temp_file, code)
            #work_ns = self.user_ns.copy()

        self.user_ns['__builtins__'] = __builtins__
        with VirtualEnvContext(self._venv_path) as python_exec:
            try:
                exec_code, eval_code, is_async, has_top_level_await = self._parse_code(
                    code.encode('utf-8', errors='replace').decode('utf-8')
                )
                if exec_code is None:
                    return "No executable code"
                os.makedirs(str(temp_file.parent.absolute()), exist_ok=True)
                os.chdir(str(temp_file.parent.absolute()))
                self.user_ns['PYTHON_EXEC'] = python_exec

                with redirect_stdout(stdout), redirect_stderr(stderr):
                    if has_top_level_await:
                        try:
                            # Execute wrapped code and await it
                            exec(exec_code, self.user_ns)
                            result = self.user_ns['__wrapper']()
                            if asyncio.iscoroutine(result):
                                result = await result
                        finally:
                            self.user_ns.pop('__wrapper', None)
                    elif is_async:
                        # Execute async code
                        exec(exec_code, self.user_ns)
                        if eval_code:
                            result = eval(eval_code, self.user_ns)
                            if asyncio.iscoroutine(result):
                                result = await result
                    else:
                        # Execute sync code
                        exec(exec_code, self.user_ns)
                        if eval_code:
                            result = eval(eval_code, self.user_ns)

                    if result is not None:
                        self.user_ns['_'] = result
            except KeyboardInterrupt:
                print("Stop execution manuel!")

            except Exception as e:
                error = str(e)
                tb = traceback.format_exc()
                if live_output:
                    sys.__stderr__.write(f"{error}\n{tb}")
                stderr.write(f"{error}\n{tb}")

            finally:
                os.chdir(original_dir)
                self._execution_count += 1
                # self.user_ns = work_ns.copy()
                if live_output:
                    stdout_value = stdout_buffer.getvalue()
                    stderr_value = stderr_buffer.getvalue()
                else:
                    stdout_value = stdout.getvalue()
                    stderr_value = stderr.getvalue()

                output = {
                    'code': code,
                    'stdout': stdout_value,
                    'stderr': stderr_value,
                    'result': result if result else "stdout"
                }
                self.output_history[self._execution_count] = output

                if not result:
                    result = ""
                if output['stdout']:
                    result = f"{result}\nstdout:{output['stdout']}"
                if output['stderr']:
                    result = f"{result}\nstderr:{output['stderr']}"

                if self.auto_remove and original_file is None:
                    # Only remove temp files, not user-specified files
                    self.vfs.delete_file(temp_file)

                return result

    except Exception as e:
        error_msg = f"Error executing code: {str(e)}\n{traceback.format_exc()}"
        if live_output:
            sys.__stderr__.write(error_msg)
        return error_msg
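
Because run_cell() detects top-level await and wraps the cell accordingly, async calls can be awaited directly inside a cell. A minimal sketch, assuming an interpreter instance named ipython:

out = await ipython.run_cell(
    "import asyncio\n"
    "await asyncio.sleep(0)\n"
    "1 + 1"
)
print(out)  # value of the final expression, followed by any captured stdout/stderr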
save_session(name)

Save session with UTF-8 encoding

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def save_session(self, name: str):
    """Save session with UTF-8 encoding"""
    session_file = self._session_dir / f"{name}.pkl"
    user_ns = self.user_ns.copy()
    output_history = self.output_history.copy()

    # Ensure all strings are properly encoded
    for key, value in user_ns.items():
        try:
            if isinstance(value, str):
                value = value.encode('utf-8').decode('utf-8')
            pickle.dumps(value)
        except Exception:
            user_ns[key] = f"not serializable: {str(value)}"

    for key, value in output_history.items():
        try:
            if isinstance(value, dict):
                for k, v in value.items():
                    if isinstance(v, str):
                        value[k] = v.encode('utf-8').decode('utf-8')
            pickle.dumps(value)
        except Exception:
            output_history[key] = f"not serializable: {str(value)}"


    session_data = {
        'user_ns': user_ns,
        'output_history': output_history,

    }

    with open(session_file, 'wb') as f:
        pickle.dump(session_data, f)

    # Save VFS state with UTF-8 encoding
    vfs_state_file = self._session_dir / f"{name}_vfs.json"
    with open(vfs_state_file, 'w', encoding='utf-8') as f:
        json.dump(self.vfs.virtual_files, f, ensure_ascii=False)
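
save_session() and load_session() form a simple round trip. Note that on load only the output history and the VFS state are restored; re-applying the pickled user namespace is commented out in the source. A sketch, assuming an interpreter instance named ipython:

ipython.save_session("demo")  # writes demo.pkl plus demo_vfs.json into the session directory
ipython.load_session("demo")  # restores output_history and vfs.virtual_files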
update_namespace(variables)

Update namespace with new variables

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def update_namespace(self, variables: dict[str, Any]):
    """Update namespace with new variables"""
    self.user_ns.update(variables)
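
A quick way to hand real Python objects to subsequently executed cells, assuming an interpreter instance named ipython:

ipython.update_namespace({"answer": 42})
await ipython.run_cell("print(answer)")  # prints 42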
ParentNodeTransformer

Bases: NodeTransformer

Add parent references to AST nodes

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class ParentNodeTransformer(ast.NodeTransformer):
    """Add parent references to AST nodes"""
    def visit(self, node):
        for child in ast.iter_child_nodes(node):
            child.parent = node
        return super().visit(node)
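
A minimal sketch of applying the transformer: after a single visit() pass over a parsed tree, every child node carries a .parent reference back to its enclosing node.

import ast

tree = ast.parse("def f():\n    return 1")
ParentNodeTransformer().visit(tree)
ret = tree.body[0].body[0]                    # the Return node
assert isinstance(ret.parent, ast.FunctionDef)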
Pipeline

A pipeline for executing AI agent-driven tasks with interactive code execution and variable management.

The Pipeline class provides a structured environment for AI agents to:

1. Execute code in a controlled environment
2. Manage and track variables
3. Update methods dynamically
4. Save and load session states
5. Generate detailed variable descriptions

Attributes:

  • agent: The AI agent instance used for task execution
  • task (str): The task to be performed
  • max_iter (int): Maximum number of iterations allowed (default: 12)
  • variables (Dict[str, Any]): Dictionary of variables available to the pipeline
  • top_n (Optional[int]): Limit variable descriptions to top N most used
  • execution_history (List[ExecutionRecord]): History of executed code and results
  • session_name (Optional[str]): Name of the current session if saved
  • ipython: IPython or MockIPython instance for code execution

Example

>>> agent = get_free_agent("demo", "anthropic/claude-3-haiku-20240307")
>>> pipeline = Pipeline(
...     agent=agent,
...     task="Calculate fibonacci sequence",
...     variables={"n": 10}
... )
>>> result = pipeline.run("...")
>>> print(result.result)

Notes
  • The pipeline uses either IPython if available or a MockIPython implementation
  • Variables can be provided as either a dictionary or list
  • Session state can be saved and loaded
  • Method updates are handled through a structured BaseModel approach
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class Pipeline:
    """
        A pipeline for executing AI agent-driven tasks with interactive code execution and variable management.

        The Pipeline class provides a structured environment for AI agents to:
        1. Execute code in a controlled environment
        2. Manage and track variables
        3. Update methods dynamically
        4. Save and load session states
        5. Generate detailed variable descriptions

        Attributes:
            agent: The AI agent instance used for task execution
            task (str): The task to be performed
            max_iter (int): Maximum number of iterations allowed (default: 12)
            variables (Dict[str, Any]): Dictionary of variables available to the pipeline
            top_n (Optional[int]): Limit variable descriptions to top N most used
            execution_history (List[ExecutionRecord]): History of executed code and results
            session_name (Optional[str]): Name of the current session if saved
            ipython: IPython or MockIPython instance for code execution

        Example:
            >>> agent = get_free_agent("demo", "anthropic/claude-3-haiku-20240307")
            >>> pipeline = Pipeline(
            ...     agent=agent,
            ...     task="Calculate fibonacci sequence",
            ...     variables={"n": 10}
            ... )
            >>> result = pipeline.run("...")
            >>> print(result.result)

        Notes:
            - The pipeline uses either IPython if available or a MockIPython implementation
            - Variables can be provided as either a dictionary or list
            - Session state can be saved and loaded
            - Method updates are handled through a structured BaseModel approach
        """
    def __init__(
        self,
        agent: Any,
        verbose: bool=False,
        max_iter: int = 12,
        variables: dict[str, Any] | list[Any] | None = None,
        top_n: int | None = None,
        restore: bool | None = None,
        max_think_after_think = None,
        print_f=None,
        web_js=False,
        timeout_timer=25,
        v_agent=None,
        web_llm=None,
    ):
        """
        Initialize the Pipeline.

        Args:
            agent: AI agent instance to use for task execution
            verbose: print internal results
            max_iter: Maximum number of iterations (default: 12)
            variables: Dictionary or list of variables to make available
            top_n: Limit variable descriptions to top N most used
            web_js: if the agent is allow to use the web
        """

        self.timeout_timer = timeout_timer
        self.top_n = top_n
        self.max_iter = max_iter
        self.max_think_after_think = max_think_after_think or max_iter // 2
        self.agent = agent
        self.v_agent = v_agent or agent
        # self.agent.verbose = verbose
        self.task = None
        self.web_js = web_js
        self.print_f = print_f
        self.verbose_output = EnhancedVerboseOutput(verbose=verbose, print_f=self.print_f)
        self.variables = self._process_variables(variables or {})
        self.variables['auto_install'] = auto_install
        self.execution_history = []
        self.session_name = None

        self.browser_session: BrowserWrapper | None = BrowserWrapper(llm=web_llm or agent.amd.model)
        self.js_history: list[JSExecutionRecord] = []

        self._session_dir = Path(get_app().appdata) / 'ChatSession' / agent.amd.name
        self.ipython = MockIPython(self._session_dir, auto_remove=False)
        self.chat_session = ChatSession(get_app().get_mod("isaa").get_memory(), space_name=f"ChatSession/{agent.amd.name}/Pipeline.session", max_length=max_iter)
        self.process_memory = ChatSession(get_app().get_mod("isaa").get_memory(), space_name=f"ChatSession/{agent.amd.name}/Process.session", max_length=max_iter)

        # Initialize interpreter with variables
        self.init_keys = list(self.ipython.user_ns.keys()).copy()
        if self.web_js:
            self.variables['web_actions'] = self.browser_session.run
            self.variables['browser_session'] = self.browser_session
        self.ipython.user_ns.update(self.variables)

        self.restore_var = restore

        if restore:
            self.restore()

    def on_exit(self):
        self.chat_session.on_exit()
        self.process_memory.on_exit()
        self.save_session(f"Pipeline_Session_{self.agent.amd.name}")

    def restore(self):
        self.load_session(f"Pipeline_Session_{self.agent.amd.name}")

    def save_session(self, name: str):
        """Save current session"""
        self.session_name = name
        self.ipython.save_session(name)

    def load_session(self, name: str):
        """Load saved session"""
        self.ipython.load_session(name)
        self.variables.update(self.ipython.user_ns)


    def show_graph_html(self, output_file=None, get_output_html=False, get_output_net=False):

        if output_file is None:
            chat_graph = self.ipython._session_dir / 'chat_graph.html'
            process_graph = self.ipython._session_dir / 'process_graph.html'
            output_file = str(chat_graph.absolute())
            p_output_file = str(process_graph.absolute())
        else:
            # Build both paths from the caller-supplied base name
            p_output_file = output_file + '_process_graph.html'
            output_file = output_file + '_chat_graph.html'

        return (self.chat_session.mem.memories.get(
            self.chat_session.mem._sanitize_name(
                self.chat_session.space_name)).vis(output_file=output_file,
        get_output_html=get_output_html, get_output_net=get_output_net)  ,
                self.process_memory.mem.memories.get(
            self.process_memory.mem._sanitize_name(
                self.process_memory.space_name)).vis(output_file=p_output_file,
        get_output_html=get_output_html, get_output_net=get_output_net))

    @staticmethod
    def _process_variables(variables: dict[str, Any] | list[Any]) -> dict[str, Any]:
        """
        Process variables to generate meaningful names, using actual variable names where possible.
        Instances get lowercase names based on their class names.

        Args:
            variables: Dictionary of variables or list of variables to process

        Returns:
            Dict[str, Any]: Processed variables with meaningful names
        """
        if isinstance(variables, dict):
            return variables

        processed = {}
        name_counts = defaultdict(int)

        # Get caller's frame to find variable names
        caller_frame = currentframe().f_back
        caller_locals = {**caller_frame.f_locals, **caller_frame.f_globals}

        def find_var_name(obj: Any) -> str:
            # Find original variable name if exists
            var_names = [name for name, val in caller_locals.items()
                         if val is obj and not name.startswith('_')]
            if var_names:
                return var_names[0]

            # Special handling for functions
            if isfunction(obj) or isclass(obj):
                return obj.__name__
            # Handle instances
            elif hasattr(obj, '__class__'):
                base_name = obj.__class__.__name__.lower()  # Lowercase for instances
                count = name_counts[base_name]
                name_counts[base_name] += 1
                return f"{base_name}_{count + 1}" if count > 0 else base_name

            return type(obj).__name__

        # Process each variable
        for var in variables:
            name = find_var_name(var)
            while name in processed:
                base, _, num = name.rpartition('_')
                if base and num.isdigit():
                    name = f"{base}_{int(num) + 1}"
                else:
                    # No numeric suffix yet; add one so the loop terminates
                    name = f"{name}_1"

            processed[name] = var

        return processed

    def _generate_variable_descriptions(
        self,
        top_n: int | None = None
    ) -> str:
        """
        Generate detailed descriptions of variables, showing args, kwargs, docstrings, and return values.

        Args:
            top_n: Optional limit to show only top N variables

        Returns:
            str: Formatted variable descriptions in Markdown
        """
        if top_n is None:
            top_n = self.top_n

        def format_value_preview(var: Any) -> str:
            """Format preview of variable contents"""
            try:
                if isinstance(var, int | float | bool | str):
                    return f"`{repr(var)}`"
                elif isinstance(var, list | tuple | set):
                    preview = str(list(var)[:3])[:-1] + ", ...]"
                    return f"{len(var)} items: {preview}"
                elif isinstance(var, dict):
                    preview_items = [f"{repr(k)}: {repr(v)}" for k, v in list(var.items())[:3]]
                    return f"{len(var)} pairs: {{{', '.join(preview_items)}, ...}}"
                return f"<{type(var).__name__}>"
            except:
                return "<error getting value>"

        def get_instance_state(var: Any) -> dict[str, Any]:
            """Get current instance state"""
            state = {}
            if hasattr(var, '__dict__'):
                for name, value in var.__dict__.items():
                    if not name.startswith('_') and not callable(value):
                        state[name] = format_value_preview(value)
            return state

        # Process variables
        variables = self.variables.items()
        if top_n:
            variables = list(variables)[:top_n]

        descriptions = []
        for name, var in variables:
            if name in ["PYTHON_EXEC", "__name__", "__builtins__", "__path__", "asyncio"]:
                continue

            desc_parts = [f"### {name}"]

            # Handle different types
            if isinstance(var, type):  # Class
                desc_parts.append(f"**Type:** `class '{var.__name__}'`")
                if var.__doc__:
                    desc_parts.append(f"**Documentation:**\n{var.__doc__.strip()}")

                # Show methods
                methods = []
                for attr_name, attr in var.__dict__.items():
                    if (not attr_name.startswith('_') or attr_name == "__init__") and (isfunction(attr) or ismethod(attr)):
                        try:
                            sig = signature(attr)
                            is_a = asyncio.iscoroutinefunction(attr)
                            methods.append(f"- `{attr_name}{sig}` Async: `{is_a}`")
                            if attr.__doc__:
                                r = attr.__doc__.split('\n')[0]
                                methods.append(f"  {r}")
                        except:
                            methods.append(f"- `{attr_name}()`")
                if methods:
                    desc_parts.append("**Methods:**\n" + "\n".join(methods))

            elif isfunction(var) or ismethod(var):  # Function
                try:
                    sig = signature(var)
                    desc_parts.append(f"**Signature:** `{var.__name__}{sig}`")
                    is_a = asyncio.iscoroutinefunction(var)
                    desc_parts.append(f"**IS Async:** `{is_a}`")
                    if var.__doc__:
                        desc_parts.append(f"**Documentation:**\n{var.__doc__.strip()}")
                    ret_anno = sig.return_annotation
                    if ret_anno != Signature.empty:
                        desc_parts.append(f"**Returns:** `{ret_anno}`")
                except:
                    desc_parts.append(f"**Function:** `{var.__name__}()`")

            elif isinstance(var, BaseModel):  # Pydantic model
                desc_parts.append(f"**Type:** Pydantic model '{var.__class__.__name__}'")
                fields = []
                for field_name, field in var.model_fields.items():
                    value = getattr(var, field_name, None)
                    fields.append(f"- `{field_name}: {field.annotation.__name__}` = {repr(value)}")
                if fields:
                    desc_parts.append("**Fields:**\n" + "\n".join(fields))

            else:  # Instance
                class_type = var.__class__
                desc_parts.append(f"**Type:** `{class_type.__module__}.{class_type.__name__}`")

                # Instance initialization details
                try:
                    init = class_type.__init__
                    sig = signature(init)
                    params = list(sig.parameters.items())[1:]  # Skip self
                    if params:
                        args = []
                        for name, param in params:
                            if param.default == param.empty:
                                args.append(name)
                            else:
                                args.append(f"{name}={param.default}")
                        desc_parts.append(f"**Init Args:** `{', '.join(args)}`")
                except:
                    pass

                # Instance state
                state = get_instance_state(var)
                if state:
                    desc_parts.append("**Current instance State:**")
                    for attr_name, attr_value in state.items():
                        desc_parts.append(f"- `{attr_name}` = {attr_value}")

                # Documentation
                doc = getdoc(var) or getdoc(class_type)
                if doc:
                    desc_parts.append(f"**Documentation:**\n{doc.strip()}")

            descriptions.append("\n".join(desc_parts))

        return "\n\n".join(descriptions)

    async def _execute_code(self, code: str, context:dict) -> ExecutionRecord:
        """Execute code and track results"""
        lang = context.get('lang', 'py')
        try:

            if 'py' in lang:

                return await self._execute_py(code)

            elif self.web_js and 'js' in lang:
                return await self._execute_js(code, context)

        except Exception as e:
            record = ExecutionRecord(code=code, result=None, error=str(e))
            self.execution_history.append(record)
            return record
        record = ExecutionRecord(code=code, result=None, error=f"Invalid lang {lang}; valid: {'js, py' if self.web_js else 'py'}")
        self.execution_history.append(record)
        return record

    async def _execute_py(self, code) -> ExecutionRecord:
        show = True #len(code) > 450 and code.count('while') > 1 and code.count('print') >= 1
        result = await self.ipython.run_cell(code, show)

        all_keys = list(self.ipython.user_ns.keys())

        new_keys = [key for key in all_keys if key not in self.init_keys]
        # Update pipeline variables from IPython namespace

        for var_name in new_keys:
            if var_name.startswith('_'):
                continue
            self.variables[var_name] = self.ipython.user_ns[var_name]

        record = ExecutionRecord(code=code, result=result, error=None)
        self.execution_history.append(record)
        return record

    async def _execute_js(self, code: str, context: dict) -> ExecutionRecord:
        """Execute JavaScript code in browser context"""

        if '<script>' in code:
            code = code.split('<script>')[1]
        if '</script>' in code:
            code = code.split('</script>')[0]
        def _format_error_markdown(error: str) -> str:
            """Format error as Markdown"""
            return f"""
# Execution Error
{error}
"""

        def _format_result_markdown(result_: dict) -> str:
            """Format execution result as Markdown"""

            def _clean_html_content(html: str) -> str:
                """Clean HTML content and convert to Markdown-like format"""
                soup = BeautifulSoup(html, 'html.parser')

                # Remove scripts and styles
                for script in soup(["script", "style"]):
                    script.decompose()

                # Extract text
                text = soup.get_text()

                # Clean up whitespace
                lines = (line.strip() for line in text.splitlines())
                chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
                text = '\n'.join(chunk for chunk in chunks if chunk)

                # Add Markdown formatting
                text = re.sub(r'^(.+)$', r'> \1', text, flags=re.MULTILINE)

                return text

            md_parts = []

            # Add title
            md_parts.append("# Page Analysis Results\n")

            # Format JavaScript result
            if result_.get('js_result'):
                md_parts.append("## JavaScript Execution Result")
                md_parts.append("```")
                md_parts.append(str(result_['js_result']))
                md_parts.append("```\n")

            # Format page state
            if 'page_state' in result_:
                md_parts.append("## Page Information")
                md_parts.append(f"- **URL**: {result_['page_state']['url']}")
                md_parts.append(f"- **Title**: {result_['page_state']['title']}\n")

                # Clean and format content
                if 'content' in result_['page_state']:
                    content = _clean_html_content(result_['page_state']['content'])
                    if content:
                        md_parts.append("### Page Content")
                        md_parts.append(content + "\n")

            # Format extracted data
            if result_.get('extracted_data'):
                md_parts.append("## Extracted Data")
                for key, value in result_['extracted_data'].items():
                    if value:
                        md_parts.append(f"### {key.replace('_', ' ').title()}")
                        if isinstance(value, list):
                            for item in value:
                                md_parts.append(f"- {item}")
                        else:
                            md_parts.append(str(value))
                        md_parts.append("")

            return "\n".join(md_parts)

        try:
            # Prepare execution context
            url = context.get('url')
            page = None
            result = None
            page_state = {}

            extracted_data = None
            if url:
                page = await self.browser_session.navigate(url)
                parser = self.browser_session.get_parser()
                markdown = await parser.to_markdown(page)

                if 'patterns' in context:
                    extracted_data = await parser.to_structured(page, context['patterns'])

                page_state = {
                    'url': page.url,
                    'title': await page.title(),
                    'content': markdown,
                }

            if code:
                result = await self.browser_session.execute_js(code, page)

                if isinstance(result, dict) and 'success' in result:
                    if not result['success']:
                        raise Exception(f"JavaScript Error: {result.get('error')}\nStack: {result.get('stack')}")
                    result = result.get('result')

            # Capture page state after execution


            # Extract data using patterns if specified

            # Create execution record
            record = JSExecutionRecord(
                code=code,
                result=result,
                page_state=page_state,
                extracted_data=extracted_data
            )

            self.js_history.append(record)

            # Convert to standard ExecutionRecord for pipeline
            return ExecutionRecord(
                code=code,
                result=_format_result_markdown({
                    'js_result': result,
                    'page_state': page_state,
                    'extracted_data': extracted_data
                }),
                error=None
            )

        except Exception as e:
            error_md = _format_error_markdown(str(e))
            return ExecutionRecord(code=code, result=None, error=error_md)


    def __str__(self):
        """String representation of pipeline session"""
        return str(self.ipython)

    async def _process_think_result(self, think_result: ThinkResult, task:str) -> tuple[ThinkState,  ExecutionRecord | str | None]:
        """Process the result of agent thinking"""
        if think_result.action == 'brake':
            return ThinkState.BRAKE, think_result.content

        elif think_result.action == 'update':
            if think_result.context.get('object_name') is None:
                return ThinkState.ACTION, "no object_name specified in context!"
            if think_result.context.get('file') is not None:
                self.ipython.user_ns['__file__'] = think_result.context.get('file')
            result = await self.verbose_output.process(think_result.action,
                                                       self.ipython.modify_code(code=think_result.content,
                                                    object_name=think_result.context.get('object_name'),))
            return ThinkState.PROCESSING, result

        elif think_result.action == 'code':
            if think_result.context.get('file') is not None:
                self.ipython.user_ns['__file__'] = think_result.context.get('file')
            result = await self._execute_code(think_result.content, think_result.context)
            return ThinkState.PROCESSING, result

        elif think_result.action == 'done':
            return ThinkState.DONE, think_result.content

        elif think_result.action == 'infos':
            infos = await self.chat_session.get_reference(think_result.content, to_str=True)
            return ThinkState.ACTION, infos

        elif think_result.action == 'guide':
            details = await self.process_memory.get_reference(think_result.content, to_str=True)
            plan = await self.agent.a_mini_task(f"""You are an AI guidance system designed to help determine the next step in a task and provide instructions on how to proceed. Your role is to analyze the given information and offer clear, actionable guidance for the next steps.

First, carefully read and understand the main task:
<main_task>
{task}
</main_task>

Next, review the last thought of the agent, if available:
<last_thought>
{think_result.content}
{think_result.context}
</last_thought>

Then, examine the processing history, if provided:
<processing_history>
{details}
</processing_history>

To determine the next step and provide guidance, follow these instructions:

1. Analyze the main task, breaking it down into smaller, manageable steps if necessary.
2. Consider the last thought and processing history to understand the current progress and context.
3. Identify any gaps, challenges, or areas that need further attention.
4. Determine the most logical and efficient next step to move the task forward.
5. Provide clear, concise instructions on how to complete this next step.

When formulating your response, follow this structure:

1. Begin with a brief summary of the current situation, referencing the main task and any relevant information from the last thought or processing history.
2. Clearly state the next step that should be taken.
3. Provide detailed instructions on how to complete this step, including any specific techniques, methods, or considerations to keep in mind.
4. If applicable, mention any potential challenges or pitfalls to be aware of during this step.
5. Conclude with a brief statement on how this step contributes to the overall progress of the main task.

Format your response using the following sections:
<summary>
(Include your summary of the current situation here)
</summary>

<next_step>
(State the next step to be taken here)
</next_step>

<instructions>
(Provide detailed instructions for completing the next step here)
</instructions>

<challenges>
(If applicable, mention potential challenges or pitfalls here)
</challenges>

<conclusion>
(Briefly state how this step contributes to overall progress)
</conclusion>

Remember to be clear, concise, and specific in your guidance. Avoid vague or ambiguous instructions, and provide concrete examples or explanations where necessary.""")
            return ThinkState.ACTION, plan

        return ThinkState.ACTION, None

    async def execute(self, code:str):
        return str(await self._execute_code(code, {'lang': 'py'}))

    def clear(self):
        self.chat_session.history = []
        self.process_memory.history = []
        self.execution_history = []
        self.variables = {}
        self.ipython.reset()
        self.js_history = []

    async def get_process_hint(self, task):
        return await self.process_memory.get_reference(task, to_str=True), await self.chat_session.get_reference(task, to_str=True)

    def show_vars(self):
        return self.verbose_output.log_state("VARS", self.variables, override=True)

    def set_file(self, full_file_path_and_name):
        if not os.path.exists(full_file_path_and_name):
            print("Invalid file")
            return
        self.ipython.user_ns["__file__"] = full_file_path_and_name

    async def run(self, task, do_continue=False) -> PipelineResult:
        """Run the pipeline with separated thinking and processing phases"""
        state = ThinkState.ACTION
        result = None
        original_task = task
        if not do_continue:
            task = self.agent.mini_task(task, "user", f"""You are an AI assistant tasked with refactoring a user-provided task description into a more structured format with context learning and examples. Your goal is to create a comprehensive and well-organized task description that incorporates model flows and potential code fixes.

First, I will provide you with a task description and some example tasks. Please read them carefully:

<existing_globals>
{self._generate_variable_descriptions()}
</existing_globals>

<example_tasks>
Task: Create a simple analysis of a list of numbers
- Generate a list of 100 random numbers between 1-1000
- Calculate the mean, median, and standard deviation
- Create a histogram of the distribution
- Print all results and display the plot

Task: Create a reinforcement learning (RL) agent to play a simple game
- Set up an OpenAI Gym environment (e.g., CartPole)
- Implement a Q-learning or Deep Q-Network (DQN) agent
- Train the model and optimize hyperparameters
- Visualize learning progress with reward graphs
- Save and reload trained models for inference
- Provide an option to let the trained agent play in real time

Task: Perform edge detection on an image
- Load an image from a URL or local file
- Convert the image to grayscale
- Apply Gaussian blur to reduce noise
- Use Canny edge detection to extract edges
- Display the original and processed images side by side
- Save the output image

Task: Build a basic sentiment analysis system
- Load a dataset of movie reviews (you can use a small sample)
- Preprocess the text (remove punctuation, lowercase, etc.)
- Create a TF-IDF vectorizer
- Split data into training and testing sets
- Train a classifier (e.g., Naive Bayes or LogisticRegression)
- Evaluate performance with accuracy, precision, recall
- Create a confusion matrix visualization
- Make predictions on new sample texts
</example_tasks>

Now, please refactor the given task description using the following guidelines:

1. Analyze the task description and identify the main components and objectives.

2. Structure the refactored task in a similar format to the example tasks, including:
   - A clear title that summarizes the task
   - A difficulty level (Easy, Intermediate, Hard, or Super Hard)
   - A brief introduction to the task's context and purpose
   - A code block containing step-by-step instructions
   - A list of required skills, libraries, or technologies

3. Incorporate model flows by breaking down the task into logical steps and explaining the process flow.

4. Include potential code fixes or common pitfalls that users might encounter while working on the task.

5. Add context learning elements by providing brief explanations or resources for key concepts related to the task.

6. Ensure that the refactored task is comprehensive and can stand alone as a learning exercise.

Please provide your refactored task description within <refactored_task> tags. Use appropriate subheadings and formatting to make the description clear and easy to read.

Additional tips:
- Mention any prerequisites or assumed knowledge
- Suggest potential extensions or variations of the task for further learning

Remember to maintain the original intent and complexity of the task while improving its structure and clarity.""")
            if '<refactored_task>' in task:
                task = task.split('<refactored_task>')[1]
            if '</refactored_task>' in task:
                task = task.split('</refactored_task>')[0]
        code_follow_up_prompt = f"""
You are an AI assistant responsible for evaluating task completion and providing feedback on the execution process. Your goal is to determine if a given task has been completed based on the execution result, and to offer insights for future improvements.

You will be provided with two inputs:
<task_description>
{original_task}
{f'<refactored_task_description_from_ai>{task}</refactored_task_description_from_ai>' if not do_continue else ''}
</task_description>

<code>
#CODE#
</code>

<execution_result>
#EXECUTION_RESULT#
</execution_result>

First, carefully analyze the task description and the execution result. Determine whether the task has been completed successfully based on the information provided.

If the task is completed:
1. Prepare a brief statement indicating that the task is done.
2. Summarize the output for the user in a clear and concise manner.

If the task is not completed:
1. Prepare a brief statement indicating that the task is not done.
2. Identify the specific aspects of the task that remain incomplete.

Regardless of task completion status, evaluate the procedure and effectiveness of the execution:
1. Analyze the workflow: Describe the steps taken in the execution process.
2. Assess effectiveness: Determine how well the procedure achieved the desired outcome.
3. Identify errors: Pinpoint any mistakes or inefficiencies in the execution.
4. Provide recommendations: Suggest improvements for future task executions.

tip: Enclose multi-line strings properly so that Python eval can process them!
tip: Set is_completed True if all requirements from <task_description> are completed.
tip: Help the agent with your analysis to finalize the <task_description>.
{'tip: Prefer new information from <execution_result> over <refactored_task_description_from_ai>, based on <code>' if not do_continue else ''}
note: for the final result, only take information from the <execution_result>. If the relevant information is not available, state the tips in the recommendations; otherwise set is_completed True and report that the task failed!
Ensure that your evaluation is thorough, constructive, and provides actionable insights for improving future task executions.
Add guidance based on the last execution result"""
        code_follow_up_prompt_ = [code_follow_up_prompt]
        initial_prompt = f"""
You are an AI py coding agent specializing in iterative development and code refinement, designed to perform tasks that involve thinking. Your goal is to complete the given task while demonstrating a clear thought process throughout the execution.
SYSTEM STATE:
<current_state>
Iteration: #ITER#
Status: #STATE#
Last EXECUTION: #EXECUTION#
</current_state>

ENVIRONMENT: {'current file :'+self.ipython.user_ns.get("__file__")  if self.ipython.user_ns.get("__file__") is not None else ''}

'''<global_variables>
#LOCALS#
</global_variables>'''

MEMORY:
<process_memory>
#PHINT#
</process_memory>

<chat_memory>
#CHINT#
</chat_memory>

VALIDATION CHECKLIST (Must verify before each action):
1. ✓ Check existing variables in ENVIRONMENT <global_variables>
2. ✓ Verify existing functions and classes
3. ✓ Review current imports
4. ✓ Confirm method signatures
5. ✓ Validate state preservation

WORKFLOW STEPS:
1. Analyze Current State:
   - Reason and use all available context
   - Do not repeat the same errors
   - Review existing implementations
   - Check variable values
   - Verify import statements
   - Document dependencies

2. Plan Change:
   - NO examples or simulations
   - No demo or mock data; no simulations allowed!
   - Use existing variables and code when possible
   - Prefer updates over rewrites

3. Execute Change:
   - Use appropriate action
   - Maintain existing state
   - Document modifications
   - Verify results

You will use a structure called ThinkResult to organize your thoughts and actions.
For each step of your task, follow this process:

ACTIONS:
1. 'code':
    - MUST check <global_variables> first
    - NEVER create demo functions
    - Include 'reason'
    - lang default 'py'
    - Required: code in content
    - code MUST call a function or display the raw variable / value at the end!
    - Required: {{'context':{{'lang':'py',  'reason': ... }}...}}
    - Optional file key in context example {{'context':{{'lang':'py',  'file': 'main.py' ,  'reason': ... }}...}}
    - py code allows for toplevel await !!! use it !!! like
:file-start:
print("using toplevel await")
await abc()
:file-end:

    - Tip: use comments to reason within the code
2. 'infos': Request specific details
3. 'guide': Get step clarification; use it on complex tasks and every 5 steps to stay on track!
4. 'brake': Pause for assessment
5. 'done': Summarize changes

CODE CONSTRAINTS:
1. State Preservation:
   - ALL variables persist
   - ALL functions remain
   - ALL classes are maintained

2. Import Management:
   - Check <global_variables> for modules
   - Use absolute imports
   - Document new dependencies

3. Function Handling:
   - NEVER overwrite existing
   - Use update for changes
   - Preserve signatures

4. Variable Scope:
   - Maintain existing scope
   - Check for conflicts
   - Document state changes

EXECUTION RULES:
1. VERIFY before create
2. UPDATE don't replace
3. TEST after each change

Next Action Required:
1. Review current state
2. Check existing code
3. Execute with state preservation

!!CRITICAL!!
- NO demo functions
- NO placeholder functions
- USE existing code
- FOR implementations, prefer writing large production-ready code chunks.
- FOR reasoning and validation, write small code blocks.
- THE CODE must call something or end with a value!
- NO INFINITE LOOPS! Non-breakable while loops are not allowed, except for UIs (closed by the user)
- NO 'python' top-level return; only write the variable or value itself!
- 'code is run using exec! do not use !pip ...'
'- instead use auto_install(package_name, install_method="pip", upgrade=False, quiet=False, version=None, extra_args=None)'
# Example usage first time
│ auto_install('pandas', version='1.3.0')
│ import pandas
│ auto_install('pygame')
│ import pygame
│ auto_install('numpy')
│ import numpy as np
!TIPS!
- <global_variables> can contain instances and functions you can use in your python code
- if the function is async you can use top level await
- if information is missing, try running code to get it
- if you get stuck or need assistance, break with a question to the user.
- run functions from <global_variables> using name(*args, **kwargs) or await name(*args, **kwargs)
- <global_variables> are globally accessible!
- if a <global_variables> name is lowercase, it is a ready-to-use instance
"""
        p_hint, c_hint = await self.get_process_hint(task)
        initial_prompt = initial_prompt.replace('#PHINT#', p_hint)
        initial_prompt = initial_prompt.replace('#CHINT#', c_hint)
        initial_prompt_ = initial_prompt
        iter_i = 0
        iter_p = 0
        iter_tat = 0
        next_infos = ""
        if not do_continue:
            await self.chat_session.add_message({'role': 'user', 'content': task})
        else:
            self.restore()
            await self.chat_session.add_message({'role': 'user', 'content': task})

        if self.web_js and self.browser_session is None:
            self.browser_session = BrowserWrapper(llm=self.agent.amd.model)

        # await self.verbose_output.log_message('user', task)
        self.verbose_output.log_header(task)
        while state != ThinkState.DONE:
            iter_i += 1
            t0 = time.perf_counter()
            prompt = initial_prompt.replace('#ITER#', f'{iter_i} max {self.max_iter}')
            prompt = prompt.replace('#STATE#', f'{state.name}')
            prompt = prompt.replace('#EXECUTION#', f'{next_infos}')  if next_infos else prompt.replace('Last EXECUTION: #EXECUTION#', '')
            prompt = prompt.replace('#LOCALS#', f'{self._generate_variable_descriptions()}')
            self.verbose_output.log_state(state.name, {})
            self.verbose_output.formatter.print_iteration(iter_i, self.max_iter)
            if state == ThinkState.ACTION:
                iter_tat += 1
                if iter_tat > self.max_think_after_think:
                    state = ThinkState.BRAKE
            else:
                iter_tat = 0

            if state == ThinkState.ACTION:
                # Get agent's thoughts
                think_dicts = await self.verbose_output.process(state.name, self.agent.a_format_class(
                    ThinkResults,
                    prompt,
                    message=self.chat_session.get_past_x(self.max_iter*2, last_u=not do_continue).copy()+([self.process_memory.history[-1]] if self.process_memory.history else []) ,
                ))
                think_dicts = think_dicts.get("actions")
                if think_dicts is None:
                    think_dicts = [await self.verbose_output.process(state.name, self.agent.a_format_class(
                        ThinkResult,
                        prompt,
                        message=self.chat_session.get_past_x(self.max_iter * 2, last_u=not do_continue).copy() + (
                            [self.process_memory.history[-1]] if self.process_memory.history else []),
                    ))]
                if len(think_dicts) == 1:
                    think_dict = think_dicts[0]
                else:
                    for think_dict in think_dicts[:-1]:
                        if think_dict.get('context') is None:
                            think_dict['context'] = {'context': 'N/A'}
                        if not isinstance(think_dict.get('context'), dict):
                            think_dict['context'] = {'context': think_dict.get('context')}
                        think_result = ThinkResult(**think_dict)
                        await self.chat_session.add_message(
                            {'role': 'assistant', 'content': think_result.content + str(think_result.context)})
                        state, result = await self.verbose_output.process(think_dict.get("action"),
                                                                          self._process_think_result(think_result,
                                                                                                     task=task))
                        if result:
                            await self.chat_session.add_message(
                                {'role': 'system', 'content': 'Evaluation: ' + str(result)})
                            await self.verbose_output.log_message('system', str(result))
                    think_dict = think_dicts[-1]
                await self.verbose_output.log_think_result(think_dict)
                if think_dict.get('context') is None:
                    think_dict['context'] = {'context': 'N/A'}
                if not isinstance(think_dict.get('context'), dict):
                    think_dict['context'] = {'context': think_dict.get('context')}
                think_result = ThinkResult(**think_dict)
                state, result = await self.verbose_output.process(think_dict.get("action"), self._process_think_result(think_result, task=task))
                await self.chat_session.add_message({'role': 'assistant', 'content': think_result.content + str(think_result.context)})
                if result:
                    await self.chat_session.add_message({'role': 'system', 'content': 'Evaluation: '+str(result)})
                    await self.verbose_output.log_message('system', str(result))
                    code_follow_up_prompt_[0] = code_follow_up_prompt.replace("#EXECUTION_RESULT#", str(result))
                    if isinstance(result, ExecutionRecord):
                        code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#", result.code)
                    else:
                        code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#", self._generate_variable_descriptions())
                else:
                    code_follow_up_prompt_[0] = code_follow_up_prompt.replace("#EXECUTION_RESULT#", str(think_result))
                    code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#",
                                                                              self._generate_variable_descriptions())


            elif state == ThinkState.PROCESSING:
                # Get agent's thoughts
                class Next(BaseModel):
                    is_completed: bool
                    recommendations: str
                    errors: str
                    effectiveness: str
                    workflow: str
                    text: str
                # Format the agent's thoughts into a structured response
                _agent = self.v_agent if self.v_agent is not None else self.agent
                next_dict = await self.verbose_output.process(state.name, _agent.a_format_class(
                    Next,
                    code_follow_up_prompt_[0],
                    message=self.chat_session.get_past_x(self.max_iter*2, last_u=not do_continue).copy(),
                ))
                next_infos = json.dumps(next_dict)
                await self.verbose_output.log_process_result(next_dict)
                await self.process_memory.add_message({'role': 'assistant', 'content': next_infos.replace('workflow:', 'past-workflow:')})
                iter_p += 1
                code_follow_up_prompt_[0] = code_follow_up_prompt
                if not next_dict.get('is_completed', True):
                    state = ThinkState.ACTION
                    initial_prompt = initial_prompt_.replace('#ITER#',f'#ITER#\nReasoning assist result: {next_dict}')
                    continue
                elif next_dict.get('is_completed', False):
                    result = next_dict.get('text', '')
                    state = ThinkState.DONE
                    continue
                else:
                    result = next_dict.get('text', '')
                    break

            elif state == ThinkState.BRAKE:
                break

            if iter_i < self.max_iter:
                if time.perf_counter() - t0 < self.timeout_timer * 2.5:
                    with Spinner(f"Preventing rate limit: pausing for {self.timeout_timer}s", symbols='+', time_in_s=self.timeout_timer, count_down=True):
                        await asyncio.sleep(self.timeout_timer)
            else:
                state = ThinkState.BRAKE
                if isinstance(result, ExecutionRecord):
                    result = result.result
                elif isinstance(result, str):
                    pass
                else:
                    result = "Max iterations"
                break

        self.verbose_output.log_state(state.name, {})

        return PipelineResult(
            variables=self.variables,
            result=result,
            execution_history=self.execution_history,
            message=self.chat_session.get_past_x(iter_i*2, last_u=not do_continue),
        )

    async def run_project(self, task, lang='py', execute_function=None):
        if execute_function is None:
            if lang == 'py':
                execute_function = default_python_execute_function
            elif lang == 'rust':
                execute_function = default_rust_execute_function
            else:
                raise ValueError(f"Unsupported language: {lang}")
        class FileAction(BaseModel):
            action: str
            path: str
            content: str | None = None

        class ProjectThinkResult(BaseModel):
            action: str
            file_actions: list[FileAction]
            reasoning: str

        class ProjectPipelineResult(BaseModel):
            result: str
            execution_history: list[str]
            files: dict[str, str]
        state = ThinkState.ACTION
        result = None
        vfs = VirtualFileSystem(self._session_dir / f"project_{lang}")

        project_prompt = f"""
    You are an AI coding agent specializing in {lang} project development. Your task is to create, modify, and manage files within a project structure to complete the given task. Use the VirtualFileSystem to interact with files.

    TASK DESCRIPTION:
    {task}
    CURRENT FILES:
    #files#

    WORKFLOW STEPS:
    1. Analyze the current project state
    2. Plan necessary changes or additions
    3. Execute changes using file actions
    4. Evaluate the project's progress

    Use the ProjectThinkResult structure to organize your thoughts and actions:

    class ProjectThinkResult(BaseModel):
        action: str  # 'code', 'evaluate', 'done'
        file_actions: List[FileAction]
        reasoning: str

    class FileAction(BaseModel):
        action: str  # 'write', 'read', 'delete', 'list'
        path: str
        content: Optional[str] = None

    EXECUTION RULES:
    1. Use absolute paths for all file operations
    2. Maintain a clear project structure
    3. Document your code and reasoning
    4. Ensure all necessary files are created and properly linked
    5. Use the appropriate language syntax and best practices for {lang}

    Next Action Required:
    1. Review the current project state
    2. Plan the next step in project development
    3. Execute file actions to implement changes
    """

        execution_history = []
        files = {}

        iter_i = 0
        self.verbose_output.log_header(task)

        while state != ThinkState.DONE:
            iter_i += 1
            self.verbose_output.formatter.print_iteration(iter_i, self.max_iter)
            if iter_i>self.max_iter:
                break
            if state == ThinkState.ACTION:
                think_result = await self.agent.a_format_class(
                    ProjectThinkResult,
                    project_prompt.replace('#files#', vfs.print_file_structure()),
                    message=execution_history
                )
                self.verbose_output.log_state(state.name, think_result)
                think_result = ProjectThinkResult(**think_result)
                for file_action in think_result.file_actions:
                    path = file_action.path
                    Path(file_action.path).parent.mkdir(parents=True, exist_ok=True)
                    if file_action.action == 'write':
                        vfs.write_file(path, file_action.content)
                        files[path] = file_action.content
                    elif file_action.action == 'read':
                        content = vfs.read_file(path)
                        files[path] = content
                    elif file_action.action == 'delete':
                        vfs.delete_file(path)
                        files.pop(path, None)
                    elif file_action.action == 'list':
                        dir_contents = vfs.list_directory(path)
                        files[path] = str(dir_contents)

                if think_result.action == 'evaluate':
                    state = ThinkState.PROCESSING
                elif think_result.action == 'done':
                    state = ThinkState.DONE

                execution_history.append(f"Action: {think_result.action}\nReasoning: {think_result.reasoning}")

            elif state == ThinkState.PROCESSING:
                if execute_function:
                    execution_result = await execute_function(files)
                    execution_history.append(f"Execution Result: {execution_result}")

                    evaluation_prompt = f"""
    Evaluate the current state of the project based on the execution result:

    {execution_result}

    Determine if the project is complete or if further modifications are needed.
    """
                    evaluation = await self.agent.a_format_class(
                        ProjectThinkResult,
                        evaluation_prompt,
                        message=execution_history
                    )
                    self.verbose_output.log_state(state.name, evaluation)
                    evaluation = ProjectThinkResult(**evaluation)
                    if evaluation.action == 'done':
                        state = ThinkState.DONE
                        result = execution_result
                    else:
                        state = ThinkState.ACTION
                else:
                    state = ThinkState.ACTION
            else:
                break

        return ProjectPipelineResult(
            result=result,
            execution_history=execution_history,
            files=files
        )

    async def __aenter__(self):
        self.clear()
        return self

    async def configure(self, verbose=None, print_function=None, with_js=False, agent=None, variables=None, web_kwargs=None):
        if verbose is not None and (print_function is not None or verbose != self.verbose_output.verbose):
            if agent is None:
                agent = self.agent
            else:
                self.agent = agent
            agent.verbose = verbose
            self.verbose_output = EnhancedVerboseOutput(verbose=verbose, print_f=print_function)

            if print_function is not None:
                agent.print_verbose = print_function
        if variables:
            self.variables = {**self.variables, **self._process_variables(variables)}
        if with_js and web_kwargs:
            self.browser_session: BrowserWrapper | None = BrowserWrapper(**web_kwargs)
        self.web_js = with_js
        if self.restore_var:
            self.restore()

        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        if self.web_js:
            await self.browser_session.close()
            if self.restore_var:
                self.save_session(f"Pipeline_Session_{self.agent.amd.name}")
        if exc_type is not None:
            print(f"Exception occurred: {exc_value}")
        else:
            print("Pipe Exit")
__init__(agent, verbose=False, max_iter=12, variables=None, top_n=None, restore=None, max_think_after_think=None, print_f=None, web_js=False, timeout_timer=25, v_agent=None, web_llm=None)

Initialize the Pipeline.

Parameters:

Name       Type                                Description                                         Default
agent      Any                                 AI agent instance to use for task execution         required
verbose    bool                                Print internal results                              False
max_iter   int                                 Maximum number of iterations                        12
variables  dict[str, Any] | list[Any] | None   Dictionary or list of variables to make available   None
top_n      bool | None                         Limit variable descriptions to the top N most used  None
web_js     bool                                Whether the agent is allowed to use the web         False
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def __init__(
    self,
    agent: Any,
    verbose: bool=False,
    max_iter: int = 12,
    variables: dict[str, Any] | list[Any] | None = None,
    top_n: bool | None = None,
    restore: bool | None = None,
    max_think_after_think = None,
    print_f=None,
    web_js=False,
    timeout_timer=25,
    v_agent=None,
    web_llm=None,
):
    """
    Initialize the Pipeline.

    Args:
        agent: AI agent instance to use for task execution
        verbose: print internal results
        max_iter: Maximum number of iterations (default: 12)
        variables: Dictionary or list of variables to make available
        top_n: Limit variable descriptions to top N most used
        web_js: whether the agent is allowed to use the web
    """

    self.timeout_timer = timeout_timer
    self.top_n = top_n
    self.max_iter = max_iter
    self.max_think_after_think = max_think_after_think or max_iter // 2
    self.agent = agent
    self.v_agent = v_agent or agent
    # self.agent.verbose = verbose
    self.task = None
    self.web_js = web_js
    self.print_f = print_f
    self.verbose_output = EnhancedVerboseOutput(verbose=verbose, print_f=self.print_f)
    self.variables = self._process_variables(variables or {})
    self.variables['auto_install'] = auto_install
    self.execution_history = []
    self.session_name = None

    self.browser_session: BrowserWrapper | None = BrowserWrapper(llm=web_llm or agent.amd.model)
    self.js_history: list[JSExecutionRecord] = []

    self._session_dir = Path(get_app().appdata) / 'ChatSession' / agent.amd.name
    self.ipython = MockIPython(self._session_dir, auto_remove=False)
    self.chat_session = ChatSession(get_app().get_mod("isaa").get_memory(), space_name=f"ChatSession/{agent.amd.name}/Pipeline.session", max_length=max_iter)
    self.process_memory = ChatSession(get_app().get_mod("isaa").get_memory(), space_name=f"ChatSession/{agent.amd.name}/Process.session", max_length=max_iter)

    # Initialize interpreter with variables
    self.init_keys = list(self.ipython.user_ns.keys()).copy()
    if self.web_js:
        self.variables['web_actions'] = self.browser_session.run
        self.variables['browser_session'] = self.browser_session
    self.ipython.user_ns.update(self.variables)

    self.restore_var = restore

    if restore:
        self.restore()
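
A construction sketch, assuming agent is an existing ISAA agent instance; anything passed via variables becomes a global inside the managed interpreter, alongside the always-injected auto_install helper:

import pandas as pd  # assumes pandas is installed

df = pd.DataFrame({'a': [1, 2, 3]})
pipe = Pipeline(
    agent,                 # hypothetical pre-built agent
    verbose=False,
    max_iter=12,
    variables={'df': df},  # exposed as a global to generated code
    restore=False,
)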
__str__()

String representation of pipeline session

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def __str__(self):
    """String representation of pipeline session"""
    return str(self.ipython)
load_session(name)

Load saved session

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def load_session(self, name: str):
    """Load saved session"""
    self.ipython.load_session(name)
    self.variables.update(self.ipython.user_ns)
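
load_session pairs with save_session (documented below); a hedged round-trip sketch:

pipe.save_session("analysis_v1")   # persist the current interpreter namespace
# ... later, possibly in a new Pipeline built with the same agent ...
pipe.load_session("analysis_v1")   # restore it and merge into pipe.variables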
run(task, do_continue=False) async

Run the pipeline with separated thinking and processing phases

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
    async def run(self, task, do_continue=False) -> PipelineResult:
        """Run the pipeline with separated thinking and processing phases"""
        state = ThinkState.ACTION
        result = None
        original_task = task
        if not do_continue:
            task = self.agent.mini_task(task, "user", f"""You are an AI assistant tasked with refactoring a user-provided task description into a more structured format with context learning and examples. Your goal is to create a comprehensive and well-organized task description that incorporates model flows and potential code fixes.

First, I will provide you with a task description and some example tasks. Please read them carefully:

<existing_globals>
{self._generate_variable_descriptions()}
</existing_globals>

<example_tasks>
Task: Create a simple analysis of a list of numbers
- Generate a list of 100 random numbers between 1-1000
- Calculate the mean, median, and standard deviation
- Create a histogram of the distribution
- Print all results and display the plot

Task: Create a reinforcement learning (RL) agent to play a simple game
- Set up an OpenAI Gym environment (e.g., CartPole)
- Implement a Q-learning or Deep Q-Network (DQN) agent
- Train the model and optimize hyperparameters
- Visualize learning progress with reward graphs
- Save and reload trained models for inference
- Provide an option to let the trained agent play in real time

Task: Perform edge detection on an image
- Load an image from a URL or local file
- Convert the image to grayscale
- Apply Gaussian blur to reduce noise
- Use Canny edge detection to extract edges
- Display the original and processed images side by side
- Save the output image

Task: Build a basic sentiment analysis system
- Load a dataset of movie reviews (you can use a small sample)
- Preprocess the text (remove punctuation, lowercase, etc.)
- Create a TF-IDF vectorizer
- Split data into training and testing sets
- Train a classifier (e.g., Naive Bayes or LogisticRegression)
- Evaluate performance with accuracy, precision, recall
- Create a confusion matrix visualization
- Make predictions on new sample texts
</example_tasks>

Now, please refactor the given task description using the following guidelines:

1. Analyze the task description and identify the main components and objectives.

2. Structure the refactored task in a similar format to the example tasks, including:
   - A clear title that summarizes the task
   - A difficulty level (Easy, Intermediate, Hard, or Super Hard)
   - A brief introduction to the task's context and purpose
   - A code block containing step-by-step instructions
   - A list of required skills, libraries, or technologies

3. Incorporate model flows by breaking down the task into logical steps and explaining the process flow.

4. Include potential code fixes or common pitfalls that users might encounter while working on the task.

5. Add context learning elements by providing brief explanations or resources for key concepts related to the task.

6. Ensure that the refactored task is comprehensive and can stand alone as a learning exercise.

Please provide your refactored task description within <refactored_task> tags. Use appropriate subheadings and formatting to make the description clear and easy to read.

Additional tips:
- Mention any prerequisites or assumed knowledge
- Suggest potential extensions or variations of the task for further learning

Remember to maintain the original intent and complexity of the task while improving its structure and clarity.""")
            if '<refactored_task>' in task:
                task = task.split('<refactored_task>')[1]
            if '</refactored_task>' in task:
                task = task.split('</refactored_task>')[0]
        code_follow_up_prompt = f"""
You are an AI assistant responsible for evaluating task completion and providing feedback on the execution process. Your goal is to determine if a given task has been completed based on the execution result, and to offer insights for future improvements.

You will be provided with the following inputs:
<task_description>
{original_task}
{f'<refactored_task_description_from_ai>{task}</refactored_task_description_from_ai>' if not do_continue else ''}
</task_description>

<code>
#CODE#
</code>

<execution_result>
#EXECUTION_RESULT#
</execution_result>

First, carefully analyze the task description and the execution result. Determine whether the task has been completed successfully based on the information provided.

If the task is completed:
1. Prepare a brief statement indicating that the task is done.
2. Summarize the output for the user in a clear and concise manner.

If the task is not completed:
1. Prepare a brief statement indicating that the task is not done.
2. Identify the specific aspects of the task that remain incomplete.

Regardless of task completion status, evaluate the procedure and effectiveness of the execution:
1. Analyze the workflow: Describe the steps taken in the execution process.
2. Assess effectiveness: Determine how well the procedure achieved the desired outcome.
3. Identify errors: Pinpoint any mistakes or inefficiencies in the execution.
4. Provide recommendations: Suggest improvements for future task executions.

tip: Enclose multi-line strings properly so that Python eval can function!
tip: Set is_completed True if all requirements from <task_description> are completed.
tip: Help the Agent with your analysis to finalize the <task_description>.
{'tip: Prefer new information from <execution_result> over <refactored_task_description_from_ai>, based on <code>' if not do_continue else ''}
note: for the final result, take information only from <execution_result>. If the relevant information is not available, say so and add tips in the recommendations; otherwise set is_completed True and report that the task failed!
Ensure that your evaluation is thorough, constructive, and provides actionable insights for improving future task executions.
Add guidance based on the last execution result"""
        code_follow_up_prompt_ = [code_follow_up_prompt]
        initial_prompt = f"""
You are an AI py coding agent specializing in iterative development and code refinement, designed to perform tasks that involve thinking. Your goal is to complete the given task while demonstrating a clear thought process throughout the execution.
SYSTEM STATE:
<current_state>
Iteration: #ITER#
Status: #STATE#
Last EXECUTION: #EXECUTION#
</current_state>

ENVIRONMENT: {'current file :'+self.ipython.user_ns.get("__file__")  if self.ipython.user_ns.get("__file__") is not None else ''}

'''<global_variables>
#LOCALS#
</global_variables>'''

MEMORY:
<process_memory>
#PHINT#
</process_memory>

<chat_memory>
#CHINT#
</chat_memory>

VALIDATION CHECKLIST (Must verify before each action):
1. ✓ Check existing variables in ENVIRONMENT <global_variables>
2. ✓ Verify existing functions and classes
3. ✓ Review current imports
4. ✓ Confirm method signatures
5. ✓ Validate state preservation

WORKFLOW STEPS:
1. Analyze Current State:
   - Reason and use all available context
   - Do not repeat the same errors
   - Review existing implementations
   - Check variable values
   - Verify import statements
   - Document dependencies

2. Plan Change:
   - NO examples or simulations
   - No demo or mock data; no simulations allowed!
   - Use existing variables and code when possible
   - Prefer updates over rewrites

3. Execute Change:
   - Use appropriate action
   - Maintain existing state
   - Document modifications
   - Verify results

You will use a structure called ThinkResult to organize your thoughts and actions.
For each step of your task, follow this process:

ACTIONS:
1. 'code':
    - MUST check <global_variables> first
    - NEVER create demo functions
    - Include 'reason'
    - lang default 'py'
    - Required: code in content
    - code MUST call a function or display the raw variable / value at the end!
    - Required: {{'context':{{'lang':'py',  'reason': ... }}...}}
    - Optional file key in context example {{'context':{{'lang':'py',  'file': 'main.py' ,  'reason': ... }}...}}
    - py code allows for toplevel await !!! use it !!! like
:file-start:
print("using toplevel await")
await abc()
:file-end:

    - Tip: use comments to reason within the code
2. 'infos': Request specific details
3. 'guide': Get step clarification; use it on complex tasks and every 5 steps to stay on track!
4. 'brake': Pause for assessment
5. 'done': Summarize changes

CODE CONSTRAINTS:
1. State Preservation:
   - ALL variables persist
   - ALL functions remain
   - ALL classes are maintained

2. Import Management:
   - Check <global_variables> for modules
   - Use absolute imports
   - Document new dependencies

3. Function Handling:
   - NEVER overwrite existing
   - Use update for changes
   - Preserve signatures

4. Variable Scope:
   - Maintain existing scope
   - Check for conflicts
   - Document state changes

EXECUTION RULES:
1. VERIFY before create
2. UPDATE don't replace
3. TEST after each change

Next Action Required:
1. Review current state
2. Check existing code
3. Execute with state preservation

!!CRITICAL!!
- NO demo functions
- NO placeholder functions
- USE existing code
- FOR implementations, prefer writing large production-ready code chunks.
- FOR reasoning and validation, write small code blocks.
- THE CODE must call something or end with a value!
- NO INFINITE LOOPS! Non-breakable while loops are not allowed, except for UIs (closed by the user)
- NO 'python' top-level return; only write the variable or value itself!
- 'code is run using exec! do not use !pip ...'
'- instead use auto_install(package_name, install_method="pip", upgrade=False, quiet=False, version=None, extra_args=None)'
# Example usage first time
│ auto_install('pandas', version='1.3.0')
│ import pandas
│ auto_install('pygame')
│ import pygame
│ auto_install('numpy')
│ import numpy as np
!TIPS!
- <global_variables> can contain instances and functions you can use in your python code
- if the function is async you can use top level await
- if information is missing, try running code to get it
- if you get stuck or need assistance, break with a question to the user.
- run functions from <global_variables> using name(*args, **kwargs) or await name(*args, **kwargs)
- <global_variables> are globally accessible!
- if a <global_variables> name is lowercase, it is a ready-to-use instance
"""
        p_hint, c_hint = await self.get_process_hint(task)
        initial_prompt = initial_prompt.replace('#PHINT#', p_hint)
        initial_prompt = initial_prompt.replace('#CHINT#', c_hint)
        initial_prompt_ = initial_prompt
        iter_i = 0
        iter_p = 0
        iter_tat = 0
        next_infos = ""
        if not do_continue:
            await self.chat_session.add_message({'role': 'user', 'content': task})
        else:
            self.restore()
            await self.chat_session.add_message({'role': 'user', 'content': task})

        if self.web_js and self.browser_session is None:
            self.browser_session = BrowserWrapper(llm=self.agent.amd.model)

        # await self.verbose_output.log_message('user', task)
        self.verbose_output.log_header(task)
        while state != ThinkState.DONE:
            iter_i += 1
            t0 = time.perf_counter()
            prompt = initial_prompt.replace('#ITER#', f'{iter_i} max {self.max_iter}')
            prompt = prompt.replace('#STATE#', f'{state.name}')
            prompt = prompt.replace('#EXECUTION#', f'{next_infos}')  if next_infos else prompt.replace('Last EXECUTION: #EXECUTION#', '')
            prompt = prompt.replace('#LOCALS#', f'{self._generate_variable_descriptions()}')
            self.verbose_output.log_state(state.name, {})
            self.verbose_output.formatter.print_iteration(iter_i, self.max_iter)
            if state == ThinkState.ACTION:
                iter_tat += 1
                if iter_tat > self.max_think_after_think:
                    state = ThinkState.BRAKE
            else:
                iter_tat = 0

            if state == ThinkState.ACTION:
                # Get agent's thoughts
                think_dicts = await self.verbose_output.process(state.name, self.agent.a_format_class(
                    ThinkResults,
                    prompt,
                    message=self.chat_session.get_past_x(self.max_iter*2, last_u=not do_continue).copy()+([self.process_memory.history[-1]] if self.process_memory.history else []) ,
                ))
                think_dicts = think_dicts.get("actions")
                if think_dicts is None:
                    think_dicts = [await self.verbose_output.process(state.name, self.agent.a_format_class(
                        ThinkResult,
                        prompt,
                        message=self.chat_session.get_past_x(self.max_iter * 2, last_u=not do_continue).copy() + (
                            [self.process_memory.history[-1]] if self.process_memory.history else []),
                    ))]
                if len(think_dicts) == 1:
                    think_dict = think_dicts[0]
                else:
                    for think_dict in think_dicts[:-1]:
                        if think_dict.get('context') is None:
                            think_dict['context'] = {'context': 'N/A'}
                        if not isinstance(think_dict.get('context'), dict):
                            think_dict['context'] = {'context': think_dict.get('context')}
                        think_result = ThinkResult(**think_dict)
                        await self.chat_session.add_message(
                            {'role': 'assistant', 'content': think_result.content + str(think_result.context)})
                        state, result = await self.verbose_output.process(think_dict.get("action"),
                                                                          self._process_think_result(think_result,
                                                                                                     task=task))
                        if result:
                            await self.chat_session.add_message(
                                {'role': 'system', 'content': 'Evaluation: ' + str(result)})
                            await self.verbose_output.log_message('system', str(result))
                    think_dict = think_dicts[-1]
                await self.verbose_output.log_think_result(think_dict)
                if think_dict.get('context') is None:
                    think_dict['context'] = {'context': 'N/A'}
                if not isinstance(think_dict.get('context'), dict):
                    think_dict['context'] = {'context': think_dict.get('context')}
                think_result = ThinkResult(**think_dict)
                state, result = await self.verbose_output.process(think_dict.get("action"), self._process_think_result(think_result, task=task))
                await self.chat_session.add_message({'role': 'assistant', 'content': think_result.content + str(think_result.context)})
                if result:
                    await self.chat_session.add_message({'role': 'system', 'content': 'Evaluation: '+str(result)})
                    await self.verbose_output.log_message('system', str(result))
                    code_follow_up_prompt_[0] = code_follow_up_prompt.replace("#EXECUTION_RESULT#", str(result))
                    if isinstance(result, ExecutionRecord):
                        code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#", result.code)
                    else:
                        code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#", self._generate_variable_descriptions())
                else:
                    code_follow_up_prompt_[0] = code_follow_up_prompt.replace("#EXECUTION_RESULT#", str(think_result))
                    code_follow_up_prompt_[0] = code_follow_up_prompt_[0].replace("#CODE#",
                                                                              self._generate_variable_descriptions())


            elif state == ThinkState.PROCESSING:
                # Get agent's thoughts
                class Next(BaseModel):
                    is_completed: bool
                    recommendations: str
                    errors: str
                    effectiveness: str
                    workflow: str
                    text: str
                # Format the agent's thoughts into a structured response
                _agent = self.v_agent if self.v_agent is not None else self.agent
                next_dict = await self.verbose_output.process(state.name, _agent.a_format_class(
                    Next,
                    code_follow_up_prompt_[0],
                    message=self.chat_session.get_past_x(self.max_iter*2, last_u=not do_continue).copy(),
                ))
                next_infos = json.dumps(next_dict)
                await self.verbose_output.log_process_result(next_dict)
                await self.process_memory.add_message({'role': 'assistant', 'content': next_infos.replace('workflow:', 'past-workflow:')})
                iter_p += 1
                code_follow_up_prompt_[0] = code_follow_up_prompt
                if not next_dict.get('is_completed', True):
                    state = ThinkState.ACTION
                    initial_prompt = initial_prompt_.replace('#ITER#',f'#ITER#\nReasoning assist result: {next_dict}')
                    continue
                elif next_dict.get('is_completed', False):
                    result = next_dict.get('text', '')
                    state = ThinkState.DONE
                    continue
                else:
                    result = next_dict.get('text', '')
                    break

            elif state == ThinkState.BRAKE:
                break

            if iter_i < self.max_iter:
                if time.perf_counter() - t0 < self.timeout_timer * 2.5:
                    with Spinner(f"Preventing rate limit: pausing for {self.timeout_timer}s", symbols='+', time_in_s=self.timeout_timer, count_down=True):
                        await asyncio.sleep(self.timeout_timer)
            else:
                state = ThinkState.BRAKE
                if isinstance(result, ExecutionRecord):
                    result = result.result
                elif isinstance(result, str):
                    pass
                else:
                    result = "Max iterations"
                break

        self.verbose_output.log_state(state.name, {})

        return PipelineResult(
            variables=self.variables,
            result=result,
            execution_history=self.execution_history,
            message=self.chat_session.get_past_x(iter_i*2, last_u=not do_continue),
        )
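
A hedged usage sketch: a second call with do_continue=True resumes the same session, skipping the task-refactoring step and restoring prior state (the task strings and the data.csv file are illustrative):

first = await pipe.run("Load data.csv and summarize the columns")
follow_up = await pipe.run(
    "Now plot a histogram of the first column",
    do_continue=True,   # reuse the existing chat session and variables
)
print(follow_up.result)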
save_session(name)

Save current session

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def save_session(self, name: str):
    """Save current session"""
    self.session_name = name
    self.ipython.save_session(name)
SyncReport dataclass

Report of variables synced from namespace to pipeline

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
@dataclass
class SyncReport:
    """Report of variables synced from namespace to pipeline"""
    added: dict[str, str]
    skipped: dict[str, str]  # var_name -> reason
    errors: dict[str, str]  # var_name -> error message

    def __str__(self) -> str:
        parts = []
        if self.added:
            parts.append("Added variables:")
            for name, type_ in self.added.items():
                parts.append(f"  - {name}: {type_}")
        if self.skipped:
            parts.append("\nSkipped variables:")
            for name, reason in self.skipped.items():
                parts.append(f"  - {name}: {reason}")
        if self.errors:
            parts.append("\nErrors:")
            for name, error in self.errors.items():
                parts.append(f"  - {name}: {error}")
        return "\n".join(parts)
TeeStream

Stream that writes to both console and buffer

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class TeeStream:
    """Stream that writes to both console and buffer"""
    def __init__(self, console_stream, buffer_stream):
        self.console_stream = console_stream
        self.buffer_stream = buffer_stream

    def write(self, data):
        self.console_stream.write(data)
        self.buffer_stream.write(data)
        self.console_stream.flush()  # Ensure immediate console output

    def flush(self):
        self.console_stream.flush()
        self.buffer_stream.flush()
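
A self-contained sketch: tee stdout into an in-memory buffer while still printing to the console:

import io
import sys

buffer = io.StringIO()
tee = TeeStream(sys.stdout, buffer)
old_stdout, sys.stdout = sys.stdout, tee
print("captured and shown")        # written to the console and the buffer
sys.stdout = old_stdout            # always restore the original stream
assert "captured and shown" in buffer.getvalue()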
VerboseFormatter
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class VerboseFormatter:
    def __init__(self, print_f, spinner_style: str = "d"):
        self.style = Style()
        self.current_spinner = None
        self.spinner_style = spinner_style
        self.print = print_f

    def print_header(self, text: str):
        """Print a formatted header with separator line"""
        width = 80
        self.print(f"\n{self.style.BLUE('=' * width)}")
        self.print(self.style.BLUE2(f"⚡ {text.center(width - 4)} ⚡"))
        self.print(f"{self.style.BLUE('=' * width)}\n")

    def print_section(self, title: str, content: str):
        """Print a formatted section with title and content"""
        self.print(f"{self.style.YELLOW('┌─')} {self.style.YELLOW2(title)}")
        for line in content.split('\n'):
            try:
                self.print(f"{self.style.YELLOW('│')} {line}")
            except Exception as e:
                try:
                    pos = int(str(e).split('position ')[1].split('-')[0])
                    line = line[:pos] + line[pos+1:]
                    self.print(f"{self.style.YELLOW('│')} {line}")
                except Exception as e:
                    self.print(f"{self.style.RED('│')} UNABLE TO PRINT {str(e)}")
        self.print(f"{self.style.YELLOW('└─')} {self.style.GREY('End of section')}\n")

    def print_iteration(self, current: int, maximum: int):
        """Print iteration progress with visual bar"""
        progress = int((current / maximum) * 20)
        bar = "█" * progress + "░" * (20 - progress)
        self.print(f"\r{self.style.CYAN(f'Iteration [{bar}] {current}/{maximum}')}  ", end='')

    def print_state(self, state: str, details: dict[str, Any] | None = None):
        """Print current state with optional details"""
        state_color = {
            'ACTION': self.style.GREEN2,
            'PROCESSING': self.style.YELLOW2,
            'BRAKE': self.style.RED2,
            'DONE': self.style.BLUE2
        }.get(state, self.style.WHITE2)
        res_str = f"\nCurrent State: {state}"
        self.print(f"\n{self.style.Bold('Current State:')} {state_color(state)}")

        if details:
            for key, value in details.items():
                self.print(f"  {self.style.GREY('├─')} {self.style.CYAN(key)}: {value}")
                res_str += f"  ├─ {key}: {value}\n"
        return res_str

    def print_method_update(self, method_update: 'MethodUpdate'):
        """Print a formatted view of a MethodUpdate structure"""
        # Header with class and method name
        self.print(f"\n{self.style.BLUE('┏━')} {self.style.Bold('Method Update Details')}")

        # Class and method information
        self.print(f"{self.style.BLUE('┣━')} Class: {self.style.GREEN2(method_update.class_name)}")
        self.print(f"{self.style.BLUE('┣━')} Method: {self.style.YELLOW2(method_update.method_name)}")

        # Description if available
        if method_update.description:
            self.print(f"{self.style.BLUE('┣━')} Description:")
            for line in method_update.description.split('\n'):
                self.print(f"{self.style.BLUE('┃')}  {self.style.GREY(line)}")

        # Code section
        self.print(f"{self.style.BLUE('┣━')} Code:")
        code_lines = method_update.code.split('\n')
        for i, line in enumerate(code_lines):
            # Different styling for first and last lines
            if i == 0:
                self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('┌─')} {line}")
            elif i == len(code_lines) - 1:
                self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('└─')} {line}")
            else:
                self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('│')} {line}")

        # Footer
        self.print(f"{self.style.BLUE('┗━')} {self.style.GREY('End of method update')}\n")

    async def process_with_spinner(self, message: str, coroutine):
        """Execute a coroutine with a spinner indicator"""
        with Spinner(message, symbols=self.spinner_style):
            result = await coroutine
            return result
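
A usage sketch, assuming the Style helper renders ANSI colors on your terminal; any print-compatible callable works as print_f:

fmt = VerboseFormatter(print_f=print)
fmt.print_header("Pipeline run")
fmt.print_iteration(3, 12)   # draws: Iteration [█████░░░░░░░░░░░░░░░] 3/12
fmt.print_state('ACTION', {'task': 'demo'})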
print_header(text)

Print a formatted header with separator line

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def print_header(self, text: str):
    """Print a formatted header with separator line"""
    width = 80
    self.print(f"\n{self.style.BLUE('=' * width)}")
    self.print(self.style.BLUE2(f"⚡ {text.center(width - 4)} ⚡"))
    self.print(f"{self.style.BLUE('=' * width)}\n")
print_iteration(current, maximum)

Print iteration progress with visual bar

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def print_iteration(self, current: int, maximum: int):
    """Print iteration progress with visual bar"""
    progress = int((current / maximum) * 20)
    bar = "█" * progress + "░" * (20 - progress)
    self.print(f"\r{self.style.CYAN(f'Iteration [{bar}] {current}/{maximum}')}  ", end='')
print_method_update(method_update)

Print a formatted view of a MethodUpdate structure

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def print_method_update(self, method_update: 'MethodUpdate'):
    """Print a formatted view of a MethodUpdate structure"""
    # Header with class and method name
    self.print(f"\n{self.style.BLUE('┏━')} {self.style.Bold('Method Update Details')}")

    # Class and method information
    self.print(f"{self.style.BLUE('┣━')} Class: {self.style.GREEN2(method_update.class_name)}")
    self.print(f"{self.style.BLUE('┣━')} Method: {self.style.YELLOW2(method_update.method_name)}")

    # Description if available
    if method_update.description:
        self.print(f"{self.style.BLUE('┣━')} Description:")
        for line in method_update.description.split('\n'):
            self.print(f"{self.style.BLUE('┃')}  {self.style.GREY(line)}")

    # Code section
    self.print(f"{self.style.BLUE('┣━')} Code:")
    code_lines = method_update.code.split('\n')
    for i, line in enumerate(code_lines):
        # Different styling for first and last lines
        if i == 0:
            self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('┌─')} {line}")
        elif i == len(code_lines) - 1:
            self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('└─')} {line}")
        else:
            self.print(f"{self.style.BLUE('┃')}  {self.style.CYAN('│')} {line}")

    # Footer
    self.print(f"{self.style.BLUE('┗━')} {self.style.GREY('End of method update')}\n")
print_section(title, content)

Print a formatted section with title and content

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def print_section(self, title: str, content: str):
    """Print a formatted section with title and content"""
    self.print(f"{self.style.YELLOW('┌─')} {self.style.YELLOW2(title)}")
    for line in content.split('\n'):
        try:
            self.print(f"{self.style.YELLOW('│')} {line}")
        except Exception as e:
            try:
                pos = int(str(e).split('position ')[1].split('-')[0])
                line = line[:pos] + line[pos+1:]
                self.print(f"{self.style.YELLOW('│')} {line}")
            except Exception as e:
                self.print(f"{self.style.RED('│')} UNABLE TO PRINT {str(e)}")
    self.print(f"{self.style.YELLOW('└─')} {self.style.GREY('End of section')}\n")
print_state(state, details=None)

Print current state with optional details

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
def print_state(self, state: str, details: dict[str, Any] | None = None):
    """Print current state with optional details"""
    state_color = {
        'ACTION': self.style.GREEN2,
        'PROCESSING': self.style.YELLOW2,
        'BRAKE': self.style.RED2,
        'DONE': self.style.BLUE2
    }.get(state, self.style.WHITE2)
    res_str = f"\nCurrent State: {state}"
    self.print(f"\n{self.style.Bold('Current State:')} {state_color(state)}")

    if details:
        for key, value in details.items():
            self.print(f"  {self.style.GREY('├─')} {self.style.CYAN(key)}: {value}")
            res_str += f"  ├─ {key}: {value}\n"
    return res_str
process_with_spinner(message, coroutine) async

Execute a coroutine with a spinner indicator

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
async def process_with_spinner(self, message: str, coroutine):
    """Execute a coroutine with a spinner indicator"""
    with Spinner(message, symbols=self.spinner_style):
        result = await coroutine
        return result
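
A hedged sketch (run inside an async context; fmt as constructed in the earlier VerboseFormatter sketch):

import asyncio

async def slow_lookup():
    await asyncio.sleep(2)
    return "done"

result = await fmt.process_with_spinner("Looking up ...", slow_lookup())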
VirtualEnvContext

Context manager for temporary virtual environment activation

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
class VirtualEnvContext:
    """Context manager for temporary virtual environment activation"""

    def __init__(self, venv_path: Path):
        self.venv_path = venv_path
        self._original_path = None
        self._original_sys_path = None
        self._original_prefix = None
        self._original_virtual_env = None

    def _get_venv_paths(self):
        """Get virtual environment paths based on platform"""
        if sys.platform == 'win32':
            site_packages = self.venv_path / 'Lib' / 'site-packages'
            scripts_dir = self.venv_path / 'Scripts'
            python_path = scripts_dir / 'python.exe'
        else:
            python_version = f'python{sys.version_info.major}.{sys.version_info.minor}'
            site_packages = self.venv_path / 'lib' / python_version / 'site-packages'
            scripts_dir = self.venv_path / 'bin'
            python_path = scripts_dir / 'python'

        return site_packages, scripts_dir, python_path

    def __enter__(self):
        # Save original state
        self._original_path = os.environ.get('PATH', '')
        self._original_sys_path = sys.path.copy()
        self._original_prefix = sys.prefix
        self._original_virtual_env = os.environ.get('VIRTUAL_ENV')

        # Get venv paths
        site_packages, scripts_dir, python_path = self._get_venv_paths()

        # Modify environment for venv
        if scripts_dir.exists():
            new_path = os.pathsep.join([str(scripts_dir), self._original_path])
            os.environ['PATH'] = new_path

        if site_packages.exists():
            sys.path.insert(0, str(site_packages))

        os.environ['VIRTUAL_ENV'] = str(self.venv_path)

        # Return the python executable path for potential subprocess calls
        return str(python_path)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore original state
        os.environ['PATH'] = self._original_path
        sys.path = self._original_sys_path

        if self._original_virtual_env is None:
            os.environ.pop('VIRTUAL_ENV', None)
        else:
            os.environ['VIRTUAL_ENV'] = self._original_virtual_env
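A minimal sketch, assuming a virtual environment already exists at `.venv`; the context manager yields the venv's Python executable and restores the original environment on exit:

import subprocess
from pathlib import Path

with VirtualEnvContext(Path('.venv').resolve()) as python_exec:
    # PATH, sys.path and VIRTUAL_ENV now point at the venv
    subprocess.run([python_exec, '-c', 'import sys; print(sys.executable)'])
# the original PATH, sys.path and VIRTUAL_ENV are restored here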
VirtualFileSystem
Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 490-608
class VirtualFileSystem:
    def __init__(self, base_dir: Path):
        self.base_dir = base_dir
        self.current_dir = base_dir
        self.virtual_files: dict[str, str] = {}
        self.base_dir.mkdir(parents=True, exist_ok=True)

    def write_file(self, filepath: str | Path, content: str) -> Path:
        """Write content to a virtual file and persist to disk using UTF-8"""
        try:
            abs_path = self._resolve_path(filepath)
        except ValueError:
            print("invalid :", filepath)
            filepath = "src/temp_js/_temp_fix.py"
            abs_path = self._resolve_path(filepath)
        abs_path.parent.mkdir(parents=True, exist_ok=True)

        # Store in virtual filesystem
        rel_path = str(abs_path.relative_to(self.base_dir))
        self.virtual_files[rel_path] = content

        # Write to actual filesystem with UTF-8 encoding
        with open(abs_path, 'w', encoding='utf-8', errors='replace') as f:
            f.write(content)

        return abs_path

    def read_file(self, filepath: str | Path) -> str:
        """Read content from a virtual file using UTF-8"""
        abs_path = self._resolve_path(filepath)
        if not abs_path.exists():
            raise FileNotFoundError(f"File not found: {filepath}")

        rel_path = str(abs_path.relative_to(self.base_dir))

        # Check virtual filesystem first
        if rel_path in self.virtual_files:
            return self.virtual_files[rel_path]

        # Fall back to reading from disk with UTF-8 encoding
        with open(abs_path, encoding='utf-8', errors='replace') as f:
            content = f.read()
            self.virtual_files[rel_path] = content
            return content

    def delete_file(self, filepath: str | Path):
        """Delete a virtual file"""
        abs_path = self._resolve_path(filepath)
        rel_path = str(abs_path.relative_to(self.base_dir))

        if rel_path in self.virtual_files:
            del self.virtual_files[rel_path]

        if abs_path.exists():
            abs_path.unlink()

    def create_directory(self, dirpath: str | Path):
        """Create a new directory"""
        abs_path = self._resolve_path(dirpath)
        abs_path.mkdir(parents=True, exist_ok=True)
        return abs_path


    def list_directory(self, dirpath: str | Path = '.') -> list:
        """List contents of a directory"""
        abs_path = self._resolve_path(dirpath)
        if not abs_path.exists():
            raise FileNotFoundError(f"Directory not found: {dirpath}")
        return [p.name for p in abs_path.iterdir()]

    def change_directory(self, dirpath: str | Path):
        """Change current working directory"""
        new_dir = self._resolve_path(dirpath)
        if not new_dir.exists() or not new_dir.is_dir():
            raise NotADirectoryError(f"Directory not found: {dirpath}")
        self.current_dir = new_dir

    def _resolve_path(self, filepath: str | Path) -> Path:
        """Convert a path to an absolute path and ensure it stays inside the base directory"""
        filepath = Path(filepath)
        if filepath.is_absolute():
            resolved = filepath
        else:
            resolved = (self.current_dir / filepath).resolve()
        # Enforce the base-directory invariant for relative paths as well
        if not str(resolved).startswith(str(self.base_dir)):
            raise ValueError("Path must be within base directory")
        return resolved

    def save_state(self, state_file: Path):
        """Save virtual filesystem state to disk"""
        state = {
            'current_dir': str(self.current_dir.relative_to(self.base_dir)),
            'virtual_files': self.virtual_files
        }
        with open(state_file, 'w') as f:
            json.dump(state, f)

    def load_state(self, state_file: Path):
        """Load virtual filesystem state from disk"""
        if not state_file.exists():
            return

        with open(state_file) as f:
            state = json.load(f)
            self.current_dir = self.base_dir / state['current_dir']
            self.virtual_files = state['virtual_files']

    def print_file_structure(self, start_path: str | Path = '.', indent: str = ''):
        """Print the file structure starting from the given path"""
        start_path = self._resolve_path(start_path)
        if not start_path.exists():
            s = f"Path not found: {start_path}"
            return s

        s = f"{indent}{start_path.name}/"
        for item in sorted(start_path.iterdir()):
            if item.is_dir():
                s += '\n' + self.print_file_structure(item, indent + '  ')
            else:
                s += f"\n{indent}  {item.name}"
        return s
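A minimal usage sketch; note that the base directory should be passed as an absolute path so that the relative_to() calls inside the class succeed:

from pathlib import Path

vfs = VirtualFileSystem(Path('./sandbox').resolve())
vfs.write_file('src/app.py', 'print("hello")')   # cached and persisted to disk
print(vfs.read_file('src/app.py'))               # served from the in-memory cache
print(vfs.list_directory('src'))                 # ['app.py']
vfs.save_state(Path('sandbox_state.json'))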
change_directory(dirpath)

Change current working directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 560-565
def change_directory(self, dirpath: str | Path):
    """Change current working directory"""
    new_dir = self._resolve_path(dirpath)
    if not new_dir.exists() or not new_dir.is_dir():
        raise NotADirectoryError(f"Directory not found: {dirpath}")
    self.current_dir = new_dir
create_directory(dirpath)

Create a new directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 546-550
def create_directory(self, dirpath: str | Path):
    """Create a new directory"""
    abs_path = self._resolve_path(dirpath)
    abs_path.mkdir(parents=True, exist_ok=True)
    return abs_path
delete_file(filepath)

Delete a virtual file

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 535-544
def delete_file(self, filepath: str | Path):
    """Delete a virtual file"""
    abs_path = self._resolve_path(filepath)
    rel_path = str(abs_path.relative_to(self.base_dir))

    if rel_path in self.virtual_files:
        del self.virtual_files[rel_path]

    if abs_path.exists():
        abs_path.unlink()
list_directory(dirpath='.')

List contents of a directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 553-558
def list_directory(self, dirpath: str | Path = '.') -> list:
    """List contents of a directory"""
    abs_path = self._resolve_path(dirpath)
    if not abs_path.exists():
        raise FileNotFoundError(f"Directory not found: {dirpath}")
    return [p.name for p in abs_path.iterdir()]
load_state(state_file)

Load virtual filesystem state from disk

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 585-593
def load_state(self, state_file: Path):
    """Load virtual filesystem state from disk"""
    if not state_file.exists():
        return

    with open(state_file) as f:
        state = json.load(f)
        self.current_dir = self.base_dir / state['current_dir']
        self.virtual_files = state['virtual_files']
print_file_structure(start_path='.', indent='')

Print the file structure starting from the given path

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 595-608
def print_file_structure(self, start_path: str | Path = '.', indent: str = ''):
    """Print the file structure starting from the given path"""
    start_path = self._resolve_path(start_path)
    if not start_path.exists():
        s = f"Path not found: {start_path}"
        return s

    s = f"{indent}{start_path.name}/"
    for item in sorted(start_path.iterdir()):
        if item.is_dir():
            s += '\n' + self.print_file_structure(item, indent + '  ')
        else:
            s += f"\n{indent}  {item.name}"
    return s
read_file(filepath)

Read content from a virtual file using UTF-8

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 517-533
def read_file(self, filepath: str | Path) -> str:
    """Read content from a virtual file using UTF-8"""
    abs_path = self._resolve_path(filepath)
    if not abs_path.exists():
        raise FileNotFoundError(f"File not found: {filepath}")

    rel_path = str(abs_path.relative_to(self.base_dir))

    # Check virtual filesystem first
    if rel_path in self.virtual_files:
        return self.virtual_files[rel_path]

    # Fall back to reading from disk with UTF-8 encoding
    with open(abs_path, encoding='utf-8', errors='replace') as f:
        content = f.read()
        self.virtual_files[rel_path] = content
        return content
save_state(state_file)

Save virtual filesystem state to disk

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 576-583
def save_state(self, state_file: Path):
    """Save virtual filesystem state to disk"""
    state = {
        'current_dir': str(self.current_dir.relative_to(self.base_dir)),
        'virtual_files': self.virtual_files
    }
    with open(state_file, 'w') as f:
        json.dump(state, f)
write_file(filepath, content)

Write content to a virtual file and persist to disk using UTF-8

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 497-515
def write_file(self, filepath: str | Path, content: str) -> Path:
    """Write content to a virtual file and persist to disk using UTF-8"""
    try:
        abs_path = self._resolve_path(filepath)
    except ValueError:
        print("invalid :", filepath)
        filepath = "src/temp_js/_temp_fix.py"
        abs_path = self._resolve_path(filepath)
    abs_path.parent.mkdir(parents=True, exist_ok=True)

    # Store in virtual filesystem
    rel_path = str(abs_path.relative_to(self.base_dir))
    self.virtual_files[rel_path] = content

    # Write to actual filesystem with UTF-8 encoding
    with open(abs_path, 'w', encoding='utf-8', errors='replace') as f:
        f.write(content)

    return abs_path
WebContentParser

Parser for extracting content from web pages in various formats.

Provides methods to extract content as markdown, plain text, structured data, and take screenshots with scrolling support.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1479-1545
class WebContentParser:
    """
    Parser for extracting content from web pages in various formats.

    Provides methods to extract content as markdown, plain text,
    structured data, and take screenshots with scrolling support.
    """

    def __init__(self, browser_wrapper):
        """Initialize the parser with a browser wrapper instance"""
        self.browser = browser_wrapper

    async def to_markdown(self, page=None, selector="main, article, #content, .content, body",
                          include_images=True):
        """
        Convert webpage content to markdown format

        Args:
            page: The page to parse (uses current page if None)
            selector: CSS selector for the content to extract
            include_images: Whether to include image references

        Returns:
            str: Markdown content
        """
        return await self.browser.extract_markdown(page, selector, include_images)

    async def to_text(self, page=None, selector="body"):
        """Extract plain text from webpage"""
        return await self.browser.extract_text(page, selector)

    async def to_structured(self, page=None, config=None):
        """Extract structured data from webpage using selector configuration"""
        return await self.browser.extract_structured_content(page, config)

    async def to_screenshot(self, page=None, full_page=True, path=None,
                            initial_delay=1000, scroll_delay=500, format='png'):
        """
        Take a screenshot with scrolling functionality

        Args:
            page: The page to screenshot
            full_page: Whether to capture the full page
            path: Path to save the screenshot
            initial_delay: Delay in ms before starting screenshot
            scroll_delay: Delay in ms between scrolls
            format: Image format ('png' or 'jpeg')
        """
        return await self.browser.take_scrolling_screenshot(
            page, full_page, path, initial_delay, scroll_delay, format
        )

    async def extract_all(self, page=None, selector="body", include_images=True,
                          screenshot=True, screenshot_path=None):
        """Extract all content types (markdown, text, structured data, screenshot)"""
        result = {
            'markdown': await self.to_markdown(page, selector, include_images),
            'text': await self.to_text(page, selector),
            'structured': await self.to_structured(page)
        }

        if screenshot:
            result['screenshot'] = await self.to_screenshot(
                page, path=screenshot_path, initial_delay=1000
            )

        return result
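A minimal sketch, assuming `browser` is a wrapper object exposing the extract_markdown / extract_text / extract_structured_content / take_scrolling_screenshot methods this class delegates to:

async def demo(browser):
    parser = WebContentParser(browser)
    markdown = await parser.to_markdown()
    everything = await parser.extract_all(screenshot_path='page.png')
    return markdown, everything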
__init__(browser_wrapper)

Initialize the parser with a browser wrapper instance

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1487-1489
def __init__(self, browser_wrapper):
    """Initialize the parser with a browser wrapper instance"""
    self.browser = browser_wrapper
extract_all(page=None, selector='body', include_images=True, screenshot=True, screenshot_path=None) async

Extract all content types (markdown, text, structured data, screenshot)

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1531-1545
async def extract_all(self, page=None, selector="body", include_images=True,
                      screenshot=True, screenshot_path=None):
    """Extract all content types (markdown, text, structured data, screenshot)"""
    result = {
        'markdown': await self.to_markdown(page, selector, include_images),
        'text': await self.to_text(page, selector),
        'structured': await self.to_structured(page)
    }

    if screenshot:
        result['screenshot'] = await self.to_screenshot(
            page, path=screenshot_path, initial_delay=1000
        )

    return result
to_markdown(page=None, selector='main, article, #content, .content, body', include_images=True) async

Convert webpage content to markdown format

Parameters:

Name Type Description Default
page

The page to parse (uses current page if None)

None
selector

CSS selector for the content to extract

'main, article, #content, .content, body'
include_images

Whether to include image references

True

Returns:

Name Type Description
str

Markdown content

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1491-1504
async def to_markdown(self, page=None, selector="main, article, #content, .content, body",
                      include_images=True):
    """
    Convert webpage content to markdown format

    Args:
        page: The page to parse (uses current page if None)
        selector: CSS selector for the content to extract
        include_images: Whether to include image references

    Returns:
        str: Markdown content
    """
    return await self.browser.extract_markdown(page, selector, include_images)
to_screenshot(page=None, full_page=True, path=None, initial_delay=1000, scroll_delay=500, format='png') async

Take a screenshot with scrolling functionality

Parameters:

Name Type Description Default
page

The page to screenshot

None
full_page

Whether to capture the full page

True
path

Path to save the screenshot

None
initial_delay

Delay in ms before starting screenshot

1000
scroll_delay

Delay in ms between scrolls

500
format

Image format ('png' or 'jpeg')

'png'
Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1514-1529
async def to_screenshot(self, page=None, full_page=True, path=None,
                        initial_delay=1000, scroll_delay=500, format='png'):
    """
    Take a screenshot with scrolling functionality

    Args:
        page: The page to screenshot
        full_page: Whether to capture the full page
        path: Path to save the screenshot
        initial_delay: Delay in ms before starting screenshot
        scroll_delay: Delay in ms between scrolls
        format: Image format ('png' or 'jpeg')
    """
    return await self.browser.take_scrolling_screenshot(
        page, full_page, path, initial_delay, scroll_delay, format
    )
to_structured(page=None, config=None) async

Extract structured data from webpage using selector configuration

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1510-1512
async def to_structured(self, page=None, config=None):
    """Extract structured data from webpage using selector configuration"""
    return await self.browser.extract_structured_content(page, config)
to_text(page=None, selector='body') async

Extract plain text from webpage

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 1506-1508
async def to_text(self, page=None, selector="body"):
    """Extract plain text from webpage"""
    return await self.browser.extract_text(page, selector)
auto_install(package_name, install_method='pip', upgrade=False, quiet=False, version=None, extra_args=None)

Import a package, installing it automatically if it is missing; supports pinned versions and extra pip arguments

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 720-771
def auto_install(package_name, install_method='pip', upgrade=False, quiet=False, version=None, extra_args=None):
    '''
    Import a package, installing it automatically if it is missing; supports pinned versions and extra pip arguments
    '''
    try:
        # Attempt to import the package
        return importlib.import_module(package_name)
    except ImportError:
        # Package not found, prepare for installation
        print(f"Package '{package_name}' not found. Attempting to install...")
        try:
            # Determine Python executable based on virtual environment
            venv_path = os.environ.get('VIRTUAL_ENV')
            if venv_path:
                venv_path = Path(venv_path)
                if sys.platform == 'win32':
                    python_exec = str(venv_path / 'Scripts' / 'python.exe')
                else:
                    python_exec = str(venv_path / 'bin' / 'python')
                # Check if the Python executable exists
                if not Path(python_exec).exists():
                    python_exec = sys.executable
            else:
                python_exec = sys.executable

            # Construct installation command with more flexibility
            install_cmd = [python_exec, "-m", install_method, "install"]
            if upgrade:
                install_cmd.append("--upgrade")
            # Support specific version installation
            if version:
                install_cmd.append(f"{package_name}=={version}")
            else:
                install_cmd.append(package_name)
            # Add extra arguments if provided
            if extra_args:
                install_cmd.extend(extra_args)
            # Run installation with appropriate verbosity
            installation_output = subprocess.run(
                install_cmd,
                capture_output=quiet,
                text=True
            )
            # Check installation status
            if installation_output.returncode == 0:
                print(f"Successfully installed {package_name}")
                return importlib.import_module(package_name)
            else:
                raise Exception(f"Installation failed: {installation_output.stderr}")
        except Exception as install_error:
            print(f"Error installing {package_name}: {install_error}")
            return None
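A minimal sketch; the package name and pinned version are illustrative only:

# Import requests, installing it first if missing; returns None on failure
requests = auto_install('requests', version='2.31.0', quiet=True)
if requests is not None:
    print(requests.__version__)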
sync_globals_to_vars(pipeline, namespace=None, prefix=None, include_types=None, exclude_patterns=None, exclude_private=True, deep_copy=False, only_serializable=False)
Sync global variables or a specific namespace to pipeline variables.

Args:
    pipeline: Pipeline instance to sync variables to
    namespace: Optional dictionary of variables (defaults to globals())
    prefix: Optional prefix for variable names (e.g., 'global_')
    include_types: Only include variables of these types
    exclude_patterns: List of regex patterns to exclude
    exclude_private: Exclude variables starting with underscore
    deep_copy: Create deep copies of variables instead of references
    only_serializable: Only include variables that can be serialized

Returns:
    SyncReport with details about added, skipped and error variables

Usage examples:

# Basic usage - sync all globals
report = sync_globals_to_vars(pipeline)

# Sync only numeric types with prefix
report = sync_globals_to_vars(
    pipeline,
    include_types=[int, float],
    prefix="global_"
)

# Sync from specific namespace
import numpy as np
namespace = {"arr": np.array([1, 2, 3])}
report = sync_globals_to_vars(pipeline, namespace=namespace)

# Sync with deep copy and serialization check
report = sync_globals_to_vars(
    pipeline,
    deep_copy=True,
    only_serializable=True
)

Source code in toolboxv2/mods/isaa/CodingAgent/live.py, lines 3329-3448
def sync_globals_to_vars(
    pipeline: Any,
    namespace: dict[str, Any] | None = None,
    prefix: str | None = None,
    include_types: type | list[type] | None = None,
    exclude_patterns: list[str] | None = None,
    exclude_private: bool = True,
    deep_copy: bool = False,
    only_serializable: bool = False
) -> SyncReport:
    """
    Sync global variables or a specific namespace to pipeline variables.

    Args:
        pipeline: Pipeline instance to sync variables to
        namespace: Optional dictionary of variables (defaults to globals())
        prefix: Optional prefix for variable names (e.g., 'global_')
        include_types: Only include variables of these types
        exclude_patterns: List of regex patterns to exclude
        exclude_private: Exclude variables starting with underscore
        deep_copy: Create deep copies of variables instead of references
        only_serializable: Only include variables that can be serialized

    Returns:
        SyncReport with details about added, skipped and error variables

    Usage example:
# Basic usage - sync all globals
report = sync_globals_to_vars(pipeline)

# Sync only numeric types with prefix
report = sync_globals_to_vars(
    pipeline,
    include_types=[int, float],
    prefix="global_"
)

# Sync from specific namespace
import numpy as np
namespace = {"arr": np.array([1,2,3])}
report = sync_globals_to_vars(pipeline, namespace=namespace)

# Sync with deep copy and serialization check
report = sync_globals_to_vars(
    pipeline,
    deep_copy=True,
    only_serializable=True
)
    """
    # Initialize report
    report = SyncReport(
        added={},
        skipped={},
        errors={}
    )

    # Get namespace
    if namespace is None:
        # Get caller's globals
        namespace = currentframe().f_back.f_globals

    # Compile exclude patterns
    if exclude_patterns:
        patterns = [re.compile(pattern) for pattern in exclude_patterns]
    else:
        patterns = []

    # Normalize include_types
    if include_types and not isinstance(include_types, list | tuple | set):
        include_types = [include_types]
    def get_type_info(var: Any) -> str:
        """Helper to get detailed type information"""
        if isinstance(var, type):
            return f"class '{var.__name__}'"
        elif isinstance(var, BaseModel):
            return f"Pydantic model '{var.__class__.__name__}'"
        elif hasattr(var, '__class__'):
            type_name = var.__class__.__name__
            module_name = var.__class__.__module__
            if module_name != 'builtins':
                return f"{module_name}.{type_name}"
            return type_name
        return type(var).__name__
    # Process each variable
    for name, value in namespace.items():
        try:
            # Skip if matches exclude criteria
            if exclude_private and name.startswith('_'):
                report.skipped[name] = "private variable"
                continue

            if any(pattern.match(name) for pattern in patterns):
                report.skipped[name] = "matched exclude pattern"
                continue

            if include_types and not isinstance(value, tuple(include_types)):
                report.skipped[name] = f"type {type(value).__name__} not in include_types"
                continue

            # Test serialization if required
            if only_serializable:
                try:
                    import pickle
                    pickle.dumps(value)
                except Exception as e:
                    report.skipped[name] = f"not serializable: {str(e)}"
                    continue

            # Prepare variable
            var_value = deepcopy(value) if deep_copy else value
            var_name = f"{prefix}{name}" if prefix else name

            # Add to pipeline variables
            pipeline.variables[var_name] = var_value
            report.added[var_name] = get_type_info(value)

        except Exception as e:
            report.errors[name] = str(e)

    return report
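A minimal sketch of inspecting the returned SyncReport; `pipeline` is assumed to be any object exposing a `variables` dict, as the function requires:

report = sync_globals_to_vars(pipeline, exclude_patterns=[r'^tmp_'])
for name, type_info in report.added.items():
    print(f"added {name}: {type_info}")
for name, reason in report.skipped.items():
    print(f"skipped {name}: {reason}")
if report.errors:
    print("errors:", report.errors)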
parser
CodeProcessor
Source code in toolboxv2/mods/isaa/CodingAgent/parser.py, lines 8-99
class CodeProcessor:
    def __init__(self, code_base='./'):
        self.language_patterns = [
            r'```([\w-]+)\n((?:#|//|<!--)\s*(\S+))?\n([\s\S]*?)```',  # Standard pattern
            r'```([\w-]+)\s*\n([\s\S]*?)```'  # Pattern without filename comment
        ]
        self.code_base = code_base

    def extract_code(self, text):
        code_blocks = {}
        seen = set()
        for pattern in self.language_patterns:
            matches = re.finditer(pattern, text, re.DOTALL)
            for match in matches:

                print(match.groups())

                if len(match.groups()) < 3:
                    continue

                code = match.groups()[3]
                filename = match.groups()[2]

                if code == code_blocks.get(filename):
                    continue

                if code_blocks.get(filename) is not None and code != code_blocks.get(filename):
                    comment_prefix = match.groups()[1].replace(filename, '')
                    filename = code.split('\n')[0].replace(comment_prefix, '')
                    code = code.replace(comment_prefix + filename + '\n', '')

                    print("new code", code)

                seen.add(filename)

                code_blocks[filename] = code
        return code_blocks

    def write_code(self, code_dict):
        for filename, code in code_dict.items():
            filepath = os.path.join(self.code_base, filename)
            os.makedirs(os.path.dirname(filepath), exist_ok=True)
            print("Writing", filepath)
            with open(filepath, "w") as f:
                f.write(code)

    def extract_and_write_code(self, text):
        code_blocks = self.extract_code(text)
        files = []
        for filename, new_code in code_blocks.items():
            filepath = os.path.join(self.code_base, filename)
            files.append(filepath)
            if os.path.exists(filepath):
                self.update_existing_file(filepath, new_code)
            else:
                self.write_code({filename: new_code})
        return files

    def update_existing_file(self, filepath, new_code):
        """
            Update an existing Python file with new code while preserving existing implementations.

            Args:
                filepath (str): Path to the file to be updated
                new_code (str): New code to merge with existing code
            """
        try:
            # Read existing code
            with open(filepath) as f:
                existing_code = f.read()

            # Parse existing and new code
            existing_ast_tree = ast.parse(existing_code)
            new_ast_tree = ast.parse(new_code)

            # Create updater and transform the AST
            updater = CodeUpdater(existing_ast_tree)
            updated_ast = updater.visit(new_ast_tree)

            # Convert AST back to source code
            updated_code = astor.to_source(updated_ast)

            # Write updated code back to file
            with open(filepath, 'w') as f:
                f.write(updated_code)

            print(f"Successfully updated {filepath}")
            return True

        except Exception as e:
            print(f"Error updating {filepath}: {e}")
            return False
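A minimal sketch: the parser expects fenced code blocks whose first line is a comment naming the target file (the response string below is illustrative):

processor = CodeProcessor(code_base='./generated')
llm_response = "```python\n# src/hello.py\nprint('hello from generated code')\n```"
written = processor.extract_and_write_code(llm_response)
print(written)  # e.g. ['./generated/src/hello.py']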
update_existing_file(filepath, new_code)

Update an existing Python file with new code while preserving existing implementations.

Parameters:

Name Type Description Default
filepath str

Path to the file to be updated

required
new_code str

New code to merge with existing code

required
Source code in toolboxv2/mods/isaa/CodingAgent/parser.py, lines 66-99
def update_existing_file(self, filepath, new_code):
    """
        Update an existing Python file with new code while preserving existing implementations.

        Args:
            filepath (str): Path to the file to be updated
            new_code (str): New code to merge with existing code
        """
    try:
        # Read existing code
        with open(filepath) as f:
            existing_code = f.read()

        # Parse existing and new code
        existing_ast_tree = ast.parse(existing_code)
        new_ast_tree = ast.parse(new_code)

        # Create updater and transform the AST
        updater = CodeUpdater(existing_ast_tree)
        updated_ast = updater.visit(new_ast_tree)

        # Convert AST back to source code
        updated_code = astor.to_source(updated_ast)

        # Write updated code back to file
        with open(filepath, 'w') as f:
            f.write(updated_code)

        print(f"Successfully updated {filepath}")
        return True

    except Exception as e:
        print(f"Error updating {filepath}: {e}")
        return False

SearchAgentCluster

search_tool
WebContentParser

Utility class for parsing web content using BrowserAnt

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 580-738
class WebContentParser:
    """Utility class for parsing web content using BrowserAnt"""

    def __init__(self, browser_wrapper: BrowserWrapper):
        """Initialize with a browser wrapper"""
        self.browser_wrapper = browser_wrapper

    async def extract_article(self, url: str) -> dict[str, Any]:
        """Extract article content with title, text, and metadata"""
        await self.browser_wrapper.initialize()
        page = await self.browser_wrapper.navigate(url)

        # Execute readability.js to extract article content
        readability_js = """
        function extractArticle() {
            // Simple article extraction logic
            const article = {
                title: document.title,
                byline: '',
                content: '',
                textContent: '',
                excerpt: '',
                siteName: '',
                publishedTime: ''
            };

            // Try to find article elements
            const articleElement = document.querySelector('article') ||
                                   document.querySelector('main') ||
                                   document.querySelector('.post-content') ||
                                   document.querySelector('.entry-content');

            if (articleElement) {
                article.content = articleElement.innerHTML;
                article.textContent = articleElement.textContent;
            } else {
                // Fallback to body content
                article.content = document.body.innerHTML;
                article.textContent = document.body.textContent;
            }

            // Try to extract metadata
            const metaTags = document.querySelectorAll('meta');
            metaTags.forEach(tag => {
                const property = tag.getAttribute('property') || tag.getAttribute('name');
                const content = tag.getAttribute('content');

                if (property && content) {
                    if (property === 'og:site_name') article.siteName = content;
                    if (property === 'og:title' && !article.title) article.title = content;
                    if (property === 'og:description' && !article.excerpt) article.excerpt = content;
                    if (property === 'article:published_time') article.publishedTime = content;
                    if (property === 'author' || property === 'article:author') article.byline = content;
                }
            });

            // Extract first paragraph as excerpt if not found
            if (!article.excerpt) {
                const paragraphs = document.querySelectorAll('p');
                if (paragraphs.length > 0) {
                    for (let i = 0; i < paragraphs.length; i++) {
                        const text = paragraphs[i].textContent.trim();
                        if (text.length > 50) {
                            article.excerpt = text;
                            break;
                        }
                    }
                }
            }

            return article;
        }

        return extractArticle();
        """

        # Extract article content
        article = await page.evaluate(readability_js)

        # Add markdown version
        article['markdown'] = await self.browser_wrapper.extract_markdown(page)

        # Take a screenshot
        screenshot_data = await self.browser_wrapper.take_scrolling_screenshot(page)
        article['screenshot'] = base64.b64encode(screenshot_data).decode('utf-8')

        return article

    async def extract_table_data(self, url: str, table_selector: str = 'table') -> list[dict[str, Any]]:
        """Extract tabular data from a webpage"""
        await self.browser_wrapper.initialize()
        page = await self.browser_wrapper.navigate(url)

        # Script to extract table data
        extract_table_js = """
        (tableSelector) => {
            const tables = document.querySelectorAll(tableSelector);
            if (tables.length === 0) return [];

            // Use the first table found
            const table = tables[0];
            const headers = Array.from(table.querySelectorAll('th')).map(th => th.textContent.trim());

            // If no headers found, try using the first row
            const headerRow = headers.length > 0 ? headers :
                            Array.from(table.querySelectorAll('tr:first-child td')).map(td => td.textContent.trim());

            const rows = Array.from(table.querySelectorAll('tr'));
            const result = [];

            // Start from 1 if we have headers, otherwise from 0
            const startIdx = headers.length > 0 ? 1 : 0;

            for (let i = startIdx; i < rows.length; i++) {
                const row = rows[i];
                const cells = Array.from(row.querySelectorAll('td')).map(td => td.textContent.trim());

                if (cells.length > 0) {
                    const rowData = {};
                    for (let j = 0; j < Math.min(headerRow.length, cells.length); j++) {
                        // Create a valid object key from header
                        const key = headerRow[j].replace(/[^a-zA-Z0-9]/g, '_').toLowerCase();
                        rowData[key] = cells[j];
                    }
                    result.push(rowData);
                }
            }

            return result;
        }
        """

        # Extract data
        table_data = await page.evaluate(extract_table_js, table_selector)
        return table_data

    async def extract_links(self, url: str, link_selector: str = 'a') -> list[dict[str, str]]:
        """Extract all links from a webpage"""
        await self.browser_wrapper.initialize()
        page = await self.browser_wrapper.navigate(url)

        # Script to extract links
        extract_links_js = """
        (linkSelector) => {
            const links = Array.from(document.querySelectorAll(linkSelector));
            return links.map(link => {
                return {
                    text: link.textContent.trim(),
                    href: link.href,
                    title: link.getAttribute('title') || '',
                    isExternal: link.hostname !== window.location.hostname
                };
            }).filter(link => link.href && link.href.startsWith('http'));
        }
        """

        # Extract links
        links = await page.evaluate(extract_links_js, link_selector)
        return links
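A minimal sketch, assuming an already constructed BrowserWrapper; each method initializes the browser and navigates before extracting:

async def demo(browser_wrapper):
    parser = WebContentParser(browser_wrapper)
    article = await parser.extract_article('https://example.com/post')
    rows = await parser.extract_table_data('https://example.com/stats')
    links = await parser.extract_links('https://example.com')
    return article['title'], rows, links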
__init__(browser_wrapper)

Initialize with a browser wrapper

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 583-585
def __init__(self, browser_wrapper: BrowserWrapper):
    """Initialize with a browser wrapper"""
    self.browser_wrapper = browser_wrapper
extract_article(url) async

Extract article content with title, text, and metadata

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 587-666
async def extract_article(self, url: str) -> dict[str, Any]:
    """Extract article content with title, text, and metadata"""
    await self.browser_wrapper.initialize()
    page = await self.browser_wrapper.navigate(url)

    # Execute readability.js to extract article content
    readability_js = """
    function extractArticle() {
        // Simple article extraction logic
        const article = {
            title: document.title,
            byline: '',
            content: '',
            textContent: '',
            excerpt: '',
            siteName: '',
            publishedTime: ''
        };

        // Try to find article elements
        const articleElement = document.querySelector('article') ||
                               document.querySelector('main') ||
                               document.querySelector('.post-content') ||
                               document.querySelector('.entry-content');

        if (articleElement) {
            article.content = articleElement.innerHTML;
            article.textContent = articleElement.textContent;
        } else {
            // Fallback to body content
            article.content = document.body.innerHTML;
            article.textContent = document.body.textContent;
        }

        // Try to extract metadata
        const metaTags = document.querySelectorAll('meta');
        metaTags.forEach(tag => {
            const property = tag.getAttribute('property') || tag.getAttribute('name');
            const content = tag.getAttribute('content');

            if (property && content) {
                if (property === 'og:site_name') article.siteName = content;
                if (property === 'og:title' && !article.title) article.title = content;
                if (property === 'og:description' && !article.excerpt) article.excerpt = content;
                if (property === 'article:published_time') article.publishedTime = content;
                if (property === 'author' || property === 'article:author') article.byline = content;
            }
        });

        // Extract first paragraph as excerpt if not found
        if (!article.excerpt) {
            const paragraphs = document.querySelectorAll('p');
            if (paragraphs.length > 0) {
                for (let i = 0; i < paragraphs.length; i++) {
                    const text = paragraphs[i].textContent.trim();
                    if (text.length > 50) {
                        article.excerpt = text;
                        break;
                    }
                }
            }
        }

        return article;
    }

    return extractArticle();
    """

    # Extract article content
    article = await page.evaluate(readability_js)

    # Add markdown version
    article['markdown'] = await self.browser_wrapper.extract_markdown(page)

    # Take a screenshot
    screenshot_data = await self.browser_wrapper.take_scrolling_screenshot(page)
    article['screenshot'] = base64.b64encode(screenshot_data).decode('utf-8')

    return article
extract_links(url, link_selector='a') async

Extract all links from a webpage

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 716-738
async def extract_links(self, url: str, link_selector: str = 'a') -> list[dict[str, str]]:
    """Extract all links from a webpage"""
    await self.browser_wrapper.initialize()
    page = await self.browser_wrapper.navigate(url)

    # Script to extract links
    extract_links_js = """
    (linkSelector) => {
        const links = Array.from(document.querySelectorAll(linkSelector));
        return links.map(link => {
            return {
                text: link.textContent.trim(),
                href: link.href,
                title: link.getAttribute('title') || '',
                isExternal: link.hostname !== window.location.hostname
            };
        }).filter(link => link.href && link.href.startsWith('http'));
    }
    """

    # Extract links
    links = await page.evaluate(extract_links_js, link_selector)
    return links
extract_table_data(url, table_selector='table') async

Extract tabular data from a webpage

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 668-714
async def extract_table_data(self, url: str, table_selector: str = 'table') -> list[dict[str, Any]]:
    """Extract tabular data from a webpage"""
    await self.browser_wrapper.initialize()
    page = await self.browser_wrapper.navigate(url)

    # Script to extract table data
    extract_table_js = """
    (tableSelector) => {
        const tables = document.querySelectorAll(tableSelector);
        if (tables.length === 0) return [];

        // Use the first table found
        const table = tables[0];
        const headers = Array.from(table.querySelectorAll('th')).map(th => th.textContent.trim());

        // If no headers found, try using the first row
        const headerRow = headers.length > 0 ? headers :
                        Array.from(table.querySelectorAll('tr:first-child td')).map(td => td.textContent.trim());

        const rows = Array.from(table.querySelectorAll('tr'));
        const result = [];

        // Start from 1 if we have headers, otherwise from 0
        const startIdx = headers.length > 0 ? 1 : 0;

        for (let i = startIdx; i < rows.length; i++) {
            const row = rows[i];
            const cells = Array.from(row.querySelectorAll('td')).map(td => td.textContent.trim());

            if (cells.length > 0) {
                const rowData = {};
                for (let j = 0; j < Math.min(headerRow.length, cells.length); j++) {
                    // Create a valid object key from header
                    const key = headerRow[j].replace(/[^a-zA-Z0-9]/g, '_').toLowerCase();
                    rowData[key] = cells[j];
                }
                result.push(rowData);
            }
        }

        return result;
    }
    """

    # Extract data
    table_data = await page.evaluate(extract_table_js, table_selector)
    return table_data
WebScraper
A high-performance web scraper using BrowserAnt with multi-tab parallel processing.
Handles both structured and unstructured data collection efficiently.

Usage example from the class docstring:

import asyncio
from pydantic import BaseModel, Field
from typing import List, Optional

# Define a structured data model
class ProductInfo(BaseModel):
    title: str
    price: str
    description: Optional[str] = None
    rating: Optional[str] = None
    availability: Optional[str] = None

async def main():
    # Initialize the scraper
    scraper = WebScraper()

    # Example 1: Simple scraping of a single URL
    result = await scraper.scrape_url("https://example.com")
    print(f"Title: {result['title']}")
    print(f"Content: {result['markdown'][:200]}...")

    # Example 2: Parallel scraping of multiple URLs
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3"
    ]
    results = await scraper.scrape_urls(urls)

    # Example 3: Structured data extraction
    products = await scraper.scrape_structured_data(
        urls=["https://example.com/product1", "https://example.com/product2"],
        model=ProductInfo,
        extraction_task="Extract product information including title, price, and availability status"
    )

    for product in products:
        if product:
            print(f"Product: {product.title}, Price: {product.price}")

    # Clean up
    await scraper.close()
Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py, lines 41-577
class WebScraper:
    """
    A high-performance web scraper using BrowserAnt with multi-tab parallel processing.
    Handles both structured and unstructured data collection efficiently.
import asyncio
from pydantic import BaseModel, Field
from typing import List, Optional

# Define a structured data model
class ProductInfo(BaseModel):
    title: str
    price: str
    description: Optional[str] = None
    rating: Optional[str] = None
    availability: Optional[str] = None

async def main():
    # Initialize the scraper
    scraper = WebScraper()

    # Example 1: Simple scraping of a single URL
    result = await scraper.scrape_url("https://example.com")
    print(f"Title: {result['title']}")
    print(f"Content: {result['markdown'][:200]}...")

    # Example 2: Parallel scraping of multiple URLs
    urls = [
        "https://example.com/page1",
        "https://example.com/page2",
        "https://example.com/page3"
    ]
    results = await scraper.scrape_urls(urls)

    # Example 3: Structured data extraction
    products = await scraper.scrape_structured_data(
        urls=["https://example.com/product1", "https://example.com/product2"],
        model=ProductInfo,
        extraction_task="Extract product information including title, price, and availability status"
    )

    for product in products:
        if product:
            print(f"Product: {product.title}, Price: {product.price}")

    # Clean up
    await scraper.close()
    """

    def __init__(
        self,
        config: WebScraperConfig = WebScraperConfig(),
        llm: str | BaseChatModel | None = None,
        chrome_path: str | None = None,
        remote_url: str | None = None,
        browser_config: dict[str, Any] | None = None
    ):
        """
        Initialize the web scraper with configuration.

        Args:
            config: Configuration for scraper behavior
            llm: Language model for intelligent data extraction
            chrome_path: Path to Chrome executable
            remote_url: URL for remote browser connection
            browser_config: Additional browser configuration
        """
        self.config = config
        self.browser_wrapper = BrowserWrapper(
            llm=llm,
            headless=config.headless,
            chrome_path=chrome_path,
            remote_url=remote_url,
            config=browser_config
        )
        self.active_tasks = set()
        self._semaphore = asyncio.Semaphore(config.max_concurrent_tabs)
        self._results = {}

        # Create screenshot directory if needed
        if config.save_screenshots and not os.path.exists(config.screenshot_dir):
            os.makedirs(config.screenshot_dir)

    async def initialize(self):
        """Initialize the browser if not already initialized"""
        await self.browser_wrapper.initialize()

    async def close(self):
        """Close the browser and clean up resources"""
        # Wait for all active tasks to complete
        if self.active_tasks:
            await asyncio.gather(*self.active_tasks)
        await self.browser_wrapper.close()


    # Add this method to your WebScraper class
    async def search_web(
        self,
        query: str,
        max_results: int = 5,
        include_content: bool = True,
        extract_images: bool = False,
        extract_tables: bool = False,
        extract_links: bool = False,
        save_to_file: str | None = None
    ) -> dict[str, Any]:
        """
        Perform a comprehensive web search and return high-quality data for the given query.

        Args:
            query: Search query string
            max_results: Maximum number of results to process (default: 5)
            include_content: Whether to include full content from result pages (default: True)
            extract_images: Whether to extract images from result pages (default: False)
            extract_tables: Whether to extract tables from result pages (default: False)
            extract_links: Whether to extract links from result pages (default: False)
            save_to_file: Path to save results as JSON (optional)

        Returns:
            Dictionary containing search results and extracted information
        """
        await self.initialize()
        try:
            start_time = datetime.now()

            # Try different search engines in order
            search_engines = [
                {
                    "url": f"https://www.google.com/search?q={urllib.parse.quote_plus(query)}",
                    "result_selector": ".g",
                    "title_selector": "h3",
                    "link_selector": "a",
                    "snippet_selector": ".VwiC3b",
                    "name": "google"
                },
                {
                    "url": f"https://www.bing.com/search?q={urllib.parse.quote_plus(query)}",
                    "result_selector": ".b_algo",
                    "title_selector": "h2",
                    "link_selector": "a",
                    "snippet_selector": ".b_caption p",
                    "name": "bing"
                },
                {
                    "url": f"https://duckduckgo.com/?q={urllib.parse.quote_plus(query)}",
                    "result_selector": ".result",
                    "title_selector": "h2",
                    "link_selector": "a.result__a",
                    "snippet_selector": ".result__snippet",
                    "name": "duckduckgo"
                }
            ]

            results = []

            for engine in search_engines:
                try:
                    # Navigate to search engine
                    page = await self.browser_wrapper.navigate(engine["url"])
                    await page.wait_for_load_state("networkidle")
                    await page.wait_for_timeout(2000)  # Wait for results to load

                    # Extract search results
                    search_results = await page.evaluate(
                        """
                        (selectors) => {
                            const results = [];
                            const elements = document.querySelectorAll(selectors.result_selector);

                            for (const element of elements) {
                                const titleElement = element.querySelector(selectors.title_selector);
                                const linkElement = element.querySelector(selectors.link_selector);
                                const snippetElement = element.querySelector(selectors.snippet_selector);

                                if (titleElement && linkElement) {
                                    const url = linkElement.href;
                                    // Skip non-http links and same-domain results
                                    if (url && url.startsWith('http') &&
                                        !url.includes('google.com/search') &&
                                        !url.includes('bing.com/search') &&
                                        !url.includes('duckduckgo.com')) {
                                        results.push({
                                            title: titleElement.textContent.trim(),
                                            url: url,
                                            snippet: snippetElement ? snippetElement.textContent.trim() : '',
                                            source: selectors.name
                                        });
                                    }
                                }
                            }
                            return results;
                        }
                        """,
                        engine
                    )

                    if search_results and len(search_results) > 0:
                        # We got results, add them and break
                        results = search_results
                        break

                except Exception as e:
                    print(f"Error searching with {engine['name']}: {str(e)}")
                    continue  # Try next engine

            # Filter and limit results
            unique_urls = set()
            filtered_results = []

            for result in results:
                if result['url'] not in unique_urls and len(filtered_results) < max_results:
                    unique_urls.add(result['url'])
                    filtered_results.append(result)

            results = filtered_results

            # Get detailed content if requested
            if include_content and results:
                # Extract content from each result page
                urls_to_scrape = [result['url'] for result in results]

                # Configure what to extract
                extract_config = {}
                if extract_tables:
                    extract_config['tables'] = 'table'
                if extract_images:
                    extract_config['images'] = 'img'
                if extract_links:
                    extract_config['links'] = 'a'

                # Scrape all pages in parallel using our efficient multi-tab approach
                scraped_data = await self.scrape_urls(
                    urls_to_scrape,
                    extract_config=extract_config if extract_config else None
                )

                # Add content to results
                for i, result in enumerate(results):
                    if i < len(scraped_data) and 'error' not in scraped_data[i]:
                        result['content'] = {
                            'title': scraped_data[i].get('title', result['title']),
                            'markdown': scraped_data[i].get('markdown', ''),
                            'text': scraped_data[i].get('text', ''),
                        }

                        # Add structured data if available
                        if extract_config and 'structured_data' in scraped_data[i]:
                            structured_data = scraped_data[i]['structured_data']
                            for key, value in structured_data.items():
                                if value:  # Only add non-empty data
                                    result['content'][key] = value

            # Prepare final response
            response = {
                'query': query,
                'timestamp': datetime.now().isoformat(),
                'num_results': len(results),
                'results': results,
                'execution_time': (datetime.now() - start_time).total_seconds()
            }

            # Save to file if requested
            if save_to_file:
                os.makedirs(os.path.dirname(os.path.abspath(save_to_file)), exist_ok=True)
                with open(save_to_file, 'w', encoding='utf-8') as f:
                    json.dump(response, f, ensure_ascii=False, indent=2)

            return response

        finally:
            # Make sure we clean up browser resources
            await self.close()

    async def _scrape_url(self, url: str, task_id: str, extract_config: dict[str, Any] | None = None):
        """
        Internal method to scrape a single URL

        Args:
            url: URL to scrape
            task_id: Unique identifier for this scraping task
            extract_config: Configuration for what/how to extract
        """
        try:
            async with self._semaphore:
                # Navigate to the URL
                page = await self.browser_wrapper.navigate(url)

                # Wait for network to become idle
                await page.wait_for_load_state("networkidle")

                # Perform initial delay
                if self.config.initial_delay > 0:
                    await page.wait_for_timeout(self.config.initial_delay)

                # Auto-scroll if configured
                if self.config.auto_scroll:
                    await self._auto_scroll(page)

                # Initialize result dictionary
                result = {
                    "url": url,
                    "title": await page.title(),
                    "timestamp": datetime.now().isoformat(),
                }

                # Take screenshot if needed
                if self.config.save_screenshots:
                    file_name = f"{urlparse(url).netloc}_{task_id}.png"
                    screenshot_path = os.path.join(self.config.screenshot_dir, file_name)
                    result["screenshot"] = screenshot_path
                    await self.browser_wrapper.take_scrolling_screenshot(
                        page=page,
                        path=screenshot_path,
                        initial_delay=0,  # We've already waited
                        scroll_delay=self.config.scroll_delay
                    )

                # Extract content based on configuration
                if extract_config:
                    result["structured_data"] = await self.browser_wrapper.extract_structured_content(
                        page=page,
                        config=extract_config
                    )

                # Extract markdown if configured
                if self.config.extract_markdown:
                    result["markdown"] = await self.browser_wrapper.extract_markdown(page=page)

                # Extract text if configured
                if self.config.extract_text:
                    result["text"] = await self.browser_wrapper.extract_text(page=page)

                # Extract HTML if configured
                if self.config.extract_html:
                    result["html"] = await page.content()

                self._results[task_id] = result
                return result

        except Exception as e:
            self._results[task_id] = {"error": str(e), "url": url}
            return {"error": str(e), "url": url}

    async def _auto_scroll(self, page):
        """Automatically scroll down the page to load lazy content"""
        try:
            # Get page dimensions
            dimensions = await page.evaluate("""
                () => {
                    return {
                        width: document.documentElement.scrollWidth,
                        height: document.documentElement.scrollHeight,
                        windowHeight: window.innerHeight
                    }
                }
            """)

            # Scroll down the page gradually
            current_position = 0
            while current_position < dimensions['height']:
                await page.evaluate(f"window.scrollTo(0, {current_position})")
                await page.wait_for_timeout(self.config.scroll_delay)
                current_position += dimensions['windowHeight'] // 2

            # Scroll back to top
            await page.evaluate("window.scrollTo(0, 0)")
        except Exception as e:
            print(f"Error during auto-scroll: {e}")

    async def scrape_url(self, url: str, extract_config: dict[str, Any] | None = None) -> dict[str, Any]:
        """
        Scrape a single URL and return the results

        Args:
            url: URL to scrape
            extract_config: Configuration for structured data extraction

        Returns:
            Dictionary containing scraped data
        """
        await self.initialize()
        task_id = f"{len(self._results)}_{datetime.now().timestamp()}"
        result = await self._scrape_url(url, task_id, extract_config)
        return result

    async def scrape_urls(
        self,
        urls: list[str],
        extract_config: dict[str, Any] | None = None
    ) -> list[dict[str, Any]]:
        """
        Scrape multiple URLs in parallel and return all results

        Args:
            urls: List of URLs to scrape
            extract_config: Configuration for structured data extraction

        Returns:
            List of dictionaries containing scraped data
        """
        await self.initialize()
        tasks = []

        for i, url in enumerate(urls):
            task_id = f"{i}_{datetime.now().timestamp()}"
            task = asyncio.create_task(self._scrape_url(url, task_id, extract_config))
            self.active_tasks.add(task)
            task.add_done_callback(self.active_tasks.discard)
            tasks.append(task)

        results = await asyncio.gather(*tasks, return_exceptions=True)
        return [r if not isinstance(r, Exception) else {"error": str(r)} for r in results]

    async def scrape_structured_data(
        self,
        urls: list[str],
        model: type[T],
        extraction_task: str | None = None
    ) -> list[T]:
        """
        Scrape and parse structured data into pydantic models

        Args:
            urls: List of URLs to scrape
            model: Pydantic model class for structured data
            extraction_task: Natural language description of what to extract

        Returns:
            List of parsed data objects
        """
        await self.initialize()

        # Create intelligent extraction task if provided
        if extraction_task:
            # Create a custom system prompt for extraction
            class ExtractionPrompt(SystemPrompt):
                def important_rules(self) -> str:
                    existing_rules = super().important_rules()
                    new_rules = f"""
                    9. EXTRACTION GOAL:
                    - Your primary goal is to extract data according to this specific task: {extraction_task}
                    - You should carefully identify and extract the information as accurately as possible.
                    - Focus only on relevant information that matches the specified data structure.
                    """
                    return f'{existing_rules}\n{new_rules}'

            # Define the extraction task for each URL
            tasks = []
            for url in urls:
                # Setup intelligent extraction for each URL
                task = asyncio.create_task(self._run_extraction_agent(
                    url=url,
                    model=model,
                    extraction_task=extraction_task,
                    system_prompt_class=ExtractionPrompt
                ))
                self.active_tasks.add(task)
                task.add_done_callback(self.active_tasks.discard)
                tasks.append(task)

            # Wait for all extractions to complete
            results = await asyncio.gather(*tasks, return_exceptions=True)
            return [r if not isinstance(r, Exception) else None for r in results]
        else:
            # Manual extraction based on model fields
            field_selectors = {}
            for field_name in model.__annotations__:
                # Convert field name to likely CSS selector
                snake_case = field_name
                selector = f".{snake_case.replace('_', '-')}, #{snake_case.replace('_', '-')}"
                field_selectors[field_name] = selector

            # Scrape with these selectors
            raw_results = await self.scrape_urls(urls, extract_config=field_selectors)

            # Convert to pydantic models
            parsed_results = []
            for result in raw_results:
                try:
                    if "structured_data" in result and "error" not in result:
                        # Map the extracted data to model fields
                        model_data = {}
                        for field_name in model.__annotations__:
                            if field_name in result["structured_data"]:
                                field_value = result["structured_data"][field_name]
                                if isinstance(field_value, list) and len(field_value) > 0:
                                    model_data[field_name] = field_value[0]  # Take first match
                                else:
                                    model_data[field_name] = field_value

                        # Create the model instance
                        parsed_results.append(model(**model_data))
                    else:
                        parsed_results.append(None)
                except Exception as e:
                    print(f"Error parsing result: {e}")
                    parsed_results.append(None)

            return parsed_results

    async def _run_extraction_agent(
        self,
        url: str,
        model: type[T],
        extraction_task: str,
        system_prompt_class: type[SystemPrompt]
    ) -> T:
        """Run an intelligent agent to extract structured data"""
        # Define output model for the agent
        controller = Controller(output_model=model)

        # Create the task description
        fields_info = "\n".join([f"- {field}: {model.__annotations__[field].__name__}"
                                 for field in model.__annotations__])

        task = f"""
        Go to {url} and extract the following information:
        {fields_info}

        Specific extraction instructions: {extraction_task}
        """

        # Create and run the agent
        agent = await self.browser_wrapper.create_agent(task=task)
        agent._controller = controller
        agent._system_prompt_class = system_prompt_class

        history = await agent.run()

        # Parse the result
        result = history.final_result()
        if result:
            try:
                return model.model_validate_json(result)
            except Exception as e:
                print(f"Error parsing agent result: {e}")
                return None
        return None
__init__(config=WebScraperConfig(), llm=None, chrome_path=None, remote_url=None, browser_config=None)

Initialize the web scraper with configuration.

Parameters:

- config (WebScraperConfig, default WebScraperConfig()): Configuration for scraper behavior
- llm (str | BaseChatModel | None, default None): Language model for intelligent data extraction
- chrome_path (str | None, default None): Path to Chrome executable
- remote_url (str | None, default None): URL for remote browser connection
- browser_config (dict[str, Any] | None, default None): Additional browser configuration
Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
def __init__(
    self,
    config: WebScraperConfig = WebScraperConfig(),
    llm: str | BaseChatModel | None = None,
    chrome_path: str | None = None,
    remote_url: str | None = None,
    browser_config: dict[str, Any] | None = None
):
    """
    Initialize the web scraper with configuration.

    Args:
        config: Configuration for scraper behavior
        llm: Language model for intelligent data extraction
        chrome_path: Path to Chrome executable
        remote_url: URL for remote browser connection
        browser_config: Additional browser configuration
    """
    self.config = config
    self.browser_wrapper = BrowserWrapper(
        llm=llm,
        headless=config.headless,
        chrome_path=chrome_path,
        remote_url=remote_url,
        config=browser_config
    )
    self.active_tasks = set()
    self._semaphore = asyncio.Semaphore(config.max_concurrent_tabs)
    self._results = {}

    # Create screenshot directory if needed
    if config.save_screenshots and not os.path.exists(config.screenshot_dir):
        os.makedirs(config.screenshot_dir)
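
A minimal construction sketch (the import path is inferred from the "Source code in" note above and may differ in your installation):

```python
# Sketch only: the import path below is an assumption based on the source location.
from toolboxv2.mods.isaa.SearchAgentCluster.search_tool import WebScraper, WebScraperConfig

config = WebScraperConfig(
    max_concurrent_tabs=3,     # bounds the internal asyncio.Semaphore
    headless=True,
    save_screenshots=True,
    screenshot_dir="./shots",  # created by __init__ because save_screenshots is True
)
scraper = WebScraper(config=config)  # llm, chrome_path, remote_url stay at their defaults
```
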
close() async

Close the browser and clean up resources

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def close(self):
    """Close the browser and clean up resources"""
    # Wait for all active tasks to complete
    if self.active_tasks:
        await asyncio.gather(*self.active_tasks)
    await self.browser_wrapper.close()
initialize() async

Initialize the browser if not already initialized

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def initialize(self):
    """Initialize the browser if not already initialized"""
    await self.browser_wrapper.initialize()
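
Combined with close(), the intended lifecycle looks roughly like this (a sketch; scrape_url is documented below, and initialize() is safe to call repeatedly):

```python
import asyncio

async def main():
    scraper = WebScraper()        # defaults from WebScraperConfig()
    await scraper.initialize()    # starts the browser once
    try:
        result = await scraper.scrape_url("https://example.com")
        print(result["title"])
    finally:
        await scraper.close()     # waits for active tasks, then closes the browser

asyncio.run(main())
```
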
scrape_structured_data(urls, model, extraction_task=None) async

Scrape and parse structured data into pydantic models

Parameters:

- urls (list[str], required): List of URLs to scrape
- model (type[T], required): Pydantic model class for structured data
- extraction_task (str | None, default None): Natural language description of what to extract

Returns:

- list[T]: List of parsed data objects

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def scrape_structured_data(
    self,
    urls: list[str],
    model: type[T],
    extraction_task: str | None = None
) -> list[T]:
    """
    Scrape and parse structured data into pydantic models

    Args:
        urls: List of URLs to scrape
        model: Pydantic model class for structured data
        extraction_task: Natural language description of what to extract

    Returns:
        List of parsed data objects
    """
    await self.initialize()

    # Create intelligent extraction task if provided
    if extraction_task:
        # Create a custom system prompt for extraction
        class ExtractionPrompt(SystemPrompt):
            def important_rules(self) -> str:
                existing_rules = super().important_rules()
                new_rules = f"""
                9. EXTRACTION GOAL:
                - Your primary goal is to extract data according to this specific task: {extraction_task}
                - You should carefully identify and extract the information as accurately as possible.
                - Focus only on relevant information that matches the specified data structure.
                """
                return f'{existing_rules}\n{new_rules}'

        # Define the extraction task for each URL
        tasks = []
        for url in urls:
            # Setup intelligent extraction for each URL
            task = asyncio.create_task(self._run_extraction_agent(
                url=url,
                model=model,
                extraction_task=extraction_task,
                system_prompt_class=ExtractionPrompt
            ))
            self.active_tasks.add(task)
            task.add_done_callback(self.active_tasks.discard)
            tasks.append(task)

        # Wait for all extractions to complete
        results = await asyncio.gather(*tasks, return_exceptions=True)
        return [r if not isinstance(r, Exception) else None for r in results]
    else:
        # Manual extraction based on model fields
        field_selectors = {}
        for field_name in model.__annotations__:
            # Convert field name to likely CSS selector
            snake_case = field_name
            selector = f".{snake_case.replace('_', '-')}, #{snake_case.replace('_', '-')}"
            field_selectors[field_name] = selector

        # Scrape with these selectors
        raw_results = await self.scrape_urls(urls, extract_config=field_selectors)

        # Convert to pydantic models
        parsed_results = []
        for result in raw_results:
            try:
                if "structured_data" in result and "error" not in result:
                    # Map the extracted data to model fields
                    model_data = {}
                    for field_name in model.__annotations__:
                        if field_name in result["structured_data"]:
                            field_value = result["structured_data"][field_name]
                            if isinstance(field_value, list) and len(field_value) > 0:
                                model_data[field_name] = field_value[0]  # Take first match
                            else:
                                model_data[field_name] = field_value

                    # Create the model instance
                    parsed_results.append(model(**model_data))
                else:
                    parsed_results.append(None)
            except Exception as e:
                print(f"Error parsing result: {e}")
                parsed_results.append(None)

        return parsed_results
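
For illustration, a hypothetical Pydantic model run through the selector-based path (no extraction_task, so no agent is involved); the Article model and URL are placeholders, and the call assumes an async context:

```python
from pydantic import BaseModel

class Article(BaseModel):  # hypothetical model, not part of the library
    title: str
    author: str

# Without extraction_task, each field name becomes a selector pair,
# e.g. "title" -> ".title, #title" (see the loop above).
articles = await scraper.scrape_structured_data(
    ["https://example.com/post"],  # placeholder URL
    model=Article,
)
print(articles[0])  # an Article instance, or None if parsing failed
```
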
scrape_url(url, extract_config=None) async

Scrape a single URL and return the results

Parameters:

- url (str, required): URL to scrape
- extract_config (dict[str, Any] | None, default None): Configuration for structured data extraction

Returns:

- dict[str, Any]: Dictionary containing scraped data

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def scrape_url(self, url: str, extract_config: dict[str, Any] | None = None) -> dict[str, Any]:
    """
    Scrape a single URL and return the results

    Args:
        url: URL to scrape
        extract_config: Configuration for structured data extraction

    Returns:
        Dictionary containing scraped data
    """
    await self.initialize()
    task_id = f"{len(self._results)}_{datetime.now().timestamp()}"
    result = await self._scrape_url(url, task_id, extract_config)
    return result
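
A sketch of a single-page scrape with a custom extract_config; the keys are free-form labels and the values CSS selectors, mirroring how search_web builds {'tables': 'table', 'images': 'img', 'links': 'a'}:

```python
result = await scraper.scrape_url(
    "https://example.com/docs",  # placeholder URL
    extract_config={"headings": "h2", "code": "pre"},
)
print(result["title"])
print(result.get("structured_data", {}).keys())  # labels from extract_config
```
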
scrape_urls(urls, extract_config=None) async

Scrape multiple URLs in parallel and return all results

Parameters:

- urls (list[str], required): List of URLs to scrape
- extract_config (dict[str, Any] | None, default None): Configuration for structured data extraction

Returns:

- list[dict[str, Any]]: List of dictionaries containing scraped data

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def scrape_urls(
    self,
    urls: list[str],
    extract_config: dict[str, Any] | None = None
) -> list[dict[str, Any]]:
    """
    Scrape multiple URLs in parallel and return all results

    Args:
        urls: List of URLs to scrape
        extract_config: Configuration for structured data extraction

    Returns:
        List of dictionaries containing scraped data
    """
    await self.initialize()
    tasks = []

    for i, url in enumerate(urls):
        task_id = f"{i}_{datetime.now().timestamp()}"
        task = asyncio.create_task(self._scrape_url(url, task_id, extract_config))
        self.active_tasks.add(task)
        task.add_done_callback(self.active_tasks.discard)
        tasks.append(task)

    results = await asyncio.gather(*tasks, return_exceptions=True)
    return [r if not isinstance(r, Exception) else {"error": str(r)} for r in results]
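
Usage sketch: concurrency is bounded by WebScraperConfig.max_concurrent_tabs through the semaphore taken in _scrape_url, so passing many URLs at once is safe; failed pages come back as {"error": ...} entries instead of raising:

```python
urls = ["https://example.com/a", "https://example.com/b"]  # placeholders
results = await scraper.scrape_urls(urls)
for r in results:
    if "error" in r:
        print(f"failed {r.get('url', '?')}: {r['error']}")
    else:
        print(r["title"])
```
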
search_web(query, max_results=5, include_content=True, extract_images=False, extract_tables=False, extract_links=False, save_to_file=None) async

Perform a comprehensive web search and return high-quality data for the given query.

Parameters:

- query (str, required): Search query string
- max_results (int, default 5): Maximum number of results to process
- include_content (bool, default True): Whether to include full content from result pages
- extract_images (bool, default False): Whether to extract images from result pages
- extract_tables (bool, default False): Whether to extract tables from result pages
- extract_links (bool, default False): Whether to extract links from result pages
- save_to_file (str | None, default None): Path to save results as JSON

Returns:

- dict[str, Any]: Dictionary containing search results and extracted information

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def search_web(
    self,
    query: str,
    max_results: int = 5,
    include_content: bool = True,
    extract_images: bool = False,
    extract_tables: bool = False,
    extract_links: bool = False,
    save_to_file: str | None = None
) -> dict[str, Any]:
    """
    Perform a comprehensive web search and return high-quality data for the given query.

    Args:
        query: Search query string
        max_results: Maximum number of results to process (default: 5)
        include_content: Whether to include full content from result pages (default: True)
        extract_images: Whether to extract images from result pages (default: False)
        extract_tables: Whether to extract tables from result pages (default: False)
        extract_links: Whether to extract links from result pages (default: False)
        save_to_file: Path to save results as JSON (optional)

    Returns:
        Dictionary containing search results and extracted information
    """
    await self.initialize()
    try:
        start_time = datetime.now()

        # Try different search engines in order
        search_engines = [
            {
                "url": f"https://www.google.com/search?q={urllib.parse.quote_plus(query)}",
                "result_selector": ".g",
                "title_selector": "h3",
                "link_selector": "a",
                "snippet_selector": ".VwiC3b",
                "name": "google"
            },
            {
                "url": f"https://www.bing.com/search?q={urllib.parse.quote_plus(query)}",
                "result_selector": ".b_algo",
                "title_selector": "h2",
                "link_selector": "a",
                "snippet_selector": ".b_caption p",
                "name": "bing"
            },
            {
                "url": f"https://duckduckgo.com/?q={urllib.parse.quote_plus(query)}",
                "result_selector": ".result",
                "title_selector": "h2",
                "link_selector": "a.result__a",
                "snippet_selector": ".result__snippet",
                "name": "duckduckgo"
            }
        ]

        results = []

        for engine in search_engines:
            try:
                # Navigate to search engine
                page = await self.browser_wrapper.navigate(engine["url"])
                await page.wait_for_load_state("networkidle")
                await page.wait_for_timeout(2000)  # Wait for results to load

                # Extract search results
                search_results = await page.evaluate(
                    """
                    (selectors) => {
                        const results = [];
                        const elements = document.querySelectorAll(selectors.result_selector);

                        for (const element of elements) {
                            const titleElement = element.querySelector(selectors.title_selector);
                            const linkElement = element.querySelector(selectors.link_selector);
                            const snippetElement = element.querySelector(selectors.snippet_selector);

                            if (titleElement && linkElement) {
                                const url = linkElement.href;
                                // Skip non-http links and same-domain results
                                if (url && url.startsWith('http') &&
                                    !url.includes('google.com/search') &&
                                    !url.includes('bing.com/search') &&
                                    !url.includes('duckduckgo.com')) {
                                    results.push({
                                        title: titleElement.textContent.trim(),
                                        url: url,
                                        snippet: snippetElement ? snippetElement.textContent.trim() : '',
                                        source: selectors.name
                                    });
                                }
                            }
                        }
                        return results;
                    }
                    """,
                    engine
                )

                if search_results and len(search_results) > 0:
                    # We got results, add them and break
                    results = search_results
                    break

            except Exception as e:
                print(f"Error searching with {engine['name']}: {str(e)}")
                continue  # Try next engine

        # Filter and limit results
        unique_urls = set()
        filtered_results = []

        for result in results:
            if result['url'] not in unique_urls and len(filtered_results) < max_results:
                unique_urls.add(result['url'])
                filtered_results.append(result)

        results = filtered_results

        # Get detailed content if requested
        if include_content and results:
            # Extract content from each result page
            urls_to_scrape = [result['url'] for result in results]

            # Configure what to extract
            extract_config = {}
            if extract_tables:
                extract_config['tables'] = 'table'
            if extract_images:
                extract_config['images'] = 'img'
            if extract_links:
                extract_config['links'] = 'a'

            # Scrape all pages in parallel using our efficient multi-tab approach
            scraped_data = await self.scrape_urls(
                urls_to_scrape,
                extract_config=extract_config if extract_config else None
            )

            # Add content to results
            for i, result in enumerate(results):
                if i < len(scraped_data) and 'error' not in scraped_data[i]:
                    result['content'] = {
                        'title': scraped_data[i].get('title', result['title']),
                        'markdown': scraped_data[i].get('markdown', ''),
                        'text': scraped_data[i].get('text', ''),
                    }

                    # Add structured data if available
                    if extract_config and 'structured_data' in scraped_data[i]:
                        structured_data = scraped_data[i]['structured_data']
                        for key, value in structured_data.items():
                            if value:  # Only add non-empty data
                                result['content'][key] = value

        # Prepare final response
        response = {
            'query': query,
            'timestamp': datetime.now().isoformat(),
            'num_results': len(results),
            'results': results,
            'execution_time': (datetime.now() - start_time).total_seconds()
        }

        # Save to file if requested
        if save_to_file:
            os.makedirs(os.path.dirname(os.path.abspath(save_to_file)), exist_ok=True)
            with open(save_to_file, 'w', encoding='utf-8') as f:
                json.dump(response, f, ensure_ascii=False, indent=2)

        return response

    finally:
        # Make sure we clean up browser resources
        await self.close()
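
A one-shot usage sketch; because search_web closes the browser in its finally block, creating a fresh WebScraper per search is the safest pattern:

```python
scraper = WebScraper()
response = await scraper.search_web(
    "playwright networkidle",          # example query
    max_results=3,
    extract_tables=True,
    save_to_file="./results/search.json",
)
print(response["num_results"], "results in", response["execution_time"], "s")
```
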
WebScraperConfig

Bases: BaseModel

Configuration for web scraper operations

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
class WebScraperConfig(BaseModel):
    """Configuration for web scraper operations"""
    max_concurrent_tabs: int = 5
    default_timeout: float = 30000
    scroll_delay: int = 500
    initial_delay: int = 1000
    viewport_height: int = 900
    viewport_width: int = 1600
    wait_for_selectors: bool = True
    auto_scroll: bool = True
    save_screenshots: bool = False
    screenshot_dir: str = "./screenshots"
    extract_markdown: bool = True
    extract_text: bool = True
    extract_html: bool = False
    headless: bool = False
    disable_images: bool = False
    user_agent: str | None = None
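
Note that default_timeout, scroll_delay, and initial_delay are in milliseconds (the delays feed Playwright's page.wait_for_timeout). A sketch of a faster, image-free profile:

```python
fast_config = WebScraperConfig(
    disable_images=True,
    headless=True,
    scroll_delay=200,    # ms between auto-scroll steps
    initial_delay=250,   # ms to wait after page load
)
```
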
sanitize_filename(filename)

Convert a string to a valid filename

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
def sanitize_filename(filename: str) -> str:
    """Convert a string to a valid filename"""
    # Replace spaces with underscores and remove invalid characters
    sanitized = re.sub(r'[^\w\s-]', '', filename).strip().lower()
    return re.sub(r'[-\s]+', '_', sanitized)
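
The effect of the two substitutions, traced by hand:

```python
sanitize_filename("Getting Started: API v2!")
# re.sub(r'[^\w\s-]', '', ...).strip().lower() -> "getting started api v2"
# re.sub(r'[-\s]+', '_', ...)                  -> "getting_started_api_v2"
```
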
scrape_documentation_to_markdown(start_url, topic=None, max_pages=30, max_depth=3, output_dir=None, toc_filename='table_of_contents.md') async

Recursively scrape documentation pages starting from a URL, focused on a specific topic, and convert to Markdown.

Parameters:

- start_url (str, required): The documentation homepage or entry point
- topic (str | None, default None): The topic to focus on (e.g., "streaming", "authentication")
- max_pages (int, default 30): Maximum number of pages to scrape
- max_depth (int, default 3): Maximum depth of link traversal
- output_dir (str | None, default None): Directory to save the MD files (if None, just returns them)
- toc_filename (str, default 'table_of_contents.md'): Filename for the table of contents

Returns:

- dict[str, str]: Dictionary mapping page titles to markdown content

Source code in toolboxv2/mods/isaa/SearchAgentCluster/search_tool.py
async def scrape_documentation_to_markdown(
    start_url: str,
    topic: str | None = None,
    max_pages: int = 30,
    max_depth: int = 3,
    output_dir: str | None = None,
    toc_filename: str = "table_of_contents.md"
) -> dict[str, str]:
    """
    Recursively scrape documentation pages starting from a URL,
    focused on a specific topic, and convert to Markdown.

    Args:
        start_url: The documentation homepage or entry point
        topic: The topic to focus on (e.g., "streaming", "authentication")
        max_pages: Maximum number of pages to scrape
        max_depth: Maximum depth of link traversal
        output_dir: Directory to save the MD files (if None, just returns them)
        toc_filename: Filename for the table of contents

    Returns:
        Dictionary mapping page titles to markdown content
    """
    # Initialize scraper with efficient settings for docs
    scraper_config = WebScraperConfig(
        max_concurrent_tabs=5,
        headless=global_headless,
        disable_images=True,
        extract_html=False,
        auto_scroll=True,
        scroll_delay=300,
        initial_delay=500,
        save_screenshots=False
    )

    scraper = WebScraper(config=scraper_config)
    await scraper.initialize()

    # Track visited and pending URLs
    visited_urls: set[str] = set()
    pending_urls: list[dict] = [{"url": start_url, "depth": 0, "parent": None}]
    results: dict[str, dict] = {}
    domain = urlparse(start_url).netloc

    logging.info(f"Starting documentation scrape from {start_url}")
    if topic:
        logging.info(f"Focusing on topic: {topic}")

    # Create a regular expression pattern for topic if provided
    topic_pattern = re.compile(rf'\b{re.escape(topic)}\b', re.IGNORECASE) if topic else None

    try:
        # Process URLs breadth-first until we hit max pages or have no more URLs
        while pending_urls and len(results) < max_pages:
            # Get the next URL to process
            current = pending_urls.pop(0)
            current_url = current["url"]
            current_depth = current["depth"]

            # Skip if we've already visited this URL
            if current_url in visited_urls:
                continue

            logging.info(f"Scraping: {current_url} (depth: {current_depth})")
            visited_urls.add(current_url)

            # Scrape the current page
            page_result = await scraper.scrape_url(current_url)

            # Skip pages with errors
            if "error" in page_result:
                logging.warning(f"Error scraping {current_url}: {page_result['error']}")
                continue

            # Check if page is relevant to the topic
            is_relevant = True
            if topic_pattern:
                markdown_content = page_result.get("markdown", "")
                text_content = page_result.get("text", "")

                # Check if topic appears in title, URL, or content
                has_topic_in_title = topic_pattern.search(page_result.get("title", ""))
                has_topic_in_url = topic_pattern.search(current_url)
                has_topic_in_content = (
                    topic_pattern.search(markdown_content) or
                    topic_pattern.search(text_content)
                )

                is_relevant = has_topic_in_title or has_topic_in_url or has_topic_in_content

            # Process this page if it's relevant
            if is_relevant:
                # Extract title and content
                title = page_result.get("title", f"Page {len(results) + 1}")

                # Store the result
                results[current_url] = {
                    "title": title,
                    "markdown": page_result.get("markdown", ""),
                    "depth": current_depth,
                    "parent": current["parent"]
                }

                # Only proceed deeper if we haven't hit max depth
                if current_depth < max_depth:
                    # Extract links to follow
                    parser = scraper.browser_wrapper.get_parser()
                    links = await parser.extract_links(current_url)

                    # Filter links for internal documentation pages
                    doc_links = []
                    for link in links:
                        link_url = link["href"]
                        parsed_url = urlparse(link_url)

                        # Only include links to the same domain
                        if parsed_url.netloc == domain or not parsed_url.netloc:
                            # Normalize URL
                            if not parsed_url.netloc:
                                link_url = urljoin(current_url, link_url)

                            # Skip anchor links to same page
                            if link_url.split('#')[0] == current_url.split('#')[0]:
                                continue

                            # Skip non-documentation links (common patterns)
                            skip_patterns = [
                                r'(\.pdf|\.zip|\.tar|\.gz)$',  # Downloads
                                r'/search/',  # Search pages
                                r'/login/',  # Auth pages
                                r'/logout/',  # Auth pages
                                r'/tag/',  # Tag pages
                                r'/version/',  # Version switching
                                r'/latest/',  # Version switching
                                r'/download/',  # Download pages
                                r'/contact/',  # Contact pages
                                r'/blog/',  # Blog posts (unless that's what we want)
                            ]

                            should_skip = any(re.search(pattern, link_url) for pattern in skip_patterns)
                            if should_skip:
                                continue

                            # Check if it's potentially relevant to the topic
                            is_potentially_relevant = True
                            if topic_pattern:
                                has_topic_in_link_text = topic_pattern.search(link["text"])
                                has_topic_in_link_url = topic_pattern.search(link_url)
                                is_potentially_relevant = has_topic_in_link_text or has_topic_in_link_url

                            # Add to pending if it's potentially relevant and not already visited
                            if is_potentially_relevant and link_url not in visited_urls:
                                doc_links.append({
                                    "url": link_url,
                                    "depth": current_depth + 1,
                                    "parent": current_url
                                })

                    # Add the filtered links to our pending list
                    pending_urls.extend(doc_links)

        # Generate markdown output
        markdown_results = {}

        # Create a hierarchy for building a table of contents
        pages_hierarchy = {}
        for url, page_data in results.items():
            title = page_data["title"]
            markdown = page_data["markdown"]

            # Add page URL reference at the bottom
            markdown += f"\n\n---\n*Source: [{url}]({url})*"

            # Add to outputs
            markdown_results[url] = markdown

            # Track in hierarchy for TOC
            depth = page_data["depth"]
            parent = page_data["parent"]

            if depth not in pages_hierarchy:
                pages_hierarchy[depth] = []

            pages_hierarchy[depth].append({
                "url": url,
                "title": title,
                "parent": parent
            })

        # Generate table of contents
        toc = f"# Documentation: {topic if topic else 'All Topics'}\n\n"
        toc += f"*Generated from: [{start_url}]({start_url})*\n\n"
        toc += "## Table of Contents\n\n"

        # Sort by depth to build hierarchy
        for depth in sorted(pages_hierarchy.keys()):
            pages = pages_hierarchy[depth]

            for page in pages:
                # Calculate indentation based on depth
                indent = "  " * depth
                page_filename = sanitize_filename(page["title"]) + ".md"
                toc += f"{indent}- [{page['title']}]({page_filename})\n"

        # Save the results if output directory specified
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

            # Write the TOC file
            with open(os.path.join(output_dir, toc_filename), "w", encoding="utf-8") as f:
                f.write(toc)

            # Write each page file
            for url, content in markdown_results.items():
                page_title = results[url]["title"]
                filename = sanitize_filename(page_title) + ".md"
                filepath = os.path.join(output_dir, filename)

                with open(filepath, "w", encoding="utf-8") as f:
                    f.write(content)

            logging.info(f"Saved {len(markdown_results)} documentation pages to {output_dir}")

        # Include the TOC in the results
        markdown_results["table_of_contents"] = toc

        return markdown_results

    finally:
        # Make sure we clean up browser resources
        await scraper.close()
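
A usage sketch (the URL is a placeholder and the call assumes an async context):

```python
pages = await scrape_documentation_to_markdown(
    start_url="https://docs.example.com/",  # placeholder entry point
    topic="streaming",
    max_pages=10,
    max_depth=2,
    output_dir="./docs_md",                 # one .md file per page, plus the TOC
)
print(pages["table_of_contents"])
```
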

base

Agent
agent
AgentModelData

Bases: BaseModel

Configuration for the LLM model and API settings via LiteLLM.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
class AgentModelData(BaseModel):
    """Configuration for the LLM model and API settings via LiteLLM."""
    name: str | None = Field(default=None, description="Agent's internal name, often derived from builder.")
    model: str = Field(..., description="Primary LiteLLM model identifier (e.g., 'gemini/gemini-1.5-flash-latest', 'ollama/mistral').")
    provider: str | None = Field(default=None, description="LiteLLM provider override if needed.")
    system_message: str = Field(default="You are a helpful AI assistant.", description="Base system prompt.")

    temperature: float | None = Field(default=None, ge=0.0, le=2.0) # Use LiteLLM defaults if None
    top_k: int | None = Field(default=None, ge=1)
    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
    max_tokens: int | None = Field(default=None, ge=1, description="Max tokens for LLM generation.")
    max_input_tokens: int | None = Field(default=None, ge=1, description="Max context window size (for trimming).")

    api_key: str | None = Field(default=None, description="API key (use env vars in production).")
    api_base: str | None = Field(default=None, description="API base URL (for local models/proxies).")
    api_version: str | None = Field(default=None, description="API version (e.g., Azure).")

    stop_sequence: list[str] | None = Field(default=None, alias="stop") # Alias for LiteLLM
    presence_penalty: float | None = Field(default=None)
    frequency_penalty: float | None = Field(default=None)

    user_id: str | None = Field(default=None, description="User identifier for LLM calls ('user' param).")
    budget_manager: BudgetManager | None = Field(default=None, description="LiteLLM BudgetManager instance.")
    caching: bool | None = Field(default=True, description="Enable/disable LiteLLM caching.")

    # Model config for Pydantic v2
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra='ignore', # Ignore extra fields from builder/ADK init
        populate_by_name=True # Allow using 'stop' alias
    )
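
An illustrative configuration; the model id and api_base are placeholders, and real API keys belong in environment variables, as the field descriptions note:

```python
amd = AgentModelData(
    model="ollama/mistral",                 # any LiteLLM model identifier
    system_message="You are a concise research assistant.",
    temperature=0.2,
    max_tokens=1024,
    api_base="http://localhost:11434",      # e.g. a local Ollama endpoint
    stop=["\nUser:"],                       # fills stop_sequence via its alias
)
```
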
EnhancedAgent

Bases: *_AgentBaseClass

Enhanced, production-oriented Unified Agent integrating LiteLLM, ADK, A2A, and MCP (via ADK).

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
class EnhancedAgent(*_AgentBaseClass):
    """
    Enhanced, production-oriented Unified Agent integrating LiteLLM, ADK, A2A, and MCP (via ADK).
    """
    # --- Core Configuration ---
    amd: AgentModelData # Primary model config
    format_model: str | None = Field(default=None, description="Optional separate model for JSON formatting (a_format_class).")
    format_model_: str | None = Field(default=None, description="helper var for format_model", exclude=True)
    world_model: WorldModel = Field(default_factory=WorldModel)
    verbose: bool = Field(default=False)
    internal_state: InternalAgentState = Field(default=InternalAgentState.IDLE)

    # --- LiteLLM Specific ---
    stream: bool = Field(default=False, description="Whether LLM calls should stream chunks.")
    # Use a simple dict for history for now, can be replaced with persistent store interface
    # Keyed by session_id
    message_history: dict[str, list[dict[str, Any]]] = Field(default_factory=dict)
    max_history_tokens: int | None = Field(default=None, description="Alternative to max_turns for history trimming based on token count.")
    max_history_turns: int = Field(default=20, description="Max conversation turns (user+assistant) for history.") # Used if max_history_tokens is None
    trim_strategy: Literal["litellm", "basic"] = Field(default="litellm")
    total_cost: float = Field(default=0.0, description="Accumulated cost tracked via LiteLLM.")

    # --- Framework Components (Initialized via Builder/Setup) ---
    # ADK
    adk_runner: Runner | None = Field(default=None, description="ADK Runner instance if enabled.")
    adk_session_service: BaseSessionService | None = Field(default=None, description="ADK Session Service (often from runner).")
    sync_adk_state: bool = Field(default=True, description="Sync WorldModel with ADK Session.state.")
    # Exit stack to manage lifecycles of components like MCPToolset connections
    # CRITICAL FIX: Use contextlib.AsyncExitStack type hint
    adk_exit_stack: contextlib.AsyncExitStack | None = Field(default=None, description="AsyncExitStack for managing ADK toolset lifecycles.")

    # MCP Server (Agent acts AS an MCP Server)
    mcp_server: FastMCP | None = Field(default=None, description="MCP server instance if agent exposes MCP capabilities.")
    # A2A Server (Agent acts AS an A2A Server)
    a2a_server: A2AServer | None = Field(default=None, description="A2A server instance if agent exposes A2A capabilities.")
    # A2A Client (Agent acts AS an A2A Client)
    a2a_clients: dict[str, A2AClient] = Field(default_factory=dict, description="Cached A2A clients for target agents.")
    a2a_client_lock: asyncio.Lock = Field(default_factory=asyncio.Lock, description="Lock for A2A client cache access.")
    a2a_poll_interval: float = Field(default=2.0, description="Polling interval for A2A task results (seconds).")
    a2a_poll_timeout: float = Field(default=60.0, description="Max time to wait for A2A task completion.")

    # --- Callbacks ---
    stream_callback: Callable[[str], None | Awaitable[None]] | None = Field(default=None, description="Callback for each LLM stream chunk.")
    post_run_callback: Callable[[str, str, float], None | Awaitable[None]] | None = Field(default=None, description="Callback after a_run completes (session_id, final_response, turn_cost).")
    progress_callback: Callable[[Any], None | Awaitable[None]] | None = Field(default=None, description="Callback for progress updates (e.g., tool execution, A2A polling).")
    human_in_loop_callback: Callable[[dict], str | Awaitable[str]] | None = Field(default=None, description="Callback for HIL intervention points.")

    # --- Observability ---
    tracer: Any | None = Field(default=None, description="OpenTelemetry Tracer instance.") # Type hint depends on OTel setup

    # --- Internal State ---
    last_llm_result: Any | None = Field(default=None, description="Raw result from the last LiteLLM call.")

    # Model config
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra='ignore' # Critical for compatibility with ADK LlmAgent init
    )

    @model_validator(mode='after')
    def _enhanced_agent_post_init(self) -> 'EnhancedAgent':
        """
        Performs initialization steps after Pydantic has validated fields.
        """
        # --- (Existing post_init logic remains the same) ---
        logger.setLevel(logging.DEBUG if self.verbose else logging.INFO)
        os.environ['LITELLM_LOG'] = 'DEBUG' if self.verbose else 'NONE'
        logger.debug(f"Verbose logging {'enabled' if self.verbose else 'disabled'} for agent {self.amd.name}")
        self._setup_telemetry()
        if ADK_AVAILABLE and isinstance(self, LlmAgent):
            logger.debug(f"Running post-init logic for ADK agent '{self.amd.name}'")
            self._ensure_internal_adk_tools() # Ensure tools are added *after* Pydantic init
            if self.adk_runner and hasattr(self.adk_runner, 'session_service'):
                self.adk_session_service = self.adk_runner.session_service
                logger.debug("Associated ADK session service from runner.")
        if 'default' not in self.message_history:
            self.message_history['default'] = []
        logger.info(
            f"EnhancedAgent '{self.amd.name}' initialized. Model: {self.amd.model}. "
            f"Capabilities: ADK({ADK_AVAILABLE}), A2A({A2A_AVAILABLE}), MCP({MCP_AVAILABLE})"
        )
        self.model = LiteLlm(model=self.amd.model)
        return self
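
    # Construction sketch (illustrative, hedged): field names follow the
    # declarations above; the AgentModelData argument values shown are assumptions.
    #
    #   agent = EnhancedAgent(
    #       amd=AgentModelData(name="demo-agent", model="openai/gpt-4o-mini"),
    #       verbose=True,
    #       max_history_turns=10,
    #   )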

    # --- ADK Post Init (Called automatically by Pydantic if method exists in base) ---
    # This method name is expected by ADK's BaseModel integration.
    # Pydantic v2 runs validators based on MRO, so if LlmAgent has this, it runs.
    # We don't strictly need to define it here unless overriding LlmAgent's version.
    # def model_post_init(self, __context: Any) -> None:
    #     """ADK post-initialization (if inheriting from ADK BaseModel)."""
    #     # Call super() if overriding LlmAgent's method
    #     # super().model_post_init(__context) # If LlmAgent has this method
    #     logger.debug(f"ADK model_post_init for Agent '{self.amd.name}' (EnhancedAgent)")
    #     # Add post-init logic specific to ADK features here, AFTER ADK's own init
    #     self._ensure_internal_adk_tools()
    #     if self.adk_runner:
    #         self.adk_session_service = self.adk_runner.session_service


    # --- Telemetry Setup ---
    def _setup_telemetry(self):
        """Initializes the OpenTelemetry tracer."""
        if OTEL_AVAILABLE and not self.tracer:
            # Get tracer from global provider (needs to be configured elsewhere)
            # In a real app, you'd configure the TracerProvider with exporters
            # provider = TracerProvider() # Example: basic provider
            # provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter())) # Example: console output
            # trace.set_tracer_provider(provider)
            self.tracer = trace.get_tracer("enhanced_agent", "0.1.0")
            logger.info("OpenTelemetry tracer initialized.")
        elif not OTEL_AVAILABLE:
            self.tracer = DummyTracer() # Use NoOp tracer if OTel not installed
            logger.debug("OpenTelemetry not available, using NoOp tracer.")


    # --- Setup Methods (Called by Builder) ---

    def setup_mcp_server(self, host="0.0.0.0", port=8000, **mcp_kwargs):
        """Initialize and configure the MCP server capabilities *for this agent*.
           This agent will ACT AS an MCP Server.
        """
        if not MCP_AVAILABLE:
            logger.warning("MCP library not installed. Cannot setup MCP server.")
            return None
        if self.mcp_server:
            logger.warning("MCP server already initialized.")
            return self.mcp_server
        name = mcp_kwargs.pop("name", None)  # pop() avoids KeyError when no name is supplied
        self.mcp_server = FastMCP(name=name or f"{self.amd.name}-mcp-server",
                                  description=f"MCP interface for EnhancedAgent {self.amd.name}",
                                  **mcp_kwargs)
        logger.info(f"Setting up MCP server for agent '{self.amd.name}' on {host}:{port}")

        # --- Register Agent's core functionalities as MCP services ---
        # Example: Expose World Model (Read-only for safety)
        @self.mcp_server.resource(f"agent://{self.amd.name}/world_model")
        def mcp_get_world_model_resource() -> dict[str, Any]:
            """Gets the agent's world model."""
            logger.debug(f"[MCP Resource] agent://{self.amd.name}/world_model accessed")
            return self.world_model.to_dict()

        # Example: Expose a simple query tool via MCP
        @self.mcp_server.tool(name="simple_llm_query")
        async def mcp_simple_query(prompt: str) -> str:
            """Sends a simple prompt to the agent's LLM (non-persistent run)."""
            logger.debug(f"[MCP Tool] simple_llm_query called: {prompt[:50]}...")
            # Use a minimal, non-persistent run, disable recursive calls
            response = await self.a_run(
                prompt, session_id=f"mcp_query_{uuid.uuid4()}",
                persist_history=False, strategy_override=ProcessingStrategy.DIRECT_LLM
            )
            return response

        # If ADK tools exist, potentially expose them via MCP automatically?
        if ADK_AVAILABLE and isinstance(self, LlmAgent) and self.tools:
             logger.info("Attempting to expose ADK tools via MCP server...")
             for adk_tool in self.tools:
                 if adk_tool.name in ["code_execution", "adk_tool_a2a_send_and_wait", "adk_tool_a2a_send_no_wait", "adk_tool_a2a_get_task_status", "adk_tool_a2a_cancel_task"]:
                     continue
                 if not isinstance(adk_tool, BaseTool): continue
                 try:
                     mcp_schema = adk_to_mcp_tool_type(adk_tool)

                     # Define the MCP tool handler dynamically
                     async def mcp_tool_handler(tool_name=adk_tool.name, **kwargs):
                         logger.info(f"[MCP Tool via ADK] Calling {tool_name} with {kwargs}")
                         # ADK tools expect ToolContext, which we don't have here.
                         # We might need to simulate it or adapt the tool execution.
                         # This simple version calls the tool's underlying function if possible.
                         # WARNING: This bypasses ADK's standard tool execution flow.
                         if hasattr(adk_tool, 'func') and callable(adk_tool.func):
                             # This assumes the function doesn't need ToolContext
                             result = await adk_tool.func(**kwargs)
                             # Convert result to MCP content (e.g., TextContent)
                             if isinstance(result, str):
                                 return [mcp_types.TextContent(type="text", text=result)]
                             else:
                                 try:
                                     return [mcp_types.TextContent(type="text", text=json.dumps(result))]
                                 except (TypeError, ValueError):  # result may not be JSON-serializable
                                     return [mcp_types.TextContent(type="text", text=str(result))]
                         else:
                             logger.warning(f"Cannot directly call ADK tool {tool_name} via MCP.")
                             return [mcp_types.TextContent(type="text", text=f"Error: Cannot execute ADK tool {tool_name} directly.")]

                     # Register the dynamic handler with the MCP server
                     self.mcp_server.tool(name=mcp_schema.name)(mcp_tool_handler)
                     logger.info(f"Exposed ADK tool '{adk_tool.name}' as MCP tool '{mcp_schema.name}'.")

                 except Exception as e:
                     logger.warning(f"Failed to expose ADK tool '{adk_tool.name}' via MCP: {e}")


        logger.info(f"MCP server setup complete for agent '{self.amd.name}'. Run `agent.run_mcp_server()` to start.")
        return self.mcp_server

    def run_mcp_server(self, transport='sse', **kwargs):
        """Starts the MCP server (blocking)."""
        if not self.mcp_server:
            logger.error("MCP server not initialized. Call setup_mcp_server first.")
            return
        if not MCP_AVAILABLE:
             logger.error("MCP library not available. Cannot run MCP server.")
             return
        logger.info(f"Starting MCP server for agent '{self.amd.name}' using {transport} transport...")
        # This is blocking, run in a separate process/thread for a long-running agent
        try:
            self.mcp_server.run(transport=transport, **kwargs)
        except Exception as e:
            logger.error(f"MCP server failed to run: {e}", exc_info=True)

    # MCP Client Setup is now handled by ADK's MCPToolset via the Builder


    def setup_a2a_server(self, host="0.0.0.0", port=5000, **a2a_server_options):
        """
        Initialize and configure the A2A server capabilities using python-a2a.
        This dynamically creates a server class with the agent's capabilities.
        """
        if not A2A_AVAILABLE:
            logger.warning("python-a2a library not installed. Cannot setup A2A server.")
            return None
        if self.a2a_server:
            logger.warning("A2A server already initialized.")
            return self.a2a_server

        logger.info(f"Setting up A2A server for agent '{self.amd.name}' on {host}:{port}")

        agent_instance = self # Reference to the current EnhancedAgent instance

        # Define the A2A Server class dynamically using the decorator
        @a2a_agent_decorator(
            name=self.amd.name or "EnhancedAgent",
            description=f"Enhanced Agent '{self.amd.name}' - Capabilities: ADK({ADK_AVAILABLE}), MCP({MCP_AVAILABLE}), A2A({A2A_AVAILABLE})",
            version="1.0.0",
            # Other AgentCard fields...
        )
        class DynamicA2AServer(A2AServer):
            bound_agent: EnhancedAgent = agent_instance

            def handle_task(self, task: Task) -> Task:
                """ Handles incoming A2A tasks by calling the EnhancedAgent's async logic. """
                # --- (handle_task implementation remains the same as before) ---
                logger.info(f"[A2A Server {self.bound_agent.amd.name}] Received task: {task.id}")
                async def run_agent_async():
                    # ... (logic to extract prompt, call a_run, update task) ...
                    try:
                        user_prompt = ""
                        # ... (extract user_prompt from task.message) ...
                        if task.message and task.message.get("content"):
                            content = task.message["content"]
                            if isinstance(content, dict) and content.get("type") == "text":
                                user_prompt = content.get("text", "").strip()
                            elif isinstance(content, str):
                                user_prompt = content.strip()

                        if not user_prompt:
                            raise ValueError("Task message has no text content.")

                        session_id = task.message.get("session_id", task.id)
                        agent_response = await self.bound_agent.a_run(
                            user_prompt,
                            session_id=session_id,
                            persist_history=False,
                            a2a_task_id=task.id
                        )
                        task.artifacts = [{"parts": [{"type": "text", "text": str(agent_response)}]}]
                        task.status = TaskStatus(state=TaskState.COMPLETED)
                    except Exception as e:
                        # ... (error handling) ...
                        logger.error(f"[A2A Task {task.id}] Error during processing: {e}", exc_info=True)
                        error_msg = f"Internal agent error: {str(e)}"
                        task.artifacts = [{"parts": [{"type": "text", "text": error_msg}]}]
                        task.status = TaskStatus(state=TaskState.FAILED, message={"role": "agent", "content": {"type": "text", "text": error_msg}})
                    return task
                try:
                    updated_task = asyncio.run(run_agent_async())
                    return updated_task
                except RuntimeError as e:
                    # ... (handle RuntimeError) ...
                    logger.error(f"RuntimeError calling asyncio.run in handle_task: {e}.")
                    task.status = TaskStatus(state=TaskState.FAILED, message={"role": "agent", "content": {"type": "text", "text": "Internal Server Error processing task asynchronously."}})
                    return task
                # --- (end of handle_task logic) ---


            # --- Expose Skills ---
            @a2a_skill_decorator(
                name="General Query",
                description="Process general natural language queries using the agent's primary LLM.",
                examples=["What is the capital of France?", "Summarize the plot of Hamlet."]
            )
            def general_query_skill(self, query: str) -> str:
                """Handles general queries via the skill mechanism by calling a_run."""
                logger.info(f"[A2A Skill] Received general_query: {query[:50]}...")
                async def run_skill_async():
                    # Call a_run, forcing direct LLM strategy for simple queries
                    response = await self.bound_agent.a_run(
                        query,
                        a2a_task_id=f"skill_query_{uuid.uuid4()}",
                        strategy_override=ProcessingStrategy.DIRECT_LLM,
                        persist_history=False
                        )
                    return response
                try:
                    # Bridge sync skill call to async agent logic
                    return asyncio.run(run_skill_async())
                except RuntimeError:
                     logger.error("RuntimeError calling asyncio.run in general_query_skill.")
                     return "Error: Could not process skill asynchronously."

            # --- FIXED: Generic Skill for ADK Tools ---
            if ADK_AVAILABLE and isinstance(agent_instance, LlmAgent) and agent_instance.tools:
                # Check if there are any ADK tools to expose
                adk_tool_list = [t for t in agent_instance.tools if isinstance(t, BaseTool)]
                if adk_tool_list:
                    logger.info(f"Exposing {len(adk_tool_list)} ADK tools via 'execute_adk_tool' A2A skill.")

                    @a2a_skill_decorator(
                        name="execute_adk_tool",
                        description=f"Executes a registered ADK tool. Available tools: {', '.join([t.name for t in adk_tool_list])}",
                        examples=["Execute tool 'some_tool_name' with argument 'arg1'='value1'"] # Generic example
                    )
                    def execute_adk_tool_skill(self, tool_name: str, arguments: dict[str, Any]) -> str:
                        """Generic skill to execute an ADK tool by name with arguments."""
                        logger.info(f"[A2A Skill] Request to execute ADK tool: {tool_name} with args: {arguments}")

                        # Find the ADK tool instance on the bound agent
                        tool_to_call: BaseTool | None = None
                        for tool in self.bound_agent.tools:
                            if isinstance(tool, BaseTool) and tool.name == tool_name:
                                tool_to_call = tool
                                break

                        if not tool_to_call:
                            logger.warning(f"[A2A Skill] ADK tool '{tool_name}' not found.")
                            return f"Error: ADK tool '{tool_name}' not found on this agent."

                        # --- Bridge sync skill call to async ADK tool execution ---
                        async def run_adk_tool_async():
                            try:
                                # ADK tools require ToolContext. We can provide a minimal one or None.
                                # Providing None might limit tool functionality.
                                # Let's try providing None for simplicity first.
                                adk_tool_context = None

                                # Check if the tool has an async run method (most ADK tools should)
                                if hasattr(tool_to_call, 'run_async') and iscoroutinefunction(tool_to_call.run_async):
                                    # Pass arguments directly to run_async
                                    result = await tool_to_call.run_async(args=arguments, tool_context=adk_tool_context)
                                    # Convert result to string for A2A response
                                    if isinstance(result, str): return result
                                    try: return json.dumps(result)
                                    except (TypeError, ValueError): return str(result)
                                elif hasattr(tool_to_call, 'run') and callable(tool_to_call.run):
                                    # Fallback to synchronous run in thread pool
                                    logger.warning(f"ADK tool '{tool_name}' has no run_async, using synchronous run in thread.")
                                    result = await asyncio.to_thread(tool_to_call.run, args=arguments, tool_context=adk_tool_context)
                                    if isinstance(result, str): return result
                                    try: return json.dumps(result)
                                    except (TypeError, ValueError): return str(result)
                                else:
                                     return f"Error: ADK tool '{tool_name}' has no callable run or run_async method."

                            except Exception as e:
                                logger.error(f"[A2A Skill] Error executing ADK tool '{tool_name}': {e}", exc_info=True)
                                return f"Error executing ADK tool {tool_name}: {e}"

                        # Execute the async tool runner
                        try:
                            return asyncio.run(run_adk_tool_async())
                        except RuntimeError:
                            logger.error(f"RuntimeError calling asyncio.run in execute_adk_tool_skill for tool {tool_name}.")
                            return "Error: Could not execute ADK tool asynchronously."

            # --- End of Skill Definitions ---

        # Instantiate the dynamic server class
        try:
             self.a2a_server = DynamicA2AServer(**a2a_server_options)
             logger.info(f"A2A server instance created for agent '{self.amd.name}'.")
             return self.a2a_server
        except Exception as e:
             logger.error(f"Failed to instantiate dynamic A2A Server: {e}", exc_info=True)
             return None


    def run_a2a_server(self, host="0.0.0.0", port=5000, **kwargs):
        """Starts the A2A server (blocking) using the python-a2a run_server function."""
        if not self.a2a_server:
            logger.error("A2A server not initialized. Call setup_a2a_server first.")
            return
        if not A2A_AVAILABLE:
            logger.error("python-a2a library not available. Cannot run A2A server.")
            return

        # Get effective host/port from server instance if set, otherwise use args
        effective_host = getattr(self.a2a_server, 'host', host)
        effective_port = getattr(self.a2a_server, 'port', port)

        logger.info(f"Starting A2A server for agent '{self.amd.name}' via run_server_func on {effective_host}:{effective_port}...")
        try:
            # Call the imported run_server function, passing the agent instance
            run_a2a_server_func(self.a2a_server, host=effective_host, port=effective_port, **kwargs) # This blocks
        except Exception as e:
            logger.error(f"A2A server failed to run: {e}", exc_info=True)

    async def setup_a2a_client(self, target_agent_url: str) -> A2AClient | None:
        """Gets or creates an A2A client for a specific target agent URL using python-a2a."""
        if not A2A_AVAILABLE:
            logger.warning("python-a2a library not installed. Cannot setup A2A client.")
            return None

        async with self.a2a_client_lock:
            if target_agent_url in self.a2a_clients:
                logger.debug(f"Reusing cached A2A client for {target_agent_url}")
                return self.a2a_clients[target_agent_url]

            logger.info(f"Setting up A2A client for target: {target_agent_url}")
            try:
                # python-a2a client likely fetches card on init or first call
                client = A2AClient(base_url=target_agent_url) # Pass the URL directly
                # Verify connection implicitly by getting card (optional, client might do lazy loading)
                # agent_card = await client.get_agent_card() # If method exists
                # logger.info(f"Successfully connected A2A client to agent: {agent_card.name}")
                self.a2a_clients[target_agent_url] = client
                logger.info(f"A2A client created for target: {target_agent_url}")
                return client
            except Exception as e:
                logger.error(f"Failed to setup A2A client for {target_agent_url}: {e}", exc_info=True)
                return None

    async def close_a2a_clients(self):
        """Closes all cached A2A client connections."""
        async with self.a2a_client_lock:
            logger.info(f"Closing {len(self.a2a_clients)} A2A clients.")
            # A2AClient may manage underlying httpx clients automatically.
            # If explicit close needed in future versions, add here.
            # for client in self.a2a_clients.values():
            #     await client.close() # If available
            self.a2a_clients.clear()
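
    # Client-side sketch (hedged): from async code, a client for a peer agent is
    # created once and then reused from the cache; the URL is a placeholder.
    #
    #   client = await agent.setup_a2a_client("http://localhost:5000")
    #   ... use the client ...
    #   await agent.close_a2a_clients()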

    def setup_adk_runner(self, runner_options: dict[str, Any] | None = None):
        """Initializes an ADK runner for this agent (if ADK enabled)."""
        if not ADK_AVAILABLE:
            logger.warning("ADK not available. Cannot setup ADK runner.")
            return None
        if not isinstance(self, LlmAgent):
            logger.error("Agent must inherit from LlmAgent to use ADK runner directly.")
            return None
        if self.adk_runner:
            logger.warning("ADK runner already initialized.")
            return self.adk_runner

        runner_opts = runner_options or {}
        runner_class = runner_opts.pop("runner_class", InMemoryRunner) # Default to InMemory
        app_name = runner_opts.pop("app_name", f"{self.amd.name}_ADKApp")

        if runner_class == InMemoryRunner:
            runner_opts = {}

        logger.info(f"Setting up ADK Runner ({runner_class.__name__}) for app '{app_name}'...")

        try:
             # Pass the agent instance and other options to the runner constructor
            self.adk_runner = runner_class(agent=self, app_name=app_name, **runner_opts)
            self.adk_session_service = self.adk_runner.session_service # Store session service
            logger.info(f"ADK {runner_class.__name__} setup complete for agent '{self.amd.name}'.")
            return self.adk_runner
        except Exception as e:
            logger.error(f"Failed to setup ADK runner: {e}", exc_info=True)
            self.adk_runner = None
            self.adk_session_service = None
            return None
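
    # Runner sketch (hedged): defaults to InMemoryRunner; runner_class and
    # app_name are consumed from runner_options as shown above.
    #
    #   agent.setup_adk_runner({"app_name": "DemoApp"})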


    # --- Core Agent Logic (`a_run`) ---

    async def a_run(self,
                    user_input: str,
                    session_id: Optional[str] = None,
                    persist_history: bool = True,
                    strategy_override: ProcessingStrategy | None = None,
                    kwargs_override: dict[str, Any] | None = None, # For fine-grained control
                    a2a_task_id: Optional[str] = None # Context if called from A2A task
                    ) -> str:
        """
        Main asynchronous execution logic for the agent turn.

        Orchestrates world model updates, state sync, strategy selection,
        execution, cost tracking, and callbacks.
        """
        self.internal_state = InternalAgentState.PROCESSING
        start_time = time.monotonic()
        session_id = session_id or "default" # Use 'default' if none provided
        response = "Error: Processing failed." # Default error
        turn_cost = 0.0
        span = None # OTel span

        if not self.tracer: self._setup_telemetry() # Ensure tracer exists

        try:
            with self.tracer.start_as_current_span(f"Agent Run: {self.amd.name}", attributes={"session_id": session_id}) as span:

                # Ensure session history list exists
                if session_id not in self.message_history:
                    logger.debug(f"Initializing history for session: {session_id}")
                    self.message_history[session_id] = []

                logger.info(f"--- Agent Run Start (Session: {session_id}) ---")
                span.add_event("Agent run started")
                logger.info(f"User Input: {user_input[:100]}...")
                span.set_attribute("user_input", user_input[:500]) # Log truncated input

                # 0. Get ADK Session State (if ADK enabled and syncing)
                adk_session_state = None
                if self.sync_adk_state and self.adk_session_service:
                    try:
                        # ADK SessionService methods are typically synchronous
                        # Run in threadpool to avoid blocking
                        adk_session = await asyncio.to_thread(
                             self.adk_session_service.get_session,
                             app_name=self.adk_runner.app_name, # Assuming runner is set if syncing
                             user_id=self.amd.user_id or "adk_user", # Needs consistent user ID
                             session_id=session_id
                        )
                        if adk_session:
                            adk_session_state = adk_session.state
                        else:
                            logger.warning(f"ADK Session '{session_id}' not found for state sync.")
                            # Optionally create session here? Be careful about race conditions.
                    except Exception as sync_e:
                        logger.error(f"Error getting ADK session state for sync: {sync_e}")

                # 1. Update World Model & Sync State (Run *before* strategy selection)
                # flow_world_model is now responsible for syncing *from* ADK state initially
                await self.flow_world_model(user_input, session_id, adk_session_state)
                span.add_event("World model updated")

                # 2. Prepare message history for this turn
                current_turn_messages = self._prepare_llm_messages(user_input, session_id)
                span.set_attribute("history_length", len(current_turn_messages) -1) # Exclude current input

                # 3. Determine Processing Strategy
                if strategy_override:
                    strategy = strategy_override
                    strategy_reasoning = "Strategy overridden by caller."
                    logger.info(f"Strategy forced by override: {strategy.value}")
                else:
                    strategy, strategy_reasoning = self._determine_strategy_heuristic(user_input, current_turn_messages)
                    logger.info(f"Strategy Selected: {strategy.value} (Reason: {strategy_reasoning})")
                span.set_attribute("selected_strategy", strategy.value)
                span.set_attribute("strategy_reasoning", strategy_reasoning)


                # --- Prepare kwargs for execution based on strategy ---
                exec_kwargs = kwargs_override or {}
                exec_kwargs['session_id'] = session_id
                exec_kwargs['user_input'] = user_input
                exec_kwargs['current_turn_messages'] = current_turn_messages
                exec_kwargs['adk_session_state'] = adk_session_state # Pass state for potential use/update


                # 4. Execute Selected Strategy
                logger.info(f"Executing strategy: {strategy.value}")
                if strategy == ProcessingStrategy.ADK_RUN:
                    if ADK_AVAILABLE and self.adk_runner:
                        response = await self._execute_adk_run(**exec_kwargs)
                    else:
                        logger.warning("ADK_RUN strategy selected, but ADK runner not available/configured. Falling back.")
                        # Fallback strategy? Maybe DIRECT_LLM?
                        strategy = ProcessingStrategy.DIRECT_LLM
                        response = await self._execute_direct_llm(**exec_kwargs)

                elif strategy == ProcessingStrategy.A2A_CALL:
                    if A2A_AVAILABLE:
                        response = await self._execute_a2a_call(**exec_kwargs)
                    else:
                        logger.warning("A2A_CALL strategy selected, but A2A not available. Falling back.")
                        strategy = ProcessingStrategy.DIRECT_LLM
                        response = await self._execute_direct_llm(**exec_kwargs)

                else: # Default: DIRECT_LLM
                    response = await self._execute_direct_llm(**exec_kwargs)

                span.set_attribute("raw_response_length", len(response))
                span.add_event("Strategy execution complete")

                # 5. Persist History (if successful and enabled)
                # Add assistant response to history
                if persist_history and not response.startswith("Error:"):
                     self._add_to_history(session_id, LLMMessage(role="assistant", content=response).to_dict())

                # 6. Sync World Model *back* to ADK State (if changed and enabled)
                if self.sync_adk_state and adk_session_state is not None:
                    try:
                        self.world_model.sync_to_adk_state(adk_session_state)
                        span.add_event("ADK state synchronized and updated")
                    except Exception as sync_e:
                         logger.error(f"Error syncing/updating ADK session state: {sync_e}")
                         span.record_exception(sync_e)

                # 7. Track Cost (using last_llm_result if available)
                if self.last_llm_result:
                    try:
                        cost = completion_cost(completion_response=self.last_llm_result, model=self.amd.model)
                        if cost:
                            turn_cost = cost
                            self.total_cost += turn_cost
                            logger.info(f"Turn Cost: ${turn_cost:.6f}, Total Cost: ${self.total_cost:.6f}")
                            span.set_attribute("llm_cost", turn_cost)
                            span.set_attribute("total_agent_cost", self.total_cost)
                        self.last_llm_result = None # Clear after use
                    except Exception as cost_e:
                        logger.warning(f"Failed to calculate cost: {cost_e}")
                        span.add_event("Cost calculation failed", attributes={"error": str(cost_e)})


                # 8. Run Post Callback
                if self.post_run_callback and not response.startswith("Error:"):
                    try:
                        if iscoroutinefunction(self.post_run_callback):
                            await self.post_run_callback(session_id, response, turn_cost)
                        else:
                            self.post_run_callback(session_id, response, turn_cost)
                        span.add_event("Post-run callback executed")
                    except Exception as cb_e:
                        logger.error(f"Post-run callback failed: {cb_e}", exc_info=True)
                        span.record_exception(cb_e)


                logger.info(f"Agent Run finished in {time.monotonic() - start_time:.2f}s. Response: {response[:100]}...")

        except Exception as e:
            logger.error(f"Error during agent run (Session: {session_id}): {e}", exc_info=True)
            self.internal_state = InternalAgentState.ERROR
            response = f"Error: An internal error occurred during processing: {str(e)}"
            if span:
                 span.set_status(trace.Status(trace.StatusCode.ERROR, f"Agent run failed: {e}"))
                 span.record_exception(e)
        finally:
            self.internal_state = InternalAgentState.IDLE
            if span: span.end() # Ensure span is closed
            logger.info(f"--- Agent Run End (Session: {session_id}) ---")

        return str(response) # Ensure string output
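
    # Turn sketch (hedged): a_run is the primary async entry point; session IDs
    # partition history, and strategy_override forces a specific execution path.
    #
    #   reply = await agent.a_run("Hello!", session_id="user-42")
    #   reply = await agent.a_run("2+2?", strategy_override=ProcessingStrategy.DIRECT_LLM)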

    def run(self, user_input: str, session_id: Optional[str] = None, **kwargs) -> str:
        """Synchronous wrapper for a_run."""
        try:
            # get_event_loop() is deprecated in 3.10+, use get_running_loop() or new_event_loop()
            try:
                asyncio.get_running_loop()
                # If loop is running, cannot use asyncio.run. Need to schedule and wait.
                # This is complex to get right universally (e.g., in notebooks vs servers).
                # Simplest approach for sync call from sync context is asyncio.run()
                # If called from async context, user should await a_run() directly.
                logger.warning("Synchronous 'run' called from a running event loop. "
                               "This might block the loop. Consider using 'await a_run'.")
                # Fallback to basic run, may error if loop is running
                return asyncio.run(self.a_run(user_input, session_id=session_id, **kwargs))
            except RuntimeError: # No running event loop
                 return asyncio.run(self.a_run(user_input, session_id=session_id, **kwargs))
        except Exception as e:
            logger.error(f"Error in synchronous run wrapper: {e}", exc_info=True)
            return f"Error: Failed to execute synchronous run: {e}"

    # --- Strategy Determination ---

    def _determine_strategy_heuristic(self, user_input: str, messages: list[dict]) -> tuple[ProcessingStrategy, str]:
        """Determines the processing strategy using heuristics (faster than LLM)."""
        # 1. Check for keywords indicating specific needs
        input_lower = user_input.lower()
        # Example Keywords:
        code_keywords = {"execute", "run code", "python", "calculate", "script"}
        search_keywords = {"search", "google", "find information", "what is", "who is"}
        agent_keywords = {"ask agent", "tell agent", "delegate to"} # Keywords for A2A/MCP delegation
        tool_keywords = {"use tool", "run tool"} # Keywords for specific tool use

        # 2. Check Agent Capabilities (Tools, Servers, Clients)
        has_adk_tools = ADK_AVAILABLE and isinstance(self, LlmAgent) and bool(self.tools)
        has_adk_code_executor = ADK_AVAILABLE and isinstance(self, LlmAgent) and self.code_executor is not None
        can_do_adk_search = any(isinstance(t, type(adk_google_search) | AdkVertexAiSearchTool) for t in getattr(self, 'tools', []))
        can_do_a2a = A2A_AVAILABLE and bool(self.a2a_clients) # Check if clients configured
        # MCP check relies on tools being added via MCPToolset in ADK
        has_mcp_tools = has_adk_tools and any(isinstance(t, BaseTool) and getattr(t, '_is_mcp_tool', False) for t in self.tools)  # Heuristic; currently unused in the decision below


        # --- Strategy Logic ---
        # Priority: ADK (if tools/code/search needed) > A2A (if delegation requested) > Direct LLM

        # ADK: If code execution or search is explicitly requested or implied, or specific ADK tools mentioned
        if ADK_AVAILABLE and self.adk_runner:
            if has_adk_code_executor and any(kw in input_lower for kw in code_keywords):
                return ProcessingStrategy.ADK_RUN, "Input suggests code execution, using ADK."
            if can_do_adk_search and any(kw in input_lower for kw in search_keywords):
                 return ProcessingStrategy.ADK_RUN, "Input suggests web/data search, using ADK."
            # Check if input mentions names of specific ADK tools
            if has_adk_tools:
                tool_names = {t.name.lower() for t in self.tools}
                if any(f" {name} " in input_lower for name in tool_names) or any(kw in input_lower for kw in tool_keywords):
                     return ProcessingStrategy.ADK_RUN, "Input mentions specific ADK tool or requests tool use."
            # General ADK case: If ADK is primary mode and input isn't trivial
            if len(user_input.split()) > 5: # Simple heuristic for non-trivial input
                # If ADK tools exist, assume ADK might be needed for planning
                if has_adk_tools:
                    return ProcessingStrategy.ADK_RUN, "Complex input and ADK tools available, using ADK planning."
                # If only basic LLM agent, still might use ADK runner for session mgmt? Check config.
                # Defaulting to DIRECT_LLM if no specific ADK features seem required.

        # A2A: If delegation is requested and A2A clients are available
        if can_do_a2a and any(kw in input_lower for kw in agent_keywords):
            # TODO: Could use an LLM here to extract the target agent if multiple clients exist
            return ProcessingStrategy.A2A_CALL, "Input suggests delegating to another agent."

        # Fallback: Direct LLM
        return ProcessingStrategy.DIRECT_LLM, "Input seems suitable for direct LLM processing."
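
    # Heuristic sketch (hedged): with a code executor configured, an input such
    # as "run code to calculate 2**10" resolves to ADK_RUN; with an A2A client
    # cached, "ask agent to summarize this" resolves to A2A_CALL; anything else
    # falls through to DIRECT_LLM.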


    # --- Strategy Execution Helpers ---

    def _prepare_llm_messages(self, user_input: str, session_id: str) -> list[dict]:
        """Prepares the list of messages for the LLM call, including history and system prompts."""
        session_history = self.message_history.get(session_id, [])

        # Construct message list
        messages: list[dict] = []
        messages.extend(self.construct_initial_prompts()) # System/world model/tool prompts
        # Add history (ensure alternating roles if possible, handle potential issues)
        messages.extend(session_history)
        # Add current user input
        messages.append(LLMMessage(role="user", content=user_input).to_dict())

        # Trim messages based on token count or turn limit
        trimmed_messages = self._trim_messages(messages)

        # Add user input to persistent history *before* LLM call
        # Note: assistant response added *after* successful call in a_run
        self._add_to_history(session_id, LLMMessage(role="user", content=user_input).to_dict())

        return trimmed_messages
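
    # Trimming sketch (hedged): history is bounded by max_history_turns, or by
    # token count when max_history_tokens is set, using the chosen trim_strategy.
    #
    #   agent.max_history_tokens = 4000
    #   agent.trim_strategy = "litellm"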

    async def _execute_direct_llm(self, current_turn_messages: list[dict], session_id: str, **kwargs) -> str:
        """Executes a direct call to the LLM using LiteLLM."""
        logger.debug("Executing direct LLM call...")
        if not current_turn_messages: return "Error: No messages prepared for LLM."
        try:
            response_content = await self.a_run_llm_completion(current_turn_messages)
            return response_content
        except Exception as e:
            logger.error(f"Direct LLM execution failed: {e}", exc_info=True)
            return f"Error during LLM generation: {e}"

    async def _execute_adk_run(self, user_input: str, session_id: str, adk_session_state: State | None, **kwargs) -> str:
        """Executes the agent's logic using the configured ADK runner."""
        if not self.adk_runner or not self.adk_session_service:
            return "Error: ADK Runner or Session Service is not configured for this agent."

        logger.debug(f"Executing ADK run for session {session_id}...")
        final_response_text = "Error: ADK processing did not yield a final textual response."
        # Use user_id from AMD if available, default otherwise
        user_id = self.amd.user_id or "adk_user"
        app_name = self.adk_runner.app_name
        all_events_str: list[str] = []  # Collected event dumps; also used for error reporting below

        try:
            # 1. Ensure ADK session exists
            try:
                # Check and potentially create session (synchronous, run in thread)
                session_exists = await asyncio.to_thread(
                    self.adk_session_service.get_session, app_name=app_name, user_id=user_id, session_id=session_id
                )
                if not session_exists:
                     logger.info(f"Creating ADK session {session_id} for user {user_id} in app {app_name}")
                     # Pass initial state from World Model if syncing
                     initial_state = self.world_model.to_dict() if self.sync_adk_state else {}
                     await asyncio.to_thread(
                         self.adk_session_service.create_session,
                         app_name=app_name, user_id=user_id, session_id=session_id,
                         state=initial_state
                     )
                elif adk_session_state is None and self.sync_adk_state:
                    # If session existed but we couldn't get state earlier, try again
                     session = await asyncio.to_thread(self.adk_session_service.get_session, app_name=app_name, user_id=user_id, session_id=session_id)
                     if session: adk_session_state = session.state

            except Exception as session_e:
                logger.error(f"Failed to ensure ADK session {session_id}: {session_e}", exc_info=True)
                return f"Error setting up ADK session: {session_e}"

            # 2. Prepare ADK input (handle multi-modal later)
            # Assuming user_input is text for now
            adk_input_content = Content(role='user', parts=[Part(text=user_input)])

            # 3. Execute ADK run_async
            async for event in self.adk_runner.run_async(
                user_id=user_id, session_id=session_id, new_message=adk_input_content):

                # Log event details (optional, can be verbose)
                try:
                    event_dict = event.model_dump(exclude_none=True)
                    all_events_str.append(json.dumps(event_dict, default=str)) # Serialize complex types
                    logger.debug(f"ADK Event ({event.author}): {all_events_str[-1]}")
                except Exception as log_e:
                    logger.debug(f"ADK Event ({event.author}): [Error logging event details: {log_e}]")

                # Call progress callback
                if self.progress_callback:
                     try:
                         progress_data = {"type": "adk_event", "event": event.model_dump(exclude_none=True)}
                         if iscoroutinefunction(self.progress_callback): await self.progress_callback(progress_data)
                         else: self.progress_callback(progress_data)
                     except Exception as cb_e: logger.warning(f"Progress callback failed for ADK event: {cb_e}")

                # Check for Human-in-Loop triggers (example). Pausing and resuming
                # the run_async loop is required here, and HIL is complex with async
                # generators, so this remains a placeholder for now:
                #
                # if event.actions and event.actions.request_human_input:
                #     if self.human_in_loop_callback:
                #         logger.info(f"ADK requesting human input: {event.actions.request_human_input.reason}")
                #         # human_response = await self.human_in_loop_callback(...)
                #         # Injecting the response back into the ADK runner is not straightforward;
                #         # we could potentially send an error response back instead.
                #         logger.warning("Human-in-Loop requested by ADK, but interaction is not implemented.")
                #     else:
                #         logger.warning("ADK requested human input, but no HIL callback is configured.")


                # Extract final textual response
                if event.is_final_response():
                    # Prioritize text part
                    if event.content and event.content.parts:
                        text_parts = [p.text for p in event.content.parts if hasattr(p, 'text')]
                        if text_parts:
                            final_response_text = "\n".join(text_parts).strip()
                        else: # Handle other content types if needed (e.g., function call results as final)
                            # For now, just serialize the first part if no text found
                            final_response_text = str(event.content.parts[0]) if event.content.parts else "ADK finished with non-text content."
                    elif event.actions and event.actions.escalate:
                        final_response_text = f"Error: Agent escalated: {event.error_message or 'No specific message.'}"
                    elif event.error_message:
                         final_response_text = f"Error: ADK processing failed: {event.error_message}"
                    else:
                         final_response_text = "ADK processing finished without a clear textual response."
                    break # Stop processing events

            # 4. Update World Model from final ADK state (if syncing)
            # This happens *after* the run completes, the sync in a_run updates the persisted state.
            if self.sync_adk_state and adk_session_state is not None:
                 # Fetch potentially updated state after run completion
                 try:
                     final_session = await asyncio.to_thread(self.adk_session_service.get_session, app_name=app_name, user_id=user_id, session_id=session_id)
                     if final_session:
                         self.world_model.sync_from_adk_state(final_session.state)
                     else:
                         logger.warning(f"Could not fetch final ADK state for session {session_id} after run.")
                 except Exception as sync_e:
                     logger.error(f"Error fetching final ADK state: {sync_e}")


            logger.debug("ADK run finished.")
            return final_response_text

        except Exception as e:
            logger.error(f"ADK execution failed: {e}", exc_info=True)
            # Return partial events log on error for debugging
            events_preview = "\n".join(all_events_str[:5])
            return f"Error during ADK processing: {e}\nFirst Events:\n{events_preview}"

    async def _execute_a2a_call(self, user_input: str, session_id: str, **kwargs) -> str:
        """Executes a call to another agent via A2A using python-a2a and waits for the result."""

        client = None
        task_id = None

        if not A2A_AVAILABLE: return "Error: python-a2a library not available."

        logger.debug("Executing A2A call...")

        target_agent_url = kwargs.get('target_a2a_agent_url')
        task_prompt = kwargs.get('a2a_task_prompt', user_input)

        if not target_agent_url:
            if len(self.a2a_clients) == 1:
                target_agent_url = list(self.a2a_clients.keys())[0]
                logger.info(f"Using only available A2A client target: {target_agent_url}")
            else:
                 return "Error: Target A2A agent URL not specified and multiple clients configured."
        try:
            client = await self.setup_a2a_client(target_agent_url)
            if not client:
                return f"Error: Could not connect to A2A agent at {target_agent_url}"

            task_id = str(uuid.uuid4())
            a2a_session_id = f"a2a_{session_id}_{task_id[:8]}"

            logger.info(f"Sending A2A task '{task_id}' to {target_agent_url}...")

            # --- Call python-a2a client's task sending method ---
            # The library might have a high-level `create_task` or similar.
            # Let's assume a `send_task` method exists that takes message content.
            # We construct the message payload expected by the library.
            # This structure might need adjustment based on python-a2a's specifics.
            message_payload = {
                "role": "user", # Assuming MessageRole.USER maps to "user"
                "content": {
                    "type": "text", # Assuming TextContent maps to this
                    "text": task_prompt
                 }
            }
            # The client method might take id/sessionId separately or as part of a task object
            # Assuming a method signature like: send_task(message: Dict, task_id: str, session_id: str)
            # This is an *assumption* based on typical A2A needs.
            if hasattr(client, 'send_task'):
                initial_task_info = await client.send_task(
                    message=message_payload,
                    task_id=task_id,
                    session_id=a2a_session_id
                ) # Adjust call based on actual method signature
            elif hasattr(client, 'create_task'): # Alternative common pattern
                 initial_task_info = await client.create_task(
                     message=message_payload,
                     task_id=task_id,
                     session_id=a2a_session_id
                 )
            else:
                 # Fallback to 'ask' if specific task methods are unavailable (less control)
                 logger.warning("A2A client lacks specific send_task/create_task method, using high-level 'ask'. Polling might not work.")
                 # 'ask' likely blocks and returns the final result directly
                 response_text = await client.ask(task_prompt, session_id=a2a_session_id)
                 return response_text


            # --- Process initial response and Poll ---
            # Check the structure of initial_task_info (might be a Task object, dict, etc.)
            # Extract initial state if possible
            initial_state = TaskState.SUBMITTED # Default if state not returned immediately
            if isinstance(initial_task_info, dict) and initial_task_info.get('status'):
                initial_state_val = initial_task_info['status'].get('state')
                if initial_state_val: initial_state = TaskState(initial_state_val) # Convert string to Enum
            elif hasattr(initial_task_info, 'status') and hasattr(initial_task_info.status, 'state'):
                 initial_state = initial_task_info.status.state

            logger.info(f"A2A task submitted (ID: {task_id}). Initial State: {initial_state}")

            # Don't poll if initial state is already final (unlikely but possible)
            if initial_state in (TaskState.COMPLETED, TaskState.FAILED, TaskState.CANCELLED):
                 logger.warning(f"A2A task {task_id} already in final state {initial_state} after submission.")
                 # Need to extract result from initial_task_info here
                 # ... logic to extract result based on initial_task_info structure ...
                 return f"Task finished immediately with state {initial_state}." # Placeholder

            self.internal_state = InternalAgentState.WAITING_FOR_TOOL
            final_result = await self._poll_a2a_task(client, task_id, target_agent_url)
            self.internal_state = InternalAgentState.PROCESSING
            return final_result

        except TimeoutError:
             logger.error(f"A2A task {task_id} timed out after {self.a2a_poll_timeout}s.")
             # Attempt to cancel the timed-out task
             cancel_response = "no client available"
             if client:
                 cancel_response = await client.cancel_task(task_id=task_id)
             return f"Error: A2A task timed out waiting for result from {target_agent_url} (cancellation: {cancel_response})."
        except Exception as e:
            logger.error(f"A2A execution failed: {e}", exc_info=True)
            return f"Error during A2A call: {e}"

    async def _poll_a2a_task(self, client: A2AClient, task_id: str, target_url: str) -> str:
        """Polls the GetTask endpoint using python-a2a client until a final state."""
        if not hasattr(client, 'get_task'):
             raise NotImplementedError(f"A2A client for {target_url} does not support 'get_task' for polling.")

        logger.debug(f"Polling A2A task {task_id} on {target_url}...")
        start_time = time.monotonic()

        while time.monotonic() - start_time < self.a2a_poll_timeout:
            try:
                # Assume get_task takes task_id (and potentially historyLength)
                task_details = await client.get_task(task_id=task_id, history_length=1)

                # --- Parse the response (structure depends on python-a2a implementation) ---
                current_state = TaskState.UNKNOWN
                final_text = f"A2A Task {task_id} finished."
                error_message = None

                # Example parsing assuming task_details is dict-like or object-like
                status_info = None
                if isinstance(task_details, dict):
                    status_info = task_details.get('status')
                elif hasattr(task_details, 'status'):
                    status_info = task_details.status

                if status_info:
                    state_val = status_info.get('state') if isinstance(status_info, dict) else getattr(status_info, 'state', None)
                    if state_val:
                        try:
                            current_state = TaskState(state_val) # Convert string to Enum
                        except ValueError:
                             logger.warning(f"Received unknown task state '{state_val}' for task {task_id}")

                    logger.debug(f"A2A task {task_id} current state: {current_state}")

                    # Call progress callback
                    if self.progress_callback:
                         # ... (progress callback logic remains the same) ...
                        pass

                    # Check for final state
                    if current_state in (TaskState.COMPLETED, TaskState.FAILED, TaskState.CANCELLED):
                        logger.info(f"A2A task {task_id} reached final state: {current_state}")

                        # Extract final result from artifacts
                        artifacts = task_details.get('artifacts') if isinstance(task_details, dict) else getattr(task_details, 'artifacts', None)
                        if isinstance(artifacts, list) and artifacts:
                            # Simple extraction: assume first artifact, first part is text
                            try:
                                parts = artifacts[0].get('parts') if isinstance(artifacts[0], dict) else getattr(artifacts[0], 'parts', [])
                                if isinstance(parts, list) and parts:
                                    text_part = parts[0].get('text') if isinstance(parts[0], dict) else getattr(parts[0], 'text', None)
                                    if text_part:
                                        final_text = str(text_part).strip()
                            except Exception as parse_e:
                                logger.warning(f"Could not parse artifacts for task {task_id}: {parse_e}")
                                final_text = "[Could not parse final artifact]"

                        # Handle failed/cancelled states
                        if current_state == TaskState.FAILED:
                            # Try to extract error message from status
                            status_message_info = status_info.get('message') if isinstance(status_info, dict) else getattr(status_info, 'message', None)
                            if status_message_info:
                                # Assuming message content is similar structure to artifacts
                                try:
                                     err_content = status_message_info.get('content') if isinstance(status_message_info, dict) else getattr(status_message_info, 'content', None)
                                     if err_content:
                                         error_message = err_content.get('text') if isinstance(err_content, dict) else getattr(err_content, 'text', 'Unknown error')
                            except Exception: pass  # Ignore parsing errors here
                            return f"Error: A2A task failed on {target_url}: {error_message or final_text}"
                        elif current_state == TaskState.CANCELLED:
                            return f"Info: A2A task was cancelled on {target_url}."
                        else: # Completed
                            return final_text

                else:
                    logger.warning(f"A2A get_task for {task_id} returned no status info: {task_details}")

            except APIConnectionError as conn_e:
                 logger.warning(f"Connection error polling A2A task {task_id}: {conn_e}. Retrying...")
            except Exception as e:
                logger.error(f"Error polling A2A task {task_id}: {e}", exc_info=True)
                return f"Error polling A2A task status: {e}"

            await asyncio.sleep(self.a2a_poll_interval)

        raise TimeoutError(f"Polling A2A task {task_id} timed out.")

    # --- Internal Helper Methods ---

    def construct_initial_prompts(self) -> list[dict]:
        """Constructs the initial system/context messages for the LLM prompt."""
        messages = []
        # Base System Prompt
        if self.amd.system_message:
            messages.append(LLMMessage("system", self.amd.system_message).to_dict())

        # World Model Context
        wm_repr = self.world_model.show()
        if wm_repr != "[empty]":
            messages.append(LLMMessage("system", f"Current World State:\n{wm_repr}").to_dict())

        # Capabilities Overview (ADK specific parts depend on LlmAgent inheritance)
        caps = ["LiteLLM (Core LLM access)"]
        if ADK_AVAILABLE and isinstance(self, LlmAgent):
            if self.tools: caps.append("ADK Tools (including potential MCP/A2A wrappers)")
            if self.code_executor: caps.append("ADK Code Execution")
            if any(isinstance(t, type(adk_google_search) | AdkVertexAiSearchTool) for t in getattr(self, 'tools', [])):
                 caps.append("ADK Search")
        if A2A_AVAILABLE and self.a2a_clients: caps.append("A2A Client (delegate to other agents)")
        if self.mcp_server: caps.append("MCP Server (exposes capabilities)")
        if self.a2a_server: caps.append("A2A Server (receives tasks)")

        messages.append(LLMMessage("system", f"Your Capabilities: {', '.join(caps)}.").to_dict())

        # ADK Tool Instructions (if ADK enabled and tools exist)
        if ADK_AVAILABLE and isinstance(self, LlmAgent) and self.tools:
            try:
                # Use ADK's internal method to get schema if possible, otherwise basic list
                tool_schemas = getattr(self, 'tool_schemas', None) # ADK might populate this
                if tool_schemas:
                     tool_list_str = json.dumps(tool_schemas, indent=2)
                     messages.append(LLMMessage("system", f"You have access to the following tools (use FunctionCall format):\n{tool_list_str}").to_dict())
                else: # Fallback to basic list
                    tool_list = "\n".join([f"- {tool.name}: {tool.description or 'No description'}" for tool in self.tools])
                    messages.append(LLMMessage("system", f"You can use the following tools:\n{tool_list}\nRespond with a FunctionCall to use a tool.").to_dict())
            except Exception as e:
                 logger.warning(f"Could not generate detailed ADK tool instructions: {e}")


        # Add specific instructions for A2A delegation if needed
        if A2A_AVAILABLE and self.a2a_clients:
             client_names = list(self.a2a_clients.keys()) # Target URLs act as names here
             messages.append(LLMMessage("system", f"You can delegate tasks to other agents via A2A using their URLs (e.g., {client_names[0]} if available). Indicate clearly if you want to delegate.").to_dict())

        return messages

    def _add_to_history(self, session_id: str, message: dict[str, Any]):
         """Adds a message to the session history, respecting limits."""
         if session_id not in self.message_history:
              self.message_history[session_id] = []
         self.message_history[session_id].append(message)

         # Apply trimming immediately after adding (simpler than doing it before call)
         self.message_history[session_id] = self._trim_messages(self.message_history[session_id])


    def _trim_messages(self, messages: list[dict]) -> list[dict]:
        """Trims message list based on configured strategy (tokens or turns)."""
        if self.max_history_tokens and self.amd.model:
            # Token-based trimming
            max_tokens = self.max_history_tokens
            if self.trim_strategy == "litellm":
                try:
                    trimmed = trim_messages(messages, model=self.amd.model, max_tokens=max_tokens)
                    if len(trimmed) < len(messages):
                        logger.debug(f"Trimmed history from {len(messages)} to {len(trimmed)} messages using LiteLLM token strategy ({max_tokens} tokens).")
                    return trimmed
                except Exception as e:
                    logger.warning(f"LiteLLM trimming failed ({e}), falling back to basic token trim.")
                    # Fallthrough to basic token trim
            # Basic token trim (keep system, remove oldest convo pairs)
            system_msgs = [m for m in messages if m.get('role') == 'system']
            convo_msgs = [m for m in messages if m.get('role') != 'system']
            current_tokens = token_counter(messages=messages, model=self.amd.model)
            while current_tokens > max_tokens and len(convo_msgs) >= 2:
                 convo_msgs = convo_msgs[2:] # Remove oldest pair
                 current_tokens = token_counter(messages=system_msgs + convo_msgs, model=self.amd.model)
            final_messages = system_msgs + convo_msgs
            if len(final_messages) < len(messages):
                 logger.debug(f"Trimmed history from {len(messages)} to {len(final_messages)} messages using basic token strategy ({max_tokens} tokens).")
            return final_messages

        elif self.max_history_turns > 0:
            # Turn-based trimming
            system_msgs = [m for m in messages if m.get('role') == 'system']
            convo_msgs = [m for m in messages if m.get('role') != 'system']
            # Keep last N turns (each turn = user + assistant = 2 messages)
            max_convo_messages = self.max_history_turns * 2
            if len(convo_msgs) > max_convo_messages:
                trimmed_convo = convo_msgs[-max_convo_messages:]
                logger.debug(f"Trimmed history from {len(convo_msgs)//2} to {len(trimmed_convo)//2} turns.")
                return system_msgs + trimmed_convo
            else:
                return messages # No trimming needed
        else:
            # No trimming configured or possible
            logger.warning("History trimming not configured or possible (missing max_tokens/model or max_turns).")
            return messages


    async def a_run_llm_completion(self, llm_messages: list[dict]=None, **kwargs) -> str:
        """Core wrapper around LiteLLM acompletion with error handling, streaming, and cost tracking."""
        if not llm_messages:
            if "messages" in kwargs:
                llm_messages = kwargs.pop("messages")
            if "llm_messages" in kwargs:
                llm_messages = kwargs.pop("llm_messages")
            if not llm_messages:
                logger.warning("a_run_llm_completion called with empty message list.")
                return "Error: No message provided to the model."

        self.print_verbose(f"Running model '{self.amd.model}' with {len(llm_messages)} messages.")
        # self.print_verbose("Messages:", json.dumps(llm_messages, indent=2)) # Very verbose

        # Prepare LiteLLM parameters from AgentModelData and kwargs overrides
        params = {
            'model': self.format_model or self.amd.model,
            'messages': llm_messages,
            'temperature': self.amd.temperature,
            'top_p': self.amd.top_p,
            'top_k': self.amd.top_k,
            'max_tokens': self.amd.max_tokens,
            'stream': self.stream,
            'stop': self.amd.stop_sequence,
            'user': self.amd.user_id,
            'api_base': self.amd.api_base,
            'api_version': self.amd.api_version,
            'api_key': self.amd.api_key,
            'presence_penalty': self.amd.presence_penalty,
            'frequency_penalty': self.amd.frequency_penalty,
            'caching': self.amd.caching,
            'response_format': kwargs.get('response_format'), # For a_format_class
            'tools': kwargs.get('tools'), # For LiteLLM function calling (less common now with ADK)
        }
        # Filter out None values as LiteLLM prefers absence over None for some params
        params = {k: v for k, v in params.items() if v is not None}

        # Add budget manager if present
        if self.amd.budget_manager: params['budget_manager'] = self.amd.budget_manager

        full_response_content = ""
        tool_calls_requested = None # Store tool calls if generated

        try:
            response_object = await acompletion(**params)

            if self.stream:
                collected_chunks = []
                async for chunk in response_object:
                    # Store raw chunk for potential analysis or replay
                    collected_chunks.append(chunk)
                    # Extract text delta
                    chunk_delta = chunk.choices[0].delta.content or ""
                    if chunk_delta:
                        full_response_content += chunk_delta
                        if self.stream_callback:
                             try:
                                 # Provide only the new text chunk
                                 if iscoroutinefunction(self.stream_callback): await self.stream_callback(chunk_delta)
                                 else: self.stream_callback(chunk_delta)
                             except Exception as cb_e:
                                 logger.warning(f"Stream callback failed: {cb_e}")
                    # Check for tool call deltas (less common in streaming)
                    tool_deltas = chunk.choices[0].delta.tool_calls
                    if tool_deltas:
                         logger.warning("Received tool call delta during streaming - handling may be incomplete.")
                         # TODO: Implement robust handling of streaming tool calls if needed

                # After stream, construct a final response object mimicking non-streaming one for cost tracking
                # This is an approximation, LiteLLM might offer better ways.
                final_choice = {"message": {"role": "assistant", "content": full_response_content}}
                # If tool calls were detected during streaming, add them (complex to reconstruct accurately)
                # if reconstructed_tool_calls: final_choice["message"]["tool_calls"] = reconstructed_tool_calls
                self.last_llm_result = {
                    "choices": [{"message": final_choice["message"]}],
                    "model": self.amd.model, # Needed for cost tracking
                    # Usage stats are often missing or zero in streaming chunks, need final value if available
                    "usage": getattr(collected_chunks[-1], 'usage', {"prompt_tokens": 0, "completion_tokens": 0})
                }

            else: # Non-streaming
                self.last_llm_result = response_object # Store the full response
                # Extract content and potential tool calls
                message = response_object.choices[0].message
                full_response_content = message.content or ""
                tool_calls_requested = message.tool_calls # List of ToolCall objects

                # Check if LiteLLM did function/tool calling (different from ADK tools)
                # This path is less likely if using ADK, but supported by LiteLLM
                if tool_calls_requested:
                    logger.info(f"LiteLLM requested {len(tool_calls_requested)} tool calls.")
                    # This requires a separate mechanism to execute these LiteLLM-requested tools
                    # and send back 'tool' role messages in the next turn.
                    # Not implemented here as focus is on ADK/A2A tools.
                    # For now, return a message indicating tool call request.
                    calls_repr = ", ".join([f"{tc.function.name}" for tc in tool_calls_requested])
                    return f"Info: LLM requested tool calls ({calls_repr}). Direct execution not implemented."


            self.print_verbose(f"Model Response: {full_response_content[:100]}...")
            return full_response_content

        except RateLimitError as e:
            logger.error(f"Rate limit error from {self.amd.model}: {e}")
            # Implement backoff/retry? For now, re-raise.
            raise
        except (BadRequestError, APIConnectionError, InternalServerError) as e:
            logger.error(f"API/Server error during LiteLLM call for {self.amd.model}: {e}", exc_info=True)
            raise
        except Exception as e:
            logger.error(f"Unexpected error during LiteLLM completion: {e}", exc_info=True)
            raise

    async def a_format_class(self,
                             pydantic_model: type[BaseModel],
                             prompt: str,
                             message_context: list[dict] | None = None,
                             max_retries: int = 2) -> dict[str, Any]:
        """Uses LiteLLM's response_format feature to get structured JSON output, with retries."""
        logger.debug(f"Formatting prompt for Pydantic model: {pydantic_model.__name__}")
        model_schema = pydantic_model.model_json_schema()

        messages = message_context or []
        # System prompt explaining the task and schema
        messages.append({
            "role": "system",
            "content": f"Your task is to analyze the user's request and extract information into a JSON object.\n"
                       f"Strictly adhere to the following Pydantic schema:\n"
                       f"```json\n{json.dumps(model_schema, indent=2)}\n```\n"
                       f"Guidelines:\n"
                       f"- Analyze the request carefully.\n"
                       f"- Output *only* the JSON object, nothing else (no explanations, apologies, or markdown).\n"
                       f"- Ensure the JSON is valid and conforms exactly to the schema.\n"
                       f"- Omit optional fields if the information is not present in the request."
        })
        messages.append({"role": "user", "content": prompt})

        # Use LiteLLM's JSON mode (requires compatible model/provider)
        response_format_config = {"type": "json_object"}
        # Some providers might need the schema explicitly even in json_object mode
        # response_format_config = {"type": "json_object", "schema": model_schema}

        original_stream_state = self.stream
        self.stream = False # Ensure streaming is off for structured output
        try:
            last_exception = None
            for attempt in range(max_retries + 1):
                try:
                    logger.debug(f"Attempt {attempt + 1}/{max_retries + 1} to get structured JSON.")
                    # Use a potentially faster/cheaper model optimized for JSON tasks if configured?
                    self.format_model = self.format_model_
                    response_text = await self.a_run_llm_completion(messages, response_format=response_format_config)
                    self.format_model = None
                    # Clean and parse the JSON response
                    try:
                        # Basic cleaning: remove potential markdown fences
                        cleaned_response = re.sub(r'^```json\s*|\s*```$', '', response_text.strip(), flags=re.MULTILINE)

                        # Try parsing using Pydantic's TypeAdapter for direct validation
                        adapter = TypeAdapter(pydantic_model)
                        validated_obj = adapter.validate_json(cleaned_response)
                        result_dict = validated_obj.model_dump(mode='json') # Get dict representation

                        logger.debug(f"Successfully formatted and validated JSON: {result_dict}")
                        return result_dict

                    except (json.JSONDecodeError, ValidationError) as e:
                        logger.warning(f"Attempt {attempt + 1} failed: Invalid JSON or schema mismatch. Error: {e}. Response: {response_text[:500]}")
                        last_exception = ValueError(f"LLM response did not match schema after cleaning. Error: {e}. Response: '{response_text[:200]}...'")
                        # Add feedback to the model for retry
                        messages.append({"role": "assistant", "content": response_text}) # Show previous attempt
                        messages.append({"role": "system", "content": f"Your previous response was invalid ({e}). Please try again, ensuring you output *only* valid JSON matching the schema."})

                except Exception as e:
                    logger.error(f"Error during a_format_class (attempt {attempt + 1}): {e}", exc_info=True)
                    last_exception = e
                    # Don't retry on non-parsing errors immediately, could be API issue
                    break

                # Wait before retrying
                if attempt < max_retries:
                     await asyncio.sleep(1.5 ** attempt) # Exponential backoff

            # If all retries fail
            logger.error(f"Failed to get valid structured JSON after {max_retries + 1} attempts.")
            raise last_exception or ValueError("Failed to get structured JSON response from LLM.")

        finally:
            self.stream = original_stream_state # Restore stream setting


    async def flow_world_model(self, text_input: str, session_id: str, adk_session_state: State | None):
        """
        Analyzes input, updates internal WorldModel, and syncs with ADK state if enabled.
        Sync Priority: If ADK state exists, sync *from* it first. Then update based on text.
                     The sync *to* ADK happens after the agent run completes.
        """
        logger.debug(f"Flowing world model based on text: {text_input[:100]}...")

        # 1. Sync FROM ADK State (if enabled and state available)
        if self.sync_adk_state and adk_session_state is not None:
             logger.debug("Syncing World Model FROM ADK session state...")
             self.world_model.sync_from_adk_state(adk_session_state)

        # 2. Update World Model based on Text Input (using LLM)
        # This adds/modifies based on the current turn's input
        # Define Pydantic model for structured update extraction
        current_keys = list(self.world_model.to_dict().keys())
        class WorldModelAdaption(BaseModel):
            action: Literal['add', 'update', 'remove', 'none'] = Field(..., description="Action on the world model.")
            key: str | None = Field(None, description=f"Key to modify/add/remove (e.g., 'user_location', 'task_status'). Existing keys: {current_keys}")
            value: Any | None = Field(None, description="New value (for 'add'/'update'). Should be JSON serializable.")
            reasoning: str = Field(..., description="Why this change (or no change) is needed based on the input.")

        prompt = (f"Analyze the following text and current world state to determine if the agent's world model needs changes.\n"
                  f"Current World State Keys: {current_keys}\n"
                  f"Text Input: ```\n{text_input}\n```\n"
                  f"Decide action, key, value, and reasoning. Focus on factual updates derived *from the text*. Do not hallucinate.")

        try:
            # Use a potentially faster/cheaper model for this classification task
            # Could eventually use a separate AMD config for this call
            adaption_dict = await self.a_format_class(WorldModelAdaption, prompt)
            adaption = WorldModelAdaption(**adaption_dict)

            logger.info(f"World Model Adaption proposed: {adaption.action} on key '{adaption.key}'. Reason: {adaption.reasoning}")

            if adaption.action == 'add' or adaption.action == 'update':
                if adaption.key and adaption.value is not None:
                    self.world_model.set(adaption.key, adaption.value)
                else:
                    logger.warning("World model 'add'/'update' ignored: missing key or value.")
            elif adaption.action == 'remove':
                if adaption.key:
                    self.world_model.remove(adaption.key)
                else:
                    logger.warning("World model 'remove' ignored: missing key.")
            # Else ('none'): do nothing

        except Exception as e:
            logger.warning(f"Failed to determine world model adaption via LLM: {e}. World model may be based only on ADK sync or previous state.")

        # NOTE: Sync TO ADK happens *after* the full agent run in a_run()


    # --- ADK Tool Implementations (Internal Wrappers) ---
    def _ensure_internal_adk_tools(self):
        """Adds essential internal ADK tools if not already present."""
        if not ADK_AVAILABLE or not isinstance(self, LlmAgent):
            return
        if self.tools is None: self.tools = []

        existing_tool_names = {tool.name for tool in self.tools if isinstance(tool, BaseTool)}

        internal_adk_tools = {
            "get_world_model_key": self.adk_tool_world_model_get,
            "show_world_model": self.adk_tool_world_model_show,
        }
        if A2A_AVAILABLE:
            internal_adk_tools["a2a_send_and_wait"] = self.adk_tool_a2a_send_and_wait
            # Add NEW tools
            internal_adk_tools["a2a_send_no_wait"] = self.adk_tool_a2a_send_no_wait
            internal_adk_tools["a2a_get_task_status"] = self.adk_tool_a2a_get_task_status
            internal_adk_tools["a2a_cancel_task"] = self.adk_tool_a2a_cancel_task

        for name, func in internal_adk_tools.items():
            if name not in existing_tool_names:
                try:
                    tool_instance = FunctionTool(func=func) # ADK infers from func signature/docstring
                    self.tools.append(tool_instance)
                    logger.debug(f"Registered internal ADK tool: {name}")
                except Exception as e:
                    logger.warning(f"Failed to register internal ADK tool '{name}': {e}.")

    # --- Existing ADK Tools ---
    async def adk_tool_world_model_get(self, tool_context: ToolContext | None, key: str) -> Any | None:
        """ADK Tool: Retrieves a specific value from the agent's world model."""
        # ... (implementation remains the same) ...
        logger.info(f"[ADK Tool] get_world_model_key called for key: {key}")
        return self.world_model.get(key)

    async def adk_tool_world_model_show(self, tool_context: ToolContext | None) -> str:
        """ADK Tool: Returns a string representation of the agent's entire world model."""
        # ... (implementation remains the same) ...
        logger.info("[ADK Tool] show_world_model called")
        return self.world_model.show()

    async def adk_tool_a2a_send_and_wait(self,
                                         tool_context: ToolContext | None,
                                         target_agent_url: str,
                                         task_prompt: str,
                                         session_id: Optional[str] = None
                                         ) -> str:
        """ADK Tool: Sends a task to another agent via A2A and waits for the final text result."""
        # ... (implementation remains the same, calls _execute_a2a_call) ...
        if not A2A_AVAILABLE: return "Error: python-a2a library not available."
        logger.info(f"[ADK Tool] a2a_send_and_wait called for target: {target_agent_url}")
        tool_session_id = session_id or f"adk_tool_a2a_{uuid.uuid4()}"
        try:
            return await self._execute_a2a_call(
                 user_input=task_prompt,
                 session_id=tool_session_id,
                 target_a2a_agent_url=target_agent_url,
                 a2a_task_prompt=task_prompt
            )
        except Exception as e:
             logger.error(f"[ADK Tool] a2a_send_and_wait failed: {e}", exc_info=True)
             return f"Error executing A2A task via ADK tool: {e}"

        # --- NEW ADK Tools for A2A ---

    async def adk_tool_a2a_send_no_wait(self,
                                        tool_context: ToolContext | None,
                                        target_agent_url: str,
                                        task_prompt: str,
                                        session_id: Optional[str] = None
                                        ) -> str:
        """ADK Tool: Sends a task to another agent via A2A and returns the task ID immediately.

        Args:
            target_agent_url: The full URL of the target A2A agent.
            task_prompt: The natural language prompt or task for the target agent.
            session_id: Optional session ID to use for the A2A interaction.

        Returns:
            The unique ID of the submitted A2A task, or an error message.
        """
        if not A2A_AVAILABLE: return "Error: python-a2a library not available."
        logger.info(f"[ADK Tool] a2a_send_no_wait called for target: {target_agent_url}")

        try:
            client = await self.setup_a2a_client(target_agent_url)
            if not client:
                return f"Error: Could not connect to A2A agent at {target_agent_url}"

            task_id = str(uuid.uuid4())
            a2a_session_id = session_id or f"a2a_tool_nowait_{task_id[:8]}"

            message_payload = {"role": "user", "content": {"type": "text", "text": task_prompt}}

            initial_task_info = None
            if hasattr(client, 'send_task'):
                initial_task_info = await client.send_task(message=message_payload, task_id=task_id,
                                                           session_id=a2a_session_id)
            elif hasattr(client, 'create_task'):
                initial_task_info = await client.create_task(message=message_payload, task_id=task_id,
                                                             session_id=a2a_session_id)
            else:
                return "Error: A2A client does not support send_task or create_task."

            # Check for immediate errors from the submission call
            # Structure depends on python-a2a's return value
            error_info = None
            if isinstance(initial_task_info, dict):
                error_info = initial_task_info.get('error')
            elif hasattr(initial_task_info, 'error'):
                error_info = initial_task_info.error

            if error_info:
                err_msg = error_info.get('message', str(error_info)) if isinstance(error_info, dict) else str(
                    error_info)
                logger.error(f"A2A send_task (no wait) failed immediately: {err_msg}")
                return f"Error submitting A2A task: {err_msg}"
            else:
                logger.info(f"A2A task '{task_id}' submitted successfully (no wait) to {target_agent_url}.")
                return task_id  # Return the ID for later polling/checking

        except Exception as e:
            logger.error(f"[ADK Tool] a2a_send_no_wait failed: {e}", exc_info=True)
            return f"Error sending A2A task (no wait): {e}"

    async def adk_tool_a2a_get_task_status(self,
                                           tool_context: ToolContext | None,
                                           target_agent_url: str,
                                           task_id: str
                                           ) -> dict[str, Any]:
        """ADK Tool: Gets the current status and details of an A2A task.

        Args:
            target_agent_url: The URL of the agent hosting the task.
            task_id: The ID of the task to check.

        Returns:
            A dictionary containing task status details (state, message, artifacts) or an error.
        """
        if not A2A_AVAILABLE: return {"error": "python-a2a library not available."}
        logger.info(f"[ADK Tool] a2a_get_task_status called for task {task_id} on {target_agent_url}")

        try:
            client = await self.setup_a2a_client(target_agent_url)
            if not client:
                return {"error": f"Could not connect to A2A agent at {target_agent_url}"}

            if not hasattr(client, 'get_task'):
                return {"error": f"A2A client for {target_agent_url} does not support 'get_task'."}

            # Get task details from the client
            task_details = await client.get_task(task_id=task_id, history_length=1)  # History=1 gets latest status

            # Parse and return relevant info
            if isinstance(task_details, dict):
                # Basic parsing, adjust based on actual python-a2a structure
                status_info = task_details.get('status', {})
                artifacts = task_details.get('artifacts')
                return {
                    "task_id": task_id,
                    "state": status_info.get('state', 'UNKNOWN'),
                    "status_message": status_info.get('message'),  # Might be complex object
                    "artifacts": artifacts,  # Might be complex list
                    "raw_response": task_details  # Include raw for debugging
                }
            elif hasattr(task_details, 'status'):  # Object-like response
                status_obj = task_details.status
                artifacts_obj = getattr(task_details, 'artifacts', None)
                return {
                    "task_id": task_id,
                    "state": getattr(status_obj, 'state', TaskState.UNKNOWN).value,  # Get enum value
                    "status_message": getattr(status_obj, 'message', None),
                    "artifacts": artifacts_obj,
                    "raw_response": vars(task_details)  # Example conversion
                }
            else:
                return {"error": "Received unexpected response structure from get_task.", "raw_response": task_details}

        except Exception as e:
            # Catch specific errors from python-a2a if they exist (e.g., TaskNotFoundError)
            # if isinstance(e, TaskNotFoundError):
            #    logger.warning(f"[ADK Tool] A2A Task {task_id} not found on {target_agent_url}.")
            #    return {"error": f"Task {task_id} not found."}
            logger.error(f"[ADK Tool] a2a_get_task_status failed: {e}", exc_info=True)
            return {"error": f"Error getting A2A task status: {e}"}

    async def adk_tool_a2a_cancel_task(self,
                                       tool_context: ToolContext | None,
                                       target_agent_url: str,
                                       task_id: str
                                       ) -> dict[str, Any]:
        """ADK Tool: Attempts to cancel an ongoing A2A task.

        Args:
            target_agent_url: The URL of the agent hosting the task.
            task_id: The ID of the task to cancel.

        Returns:
            A dictionary indicating success or failure, possibly with the task's state after cancellation attempt.
        """
        if not A2A_AVAILABLE: return {"error": "python-a2a library not available."}
        logger.info(f"[ADK Tool] a2a_cancel_task called for task {task_id} on {target_agent_url}")

        try:
            client = await self.setup_a2a_client(target_agent_url)
            if not client:
                return {"error": f"Could not connect to A2A agent at {target_agent_url}"}

            if not hasattr(client, 'cancel_task'):
                return {"error": f"A2A client for {target_agent_url} does not support 'cancel_task'."}

            # Call the client's cancel method
            # The response structure depends heavily on the library implementation
            cancel_response = await client.cancel_task(task_id=task_id)

            # Parse response - could be simple success/fail, or updated task state
            if isinstance(cancel_response, dict):
                if 'error' in cancel_response:
                    error_info = cancel_response['error']
                    err_msg = error_info.get('message', str(error_info)) if isinstance(error_info, dict) else str(
                        error_info)
                    logger.warning(f"A2A cancel_task failed for {task_id}: {err_msg}")
                    return {"success": False, "error": err_msg, "raw_response": cancel_response}
                else:
                    # Assume success, response might contain updated task state
                    logger.info(f"A2A task {task_id} cancellation requested successfully.")
                    # Try to extract state if returned
                    state = cancel_response.get('result', {}).get('status', {}).get('state', 'UNKNOWN')
                    return {"success": True, "state_after_request": state, "raw_response": cancel_response}
            elif cancel_response is True:  # Simple boolean success
                return {"success": True, "state_after_request": "UNKNOWN"}
            else:  # Assume object-like or other structure
                # Add parsing based on observed python-a2a behavior
                logger.info(f"A2A task {task_id} cancellation request sent, parsing result.")
                # Example: Check for specific attributes if object is returned
                state = getattr(getattr(getattr(cancel_response, 'result', None), 'status', None), 'state',
                                TaskState.UNKNOWN).value
                return {"success": True, "state_after_request": state,
                        "raw_response": vars(cancel_response) if hasattr(cancel_response, '__dict__') else str(
                            cancel_response)}


        except Exception as e:
            # Catch specific errors like TaskNotFound, TaskNotCancelable if defined by python-a2a
            # if isinstance(e, TaskNotFoundError):
            #    return {"success": False, "error": f"Task {task_id} not found."}
            # if isinstance(e, TaskNotCancelableError):
            #    return {"success": False, "error": f"Task {task_id} is not in a cancelable state."}
            logger.error(f"[ADK Tool] a2a_cancel_task failed: {e}", exc_info=True)
            return {"success": False, "error": f"Error cancelling A2A task: {e}"}

    # async def adk_tool_a2a_get_task(self, tool_context: Optional[ToolContext], target_agent_url: str, task_id: str) -> Dict:
    #     """ADK Tool: Gets the current status and details of an A2A task."""
    #     # Implementation would be similar to _poll_a2a_task but return the status dict directly
    #     pass


    # --- Cost Tracking ---
    def _track_cost(self, response_obj: Any):
        """Updates cost using LiteLLM."""
        if not response_obj: return
        try:
            cost = completion_cost(completion_response=response_obj, model=self.amd.model)
            if cost is not None:
                self.total_cost += cost
                logger.info(f"Turn Cost: ${cost:.6f}, Total Accumulated Cost: ${self.total_cost:.6f}")
            else:
                 logger.debug("Cost calculation returned None (possibly streaming or non-standard response).")
        except Exception as e:
            logger.warning(f"Failed to calculate/track cost: {e}")


    # --- Cleanup ---
    async def close(self):
        """Gracefully close connections and resources."""
        logger.info(f"Closing resources for agent '{self.amd.name}'...")
        # Close A2A resources
        if self.a2a_server and hasattr(self.a2a_server, 'stop'): # Check if server has stop method
             logger.info("Stopping A2A server...")
             try:
                 await self.a2a_server.stop() # Assuming stop is async
             except Exception as e: logger.warning(f"Error stopping A2A server: {e}")
        if hasattr(self, '_a2a_task_manager_instance') and hasattr(self._a2a_task_manager_instance, 'close'):
             logger.info("Closing A2A task manager...")
             await self._a2a_task_manager_instance.close()
        await self.close_a2a_clients()

        # Close MCP server if running
        if self.mcp_server and hasattr(self.mcp_server, 'stop'): # Check for stop method
             logger.info("Stopping MCP server...")
             try:
                 # MCP server run is blocking, stop might need separate mechanism
                 # or be handled by process termination. If stop method exists:
                 # await self.mcp_server.stop() # Assuming async stop
                 logger.warning("MCP server 'stop' might need manual implementation or process signal.")
             except Exception as e: logger.warning(f"Error stopping MCP server: {e}")


        # Close ADK resources (MCPToolset connections managed by exit stack)
        if self.adk_exit_stack:
            logger.info("Closing ADK AsyncExitStack (manages MCPToolset connections)...")
            try:
                await self.adk_exit_stack.aclose()
            except Exception as e:
                logger.warning(f"Error closing ADK exit stack: {e}")

        # Close ADK runner if it has a close method
        if self.adk_runner and hasattr(self.adk_runner, 'close'):
             logger.info("Closing ADK runner...")
             try:
                  # Check if close is async
                 if iscoroutinefunction(self.adk_runner.close):
                     await self.adk_runner.close()
                 else:
                     self.adk_runner.close()
             except Exception as e: logger.warning(f"Error closing ADK runner: {e}")


        logger.info(f"Agent '{self.amd.name}' resource cleanup finished.")

    def print_verbose(self, *args):
        """Conditional logging helper."""
        if self.verbose:
            logger.debug(' '.join(map(str, args)))
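
The A2A tools above are designed to be combined for fire-and-forget delegation: submit a task with a2a_send_no_wait, then poll a2a_get_task_status until a final state is reached. A minimal sketch of that pattern, assuming `agent` is an initialized agent instance (state names and the poll interval are illustrative, not guaranteed by python-a2a):

import asyncio

async def delegate_and_poll(agent, target_url: str, prompt: str) -> dict:
    # Submit without waiting; the tool returns the task ID or an error string.
    task_id = await agent.adk_tool_a2a_send_no_wait(None, target_url, prompt)
    if task_id.startswith("Error"):
        return {"error": task_id}
    while True:
        status = await agent.adk_tool_a2a_get_task_status(None, target_url, task_id)
        if "error" in status:
            return status
        # State casing and values depend on the python-a2a implementation.
        state = str(status.get("state", "UNKNOWN")).upper()
        if state in ("COMPLETED", "FAILED", "CANCELLED"):
            return status
        await asyncio.sleep(2.0)  # fixed interval; production code should also enforce a timeout
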
a_format_class(pydantic_model, prompt, message_context=None, max_retries=2) async

Uses LiteLLM's response_format feature to get structured JSON output, with retries.
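
A hypothetical usage sketch; the Pydantic model and the `agent` instance here are assumptions for illustration, not part of the API:

from pydantic import BaseModel, Field

class ContactInfo(BaseModel):
    name: str = Field(..., description="Full name of the person")
    email: str | None = Field(None, description="Email address, if mentioned")

async def extract_contact(agent, text: str) -> dict:
    # Returns a dict validated against ContactInfo, or raises after max_retries.
    return await agent.a_format_class(ContactInfo, f"Extract contact details from: {text}")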

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def a_format_class(self,
                         pydantic_model: type[BaseModel],
                         prompt: str,
                         message_context: list[dict] | None = None,
                         max_retries: int = 2) -> dict[str, Any]:
    """Uses LiteLLM's response_format feature to get structured JSON output, with retries."""
    logger.debug(f"Formatting prompt for Pydantic model: {pydantic_model.__name__}")
    model_schema = pydantic_model.model_json_schema()

    messages = message_context or []
    # System prompt explaining the task and schema
    messages.append({
        "role": "system",
        "content": f"Your task is to analyze the user's request and extract information into a JSON object.\n"
                   f"Strictly adhere to the following Pydantic schema:\n"
                   f"```json\n{json.dumps(model_schema, indent=2)}\n```\n"
                   f"Guidelines:\n"
                   f"- Analyze the request carefully.\n"
                   f"- Output *only* the JSON object, nothing else (no explanations, apologies, or markdown).\n"
                   f"- Ensure the JSON is valid and conforms exactly to the schema.\n"
                   f"- Omit optional fields if the information is not present in the request."
    })
    messages.append({"role": "user", "content": prompt})

    # Use LiteLLM's JSON mode (requires compatible model/provider)
    response_format_config = {"type": "json_object"}
    # Some providers might need the schema explicitly even in json_object mode
    # response_format_config = {"type": "json_object", "schema": model_schema}

    original_stream_state = self.stream
    self.stream = False # Ensure streaming is off for structured output
    try:
        last_exception = None
        for attempt in range(max_retries + 1):
            try:
                logger.debug(f"Attempt {attempt + 1}/{max_retries + 1} to get structured JSON.")
                # Use a potentially faster/cheaper model optimized for JSON tasks if configured?
                self.format_model = self.format_model_
                response_text = await self.a_run_llm_completion(messages, response_format=response_format_config)
                self.format_model = None
                # Clean and parse the JSON response
                try:
                    # Basic cleaning: remove potential markdown fences
                    cleaned_response = re.sub(r'^```json\s*|\s*```$', '', response_text.strip(), flags=re.MULTILINE)

                    # Try parsing using Pydantic's TypeAdapter for direct validation
                    adapter = TypeAdapter(pydantic_model)
                    validated_obj = adapter.validate_json(cleaned_response)
                    result_dict = validated_obj.model_dump(mode='json') # Get dict representation

                    logger.debug(f"Successfully formatted and validated JSON: {result_dict}")
                    return result_dict

                except (json.JSONDecodeError, ValidationError) as e:
                    logger.warning(f"Attempt {attempt + 1} failed: Invalid JSON or schema mismatch. Error: {e}. Response: {response_text[:500]}")
                    last_exception = ValueError(f"LLM response did not match schema after cleaning. Error: {e}. Response: '{response_text[:200]}...'")
                    # Add feedback to the model for retry
                    messages.append({"role": "assistant", "content": response_text}) # Show previous attempt
                    messages.append({"role": "system", "content": f"Your previous response was invalid ({e}). Please try again, ensuring you output *only* valid JSON matching the schema."})

            except Exception as e:
                logger.error(f"Error during a_format_class (attempt {attempt + 1}): {e}", exc_info=True)
                last_exception = e
                # Don't retry on non-parsing errors immediately, could be API issue
                break

            # Wait before retrying
            if attempt < max_retries:
                 await asyncio.sleep(1.5 ** attempt) # Exponential backoff

        # If all retries fail
        logger.error(f"Failed to get valid structured JSON after {max_retries + 1} attempts.")
        raise last_exception or ValueError("Failed to get structured JSON response from LLM.")

    finally:
        self.stream = original_stream_state # Restore stream setting
a_run(user_input, session_id=None, persist_history=True, strategy_override=None, kwargs_override=None, a2a_task_id=None) async

Main asynchronous execution logic for the agent turn.

Orchestrates world model updates, state sync, strategy selection, execution, cost tracking, and callbacks.
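
A hypothetical usage sketch, assuming `agent` is an initialized agent instance; the session ID is caller-defined and keys both conversation history and ADK state sync:

import asyncio

async def main(agent):
    reply = await agent.a_run(
        "Summarize what you currently know about the task.",
        session_id="demo-session",  # history and world-model sync are per session
        persist_history=True,
    )
    print(reply)

# asyncio.run(main(agent))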

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def a_run(self,
                user_input: str,
                session_id: Optional[str] = None,
                persist_history: bool = True,
                strategy_override: ProcessingStrategy | None = None,
                kwargs_override: dict[str, Any] | None = None, # For fine-grained control
                a2a_task_id: Optional[str] = None # Context if called from A2A task
                ) -> str:
    """
    Main asynchronous execution logic for the agent turn.

    Orchestrates world model updates, state sync, strategy selection,
    execution, cost tracking, and callbacks.
    """
    self.internal_state = InternalAgentState.PROCESSING
    start_time = time.monotonic()
    session_id = session_id or "default" # Use 'default' if none provided
    response = "Error: Processing failed." # Default error
    turn_cost = 0.0
    span = None # OTel span

    if not self.tracer: self._setup_telemetry() # Ensure tracer exists

    try:
        with self.tracer.start_as_current_span(f"Agent Run: {self.amd.name}", attributes={"session_id": session_id}) as span:

            # Ensure session history list exists
            if session_id not in self.message_history:
                logger.debug(f"Initializing history for session: {session_id}")
                self.message_history[session_id] = []

            logger.info(f"--- Agent Run Start (Session: {session_id}) ---")
            span.add_event("Agent run started")
            logger.info(f"User Input: {user_input[:100]}...")
            span.set_attribute("user_input", user_input[:500]) # Log truncated input

            # 0. Get ADK Session State (if ADK enabled and syncing)
            adk_session_state = None
            if self.sync_adk_state and self.adk_session_service:
                try:
                    # ADK SessionService methods are typically synchronous
                    # Run in threadpool to avoid blocking
                    adk_session = await asyncio.to_thread(
                         self.adk_session_service.get_session,
                         app_name=self.adk_runner.app_name, # Assuming runner is set if syncing
                         user_id=self.amd.user_id or "adk_user", # Needs consistent user ID
                         session_id=session_id
                    )
                    if adk_session:
                        adk_session_state = adk_session.state
                    else:
                        logger.warning(f"ADK Session '{session_id}' not found for state sync.")
                        # Optionally create session here? Be careful about race conditions.
                except Exception as sync_e:
                    logger.error(f"Error getting ADK session state for sync: {sync_e}")

            # 1. Update World Model & Sync State (Run *before* strategy selection)
            # flow_world_model is now responsible for syncing *from* ADK state initially
            await self.flow_world_model(user_input, session_id, adk_session_state)
            span.add_event("World model updated")

            # 2. Prepare message history for this turn
            current_turn_messages = self._prepare_llm_messages(user_input, session_id)
            span.set_attribute("history_length", len(current_turn_messages) -1) # Exclude current input

            # 3. Determine Processing Strategy
            if strategy_override:
                strategy = strategy_override
                strategy_reasoning = "Strategy overridden by caller."
                logger.info(f"Strategy forced by override: {strategy.value}")
            else:
                strategy, strategy_reasoning = self._determine_strategy_heuristic(user_input, current_turn_messages)
                logger.info(f"Strategy Selected: {strategy.value} (Reason: {strategy_reasoning})")
            span.set_attribute("selected_strategy", strategy.value)
            span.set_attribute("strategy_reasoning", strategy_reasoning)


            # --- Prepare kwargs for execution based on strategy ---
            exec_kwargs = kwargs_override or {}
            exec_kwargs['session_id'] = session_id
            exec_kwargs['user_input'] = user_input
            exec_kwargs['current_turn_messages'] = current_turn_messages
            exec_kwargs['adk_session_state'] = adk_session_state # Pass state for potential use/update


            # 4. Execute Selected Strategy
            logger.info(f"Executing strategy: {strategy.value}")
            if strategy == ProcessingStrategy.ADK_RUN:
                if ADK_AVAILABLE and self.adk_runner:
                    response = await self._execute_adk_run(**exec_kwargs)
                else:
                    logger.warning("ADK_RUN strategy selected, but ADK runner not available/configured. Falling back.")
                    # Fallback strategy? Maybe DIRECT_LLM?
                    strategy = ProcessingStrategy.DIRECT_LLM
                    response = await self._execute_direct_llm(**exec_kwargs)

            elif strategy == ProcessingStrategy.A2A_CALL:
                if A2A_AVAILABLE:
                    response = await self._execute_a2a_call(**exec_kwargs)
                else:
                    logger.warning("A2A_CALL strategy selected, but A2A not available. Falling back.")
                    strategy = ProcessingStrategy.DIRECT_LLM
                    response = await self._execute_direct_llm(**exec_kwargs)

            else: # Default: DIRECT_LLM
                response = await self._execute_direct_llm(**exec_kwargs)

            span.set_attribute("raw_response_length", len(response))
            span.add_event("Strategy execution complete")

            # 5. Persist History (if successful and enabled)
            # Add assistant response to history
            if persist_history and not response.startswith("Error:"):
                 self._add_to_history(session_id, LLMMessage(role="assistant", content=response).to_dict())

            # 6. Sync World Model *back* to ADK State (if changed and enabled)
            if self.sync_adk_state and adk_session_state is not None:
                try:
                    self.world_model.sync_to_adk_state(adk_session_state)
                    span.add_event("ADK state synchronized and updated")
                except Exception as sync_e:
                     logger.error(f"Error syncing/updating ADK session state: {sync_e}")
                     span.record_exception(sync_e)

            # 7. Track Cost (using last_llm_result if available)
            if self.last_llm_result:
                try:
                    cost = completion_cost(completion_response=self.last_llm_result, model=self.amd.model)
                    if cost:
                        turn_cost = cost
                        self.total_cost += turn_cost
                        logger.info(f"Turn Cost: ${turn_cost:.6f}, Total Cost: ${self.total_cost:.6f}")
                        span.set_attribute("llm_cost", turn_cost)
                        span.set_attribute("total_agent_cost", self.total_cost)
                    self.last_llm_result = None # Clear after use
                except Exception as cost_e:
                    logger.warning(f"Failed to calculate cost: {cost_e}")
                    span.add_event("Cost calculation failed", attributes={"error": str(cost_e)})


            # 8. Run Post Callback
            if self.post_run_callback and not response.startswith("Error:"):
                try:
                    if iscoroutinefunction(self.post_run_callback):
                        await self.post_run_callback(session_id, response, turn_cost)
                    else:
                        self.post_run_callback(session_id, response, turn_cost)
                    span.add_event("Post-run callback executed")
                except Exception as cb_e:
                    logger.error(f"Post-run callback failed: {cb_e}", exc_info=True)
                    span.record_exception(cb_e)


            logger.info(f"Agent Run finished in {time.monotonic() - start_time:.2f}s. Response: {response[:100]}...")

    except Exception as e:
        logger.error(f"Error during agent run (Session: {session_id}): {e}", exc_info=True)
        self.internal_state = InternalAgentState.ERROR
        response = f"Error: An internal error occurred during processing: {str(e)}"
        if span:
             span.set_status(trace.Status(trace.StatusCode.ERROR, f"Agent run failed: {e}"))
             span.record_exception(e)
    finally:
        self.internal_state = InternalAgentState.IDLE
        if span: span.end() # Ensure span is closed
        logger.info(f"--- Agent Run End (Session: {session_id}) ---")

    return str(response) # Ensure string output
a_run_llm_completion(llm_messages=None, **kwargs) async

Core wrapper around LiteLLM acompletion with error handling, streaming, and cost tracking.
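
A hypothetical usage sketch, assuming `agent` is configured with a model in agent.amd; messages use the standard role/content chat format that LiteLLM expects:

async def ask_once(agent, question: str) -> str:
    messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": question},
    ]
    return await agent.a_run_llm_completion(messages)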

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def a_run_llm_completion(self, llm_messages: list[dict] | None = None, **kwargs) -> str:
    """Core wrapper around LiteLLM acompletion with error handling, streaming, and cost tracking."""
    if not llm_messages:
        if "messages" in kwargs:
            llm_messages = kwargs.pop("messages")
        if "llm_messages" in kwargs:
            llm_messages = kwargs.pop("llm_messages")
        if not llm_messages:
            logger.warning("a_run_llm_completion called with empty message list.")
            return "Error: No message provided to the model."

    self.print_verbose(f"Running model '{self.amd.model}' with {len(llm_messages)} messages.")
    # self.print_verbose("Messages:", json.dumps(llm_messages, indent=2)) # Very verbose

    # Prepare LiteLLM parameters from AgentModelData and kwargs overrides
    params = {
        'model': self.format_model or self.amd.model,
        'messages': llm_messages,
        'temperature': self.amd.temperature,
        'top_p': self.amd.top_p,
        'top_k': self.amd.top_k,
        'max_tokens': self.amd.max_tokens,
        'stream': self.stream,
        'stop': self.amd.stop_sequence,
        'user': self.amd.user_id,
        'api_base': self.amd.api_base,
        'api_version': self.amd.api_version,
        'api_key': self.amd.api_key,
        'presence_penalty': self.amd.presence_penalty,
        'frequency_penalty': self.amd.frequency_penalty,
        'caching': self.amd.caching,
        'response_format': kwargs.get('response_format'), # For a_format_class
        'tools': kwargs.get('tools'), # For LiteLLM function calling (less common now with ADK)
    }
    # Filter out None values as LiteLLM prefers absence over None for some params
    params = {k: v for k, v in params.items() if v is not None}

    # Add budget manager if present
    if self.amd.budget_manager: params['budget_manager'] = self.amd.budget_manager

    full_response_content = ""
    tool_calls_requested = None # Store tool calls if generated

    try:
        response_object = await acompletion(**params)

        if self.stream:
            collected_chunks = []
            async for chunk in response_object:
                # Store raw chunk for potential analysis or replay
                collected_chunks.append(chunk)
                # Extract text delta
                chunk_delta = chunk.choices[0].delta.content or ""
                if chunk_delta:
                    full_response_content += chunk_delta
                    if self.stream_callback:
                         try:
                             # Provide only the new text chunk
                             if iscoroutinefunction(self.stream_callback): await self.stream_callback(chunk_delta)
                             else: self.stream_callback(chunk_delta)
                         except Exception as cb_e:
                             logger.warning(f"Stream callback failed: {cb_e}")
                # Check for tool call deltas (less common in streaming)
                tool_deltas = chunk.choices[0].delta.tool_calls
                if tool_deltas:
                     logger.warning("Received tool call delta during streaming - handling may be incomplete.")
                     # TODO: Implement robust handling of streaming tool calls if needed.

            # After stream, construct a final response object mimicking non-streaming one for cost tracking
            # This is an approximation, LiteLLM might offer better ways.
            final_choice = {"message": {"role": "assistant", "content": full_response_content}}
            # If tool calls were detected during streaming, add them (complex to reconstruct accurately)
            # if reconstructed_tool_calls: final_choice["message"]["tool_calls"] = reconstructed_tool_calls
            self.last_llm_result = {
                "choices": [{"message": final_choice["message"]}],
                "model": self.amd.model, # Needed for cost tracking
                # Usage stats are often missing or zero in streaming chunks; guard against an empty stream
                "usage": (getattr(collected_chunks[-1], 'usage', {"prompt_tokens": 0, "completion_tokens": 0})
                          if collected_chunks else {"prompt_tokens": 0, "completion_tokens": 0})
            }

        else: # Non-streaming
            self.last_llm_result = response_object # Store the full response
            # Extract content and potential tool calls
            message = response_object.choices[0].message
            full_response_content = message.content or ""
            tool_calls_requested = message.tool_calls # List of ToolCall objects

            # Check if LiteLLM did function/tool calling (different from ADK tools)
            # This path is less likely if using ADK, but supported by LiteLLM
            if tool_calls_requested:
                logger.info(f"LiteLLM requested {len(tool_calls_requested)} tool calls.")
                # This requires a separate mechanism to execute these LiteLLM-requested tools
                # and send back 'tool' role messages in the next turn.
                # Not implemented here as focus is on ADK/A2A tools.
                # For now, return a message indicating tool call request.
                calls_repr = ", ".join([f"{tc.function.name}" for tc in tool_calls_requested])
                return f"Info: LLM requested tool calls ({calls_repr}). Direct execution not implemented."


        self.print_verbose(f"Model Response: {full_response_content[:100]}...")
        return full_response_content

    except RateLimitError as e:
        logger.error(f"Rate limit error from {self.amd.model}: {e}")
        # Implement backoff/retry? For now, re-raise.
        raise
    except (BadRequestError, APIConnectionError, InternalServerError) as e:
        logger.error(f"API/Server error during LiteLLM call for {self.amd.model}: {e}", exc_info=True)
        raise
    except Exception as e:
        logger.error(f"Unexpected error during LiteLLM completion: {e}", exc_info=True)
        raise
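For orientation, here is a minimal usage sketch. It is hedged: it assumes an already-configured EnhancedAgent instance bound to the name agent, and the messages are illustrative.

# Illustrative only: 'agent' is assumed to be a configured EnhancedAgent.
import asyncio

async def demo(agent):
    messages = [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name one prime number."},
    ]
    # Returns the assistant text, or an "Error: ..." string on failure.
    reply = await agent.a_run_llm_completion(llm_messages=messages)
    print(reply)

# asyncio.run(demo(agent))  # with a concrete agent instance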
adk_tool_a2a_cancel_task(tool_context, target_agent_url, task_id) async

ADK Tool: Attempts to cancel an ongoing A2A task.

Parameters:

    target_agent_url (str, required): The URL of the agent hosting the task.
    task_id (str, required): The ID of the task to cancel.

Returns:

    dict[str, Any]: A dictionary indicating success or failure, possibly including the task's state after the cancellation attempt.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_a2a_cancel_task(self,
                                   tool_context: ToolContext | None,
                                   target_agent_url: str,
                                   task_id: str
                                   ) -> dict[str, Any]:
    """ADK Tool: Attempts to cancel an ongoing A2A task.

    Args:
        target_agent_url: The URL of the agent hosting the task.
        task_id: The ID of the task to cancel.

    Returns:
        A dictionary indicating success or failure, possibly with the task's state after cancellation attempt.
    """
    if not A2A_AVAILABLE: return {"error": "python-a2a library not available."}
    logger.info(f"[ADK Tool] a2a_cancel_task called for task {task_id} on {target_agent_url}")

    try:
        client = await self.setup_a2a_client(target_agent_url)
        if not client:
            return {"error": f"Could not connect to A2A agent at {target_agent_url}"}

        if not hasattr(client, 'cancel_task'):
            return {"error": f"A2A client for {target_agent_url} does not support 'cancel_task'."}

        # Call the client's cancel method
        # The response structure depends heavily on the library implementation
        cancel_response = await client.cancel_task(task_id=task_id)

        # Parse response - could be simple success/fail, or updated task state
        if isinstance(cancel_response, dict):
            if 'error' in cancel_response:
                error_info = cancel_response['error']
                err_msg = error_info.get('message', str(error_info)) if isinstance(error_info, dict) else str(
                    error_info)
                logger.warning(f"A2A cancel_task failed for {task_id}: {err_msg}")
                return {"success": False, "error": err_msg, "raw_response": cancel_response}
            else:
                # Assume success, response might contain updated task state
                logger.info(f"A2A task {task_id} cancellation requested successfully.")
                # Try to extract state if returned
                state = cancel_response.get('result', {}).get('status', {}).get('state', 'UNKNOWN')
                return {"success": True, "state_after_request": state, "raw_response": cancel_response}
        elif cancel_response is True:  # Simple boolean success
            return {"success": True, "state_after_request": "UNKNOWN"}
        else:  # Assume object-like or other structure
            # Add parsing based on observed python-a2a behavior
            logger.info(f"A2A task {task_id} cancellation request sent, parsing result.")
            # Example: Check for specific attributes if object is returned
            state = getattr(getattr(getattr(cancel_response, 'result', None), 'status', None), 'state',
                            TaskState.UNKNOWN).value
            return {"success": True, "state_after_request": state,
                    "raw_response": vars(cancel_response) if hasattr(cancel_response, '__dict__') else str(
                        cancel_response)}


    except Exception as e:
        # Catch specific errors like TaskNotFound, TaskNotCancelable if defined by python-a2a
        # if isinstance(e, TaskNotFoundError):
        #    return {"success": False, "error": f"Task {task_id} not found."}
        # if isinstance(e, TaskNotCancelableError):
        #    return {"success": False, "error": f"Task {task_id} is not in a cancelable state."}
        logger.error(f"[ADK Tool] a2a_cancel_task failed: {e}", exc_info=True)
        return {"success": False, "error": f"Error cancelling A2A task: {e}"}
adk_tool_a2a_get_task_status(tool_context, target_agent_url, task_id) async

ADK Tool: Gets the current status and details of an A2A task.

Parameters:

    target_agent_url (str, required): The URL of the agent hosting the task.
    task_id (str, required): The ID of the task to check.

Returns:

    dict[str, Any]: A dictionary containing task status details (state, message, artifacts) or an error.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_a2a_get_task_status(self,
                                       tool_context: ToolContext | None,
                                       target_agent_url: str,
                                       task_id: str
                                       ) -> dict[str, Any]:
    """ADK Tool: Gets the current status and details of an A2A task.

    Args:
        target_agent_url: The URL of the agent hosting the task.
        task_id: The ID of the task to check.

    Returns:
        A dictionary containing task status details (state, message, artifacts) or an error.
    """
    if not A2A_AVAILABLE: return {"error": "python-a2a library not available."}
    logger.info(f"[ADK Tool] a2a_get_task_status called for task {task_id} on {target_agent_url}")

    try:
        client = await self.setup_a2a_client(target_agent_url)
        if not client:
            return {"error": f"Could not connect to A2A agent at {target_agent_url}"}

        if not hasattr(client, 'get_task'):
            return {"error": f"A2A client for {target_agent_url} does not support 'get_task'."}

        # Get task details from the client
        task_details = await client.get_task(task_id=task_id, history_length=1)  # History=1 gets latest status

        # Parse and return relevant info
        if isinstance(task_details, dict):
            # Basic parsing, adjust based on actual python-a2a structure
            status_info = task_details.get('status', {})
            artifacts = task_details.get('artifacts')
            return {
                "task_id": task_id,
                "state": status_info.get('state', 'UNKNOWN'),
                "status_message": status_info.get('message'),  # Might be complex object
                "artifacts": artifacts,  # Might be complex list
                "raw_response": task_details  # Include raw for debugging
            }
        elif hasattr(task_details, 'status'):  # Object-like response
            status_obj = task_details.status
            artifacts_obj = getattr(task_details, 'artifacts', None)
            return {
                "task_id": task_id,
                "state": getattr(status_obj, 'state', TaskState.UNKNOWN).value,  # Get enum value
                "status_message": getattr(status_obj, 'message', None),
                "artifacts": artifacts_obj,
                "raw_response": vars(task_details)  # Example conversion
            }
        else:
            return {"error": "Received unexpected response structure from get_task.", "raw_response": task_details}

    except Exception as e:
        # Catch specific errors from python-a2a if they exist (e.g., TaskNotFoundError)
        # if isinstance(e, TaskNotFoundError):
        #    logger.warning(f"[ADK Tool] A2A Task {task_id} not found on {target_agent_url}.")
        #    return {"error": f"Task {task_id} not found."}
        logger.error(f"[ADK Tool] a2a_get_task_status failed: {e}", exc_info=True)
        return {"error": f"Error getting A2A task status: {e}"}
adk_tool_a2a_send_and_wait(tool_context, target_agent_url, task_prompt, session_id=None) async

ADK Tool: Sends a task to another agent via A2A and waits for the final text result.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_a2a_send_and_wait(self,
                                     tool_context: ToolContext | None,
                                     target_agent_url: str,
                                     task_prompt: str,
                                     session_id: Optional[str] = None
                                     ) -> str:
    """ADK Tool: Sends a task to another agent via A2A and waits for the final text result."""
    # ... (implementation remains the same, calls _execute_a2a_call) ...
    if not A2A_AVAILABLE: return "Error: python-a2a library not available."
    logger.info(f"[ADK Tool] a2a_send_and_wait called for target: {target_agent_url}")
    tool_session_id = session_id or f"adk_tool_a2a_{uuid.uuid4()}"
    try:
        return await self._execute_a2a_call(
             user_input=task_prompt,
             session_id=tool_session_id,
             target_a2a_agent_url=target_agent_url,
             a2a_task_prompt=task_prompt
        )
    except Exception as e:
         logger.error(f"[ADK Tool] a2a_send_and_wait failed: {e}", exc_info=True)
         return f"Error executing A2A task via ADK tool: {e}"
adk_tool_a2a_send_no_wait(tool_context, target_agent_url, task_prompt, session_id=None) async

ADK Tool: Sends a task to another agent via A2A and returns the task ID immediately.

Parameters:

    target_agent_url (str, required): The full URL of the target A2A agent.
    task_prompt (str, required): The natural language prompt or task for the target agent.
    session_id (Optional[str], default None): Optional session ID to use for the A2A interaction.

Returns:

    str: The unique ID of the submitted A2A task, or an error message.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_a2a_send_no_wait(self,
                                    tool_context: ToolContext | None,
                                    target_agent_url: str,
                                    task_prompt: str,
                                    session_id: Optional[str] = None
                                    ) -> str:
    """ADK Tool: Sends a task to another agent via A2A and returns the task ID immediately.

    Args:
        target_agent_url: The full URL of the target A2A agent.
        task_prompt: The natural language prompt or task for the target agent.
        session_id: Optional session ID to use for the A2A interaction.

    Returns:
        The unique ID of the submitted A2A task, or an error message.
    """
    if not A2A_AVAILABLE: return "Error: python-a2a library not available."
    logger.info(f"[ADK Tool] a2a_send_no_wait called for target: {target_agent_url}")

    try:
        client = await self.setup_a2a_client(target_agent_url)
        if not client:
            return f"Error: Could not connect to A2A agent at {target_agent_url}"

        task_id = str(uuid.uuid4())
        a2a_session_id = session_id or f"a2a_tool_nowait_{task_id[:8]}"

        message_payload = {"role": "user", "content": {"type": "text", "text": task_prompt}}

        initial_task_info = None
        if hasattr(client, 'send_task'):
            initial_task_info = await client.send_task(message=message_payload, task_id=task_id,
                                                       session_id=a2a_session_id)
        elif hasattr(client, 'create_task'):
            initial_task_info = await client.create_task(message=message_payload, task_id=task_id,
                                                         session_id=a2a_session_id)
        else:
            return "Error: A2A client does not support send_task or create_task."

        # Check for immediate errors from the submission call
        # Structure depends on python-a2a's return value
        error_info = None
        if isinstance(initial_task_info, dict):
            error_info = initial_task_info.get('error')
        elif hasattr(initial_task_info, 'error'):
            error_info = initial_task_info.error

        if error_info:
            err_msg = error_info.get('message', str(error_info)) if isinstance(error_info, dict) else str(
                error_info)
            logger.error(f"A2A send_task (no wait) failed immediately: {err_msg}")
            return f"Error submitting A2A task: {err_msg}"
        else:
            logger.info(f"A2A task '{task_id}' submitted successfully (no wait) to {target_agent_url}.")
            return task_id  # Return the ID for later polling/checking

    except Exception as e:
        logger.error(f"[ADK Tool] a2a_send_no_wait failed: {e}", exc_info=True)
        return f"Error sending A2A task (no wait): {e}"
adk_tool_world_model_get(tool_context, key) async

ADK Tool: Retrieves a specific value from the agent's world model.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_world_model_get(self, tool_context: ToolContext | None, key: str) -> Any | None:
    """ADK Tool: Retrieves a specific value from the agent's world model."""
    # ... (implementation remains the same) ...
    logger.info(f"[ADK Tool] get_world_model_key called for key: {key}")
    return self.world_model.get(key)
adk_tool_world_model_show(tool_context) async

ADK Tool: Returns a string representation of the agent's entire world model.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def adk_tool_world_model_show(self, tool_context: ToolContext | None) -> str:
    """ADK Tool: Returns a string representation of the agent's entire world model."""
    # ... (implementation remains the same) ...
    logger.info("[ADK Tool] show_world_model called")
    return self.world_model.show()
close() async

Gracefully close connections and resources.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def close(self):
    """Gracefully close connections and resources."""
    logger.info(f"Closing resources for agent '{self.amd.name}'...")
    # Close A2A resources
    if self.a2a_server and hasattr(self.a2a_server, 'stop'): # Check if server has stop method
         logger.info("Stopping A2A server...")
         try:
             await self.a2a_server.stop() # Assuming stop is async
         except Exception as e: logger.warning(f"Error stopping A2A server: {e}")
    if hasattr(self, '_a2a_task_manager_instance') and hasattr(self._a2a_task_manager_instance, 'close'):
         logger.info("Closing A2A task manager...")
         await self._a2a_task_manager_instance.close()
    await self.close_a2a_clients()

    # Close MCP server if running
    if self.mcp_server and hasattr(self.mcp_server, 'stop'): # Check for stop method
         logger.info("Stopping MCP server...")
         try:
             # MCP server run is blocking, stop might need separate mechanism
             # or be handled by process termination. If stop method exists:
             # await self.mcp_server.stop() # Assuming async stop
             logger.warning("MCP server 'stop' might need manual implementation or process signal.")
         except Exception as e: logger.warning(f"Error stopping MCP server: {e}")


    # Close ADK resources (MCPToolset connections managed by exit stack)
    if self.adk_exit_stack:
        logger.info("Closing ADK AsyncExitStack (manages MCPToolset connections)...")
        try:
            await self.adk_exit_stack.aclose()
        except Exception as e:
            logger.warning(f"Error closing ADK exit stack: {e}")

    # Close ADK runner if it has a close method
    if self.adk_runner and hasattr(self.adk_runner, 'close'):
         logger.info("Closing ADK runner...")
         try:
              # Check if close is async
             if iscoroutinefunction(self.adk_runner.close):
                 await self.adk_runner.close()
             else:
                 self.adk_runner.close()
         except Exception as e: logger.warning(f"Error closing ADK runner: {e}")


    logger.info(f"Agent '{self.amd.name}' resource cleanup finished.")
close_a2a_clients() async

Closes all cached A2A client connections.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def close_a2a_clients(self):
    """Closes all cached A2A client connections."""
    async with self.a2a_client_lock:
        logger.info(f"Closing {len(self.a2a_clients)} A2A clients.")
        # A2AClient may manage underlying httpx clients automatically.
        # If explicit close needed in future versions, add here.
        # for client in self.a2a_clients.values():
        #     await client.close() # If available
        self.a2a_clients.clear()
construct_initial_prompts()

Constructs the initial system/context messages for the LLM prompt.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def construct_initial_prompts(self) -> list[dict]:
    """Constructs the initial system/context messages for the LLM prompt."""
    messages = []
    # Base System Prompt
    if self.amd.system_message:
        messages.append(LLMMessage("system", self.amd.system_message).to_dict())

    # World Model Context
    wm_repr = self.world_model.show()
    if wm_repr != "[empty]":
        messages.append(LLMMessage("system", f"Current World State:\n{wm_repr}").to_dict())

    # Capabilities Overview (ADK specific parts depend on LlmAgent inheritance)
    caps = ["LiteLLM (Core LLM access)"]
    if ADK_AVAILABLE and isinstance(self, LlmAgent):
        if self.tools: caps.append("ADK Tools (including potential MCP/A2A wrappers)")
        if self.code_executor: caps.append("ADK Code Execution")
        if any(isinstance(t, type(adk_google_search) | AdkVertexAiSearchTool) for t in getattr(self, 'tools', [])):
             caps.append("ADK Search")
    if A2A_AVAILABLE and self.a2a_clients: caps.append("A2A Client (delegate to other agents)")
    if self.mcp_server: caps.append("MCP Server (exposes capabilities)")
    if self.a2a_server: caps.append("A2A Server (receives tasks)")

    messages.append(LLMMessage("system", f"Your Capabilities: {', '.join(caps)}.").to_dict())

    # ADK Tool Instructions (if ADK enabled and tools exist)
    if ADK_AVAILABLE and isinstance(self, LlmAgent) and self.tools:
        try:
            # Use ADK's internal method to get schema if possible, otherwise basic list
            tool_schemas = getattr(self, 'tool_schemas', None) # ADK might populate this
            if tool_schemas:
                 tool_list_str = json.dumps(tool_schemas, indent=2)
                 messages.append(LLMMessage("system", f"You have access to the following tools (use FunctionCall format):\n{tool_list_str}").to_dict())
            else: # Fallback to basic list
                tool_list = "\n".join([f"- {tool.name}: {tool.description or 'No description'}" for tool in self.tools])
                messages.append(LLMMessage("system", f"You can use the following tools:\n{tool_list}\nRespond with a FunctionCall to use a tool.").to_dict())
        except Exception as e:
             logger.warning(f"Could not generate detailed ADK tool instructions: {e}")


    # Add specific instructions for A2A delegation if needed
    if A2A_AVAILABLE and self.a2a_clients:
         client_names = list(self.a2a_clients.keys()) # Target URLs act as names here
         messages.append(LLMMessage("system", f"You can delegate tasks to other agents via A2A using their URLs (e.g., {client_names[0]} if available). Indicate clearly if you want to delegate.").to_dict())

    return messages
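The returned value is a plain list of LiteLLM-style message dicts. A representative result, with illustrative world-state content, might look like:

# Representative output (values depend on the agent's configuration):
[
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "system", "content": "Current World State:\n- user_location: \"Berlin\""},
    {"role": "system", "content": "Your Capabilities: LiteLLM (Core LLM access)."},
]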
flow_world_model(text_input, session_id, adk_session_state) async

Analyzes the input, updates the internal WorldModel, and syncs with ADK state if enabled. Sync priority: if ADK state exists, sync from it first, then apply updates derived from the text input; the sync back to ADK happens after the agent run completes.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def flow_world_model(self, text_input: str, session_id: str, adk_session_state: State | None):
    """
    Analyzes input, updates internal WorldModel, and syncs with ADK state if enabled.
    Sync Priority: If ADK state exists, sync *from* it first. Then update based on text.
                 The sync *to* ADK happens after the agent run completes.
    """
    logger.debug(f"Flowing world model based on text: {text_input[:100]}...")

    # 1. Sync FROM ADK State (if enabled and state available)
    if self.sync_adk_state and adk_session_state is not None:
         logger.debug("Syncing World Model FROM ADK session state...")
         self.world_model.sync_from_adk_state(adk_session_state)

    # 2. Update World Model based on Text Input (using LLM)
    # This adds/modifies based on the current turn's input
    # Define Pydantic model for structured update extraction
    current_keys = list(self.world_model.to_dict().keys())
    class WorldModelAdaption(BaseModel):
        action: Literal['add', 'update', 'remove', 'none'] = Field(..., description="Action on the world model.")
        key: str | None = Field(None, description=f"Key to modify/add/remove (e.g., 'user_location', 'task_status'). Existing keys: {current_keys}")
        value: Any | None = Field(None, description="New value (for 'add'/'update'). Should be JSON serializable.")
        reasoning: str = Field(..., description="Why this change (or no change) is needed based on the input.")

    prompt = (f"Analyze the following text and current world state to determine if the agent's world model needs changes.\n"
              f"Current World State Keys: {current_keys}\n"
              f"Text Input: ```\n{text_input}\n```\n"
              f"Decide action, key, value, and reasoning. Focus on factual updates derived *from the text*. Do not hallucinate.")

    try:
        # Use a potentially faster/cheaper model for this classification task
        # Could eventually use a separate AMD config for this call
        adaption_dict = await self.a_format_class(WorldModelAdaption, prompt)
        adaption = WorldModelAdaption(**adaption_dict)

        logger.info(f"World Model Adaption proposed: {adaption.action} on key '{adaption.key}'. Reason: {adaption.reasoning}")

        if adaption.action == 'add' or adaption.action == 'update':
            if adaption.key and adaption.value is not None:
                self.world_model.set(adaption.key, adaption.value)
            else:
                logger.warning("World model 'add'/'update' ignored: missing key or value.")
        elif adaption.action == 'remove':
            if adaption.key:
                self.world_model.remove(adaption.key)
            else:
                logger.warning("World model 'remove' ignored: missing key.")
        # Else ('none'): do nothing

    except Exception as e:  # includes pydantic ValidationError
        logger.warning(f"Failed to determine world model adaption via LLM: {e}. World model may be based only on ADK sync or previous state.")
print_verbose(*args)

Conditional logging helper.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def print_verbose(self, *args):
    """Conditional logging helper."""
    if self.verbose:
        logger.debug(' '.join(map(str, args)))
run(user_input, session_id=None, **kwargs)

Synchronous wrapper for a_run.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def run(self, user_input: str, session_id: Optional[str] = None, **kwargs) -> str:
    """Synchronous wrapper for a_run."""
    try:
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No running event loop: safe to start one here.
            return asyncio.run(self.a_run(user_input, session_id=session_id, **kwargs))
        # A loop is already running (e.g. notebook or server context).
        # asyncio.run() would raise RuntimeError here, so execute the
        # coroutine on a fresh loop in a worker thread and block for the result.
        logger.warning("Synchronous 'run' called from a running event loop. "
                       "This blocks the loop. Consider using 'await a_run'.")
        import concurrent.futures  # local import keeps module imports unchanged
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            future = pool.submit(asyncio.run, self.a_run(user_input, session_id=session_id, **kwargs))
            return future.result()
    except Exception as e:
        logger.error(f"Error in synchronous run wrapper: {e}", exc_info=True)
        return f"Error: Failed to execute synchronous run: {e}"
run_a2a_server(host='0.0.0.0', port=5000, **kwargs)

Starts the A2A server (blocking) using the python-a2a run_server function.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def run_a2a_server(self, host="0.0.0.0", port=5000, **kwargs):
    """Starts the A2A server (blocking) using the python-a2a run_server function."""
    if not self.a2a_server:
        logger.error("A2A server not initialized. Call setup_a2a_server first.")
        return
    if not A2A_AVAILABLE:
        logger.error("python-a2a library not available. Cannot run A2A server.")
        return

    # Get effective host/port from server instance if set, otherwise use args
    effective_host = getattr(self.a2a_server, 'host', host)
    effective_port = getattr(self.a2a_server, 'port', port)

    logger.info(f"Starting A2A server for agent '{self.amd.name}' via run_server_func on {effective_host}:{effective_port}...")
    try:
        # Call the imported run_server function, passing the agent instance
        run_a2a_server_func(self.a2a_server, host=effective_host, port=effective_port, **kwargs) # This blocks
    except Exception as e:
        logger.error(f"A2A server failed to run: {e}", exc_info=True)
run_mcp_server(transport='sse', **kwargs)

Starts the MCP server (blocking).

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def run_mcp_server(self, transport='sse', **kwargs):
    """Starts the MCP server (blocking)."""
    if not self.mcp_server:
        logger.error("MCP server not initialized. Call setup_mcp_server first.")
        return
    if not MCP_AVAILABLE:
         logger.error("MCP library not available. Cannot run MCP server.")
         return
    logger.info(f"Starting MCP server for agent '{self.amd.name}' using {transport} transport...")
    # This is blocking, run in a separate process/thread for a long-running agent
    try:
        self.mcp_server.run(transport=transport, **kwargs)
    except Exception as e:
        logger.error(f"MCP server failed to run: {e}", exc_info=True)
setup_a2a_client(target_agent_url) async

Gets or creates an A2A client for a specific target agent URL using python-a2a.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
async def setup_a2a_client(self, target_agent_url: str) -> A2AClient | None:
    """Gets or creates an A2A client for a specific target agent URL using python-a2a."""
    if not A2A_AVAILABLE:
        logger.warning("python-a2a library not installed. Cannot setup A2A client.")
        return None

    async with self.a2a_client_lock:
        if target_agent_url in self.a2a_clients:
            logger.debug(f"Reusing cached A2A client for {target_agent_url}")
            return self.a2a_clients[target_agent_url]

        logger.info(f"Setting up A2A client for target: {target_agent_url}")
        try:
            # python-a2a client likely fetches card on init or first call
            client = A2AClient(base_url=target_agent_url) # Pass the URL directly
            # Verify connection implicitly by getting card (optional, client might do lazy loading)
            # agent_card = await client.get_agent_card() # If method exists
            # logger.info(f"Successfully connected A2A client to agent: {agent_card.name}")
            self.a2a_clients[target_agent_url] = client
            logger.info(f"A2A client created for target: {target_agent_url}")
            return client
        except Exception as e:
            logger.error(f"Failed to setup A2A client for {target_agent_url}: {e}", exc_info=True)
            return None
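Callers usually only need the cached handle; a minimal sketch (the URL is a placeholder):

async def get_client(agent):
    client = await agent.setup_a2a_client("http://localhost:5000")
    if client is None:
        # Library missing or target unreachable; handle gracefully.
        raise RuntimeError("A2A unavailable or target unreachable")
    return client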
setup_a2a_server(host='0.0.0.0', port=5000, **a2a_server_options)

Initialize and configure the A2A server capabilities using python-a2a. This dynamically creates a server class with the agent's capabilities.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def setup_a2a_server(self, host="0.0.0.0", port=5000, **a2a_server_options):
    """
    Initialize and configure the A2A server capabilities using python-a2a.
    This dynamically creates a server class with the agent's capabilities.
    """
    if not A2A_AVAILABLE:
        logger.warning("python-a2a library not installed. Cannot setup A2A server.")
        return None
    if self.a2a_server:
        logger.warning("A2A server already initialized.")
        return self.a2a_server

    logger.info(f"Setting up A2A server for agent '{self.amd.name}' on {host}:{port}")

    agent_instance = self # Reference to the current EnhancedAgent instance

    # Define the A2A Server class dynamically using the decorator
    @a2a_agent_decorator(
        name=self.amd.name or "EnhancedAgent",
        description=f"Enhanced Agent '{self.amd.name}' - Capabilities: ADK({ADK_AVAILABLE}), MCP({MCP_AVAILABLE}), A2A({A2A_AVAILABLE})",
        version="1.0.0",
        # Other AgentCard fields...
    )
    class DynamicA2AServer(A2AServer):
        bound_agent: EnhancedAgent = agent_instance

        def handle_task(self, task: Task) -> Task:
            """ Handles incoming A2A tasks by calling the EnhancedAgent's async logic. """
            # --- (handle_task implementation remains the same as before) ---
            logger.info(f"[A2A Server {self.bound_agent.amd.name}] Received task: {task.id}")
            async def run_agent_async():
                # ... (logic to extract prompt, call a_run, update task) ...
                try:
                    user_prompt = ""
                    # ... (extract user_prompt from task.message) ...
                    if task.message and task.message.get("content"):
                        content = task.message["content"]
                        if isinstance(content, dict) and content.get("type") == "text":
                            user_prompt = content.get("text", "").strip()
                        elif isinstance(content, str):
                            user_prompt = content.strip()

                    if not user_prompt:
                        raise ValueError("Task message has no text content.")

                    session_id = task.message.get("session_id", task.id)
                    agent_response = await self.bound_agent.a_run(
                        user_prompt,
                        session_id=session_id,
                        persist_history=False,
                        a2a_task_id=task.id
                    )
                    task.artifacts = [{"parts": [{"type": "text", "text": str(agent_response)}]}]
                    task.status = TaskStatus(state=TaskState.COMPLETED)
                except Exception as e:
                    # ... (error handling) ...
                    logger.error(f"[A2A Task {task.id}] Error during processing: {e}", exc_info=True)
                    error_msg = f"Internal agent error: {str(e)}"
                    task.artifacts = [{"parts": [{"type": "text", "text": error_msg}]}]
                    task.status = TaskStatus(state=TaskState.FAILED, message={"role": "agent", "content": {"type": "text", "text": error_msg}})
                return task
            try:
                updated_task = asyncio.run(run_agent_async())
                return updated_task
            except RuntimeError as e:
                # ... (handle RuntimeError) ...
                logger.error(f"RuntimeError calling asyncio.run in handle_task: {e}.")
                task.status = TaskStatus(state=TaskState.FAILED, message={"role": "agent", "content": {"type": "text", "text": "Internal Server Error processing task asynchronously."}})
                return task
            # --- (end of handle_task logic) ---


        # --- Expose Skills ---
        @a2a_skill_decorator(
            name="General Query",
            description="Process general natural language queries using the agent's primary LLM.",
            examples=["What is the capital of France?", "Summarize the plot of Hamlet."]
        )
        def general_query_skill(self, query: str) -> str:
            """Handles general queries via the skill mechanism by calling a_run."""
            logger.info(f"[A2A Skill] Received general_query: {query[:50]}...")
            async def run_skill_async():
                # Call a_run, forcing direct LLM strategy for simple queries
                response = await self.bound_agent.a_run(
                    query,
                    a2a_task_id=f"skill_query_{uuid.uuid4()}",
                    strategy_override=ProcessingStrategy.DIRECT_LLM,
                    persist_history=False
                    )
                return response
            try:
                # Bridge sync skill call to async agent logic
                return asyncio.run(run_skill_async())
            except RuntimeError:
                 logger.error("RuntimeError calling asyncio.run in general_query_skill.")
                 return "Error: Could not process skill asynchronously."

        # --- FIXED: Generic Skill for ADK Tools ---
        if ADK_AVAILABLE and isinstance(agent_instance, LlmAgent) and agent_instance.tools:
            # Check if there are any ADK tools to expose
            adk_tool_list = [t for t in agent_instance.tools if isinstance(t, BaseTool)]
            if adk_tool_list:
                logger.info(f"Exposing {len(adk_tool_list)} ADK tools via 'execute_adk_tool' A2A skill.")

                @a2a_skill_decorator(
                    name="execute_adk_tool",
                    description=f"Executes a registered ADK tool. Available tools: {', '.join([t.name for t in adk_tool_list])}",
                    examples=["Execute tool 'some_tool_name' with argument 'arg1'='value1'"] # Generic example
                )
                def execute_adk_tool_skill(self, tool_name: str, arguments: dict[str, Any]) -> str:
                    """Generic skill to execute an ADK tool by name with arguments."""
                    logger.info(f"[A2A Skill] Request to execute ADK tool: {tool_name} with args: {arguments}")

                    # Find the ADK tool instance on the bound agent
                    tool_to_call: BaseTool | None = None
                    for tool in self.bound_agent.tools:
                        if isinstance(tool, BaseTool) and tool.name == tool_name:
                            tool_to_call = tool
                            break

                    if not tool_to_call:
                        logger.warning(f"[A2A Skill] ADK tool '{tool_name}' not found.")
                        return f"Error: ADK tool '{tool_name}' not found on this agent."

                    # --- Bridge sync skill call to async ADK tool execution ---
                    async def run_adk_tool_async():
                        try:
                            # ADK tools require ToolContext. We can provide a minimal one or None.
                            # Providing None might limit tool functionality.
                            # Let's try providing None for simplicity first.
                            adk_tool_context = None

                            # Check if the tool has an async run method (most ADK tools should)
                            if hasattr(tool_to_call, 'run_async') and iscoroutinefunction(tool_to_call.run_async):
                                # Pass arguments directly to run_async
                                result = await tool_to_call.run_async(args=arguments, tool_context=adk_tool_context)
                                # Convert result to string for A2A response
                                if isinstance(result, str): return result
                                try: return json.dumps(result)
                                except (TypeError, ValueError): return str(result)
                            elif hasattr(tool_to_call, 'run') and callable(tool_to_call.run):
                                # Fallback to synchronous run in thread pool
                                logger.warning(f"ADK tool '{tool_name}' has no run_async, using synchronous run in thread.")
                                result = await asyncio.to_thread(tool_to_call.run, args=arguments, tool_context=adk_tool_context)
                                if isinstance(result, str): return result
                                try: return json.dumps(result)
                                except (TypeError, ValueError): return str(result)
                            else:
                                 return f"Error: ADK tool '{tool_name}' has no callable run or run_async method."

                        except Exception as e:
                            logger.error(f"[A2A Skill] Error executing ADK tool '{tool_name}': {e}", exc_info=True)
                            return f"Error executing ADK tool {tool_name}: {e}"

                    # Execute the async tool runner
                    try:
                        return asyncio.run(run_adk_tool_async())
                    except RuntimeError:
                        logger.error(f"RuntimeError calling asyncio.run in execute_adk_tool_skill for tool {tool_name}.")
                        return "Error: Could not execute ADK tool asynchronously."

        # --- End of Skill Definitions ---

    # Instantiate the dynamic server class
    try:
         self.a2a_server = DynamicA2AServer(**a2a_server_options)
         logger.info(f"A2A server instance created for agent '{self.amd.name}'.")
         return self.a2a_server
    except Exception as e:
         logger.error(f"Failed to instantiate dynamic A2A Server: {e}", exc_info=True)
         return None
setup_adk_runner(runner_options=None)

Initializes an ADK runner for this agent (if ADK enabled).

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def setup_adk_runner(self, runner_options: dict[str, Any] | None = None):
    """Initializes an ADK runner for this agent (if ADK enabled)."""
    if not ADK_AVAILABLE:
        logger.warning("ADK not available. Cannot setup ADK runner.")
        return None
    if not isinstance(self, LlmAgent):
        logger.error("Agent must inherit from LlmAgent to use ADK runner directly.")
        return None
    if self.adk_runner:
        logger.warning("ADK runner already initialized.")
        return self.adk_runner

    runner_opts = runner_options or {}
    runner_class = runner_opts.pop("runner_class", InMemoryRunner) # Default to InMemory
    app_name = runner_opts.pop("app_name", f"{self.amd.name}_ADKApp")

    if runner_class == InMemoryRunner:
        runner_opts = {}

    logger.info(f"Setting up ADK Runner ({runner_class.__name__}) for app '{app_name}'...")

    try:
         # Pass the agent instance and other options to the runner constructor
        self.adk_runner = runner_class(agent=self, app_name=app_name, **runner_opts)
        self.adk_session_service = self.adk_runner.session_service # Store session service
        logger.info(f"ADK {runner_class.__name__} setup complete for agent '{self.amd.name}'.")
        return self.adk_runner
    except Exception as e:
        logger.error(f"Failed to setup ADK runner: {e}", exc_info=True)
        self.adk_runner = None
        self.adk_session_service = None
        return None
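A setup sketch (the app name is illustrative); the default runner is InMemoryRunner:

runner = agent.setup_adk_runner({"app_name": "MyApp"})
if runner is None:
    # ADK not installed, agent is not an LlmAgent, or construction failed.
    raise RuntimeError("ADK runner unavailable")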
setup_mcp_server(host='0.0.0.0', port=8000, **mcp_kwargs)

Initialize and configure the MCP server capabilities for this agent. This agent will ACT AS an MCP Server.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def setup_mcp_server(self, host="0.0.0.0", port=8000, **mcp_kwargs):
    """Initialize and configure the MCP server capabilities *for this agent*.
       This agent will ACT AS an MCP Server.
    """
    if not MCP_AVAILABLE:
        logger.warning("MCP library not installed. Cannot setup MCP server.")
        return None
    if self.mcp_server:
        logger.warning("MCP server already initialized.")
        return self.mcp_server
    name = mcp_kwargs.get("name")
    del mcp_kwargs["name"]
    self.mcp_server = FastMCP(name=name or f"{self.amd.name}-mcp-server",
                              description=f"MCP interface for EnhancedAgent {self.amd.name}",
                              **mcp_kwargs)
    logger.info(f"Setting up MCP server for agent '{self.amd.name}' on {host}:{port}")

    # --- Register Agent's core functionalities as MCP services ---
    # Example: Expose World Model (Read-only for safety)
    @self.mcp_server.resource(f"agent://{self.amd.name}/world_model")
    def mcp_get_world_model_resource() -> dict[str, Any]:
        """Gets the agent's world model."""
        logger.debug(f"[MCP Resource] agent://{self.amd.name}/world_model accessed")
        return self.world_model.to_dict()

    # Example: Expose a simple query tool via MCP
    @self.mcp_server.tool(name="simple_llm_query")
    async def mcp_simple_query(prompt: str) -> str:
        """Sends a simple prompt to the agent's LLM (non-persistent run)."""
        logger.debug(f"[MCP Tool] simple_llm_query called: {prompt[:50]}...")
        # Use a minimal, non-persistent run, disable recursive calls
        response = await self.a_run(
            prompt, session_id=f"mcp_query_{uuid.uuid4()}",
            persist_history=False, strategy_override=ProcessingStrategy.DIRECT_LLM
        )
        return response

    # If ADK tools exist, potentially expose them via MCP automatically?
    if ADK_AVAILABLE and isinstance(self, LlmAgent) and self.tools:
         logger.info("Attempting to expose ADK tools via MCP server...")
         for adk_tool in self.tools:
             if adk_tool.name in ["code_execution", "adk_tool_a2a_send_and_wait", "adk_tool_a2a_send_no_wait", "adk_tool_a2a_get_task_status", "adk_tool_a2a_cancel_task"]:
                 continue
             if not isinstance(adk_tool, BaseTool): continue
             try:
                 mcp_schema = adk_to_mcp_tool_type(adk_tool)

                 # Define the MCP tool handler dynamically.
                 # Bind the current tool via a default argument so each handler
                 # captures its own tool (avoids the late-binding closure bug).
                 async def mcp_tool_handler(tool_name=adk_tool.name, _tool=adk_tool, **kwargs):
                     logger.info(f"[MCP Tool via ADK] Calling {tool_name} with {kwargs}")
                     # ADK tools expect a ToolContext, which is unavailable here.
                     # This simple version calls the tool's underlying function if possible.
                     # WARNING: This bypasses ADK's standard tool execution flow.
                     if hasattr(_tool, 'func') and callable(_tool.func):
                         # Assumes the wrapped function does not need ToolContext
                         result = await _tool.func(**kwargs)
                         # Convert the result to MCP content (e.g., TextContent)
                         if isinstance(result, str):
                             return [mcp_types.TextContent(type="text", text=result)]
                         try:
                             return [mcp_types.TextContent(type="text", text=json.dumps(result))]
                         except (TypeError, ValueError):
                             return [mcp_types.TextContent(type="text", text=str(result))]
                     else:
                         logger.warning(f"Cannot directly call ADK tool {tool_name} via MCP.")
                         return [mcp_types.TextContent(type="text", text=f"Error: Cannot execute ADK tool {tool_name} directly.")]

                 # Register the dynamic handler with the MCP server
                 self.mcp_server.tool(name=mcp_schema.name)(mcp_tool_handler)
                 logger.info(f"Exposed ADK tool '{adk_tool.name}' as MCP tool '{mcp_schema.name}'.")

             except Exception as e:
                 logger.warning(f"Failed to expose ADK tool '{adk_tool.name}' via MCP: {e}")


    logger.info(f"MCP server setup complete for agent '{self.amd.name}'. Run `agent.run_mcp_server()` to start.")
    return self.mcp_server
LLMMessage dataclass

Represents a message in a conversation, compatible with LiteLLM.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
@dataclass
class LLMMessage:
    """Represents a message in a conversation, compatible with LiteLLM."""
    role: Literal["user", "assistant", "system", "tool"]
    content: str | list[dict[str, Any]] # String or multimodal content (LiteLLM format)
    tool_call_id: Optional[str] = None # For tool responses
    name: Optional[str] = None # For tool calls/responses (function name)

    # Add tool_calls for assistant messages requesting tool use (LiteLLM format)
    tool_calls: list[dict[str, Any]] | None = None # e.g., [{"id": "call_123", "function": {"name": "...", "arguments": "{...}"}}]

    def to_dict(self) -> dict[str, Any]:
        """Converts to dict suitable for LiteLLM."""
        d = {
            "role": self.role,
            "content": self.content,
        }
        if self.tool_call_id: d["tool_call_id"] = self.tool_call_id
        if self.name: d["name"] = self.name
        if self.tool_calls: d["tool_calls"] = self.tool_calls
        return d
to_dict()

Converts to dict suitable for LiteLLM.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
def to_dict(self) -> dict[str, Any]:
    """Converts to dict suitable for LiteLLM."""
    d = {
        "role": self.role,
        "content": self.content,
    }
    if self.tool_call_id: d["tool_call_id"] = self.tool_call_id
    if self.name: d["name"] = self.name
    if self.tool_calls: d["tool_calls"] = self.tool_calls
    return d
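Construction examples (values illustrative); to_dict() yields exactly the shape LiteLLM expects:

msg = LLMMessage(role="user", content="Hello")
tool_reply = LLMMessage(role="tool", content='{"ok": true}',
                        tool_call_id="call_123", name="lookup")
payload = [msg.to_dict(), tool_reply.to_dict()]  # ready for acompletion(messages=...)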
WorldModel dataclass

Thread-safe persistent understanding of the world for the agent.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
@dataclass
class WorldModel:
    """Thread-safe persistent understanding of the world for the agent."""
    data: dict[str, Any] = dataclass_field(default_factory=dict)
    _lock: SkipValidation[threading.Lock] = dataclass_field(default_factory=threading.Lock, init=False, repr=False)

    def get(self, key: str, default: Any = None) -> Any:
        with self._lock:
            return self.data.get(key, default)

    def set(self, key: str, value: Any):
        with self._lock:
            logger.debug(f"WorldModel SET: {key} = {value}")
            self.data[key] = value

    def remove(self, key: str):
        with self._lock:
            if key in self.data:
                logger.debug(f"WorldModel REMOVE: {key}")
                del self.data[key]

    def show(self) -> str:
        with self._lock:
            if not self.data:
                return "[empty]"
            try:
                items = [f"- {k}: {json.dumps(v, indent=None, ensure_ascii=False, default=str)}"
                         for k, v in self.data.items()]
                return "\n".join(items)
            except Exception:
                items = [f"- {k}: {str(v)}" for k, v in self.data.items()]
                return "\n".join(items)

    def to_dict(self) -> dict[str, Any]:
        with self._lock:
            return self.data.copy()

    def update_from_dict(self, data_dict: dict[str, Any]):
        with self._lock:
            self.data.update(data_dict)
            logger.debug(f"WorldModel updated from dict: {list(data_dict.keys())}")

    def sync_from_adk_state(self, adk_state: State):
        """Updates the WorldModel from an ADK Session State."""
        if not ADK_AVAILABLE or not isinstance(adk_state, State):
            return
        with self._lock:
            # Simple overwrite strategy, could be more sophisticated (merge, etc.)
            self.data = adk_state.to_dict() # ADK State is dict-like
            logger.debug(f"WorldModel synced FROM ADK state. Keys: {list(self.data.keys())}")

    def sync_to_adk_state(self, adk_state: State):
        """Updates an ADK Session State from the WorldModel."""
        if not ADK_AVAILABLE or not isinstance(adk_state, State):
            return
        with self._lock:
            # Update the ADK state dictionary directly
            adk_state.update(self.data)
            logger.debug(f"WorldModel synced TO ADK state. Keys: {list(adk_state.keys())}")
sync_from_adk_state(adk_state)

Updates the WorldModel from an ADK Session State.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py, lines 324-331
def sync_from_adk_state(self, adk_state: State):
    """Updates the WorldModel from an ADK Session State."""
    if not ADK_AVAILABLE or not isinstance(adk_state, State):
        return
    with self._lock:
        # Simple overwrite strategy, could be more sophisticated (merge, etc.)
        self.data = adk_state.to_dict() # ADK State is dict-like
        logger.debug(f"WorldModel synced FROM ADK state. Keys: {list(self.data.keys())}")
sync_to_adk_state(adk_state)

Updates an ADK Session State from the WorldModel.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py, lines 333-340
def sync_to_adk_state(self, adk_state: State):
    """Updates an ADK Session State from the WorldModel."""
    if not ADK_AVAILABLE or not isinstance(adk_state, State):
        return
    with self._lock:
        # Update the ADK state dictionary directly
        adk_state.update(self.data)
        logger.debug(f"WorldModel synced TO ADK state. Keys: {list(adk_state.keys())}")
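
Example: a minimal usage sketch of the thread-safe accessors (import path assumed from the source location above). The ADK sync methods are no-ops unless ADK is installed.

from toolboxv2.mods.isaa.base.Agent.agent import WorldModel

wm = WorldModel()
wm.set("user_timezone", "Europe/Berlin")            # thread-safe write
wm.set("open_tasks", ["triage inbox", "write report"])

assert wm.get("user_timezone") == "Europe/Berlin"   # thread-safe read
print(wm.show())                                    # "- key: value" listing

wm.remove("open_tasks")
snapshot = wm.to_dict()                             # shallow copy of the data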
builder
BuilderConfig

Bases: BaseModel

Serializable configuration state for the EnhancedAgentBuilder.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py, lines 291-339
class BuilderConfig(BaseModel):
    """Serializable configuration state for the EnhancedAgentBuilder."""
    agent_name: str = "UnnamedEnhancedAgent"
    agent_version: str = "0.1.0"

    # Core Model Config (Subset of AgentModelData, as some are instance-specific like BudgetManager)
    model_identifier: str | None = None
    formatter_llm_model: str | None = None
    system_message: str = "You are a helpful AI assistant."
    temperature: float | None = None
    top_k: int | None = None
    top_p: float | None = None
    max_tokens_output: int | None = None # Max tokens for LLM *generation*
    max_tokens_input: int | None = None # Max context window (for trimming)
    api_key_env_var: str | None = None # Store env var name, not the key itself
    api_base: str | None = None
    api_version: str | None = None
    stop_sequence: list[str] | None = None
    llm_user_id: str | None = None # 'user' param for LLM calls
    enable_litellm_caching: bool = True

    # Agent Behavior
    enable_streaming: bool = False
    verbose_logging: bool = False
    world_model_initial_data: dict[str, Any] | None = None
    history: BuilderHistoryConfig = Field(default_factory=BuilderHistoryConfig)

    # Framework Integrations
    adk: BuilderADKConfig = Field(default_factory=BuilderADKConfig)
    a2a: BuilderA2AConfig = Field(default_factory=BuilderA2AConfig)
    mcp: BuilderMCPConfig = Field(default_factory=BuilderMCPConfig)

    # Cost Tracking (Configuration for persistence)
    cost_tracker_config: dict[str, Any] | None = Field(default={'type': 'json', 'filepath': './user_costs.json'}, description="Config for UserCostTracker (e.g., type, path)")

    # Observability (Configuration)
    telemetry_config: dict[str, Any] | None = Field(default={'enabled': False, 'service_name': None, 'endpoint': None}, description="Basic OTel config hints")

    model_config = ConfigDict(validate_assignment=True)

    @model_validator(mode='after')
    def _resolve_names(self) -> 'BuilderConfig':
        # Ensure service name defaults to agent name if not set
        if self.telemetry_config and self.telemetry_config.get('enabled') and not self.telemetry_config.get('service_name'):
            self.telemetry_config['service_name'] = self.agent_name
        # Ensure MCP server name defaults if not set
        if self.mcp.enabled and not self.mcp.server_name:
             self.mcp.server_name = f"{self.agent_name}_MCPServer"
        return self
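
Example: a hedged sketch of a config round-trip; the model identifier is illustrative and the nested field defaults are assumed from the builder code above. Mutating a nested model such as cfg.mcp does not re-run BuilderConfig's validator, so dependent defaults are refreshed by revalidating.

from toolboxv2.mods.isaa.base.Agent.builder import BuilderConfig

cfg = BuilderConfig(agent_name="ReportAgent", model_identifier="openai/gpt-4o-mini")
cfg.mcp.enabled = True

# Revalidate so the model_validator fills dependent defaults.
cfg = BuilderConfig.model_validate(cfg.model_dump())
assert cfg.mcp.server_name == "ReportAgent_MCPServer"

json_blob = cfg.model_dump_json(indent=2)  # same shape save_config() writes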
EnhancedAgentBuilder

Fluent builder for configuring and constructing production-ready EnhancedAgent instances. Supports loading configuration from files and provides methods for detailed setup.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py, lines 344-1078
class EnhancedAgentBuilder:
    """
    Fluent builder for configuring and constructing production-ready EnhancedAgent instances.
    Supports loading configuration from files and provides methods for detailed setup.
    """

    def __init__(self, agent_name: str = "DefaultAgent", config: BuilderConfig | None = None, config_path: str | Path | None = None):
        """
        Initialize the builder. Can start with a config object, path, or blank.

        Args:
            agent_name: Name for the agent (defaults to "DefaultAgent").
            config: An existing BuilderConfig object.
            config_path: Path to a JSON configuration file for the builder.
        """
        if config and config_path:
            raise ValueError("Provide either config object or config_path, not both.")

        if config_path:
            self.load_config(config_path) # Sets self._config
        elif config:
            self._config = config.model_copy(deep=True)  # Pydantic v2 deep copy
        else:
            self._config = BuilderConfig() # Start with defaults

        # --- Transient fields (not saved/loaded directly via BuilderConfig JSON) ---
        # Instances or non-serializable objects provided programmatically.
        self._adk_tools_transient: list[ADKBaseTool | Callable] = []
        self._adk_code_executor_instance: ADKBaseCodeExecutor | None = None
        self._adk_runner_instance: ADKRunner | None = None
        self._adk_session_service_instance: ADKSessionService | None = None
        self._adk_planner_instance: ADKPlanner | None = None
        self._litellm_budget_manager_instance: BudgetManager | None = None
        self._user_cost_tracker_instance: UserCostTracker | None = None
        self._otel_trace_provider_instance: TracerProvider | None = None
        self._callbacks_transient: dict[str, Callable] = {}
        # Pre-initialized server instances (less common, but possible)
        self._a2a_server_instance: A2AServer | None = None
        self._mcp_server_instance: FastMCP | None = None

        # Set initial log level based on loaded config
        logger.setLevel(logging.DEBUG if self._config.verbose_logging else logging.INFO)
        self.with_agent_name(agent_name)

    # --- Configuration Save/Load ---

    def save_config(self, path: str | Path, indent: int = 2):
        """Saves the current builder configuration to a JSON file."""
        filepath = Path(path)
        try:
            filepath.parent.mkdir(parents=True, exist_ok=True)
            config_json = self._config.model_dump_json(indent=indent)
            with open(filepath, 'w') as f:
                f.write(config_json)
            logger.info(f"Builder configuration saved to {filepath}")
        except OSError as e:
            logger.error(f"Failed to save builder configuration to {filepath}: {e}")
        except ValidationError as e:
             logger.error(f"Configuration is invalid, cannot save: {e}")
        except Exception as e:
             logger.error(f"An unexpected error occurred during config save: {e}")


    def load_config(self, path: str | Path) -> 'EnhancedAgentBuilder':
        """Loads builder configuration from a JSON file, overwriting current settings."""
        filepath = Path(path)
        if not filepath.exists():
            raise FileNotFoundError(f"Builder configuration file not found: {filepath}")
        try:
            with open(filepath) as f:
                config_data = json.load(f)
            self._config = BuilderConfig.model_validate(config_data)
            logger.info(f"Builder configuration loaded from {filepath}")
            # Reset transient fields, as they are not saved
            self._reset_transient_fields()
            logger.warning("Transient fields (callbacks, tool instances, tracker instance, etc.) reset. Re-add them if needed.")
            # Update logger level based on loaded config
            logger.setLevel(logging.DEBUG if self._config.verbose_logging else logging.INFO)
        except (OSError, json.JSONDecodeError) as e:
            logger.error(f"Failed to load or parse builder configuration from {filepath}: {e}")
            raise
        except ValidationError as e:
             logger.error(f"Loaded configuration data is invalid: {e}")
             raise
        return self

    def _reset_transient_fields(self):
        """Clears fields that are not part of the saved BuilderConfig."""
        self._adk_tools_transient = []
        self._adk_code_executor_instance = None
        self._adk_runner_instance = None
        self._adk_session_service_instance = None
        self._adk_planner_instance = None
        self._litellm_budget_manager_instance = None
        self._user_cost_tracker_instance = None
        self._otel_trace_provider_instance = None
        self._callbacks_transient = {}
        self._a2a_server_instance = None
        self._mcp_server_instance = None

    # --- Fluent Configuration Methods (Modify self._config) ---

    def with_agent_name(self, name: str) -> 'EnhancedAgentBuilder':
        self._config.agent_name = name
        # Update dependent defaults
        self._config = BuilderConfig.model_validate(self._config.model_dump())
        return self

    def with_agent_version(self, version: str) -> 'EnhancedAgentBuilder':
        self._config.agent_version = version
        return self

    def with_model(self, model_identifier: str) -> 'EnhancedAgentBuilder':
        self._config.model_identifier = model_identifier
        # Auto-detect context window if not set
        if not self._config.max_tokens_input:
            try:
                max_input = get_max_tokens(model_identifier)
                if max_input:
                    self._config.max_tokens_input = max_input
                    logger.info(f"Auto-detected max_input_tokens for {model_identifier}: {max_input}")
                else:
                     # Default fallback if detection fails
                    self._config.max_tokens_input = 4096
                    logger.warning(f"Could not auto-detect max_input_tokens for {model_identifier}, defaulting to 4096.")
            except Exception as e:
                 self._config.max_tokens_input = 4096
                 logger.warning(f"Error auto-detecting max_input_tokens ({e}), defaulting to 4096.")
        # Auto-configure Ollama base URL
        if 'ollama/' in model_identifier and not self._config.api_base:
            self.with_api_base("http://localhost:11434") # Uses the method to log
        return self

    def with_system_message(self, message: str) -> 'EnhancedAgentBuilder':
        self._config.system_message = message
        return self

    def with_temperature(self, temp: float) -> 'EnhancedAgentBuilder':
        self._config.temperature = temp
        return self

    def with_max_output_tokens(self, tokens: int) -> 'EnhancedAgentBuilder':
        self._config.max_tokens_output = tokens
        return self

    def with_max_input_tokens(self, tokens: int) -> 'EnhancedAgentBuilder':
        self._config.max_tokens_input = tokens
        return self

    def with_stop_sequence(self, stop: list[str]) -> 'EnhancedAgentBuilder':
        self._config.stop_sequence = stop
        return self

    def with_api_key_from_env(self, env_var_name: str) -> 'EnhancedAgentBuilder':
        self._config.api_key_env_var = env_var_name
        # Quick check if env var exists
        if not os.getenv(env_var_name):
            logger.warning(f"API key environment variable '{env_var_name}' is not set.")
        return self

    def with_api_base(self, base_url: str | None) -> 'EnhancedAgentBuilder':
        self._config.api_base = base_url
        logger.info(f"API base set to: {base_url}")
        return self

    def with_api_version(self, version: str | None) -> 'EnhancedAgentBuilder':
        self._config.api_version = version
        return self

    def with_llm_user_id(self, user_id: str) -> 'EnhancedAgentBuilder':
        self._config.llm_user_id = user_id
        return self

    def enable_litellm_caching(self, enable: bool = True) -> 'EnhancedAgentBuilder':
        self._config.enable_litellm_caching = enable
        return self

    def enable_streaming(self, enable: bool = True) -> 'EnhancedAgentBuilder':
        self._config.enable_streaming = enable
        return self

    def verbose(self, enable: bool = True) -> 'EnhancedAgentBuilder':
        self._config.verbose_logging = enable
        logger.setLevel(logging.DEBUG if enable else logging.INFO)
        os.environ['LITELLM_LOG'] = 'DEBUG' if enable else 'NONE' # Control LiteLLM verbosity too
        return self

    def formatter_llm_model(self, model: str) -> 'EnhancedAgentBuilder':
        self._config.formatter_llm_model = model
        return self

    def with_initial_world_data(self, data: dict[str, Any]) -> 'EnhancedAgentBuilder':
        self._config.world_model_initial_data = data
        return self

    def with_history_options(self, max_turns: int | None = 20, max_tokens: int | None = None, trim_strategy: Literal["litellm", "basic"] = "litellm") -> 'EnhancedAgentBuilder':
        self._config.history = BuilderHistoryConfig(max_turns=max_turns, max_tokens=max_tokens, trim_strategy=trim_strategy)
        return self

    # --- ADK Configuration Methods ---
    def _ensure_adk(self, feature: str):
        if not ADK_AVAILABLE:
            logger.warning(f"ADK not available. Cannot configure ADK feature: {feature}.")
            return False
        self._config.adk.enabled = True # Mark ADK as enabled if any ADK feature is used
        return True

    def enable_adk(self, runner_class: type[ADKRunner] = InMemoryRunner, runner_options: dict[str, Any] | None = None) -> 'EnhancedAgentBuilder':
        """Enables ADK integration with a specified runner."""
        if not self._ensure_adk("Runner"): return self
        self._config.adk.runner_class_name = runner_class.__name__
        self._config.adk.runner_options = runner_options or {}
        logger.info(f"ADK integration enabled with runner: {self._config.adk.runner_class_name}")
        return self

    def with_adk_description(self, description: str) -> 'EnhancedAgentBuilder':
        if not self._ensure_adk("Description"): return self
        self._config.adk.description = description
        return self

    def with_adk_tool_instance(self, tool: ADKBaseTool) -> 'EnhancedAgentBuilder':
        """Adds a pre-initialized ADK Tool instance (transient)."""
        if not self._ensure_adk("Tool Instance"): return self
        if not isinstance(tool, ADKBaseTool):
            raise TypeError(f"Expected ADK BaseTool instance, got {type(tool)}")
        self._adk_tools_transient.append(tool)
        return self

    def with_adk_tool_function(self, func: Callable, name: Optional[str] = None,
                               description: Optional[str] = None) -> 'EnhancedAgentBuilder':
        """Adds a callable function as an ADK tool (transient)."""
        if not self._ensure_adk("Tool Function"):
            return self
        if not callable(func):
            raise TypeError(f"Expected callable function for ADK tool, got {type(func)}")
        if name:
            func.__name__ = name
        if description:
            func.__doc__ = description
        tool = FunctionTool(func)
        self._adk_tools_transient.append(tool)
        return self

    def with_adk_mcp_toolset(self, connection_type: Literal["stdio", "sse"], **kwargs) -> 'EnhancedAgentBuilder':
        """Configures an ADK MCP Toolset connection (saved in config)."""
        if not self._ensure_adk("MCP Toolset"): return self
        if connection_type == "stdio":
            if "command" not in kwargs: raise ValueError("Stdio MCP toolset requires 'command' argument.")
            config = {"type": "stdio", "command": kwargs["command"], "args": kwargs.get("args", [])}
        elif connection_type == "sse":
            if "url" not in kwargs: raise ValueError("SSE MCP toolset requires 'url' argument.")
            config = {"type": "sse", "url": kwargs["url"]}
        else:
            raise ValueError(f"Unknown MCP toolset connection type: {connection_type}")
        self._config.adk.mcp_toolset_configs.append(config)
        logger.info(f"Configured ADK MCP Toolset: {config}")
        return self

    def with_adk_code_executor(self, executor_type: Literal["adk_builtin", "unsafe_simple", "secure_placeholder", "none"]) -> 'EnhancedAgentBuilder':
        """Configures the type of ADK code executor to use (saved in config)."""
        if not self._ensure_adk("Code Executor Type"): return self
        if executor_type == "unsafe_simple":
            logger.critical("***********************************************************")
            logger.critical("*** WARNING: Configuring UNSAFE SimplePythonExecutor!   ***")
            logger.critical("***********************************************************")
        elif executor_type == "secure_placeholder":
            logger.warning("Configuring SecureCodeExecutorPlaceholder. Implement actual sandboxing!")
        elif executor_type == "adk_builtin":
            if self._config.model_identifier and ("gemini-1.5" not in self._config.model_identifier and "gemini-2" not in self._config.model_identifier) :
                logger.warning(f"ADK built-in code execution selected, but model '{self._config.model_identifier}' might not support it. Ensure model compatibility.")
            logger.info("Configuring ADK built-in code execution (tool-based, requires compatible model).")

        self._config.adk.code_executor_config = executor_type
        self._adk_code_executor_instance = None # Clear any previously set instance
        return self

    def with_adk_code_executor_instance(self, executor: ADKBaseCodeExecutor) -> 'EnhancedAgentBuilder':
        """Provides a pre-initialized ADK code executor instance (transient)."""
        if not self._ensure_adk("Code Executor Instance"): return self
        if not isinstance(executor, ADKBaseCodeExecutor):
            raise TypeError(f"Expected ADKBaseCodeExecutor instance, got {type(executor)}")
        self._adk_code_executor_instance = executor
        self._config.adk.code_executor_config = "custom_instance" # Mark config
        logger.info(f"Using custom ADK code executor instance: {type(executor).__name__}")
        return self

    def enable_adk_state_sync(self, enable: bool = True) -> 'EnhancedAgentBuilder':
        if not self._ensure_adk("State Sync"): return self
        self._config.adk.sync_state = enable
        return self

    # --- Server Configuration Methods ---
    def enable_a2a_server(self, host: str = "0.0.0.0", port: int = 5000, **extra_options) -> 'EnhancedAgentBuilder':
        if not A2A_AVAILABLE:
            logger.warning("python-a2a library not available. Cannot enable A2A server.")
            self._config.a2a.enabled = False
            return self
        self._config.a2a.enabled = True
        self._config.a2a.host = host
        self._config.a2a.port = port
        self._config.a2a.extra_options = extra_options
        return self

    def add_a2a_known_client(self, name: str, url: str) -> 'EnhancedAgentBuilder':
        if not A2A_AVAILABLE:
            logger.warning("python-a2a library not available. Cannot add known A2A client.")
            return self
        # A2A client setup is handled by the agent itself, we just store the config
        self._config.a2a.known_clients[name] = url
        logger.info(f"Added known A2A client config: '{name}' -> {url}")
        return self

    def enable_mcp_server(self, host: str = "0.0.0.0", port: int = 8000, server_name: str | None = None, **extra_options) -> 'EnhancedAgentBuilder':
         if not MCP_AVAILABLE:
             logger.warning("MCP library (FastMCP) not available. Cannot enable MCP server.")
             self._config.mcp.enabled = False
             return self
         self._config.mcp.enabled = True
         self._config.mcp.host = host
         self._config.mcp.port = port
         self._config.mcp.server_name = server_name # Will default later if None
         self._config.mcp.extra_options = extra_options
         # Re-validate to update default name if needed
         self._config = BuilderConfig.model_validate(self._config.model_dump())
         return self

    # --- Cost Tracking & Budgeting Methods ---
    def with_cost_tracker(self, tracker: UserCostTracker) -> 'EnhancedAgentBuilder':
        """Provides a pre-initialized UserCostTracker instance (transient)."""
        if not hasattr(tracker, "get_all_costs"): # Duck-type check for the UserCostTracker protocol
            raise TypeError("Cost tracker must implement the UserCostTracker protocol.")
        self._user_cost_tracker_instance = tracker
        # Clear file config if instance is provided
        self._config.cost_tracker_config = {'type': 'custom_instance'}
        logger.info(f"Using custom UserCostTracker instance: {type(tracker).__name__}")
        return self

    def with_json_cost_tracker(self, filepath: str | Path) -> 'EnhancedAgentBuilder':
        """Configures the builder to use the JsonFileUserCostTracker (saved in config)."""
        self._config.cost_tracker_config = {'type': 'json', 'filepath': str(filepath)}
        self._user_cost_tracker_instance = None # Clear any instance
        logger.info(f"Configured JsonFileUserCostTracker: {filepath}")
        return self

    def with_litellm_budget_manager(self, manager: BudgetManager) -> 'EnhancedAgentBuilder':
        """Provides a pre-initialized LiteLLM BudgetManager instance (transient)."""
        if not LITELLM_AVAILABLE:
             logger.warning("LiteLLM not available, cannot set BudgetManager.")
             return self
        if not isinstance(manager, BudgetManager):
            raise TypeError("Expected litellm.BudgetManager instance.")
        self._litellm_budget_manager_instance = manager
        return self

    # --- Observability Methods ---
    def enable_telemetry(self, service_name: str | None = None, endpoint: str | None = None) -> 'EnhancedAgentBuilder':
         if not OTEL_AVAILABLE:
              logger.warning("OpenTelemetry SDK not available. Cannot enable telemetry.")
              self._config.telemetry_config = {'enabled': False}
              return self
         self._config.telemetry_config = {
             'enabled': True,
             'service_name': service_name, # Defaults to agent name later
             'endpoint': endpoint # For OTLP exporter, e.g. "http://localhost:4317"
         }
         # Re-validate to update default name if needed
         self._config = BuilderConfig.model_validate(self._config.model_dump())
         return self

    def with_telemetry_provider_instance(self, provider: TracerProvider) -> 'EnhancedAgentBuilder':
        """Provides a pre-initialized OpenTelemetry TracerProvider instance (transient)."""
        if not OTEL_AVAILABLE:
            logger.warning("OpenTelemetry SDK not available. Cannot set TracerProvider.")
            return self
        if not isinstance(provider, TracerProvider):
             raise TypeError("Expected opentelemetry.sdk.trace.TracerProvider instance.")
        self._otel_trace_provider_instance = provider
        # Mark telemetry as enabled, but using custom instance
        self._config.telemetry_config = {'enabled': True, 'type': 'custom_instance'}
        logger.info("Using custom OpenTelemetry TracerProvider instance.")
        return self

    # --- Callback Methods (Transient) ---
    def with_stream_callback(self, func: Callable[[str], None | Awaitable[None]]) -> 'EnhancedAgentBuilder':
        self._callbacks_transient['stream_callback'] = func; return self
    def with_post_run_callback(self, func: Callable[[str, str, float, str | None], None | Awaitable[None]]) -> 'EnhancedAgentBuilder':
        self._callbacks_transient['post_run_callback'] = func; return self # Added user_id
    def with_progress_callback(self, func: Callable[[Any], None | Awaitable[None]]) -> 'EnhancedAgentBuilder':
        self._callbacks_transient['progress_callback'] = func; return self
    def with_human_in_loop_callback(self, func: Callable[[dict], str | Awaitable[str]]) -> 'EnhancedAgentBuilder':
        self._callbacks_transient['human_in_loop_callback'] = func; return self

    # --- Build Method ---
    async def build(self) -> EnhancedAgent:
        """
        Constructs and returns the configured EnhancedAgent instance.
        Handles asynchronous setup like fetching ADK MCP tools.
        """
        logger.info(f"--- Building EnhancedAgent: {self._config.agent_name} v{self._config.agent_version} ---")

        # 1. Final Config Validation (Pydantic model handles most)
        if not self._config.model_identifier:
            raise ValueError("LLM model identifier is required. Use .with_model()")

        # 2. Resolve API Key
        api_key = None
        if self._config.api_key_env_var:
            api_key = os.getenv(self._config.api_key_env_var)
            if not api_key:
                logger.warning(f"API key environment variable '{self._config.api_key_env_var}' is set in config but not found in environment.")
            # else: logger.debug("API key loaded from environment variable.") # Avoid logging key presence

        # 3. Setup Telemetry Provider (if instance provided)
        if self._otel_trace_provider_instance and OTEL_AVAILABLE:
            trace.set_tracer_provider(self._otel_trace_provider_instance)
            logger.info("Global OpenTelemetry TracerProvider set from provided instance.")
        elif self._config.telemetry_config.get('enabled') and self._config.telemetry_config.get('type') != 'custom_instance' and OTEL_AVAILABLE:
             # Basic provider setup from config (can be expanded)
             logger.info("Setting up basic OpenTelemetry based on config (ConsoleExporter example).")
             from opentelemetry.sdk.trace.export import (
                 BatchSpanProcessor,
                 ConsoleSpanExporter,
             )
             provider = TracerProvider()
             provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
             # TODO: Add OTLP exporter based on self._config.telemetry_config['endpoint']
             trace.set_tracer_provider(provider)
             self._otel_trace_provider_instance = provider # Store for potential access?

        # 4. Prepare Core Components
        # Agent Model Data
        try:
            amd = AgentModelData(
                name=self._config.agent_name,
                model=self._config.model_identifier,
                system_message=self._config.system_message,
                temperature=self._config.temperature,
                top_k=self._config.top_k,
                top_p=self._config.top_p,
                max_tokens=self._config.max_tokens_output,
                max_input_tokens=self._config.max_tokens_input,
                api_key=api_key,
                api_base=self._config.api_base,
                api_version=self._config.api_version,
                stop_sequence=self._config.stop_sequence,
                user_id=self._config.llm_user_id,
                budget_manager=self._litellm_budget_manager_instance,
                caching=self._config.enable_litellm_caching
            )
        except ValidationError as e:
            logger.error(f"Validation error creating AgentModelData: {e}")
            raise

        # World Model
        world_model = self._config.world_model_initial_data or {}

        # User Cost Tracker
        cost_tracker = self._user_cost_tracker_instance # Use provided instance if available
        if not cost_tracker and self._config.cost_tracker_config:
            tracker_type = self._config.cost_tracker_config.get('type')
            if tracker_type == 'json':
                filepath = self._config.cost_tracker_config.get('filepath')
                if filepath:
                    cost_tracker = JsonFileUserCostTracker(filepath)
                    logger.info(f"Initialized JsonFileUserCostTracker ({filepath})")
                else:
                    logger.warning("JSON cost tracker configured but filepath missing.")
            elif tracker_type == 'custom_instance':
                 logger.warning("Cost tracker configured as 'custom_instance' but no instance was provided via .with_cost_tracker().")
            # Add other tracker types (DB, InMemory) here

        # 5. Prepare ADK Components
        adk_runner_instance = self._adk_runner_instance
        adk_session_service = self._adk_session_service_instance
        adk_planner_instance = self._adk_planner_instance
        adk_code_executor = self._adk_code_executor_instance # Use provided instance first
        adk_exit_stack = None
        processed_adk_tools = list(self._adk_tools_transient) # Start with transient tools

        if ADK_AVAILABLE and self._config.adk.enabled:
            logger.info("Configuring ADK components...")
            adk_exit_stack = contextlib.AsyncExitStack()

            # --- ADK Runner & Session Service ---
            if not adk_runner_instance:
                runner_cls_name = self._config.adk.runner_class_name
                runner_opts = self._config.adk.runner_options
                try:
                    # Dynamically import/get runner class
                    if runner_cls_name == "InMemoryRunner": runner_class = InMemoryRunner
                    elif runner_cls_name == "Runner": runner_class = Runner
                    elif runner_cls_name == "AsyncWebRunner": runner_class = AsyncWebRunner # If available
                    else: raise ValueError(f"Unsupported ADK Runner class name: {runner_cls_name}")

                    # Special handling: InMemoryRunner needs agent instance *later*
                    if runner_class is InMemoryRunner or runner_class is Runner:
                         logger.debug("Deferring InMemoryRunner creation until after agent instantiation.")
                         # Store config to create it later
                         adk_runner_config_for_later = {
                             "runner_class": runner_class,
                             "app_name": runner_opts.get("app_name", f"{self._config.agent_name}_ADKApp"),
                             "session_service": adk_session_service, # Pass service if already created
                             **runner_opts # Pass other options
                         }
                         adk_runner_instance = None # Ensure it's None for now
                    else: # Other runners might be creatable now
                         # Need to ensure session service is handled correctly if runner needs it
                         if not adk_session_service:
                             # Create default session service if needed by runner
                             # This part is complex as runners might create their own
                             logger.info("Using default ADK InMemorySessionService for runner.")
                             adk_session_service = InMemorySessionService()

                         adk_runner_instance = runner_class(
                             session_service=adk_session_service,
                             app_name=runner_opts.get("app_name", f"{self._config.agent_name}_ADKApp"),
                             **runner_opts # Pass other options
                         )
                         logger.info(f"Created ADK Runner instance: {runner_cls_name}")

                except (ImportError, ValueError, TypeError) as e:
                    logger.error(f"Failed to configure ADK Runner '{runner_cls_name}': {e}", exc_info=True)
                    raise ValueError(f"Failed to setup ADK Runner: {e}") from e

            # Ensure session service exists if runner created one
            if adk_runner_instance and hasattr(adk_runner_instance, 'session_service'):
                 if not adk_session_service:
                     adk_session_service = adk_runner_instance.session_service
                 elif adk_session_service is not adk_runner_instance.session_service:
                     logger.warning("Provided ADK SessionService differs from the one in the provided ADK Runner. Using the runner's service.")
                     adk_session_service = adk_runner_instance.session_service

            # Fallback: create default session service if none exists by now
            if not adk_session_service:
                  logger.info("Using default ADK InMemorySessionService.")
                  adk_session_service = InMemorySessionService()


            # --- ADK Code Executor ---
            if not adk_code_executor: # If instance wasn't provided directly
                executor_config = self._config.adk.code_executor_config
                if executor_config == "unsafe_simple":
                    adk_code_executor = UnsafeSimplePythonExecutor()
                    logger.critical("UNSAFE code executor instance created!")
                elif executor_config == "secure_placeholder":
                    adk_code_executor = SecureCodeExecutorPlaceholder()
                    logger.warning("SecureCodeExecutorPlaceholder instance created.")
                elif executor_config == "adk_builtin":
                    # This type uses the TOOL, not an executor instance passed to LlmAgent init
                    adk_code_executor = adk_built_in_code_execution
                    #if not any(getattr(t, 'func', None) == tool_func for t in processed_adk_tools if isinstance(t, FunctionTool)):
                    #     tool_func.__name__ = "code_execution"
                    # processed_adk_tools.append(tool_func)
                    #     logger.info("Added ADK built-in code execution tool.")
                    adk_code_executor = None # Ensure no executor instance is passed for this case
                elif executor_config == "none":
                    adk_code_executor = None
                elif executor_config == "custom_instance":
                    # Should have been provided via .with_adk_code_executor_instance()
                    logger.error("ADK code executor configured as 'custom_instance' but no instance was provided.")
                    adk_code_executor = None
                # Add handling for dict config if needed in the future

            # --- ADK Tools (Wrap callables) ---
            temp_tools = []
            for tool_input in processed_adk_tools:
                 if isinstance(tool_input, ADKBaseTool):
                     temp_tools.append(tool_input)
                 elif callable(tool_input):
                     try:
                         wrapped = ADKFunctionTool(func=tool_input)
                         temp_tools.append(wrapped)
                     except Exception as e: logger.warning(f"Could not wrap callable '{getattr(tool_input, '__name__', 'unknown')}' as ADK tool: {e}")
                 else: logger.warning(f"Skipping invalid ADK tool input: {type(tool_input)}")
            processed_adk_tools = temp_tools

            # --- ADK MCP Toolsets ---
            for mcp_conf in self._config.adk.mcp_toolset_configs:
                 logger.info(f"Fetching tools from configured MCP Server: {mcp_conf}...")
                 try:
                      params = None
                      if mcp_conf.get("type") == "stdio":
                          params = StdioServerParameters(command=mcp_conf["command"], args=mcp_conf.get("args", []))
                      elif mcp_conf.get("type") == "sse":
                           params = SseServerParams(url=mcp_conf["url"])

                      if params:
                          mcp_tools, _ = await MCPToolset.from_server(
                              connection_params=params,
                              async_exit_stack=adk_exit_stack
                          )
                          for tool in mcp_tools: tool._is_mcp_tool = True
                          processed_adk_tools.extend(mcp_tools)
                          logger.info(f"Fetched {len(mcp_tools)} tools via ADK MCPToolset ({mcp_conf.get('type')}).")
                      else:
                           logger.warning(f"Unsupported MCP config type: {mcp_conf.get('type')}")

                 except Exception as e:
                      logger.error(f"Failed to fetch tools from MCP server {mcp_conf}: {e}", exc_info=True)
                      # Decide whether to raise or continue

            # --- ADK Planner, Examples, Output Schema ---



        # 6. Instantiate EnhancedAgent
        try:
            # Base arguments for EnhancedAgent
            agent_init_kwargs = {
                'amd': amd,
                'world_model': world_model,
                'format_model': self._config.formatter_llm_model if self._config.formatter_llm_model else None, # Example passing extra config
                'verbose': self._config.verbose_logging,
                'stream': self._config.enable_streaming,
                'max_history_turns': self._config.history.max_turns,
                'max_history_tokens': self._config.history.max_tokens,
                'trim_strategy': self._config.history.trim_strategy,
                'sync_adk_state': self._config.adk.sync_state if ADK_AVAILABLE else False,
                'adk_exit_stack': adk_exit_stack, # Pass stack for cleanup
                'user_cost_tracker': cost_tracker, # Pass the tracker instance
                **self._callbacks_transient, # Pass configured callbacks
                # Pass server instances if provided (less common)
                'a2a_server': self._a2a_server_instance,
                'mcp_server': self._mcp_server_instance,
            }

            # Add ADK-specific arguments if inheriting from LlmAgent
            agent_class = EnhancedAgent
            if ADK_AVAILABLE and issubclass(EnhancedAgent, ADKLlmAgent):
                 logger.debug("Adding ADK LlmAgent specific arguments to init.")
                 adk_specific_kwargs = {
                     'name': self._config.agent_name, # Required by LlmAgent
                     'model': LiteLlm(model=self._config.model_identifier), # LlmAgent needs BaseLlm instance
                     'description': self._config.adk.description or self._config.system_message,
                     'instruction': self._config.system_message, # Or dedicated instruction field?
                     'tools': processed_adk_tools,
                     'code_executor': adk_code_executor, # Pass the *instance*
                     'planner': adk_planner_instance,
                     # Process examples/schema if needed
                     'examples': [ADKExample(**ex) for ex in self._config.adk.examples] if self._config.adk.examples else None,
                     'output_schema': self._config.adk.output_schema,
                     # Pass runner/session service if NOT using InMemoryRunner deferred creation
                     # If runner is created later, it's assigned post-init
                     'runner': adk_runner_instance if adk_runner_instance else None, # Pass runner if created now
                     'session_service': adk_session_service, # Pass session service
                 }
                 # Merge, ensuring agent_init_kwargs takes precedence for overlapping basic fields if necessary
                 # but allow ADK specifics to be added. Be careful with overlaps like 'name'.
                 # EnhancedAgent init should handle reconciling these if needed.
                 # A safer merge:
                 final_kwargs = agent_init_kwargs.copy()
                 for k, v in adk_specific_kwargs.items():
                      if k not in final_kwargs: # Only add ADK specifics not already handled
                          final_kwargs[k] = v
                      # Handle specific overrides/merges needed for LlmAgent base
                      elif k == 'tools' and v: # Merge tools
                          final_kwargs['tools'] = (final_kwargs.get('tools') or []) + v
                      # Overwrite description/instruction from ADK config if set
                      elif k in ['description', 'instruction'] and v or k == 'code_executor' or k == 'model':
                           final_kwargs[k] = v

                 agent_init_kwargs = final_kwargs


            logger.info(f"Final keys for EnhancedAgent init: {list(agent_init_kwargs.keys())}")
            logger.info(f"Final keys for EnhancedAgent init: {agent_init_kwargs}")

            # --- Instantiate the Agent ---
            agent = agent_class(**agent_init_kwargs)
            # --- Agent Instantiated ---

            # If ADK InMemoryRunner creation was deferred, create and assign now
            if ADK_AVAILABLE and 'adk_runner_config_for_later' in locals():
                 cfg = locals()['adk_runner_config_for_later']
                 # runner_class holds a class, not an instance, so compare classes directly
                 if cfg['runner_class'] is not InMemoryRunner and cfg.get('session_service') is None:
                     cfg['session_service'] = agent.adk_session_service # Ensure service is passed
                 agent.setup_adk_runner(cfg)
                 logger.info(f"Created and assigned deferred ADK Runner instance: {agent.adk_runner.__class__.__name__}")
                 # Ensure agent has runner's session service if it differs
                 if agent.adk_runner and agent.adk_session_service is not agent.adk_runner.session_service:
                      logger.warning("Agent session service differs from deferred runner's service. Updating agent's reference.")
                      agent.adk_session_service = agent.adk_runner.session_service
            elif ADK_AVAILABLE and adk_runner_instance and not agent.adk_runner:
                # If runner was created earlier but not passed via LlmAgent init (e.g. non-LlmAgent base)
                # Or if we want to explicitly assign it
                 agent.adk_runner = adk_runner_instance
                 # Ensure session service consistency
                 if agent.adk_session_service is not agent.adk_runner.session_service:
                      agent.adk_session_service = agent.adk_runner.session_service


        except ValidationError as e:
            logger.error(f"Pydantic validation error Instantiating EnhancedAgent: {e}", exc_info=True)
            raise
        except Exception as e:
            logger.error(f"Unexpected error Instantiating EnhancedAgent: {e}", exc_info=True)
            raise

        # 7. Setup Agent's Internal Server Capabilities (if enabled and not pre-initialized)
        if self._config.a2a.enabled and not agent.a2a_server:
            if AGENT_A2A_AVAILABLE:
                logger.info("Setting up A2A server on agent instance...")
                agent.setup_a2a_server(
                    host=self._config.a2a.host,
                    port=self._config.a2a.port,
                    **self._config.a2a.extra_options
                )
            else: logger.warning("A2A server configured in builder, but A2A not available in agent environment.")

        if self._config.mcp.enabled and not agent.mcp_server:
            if AGENT_MCP_AVAILABLE:
                logger.info("Setting up MCP server on agent instance...")
                agent.setup_mcp_server(
                    host=self._config.mcp.host,
                    port=self._config.mcp.port,
                    name=self._config.mcp.server_name, # Already defaulted
                    **self._config.mcp.extra_options
                )
            else: logger.warning("MCP server configured in builder, but MCP not available in agent environment.")

        # 8. Setup A2A known clients configuration on the agent
        if self._config.a2a.known_clients:
             if AGENT_A2A_AVAILABLE:
                 # The agent likely handles client creation on demand,
                 # but we can pass the config for it to use.
                 # Assuming agent has a way to receive this, e.g., during init or a setter
                 if hasattr(agent, 'set_known_a2a_clients'):
                     agent.set_known_a2a_clients(self._config.a2a.known_clients)
                 else:
                      # Fallback: store on a generic config dict? Less ideal.
                      # agent.config.a2a_known_clients = self._config.a2a.known_clients
                      logger.warning("Agent does not have 'set_known_a2a_clients' method. Known client config stored raw.")
             else:
                  logger.warning("A2A known clients configured, but A2A not available in agent env.")


        logger.info(f"--- EnhancedAgent Build Complete: {agent.amd.name} ---")
        return agent
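
Example: an end-to-end sketch of the fluent API. The model identifier, environment variable, and tool function are illustrative; build() is async and must be awaited.

import asyncio

from toolboxv2.mods.isaa.base.Agent.builder import EnhancedAgentBuilder

def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"Sunny in {city}"

async def main():
    builder = (
        EnhancedAgentBuilder(agent_name="WeatherAgent")
        .with_model("openai/gpt-4o-mini")        # illustrative model id
        .with_api_key_from_env("OPENAI_API_KEY")
        .with_system_message("You answer weather questions concisely.")
        .with_temperature(0.2)
        .with_history_options(max_turns=10)
        .with_adk_tool_function(get_weather)     # logs a warning if ADK missing
    )
    builder.save_config("./weather_agent.json")  # persists BuilderConfig only
    return await builder.build()

agent = asyncio.run(main())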
__init__(agent_name='DefaultAgent', config=None, config_path=None)

Initialize the builder. Can start with a config object, path, or blank.

Parameters:

    agent_name (str, default 'DefaultAgent'):
        Name for the agent.
    config (BuilderConfig | None, default None):
        An existing BuilderConfig object.
    config_path (str | Path | None, default None):
        Path to a JSON configuration file for the builder.
Source code in toolboxv2/mods/isaa/base/Agent/builder.py, lines 350-385
def __init__(self, agent_name: str = "DefaultAgent", config: BuilderConfig | None = None, config_path: str | Path | None = None):
    """
    Initialize the builder. Can start with a config object, path, or blank.

    Args:
        agent_name: Name for the agent (defaults to "DefaultAgent").
        config: An existing BuilderConfig object.
        config_path: Path to a JSON configuration file for the builder.
    """
    if config and config_path:
        raise ValueError("Provide either config object or config_path, not both.")

    if config_path:
        self.load_config(config_path) # Sets self._config
    elif config:
        self._config = config.model_copy(deep=True)  # Pydantic v2 deep copy
    else:
        self._config = BuilderConfig() # Start with defaults

    # --- Transient fields (not saved/loaded directly via BuilderConfig JSON) ---
    # Instances or non-serializable objects provided programmatically.
    self._adk_tools_transient: list[ADKBaseTool | Callable] = []
    self._adk_code_executor_instance: ADKBaseCodeExecutor | None = None
    self._adk_runner_instance: ADKRunner | None = None
    self._adk_session_service_instance: ADKSessionService | None = None
    self._adk_planner_instance: ADKPlanner | None = None
    self._litellm_budget_manager_instance: BudgetManager | None = None
    self._user_cost_tracker_instance: UserCostTracker | None = None
    self._otel_trace_provider_instance: TracerProvider | None = None
    self._callbacks_transient: dict[str, Callable] = {}
    # Pre-initialized server instances (less common, but possible)
    self._a2a_server_instance: A2AServer | None = None
    self._mcp_server_instance: FastMCP | None = None

    # Set initial log level based on loaded config
    logger.setLevel(logging.DEBUG if self._config.verbose_logging else logging.INFO)
    self.with_agent_name(agent_name)
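
Example: the three construction modes, as a sketch (the JSON path carries over from the earlier example). Passing both config and config_path raises ValueError, and loading from a file resets transient instances such as tools and callbacks.

from toolboxv2.mods.isaa.base.Agent.builder import BuilderConfig, EnhancedAgentBuilder

# 1. Blank: start from defaults and configure fluently.
b1 = EnhancedAgentBuilder(agent_name="AgentA")

# 2. From a config object (deep-copied, so later edits stay isolated).
cfg = BuilderConfig(model_identifier="openai/gpt-4o-mini")
b2 = EnhancedAgentBuilder(agent_name="AgentB", config=cfg)

# 3. From a saved JSON file; re-add transient tools/callbacks afterwards.
b3 = EnhancedAgentBuilder(agent_name="AgentC", config_path="./weather_agent.json")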
build() async

Constructs and returns the configured EnhancedAgent instance. Handles asynchronous setup like fetching ADK MCP tools.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py, lines 735-1078
async def build(self) -> EnhancedAgent:
    """
    Constructs and returns the configured EnhancedAgent instance.
    Handles asynchronous setup like fetching ADK MCP tools.
    """
    logger.info(f"--- Building EnhancedAgent: {self._config.agent_name} v{self._config.agent_version} ---")

    # 1. Final Config Validation (Pydantic model handles most)
    if not self._config.model_identifier:
        raise ValueError("LLM model identifier is required. Use .with_model()")

    # 2. Resolve API Key
    api_key = None
    if self._config.api_key_env_var:
        api_key = os.getenv(self._config.api_key_env_var)
        if not api_key:
            logger.warning(f"API key environment variable '{self._config.api_key_env_var}' is set in config but not found in environment.")
        # else: logger.debug("API key loaded from environment variable.") # Avoid logging key presence

    # 3. Setup Telemetry Provider (if instance provided)
    if self._otel_trace_provider_instance and OTEL_AVAILABLE:
        trace.set_tracer_provider(self._otel_trace_provider_instance)
        logger.info("Global OpenTelemetry TracerProvider set from provided instance.")
    elif self._config.telemetry_config.get('enabled') and self._config.telemetry_config.get('type') != 'custom_instance' and OTEL_AVAILABLE:
         # Basic provider setup from config (can be expanded)
         logger.info("Setting up basic OpenTelemetry based on config (ConsoleExporter example).")
         from opentelemetry.sdk.trace.export import (
             BatchSpanProcessor,
             ConsoleSpanExporter,
         )
         provider = TracerProvider()
         provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
         # TODO: Add OTLP exporter based on self._config.telemetry_config['endpoint']
         trace.set_tracer_provider(provider)
         self._otel_trace_provider_instance = provider # Store for potential access?

    # 4. Prepare Core Components
    # Agent Model Data
    try:
        amd = AgentModelData(
            name=self._config.agent_name,
            model=self._config.model_identifier,
            system_message=self._config.system_message,
            temperature=self._config.temperature,
            top_k=self._config.top_k,
            top_p=self._config.top_p,
            max_tokens=self._config.max_tokens_output,
            max_input_tokens=self._config.max_tokens_input,
            api_key=api_key,
            api_base=self._config.api_base,
            api_version=self._config.api_version,
            stop_sequence=self._config.stop_sequence,
            user_id=self._config.llm_user_id,
            budget_manager=self._litellm_budget_manager_instance,
            caching=self._config.enable_litellm_caching
        )
    except ValidationError as e:
        logger.error(f"Validation error creating AgentModelData: {e}")
        raise

    # World Model
    world_model = self._config.world_model_initial_data or {}

    # User Cost Tracker
    cost_tracker = self._user_cost_tracker_instance # Use provided instance if available
    if not cost_tracker and self._config.cost_tracker_config:
        tracker_type = self._config.cost_tracker_config.get('type')
        if tracker_type == 'json':
            filepath = self._config.cost_tracker_config.get('filepath')
            if filepath:
                cost_tracker = JsonFileUserCostTracker(filepath)
                logger.info(f"Initialized JsonFileUserCostTracker ({filepath})")
            else:
                logger.warning("JSON cost tracker configured but filepath missing.")
        elif tracker_type == 'custom_instance':
             logger.warning("Cost tracker configured as 'custom_instance' but no instance was provided via .with_cost_tracker().")
        # Add other tracker types (DB, InMemory) here

    # 5. Prepare ADK Components
    adk_runner_instance = self._adk_runner_instance
    adk_session_service = self._adk_session_service_instance
    adk_planner_instance = self._adk_planner_instance
    adk_code_executor = self._adk_code_executor_instance # Use provided instance first
    adk_exit_stack = None
    processed_adk_tools = list(self._adk_tools_transient) # Start with transient tools

    if ADK_AVAILABLE and self._config.adk.enabled:
        logger.info("Configuring ADK components...")
        adk_exit_stack = contextlib.AsyncExitStack()

        # --- ADK Runner & Session Service ---
        if not adk_runner_instance:
            runner_cls_name = self._config.adk.runner_class_name
            runner_opts = self._config.adk.runner_options
            try:
                # Dynamically import/get runner class
                if runner_cls_name == "InMemoryRunner": runner_class = InMemoryRunner
                elif runner_cls_name == "Runner": runner_class = Runner
                elif runner_cls_name == "AsyncWebRunner": runner_class = AsyncWebRunner # If available
                else: raise ValueError(f"Unsupported ADK Runner class name: {runner_cls_name}")

                # Special handling: InMemoryRunner needs agent instance *later*
                if runner_class is InMemoryRunner or runner_class is Runner:
                     logger.debug("Deferring InMemoryRunner creation until after agent instantiation.")
                     # Store config to create it later
                     adk_runner_config_for_later = {
                         "runner_class": runner_class,
                         "app_name": runner_opts.get("app_name", f"{self._config.agent_name}_ADKApp"),
                         "session_service": adk_session_service, # Pass service if already created
                         **runner_opts # Pass other options
                     }
                     adk_runner_instance = None # Ensure it's None for now
                else: # Other runners might be creatable now
                     # Need to ensure session service is handled correctly if runner needs it
                     if not adk_session_service:
                         # Create default session service if needed by runner
                         # This part is complex as runners might create their own
                         logger.info("Using default ADK InMemorySessionService for runner.")
                         adk_session_service = InMemorySessionService()

                     adk_runner_instance = runner_class(
                         session_service=adk_session_service,
                         app_name=runner_opts.get("app_name", f"{self._config.agent_name}_ADKApp"),
                         **runner_opts # Pass other options
                     )
                     logger.info(f"Created ADK Runner instance: {runner_cls_name}")

            except (ImportError, ValueError, TypeError) as e:
                logger.error(f"Failed to configure ADK Runner '{runner_cls_name}': {e}", exc_info=True)
                raise ValueError(f"Failed to setup ADK Runner: {e}") from e

        # Ensure session service exists if runner created one
        if adk_runner_instance and hasattr(adk_runner_instance, 'session_service'):
             if not adk_session_service:
                 adk_session_service = adk_runner_instance.session_service
             elif adk_session_service is not adk_runner_instance.session_service:
                 logger.warning("Provided ADK SessionService differs from the one in the provided ADK Runner. Using the runner's service.")
                 adk_session_service = adk_runner_instance.session_service

        # Fallback: create default session service if none exists by now
        if not adk_session_service:
              logger.info("Using default ADK InMemorySessionService.")
              adk_session_service = InMemorySessionService()


        # --- ADK Code Executor ---
        if not adk_code_executor: # If instance wasn't provided directly
            executor_config = self._config.adk.code_executor_config
            if executor_config == "unsafe_simple":
                adk_code_executor = UnsafeSimplePythonExecutor()
                logger.critical("UNSAFE code executor instance created!")
            elif executor_config == "secure_placeholder":
                adk_code_executor = SecureCodeExecutorPlaceholder()
                logger.warning("SecureCodeExecutorPlaceholder instance created.")
            elif executor_config == "adk_builtin":
                # This type uses the TOOL, not an executor instance passed to LlmAgent init
                adk_code_executor = adk_built_in_code_execution
                #if not any(getattr(t, 'func', None) == tool_func for t in processed_adk_tools if isinstance(t, FunctionTool)):
                #     tool_func.__name__ = "code_execution"
                # processed_adk_tools.append(tool_func)
                #     logger.info("Added ADK built-in code execution tool.")
                adk_code_executor = None # Ensure no executor instance is passed for this case
            elif executor_config == "none":
                adk_code_executor = None
            elif executor_config == "custom_instance":
                # Should have been provided via .with_adk_code_executor_instance()
                logger.error("ADK code executor configured as 'custom_instance' but no instance was provided.")
                adk_code_executor = None
            # Add handling for dict config if needed in the future

        # --- ADK Tools (Wrap callables) ---
        temp_tools = []
        for tool_input in processed_adk_tools:
             if isinstance(tool_input, ADKBaseTool):
                 temp_tools.append(tool_input)
             elif callable(tool_input):
                 try:
                     wrapped = ADKFunctionTool(func=tool_input)
                     temp_tools.append(wrapped)
                 except Exception as e:
                     logger.warning(f"Could not wrap callable '{getattr(tool_input, '__name__', 'unknown')}' as ADK tool: {e}")
             else:
                 logger.warning(f"Skipping invalid ADK tool input: {type(tool_input)}")
        processed_adk_tools = temp_tools

        # --- ADK MCP Toolsets ---
        for mcp_conf in self._config.adk.mcp_toolset_configs:
             logger.info(f"Fetching tools from configured MCP Server: {mcp_conf}...")
             try:
                  params = None
                  if mcp_conf.get("type") == "stdio":
                      params = StdioServerParameters(command=mcp_conf["command"], args=mcp_conf.get("args", []))
                  elif mcp_conf.get("type") == "sse":
                       params = SseServerParams(url=mcp_conf["url"])

                  if params:
                      mcp_tools, _ = await MCPToolset.from_server(
                          connection_params=params,
                          async_exit_stack=adk_exit_stack
                      )
                      for tool in mcp_tools: tool._is_mcp_tool = True
                      processed_adk_tools.extend(mcp_tools)
                      logger.info(f"Fetched {len(mcp_tools)} tools via ADK MCPToolset ({mcp_conf.get('type')}).")
                  else:
                       logger.warning(f"Unsupported MCP config type: {mcp_conf.get('type')}")

             except Exception as e:
                  logger.error(f"Failed to fetch tools from MCP server {mcp_conf}: {e}", exc_info=True)
                  # Decide whether to raise or continue

        # --- ADK Planner, Examples, Output Schema ---



    # 6. Instantiate EnhancedAgent
    try:
        # Base arguments for EnhancedAgent
        agent_init_kwargs = {
            'amd': amd,
            'world_model': world_model,
            'format_model': self._config.formatter_llm_model if self._config.formatter_llm_model else None, # Example passing extra config
            'verbose': self._config.verbose_logging,
            'stream': self._config.enable_streaming,
            'max_history_turns': self._config.history.max_turns,
            'max_history_tokens': self._config.history.max_tokens,
            'trim_strategy': self._config.history.trim_strategy,
            'sync_adk_state': self._config.adk.sync_state if ADK_AVAILABLE else False,
            'adk_exit_stack': adk_exit_stack, # Pass stack for cleanup
            'user_cost_tracker': cost_tracker, # Pass the tracker instance
            **self._callbacks_transient, # Pass configured callbacks
            # Pass server instances if provided (less common)
            'a2a_server': self._a2a_server_instance,
            'mcp_server': self._mcp_server_instance,
        }

        # Add ADK-specific arguments if inheriting from LlmAgent
        agent_class = EnhancedAgent
        if ADK_AVAILABLE and issubclass(EnhancedAgent, ADKLlmAgent):
             logger.debug("Adding ADK LlmAgent specific arguments to init.")
             adk_specific_kwargs = {
                 'name': self._config.agent_name, # Required by LlmAgent
                 'model': LiteLlm(model=self._config.model_identifier), # LlmAgent needs BaseLlm instance
                 'description': self._config.adk.description or self._config.system_message,
                 'instruction': self._config.system_message, # Or dedicated instruction field?
                 'tools': processed_adk_tools,
                 'code_executor': adk_code_executor, # Pass the *instance*
                 'planner': adk_planner_instance,
                 # Process examples/schema if needed
                 'examples': [ADKExample(**ex) for ex in self._config.adk.examples] if self._config.adk.examples else None,
                 'output_schema': self._config.adk.output_schema,
                 # Pass runner/session service if NOT using InMemoryRunner deferred creation
                 # If runner is created later, it's assigned post-init
                 'runner': adk_runner_instance,  # None when creation was deferred; assigned post-init
                 'session_service': adk_session_service, # Pass session service
             }
             # Merge carefully: agent_init_kwargs keeps precedence for shared
             # basic fields (e.g. 'name'); ADK-only keys are added on top, and a
             # few LlmAgent fields ('tools', 'description', 'instruction',
             # 'code_executor', 'model') are merged or overridden explicitly.
             final_kwargs = agent_init_kwargs.copy()
             for k, v in adk_specific_kwargs.items():
                  if k not in final_kwargs: # Only add ADK specifics not already handled
                      final_kwargs[k] = v
                  # Handle specific overrides/merges needed for LlmAgent base
                  elif k == 'tools' and v: # Merge tools
                      final_kwargs['tools'] = (final_kwargs.get('tools') or []) + v
                  # Overwrite description/instruction from ADK config if set
                  elif (k in ['description', 'instruction'] and v) or k in ('code_executor', 'model'):
                       final_kwargs[k] = v

             agent_init_kwargs = final_kwargs


        logger.info(f"Final keys for EnhancedAgent init: {list(agent_init_kwargs.keys())}")
        logger.info(f"Final keys for EnhancedAgent init: {agent_init_kwargs}")

        # --- Instantiate the Agent ---
        agent = agent_class(**agent_init_kwargs)
        # --- Agent Instantiated ---

        # If ADK InMemoryRunner creation was deferred, create and assign now
        if ADK_AVAILABLE and 'adk_runner_config_for_later' in locals():
             cfg = locals()['adk_runner_config_for_later']
             if cfg['runner_class'] is not InMemoryRunner and cfg.get('session_service') is None:
                 cfg['session_service'] = agent.adk_session_service  # Ensure service is passed
             agent.setup_adk_runner(cfg)
             logger.info(f"Created and assigned deferred ADK Runner instance: {agent.adk_runner.__class__.__name__}")
             # Ensure agent has runner's session service if it differs
             if agent.adk_runner and agent.adk_session_service is not agent.adk_runner.session_service:
                  logger.warning("Agent session service differs from deferred runner's service. Updating agent's reference.")
                  agent.adk_session_service = agent.adk_runner.session_service
        elif ADK_AVAILABLE and adk_runner_instance and not agent.adk_runner:
            # If runner was created earlier but not passed via LlmAgent init (e.g. non-LlmAgent base)
            # Or if we want to explicitly assign it
             agent.adk_runner = adk_runner_instance
             # Ensure session service consistency
             if agent.adk_session_service is not agent.adk_runner.session_service:
                  agent.adk_session_service = agent.adk_runner.session_service


    except ValidationError as e:
        logger.error(f"Pydantic validation error Instantiating EnhancedAgent: {e}", exc_info=True)
        raise
    except Exception as e:
        logger.error(f"Unexpected error Instantiating EnhancedAgent: {e}", exc_info=True)
        raise

    # 7. Setup Agent's Internal Server Capabilities (if enabled and not pre-initialized)
    if self._config.a2a.enabled and not agent.a2a_server:
        if AGENT_A2A_AVAILABLE:
            logger.info("Setting up A2A server on agent instance...")
            agent.setup_a2a_server(
                host=self._config.a2a.host,
                port=self._config.a2a.port,
                **self._config.a2a.extra_options
            )
        else: logger.warning("A2A server configured in builder, but A2A not available in agent environment.")

    if self._config.mcp.enabled and not agent.mcp_server:
        if AGENT_MCP_AVAILABLE:
            logger.info("Setting up MCP server on agent instance...")
            agent.setup_mcp_server(
                host=self._config.mcp.host,
                port=self._config.mcp.port,
                name=self._config.mcp.server_name, # Already defaulted
                **self._config.mcp.extra_options
            )
        else: logger.warning("MCP server configured in builder, but MCP not available in agent environment.")

    # 8. Setup A2A known clients configuration on the agent
    if self._config.a2a.known_clients:
         if AGENT_A2A_AVAILABLE:
             # The agent creates A2A clients on demand; pass it the known-client
             # config if it exposes a setter for that purpose.
             if hasattr(agent, 'set_known_a2a_clients'):
                 agent.set_known_a2a_clients(self._config.a2a.known_clients)
             else:
                 logger.warning("Agent does not expose 'set_known_a2a_clients'; known A2A client config was not applied.")
         else:
              logger.warning("A2A known clients configured, but A2A not available in agent env.")


    logger.info(f"--- EnhancedAgent Build Complete: {agent.amd.name} ---")
    return agent
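
Since build() is asynchronous, a typical call site awaits it. A minimal sketch (the builder constructor argument and model string below are illustrative, not taken from the source):

import asyncio

async def main():
    builder = (
        EnhancedAgentBuilder(agent_name="demo_agent")  # hypothetical constructor argument
        .with_model("gemini/gemini-1.5-flash-latest")  # any LiteLLM model string
    )
    agent = await builder.build()
    # ... use the agent ...

asyncio.run(main())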
enable_adk(runner_class=InMemoryRunner, runner_options=None)

Enables ADK integration with a specified runner.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def enable_adk(self, runner_class: type[ADKRunner] = InMemoryRunner, runner_options: dict[str, Any] | None = None) -> 'EnhancedAgentBuilder':
    """Enables ADK integration with a specified runner."""
    if not self._ensure_adk("Runner"): return self
    self._config.adk.runner_class_name = runner_class.__name__
    self._config.adk.runner_options = runner_options or {}
    logger.info(f"ADK integration enabled with runner: {self._config.adk.runner_class_name}")
    return self
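
For example, assuming a builder instance as above (a sketch; runner_options keys other than app_name are passed through to the runner constructor):

builder.enable_adk(runner_class=InMemoryRunner,
                   runner_options={"app_name": "DemoADKApp"})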
load_config(path)

Loads builder configuration from a JSON file, overwriting current settings.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def load_config(self, path: str | Path) -> 'EnhancedAgentBuilder':
    """Loads builder configuration from a JSON file, overwriting current settings."""
    filepath = Path(path)
    if not filepath.exists():
        raise FileNotFoundError(f"Builder configuration file not found: {filepath}")
    try:
        with open(filepath) as f:
            config_data = json.load(f)
        self._config = BuilderConfig.model_validate(config_data)
        logger.info(f"Builder configuration loaded from {filepath}")
        # Reset transient fields, as they are not saved
        self._reset_transient_fields()
        logger.warning("Transient fields (callbacks, tool instances, tracker instance, etc.) reset. Re-add them if needed.")
        # Update logger level based on loaded config
        logger.setLevel(logging.DEBUG if self._config.verbose_logging else logging.INFO)
    except (OSError, json.JSONDecodeError) as e:
        logger.error(f"Failed to load or parse builder configuration from {filepath}: {e}")
        raise
    except ValidationError as e:
         logger.error(f"Loaded configuration data is invalid: {e}")
         raise
    return self
save_config(path, indent=2)

Saves the current builder configuration to a JSON file.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def save_config(self, path: str | Path, indent: int = 2):
    """Saves the current builder configuration to a JSON file."""
    filepath = Path(path)
    try:
        filepath.parent.mkdir(parents=True, exist_ok=True)
        config_json = self._config.model_dump_json(indent=indent)
        with open(filepath, 'w') as f:
            f.write(config_json)
        logger.info(f"Builder configuration saved to {filepath}")
    except OSError as e:
        logger.error(f"Failed to save builder configuration to {filepath}: {e}")
    except ValidationError as e:
         logger.error(f"Configuration is invalid, cannot save: {e}")
    except Exception as e:
         logger.error(f"An unexpected error occurred during config save: {e}")
with_adk_code_executor(executor_type)

Configures the type of ADK code executor to use (saved in config).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_adk_code_executor(self, executor_type: Literal["adk_builtin", "unsafe_simple", "secure_placeholder", "none"]) -> 'EnhancedAgentBuilder':
    """Configures the type of ADK code executor to use (saved in config)."""
    if not self._ensure_adk("Code Executor Type"): return self
    if executor_type == "unsafe_simple":
        logger.critical("***********************************************************")
        logger.critical("*** WARNING: Configuring UNSAFE SimplePythonExecutor!   ***")
        logger.critical("***********************************************************")
    elif executor_type == "secure_placeholder":
        logger.warning("Configuring SecureCodeExecutorPlaceholder. Implement actual sandboxing!")
    elif executor_type == "adk_builtin":
        if self._config.model_identifier and ("gemini-1.5" not in self._config.model_identifier and "gemini-2" not in self._config.model_identifier) :
            logger.warning(f"ADK built-in code execution selected, but model '{self._config.model_identifier}' might not support it. Ensure model compatibility.")
        logger.info("Configuring ADK built-in code execution (tool-based, requires compatible model).")

    self._config.adk.code_executor_config = executor_type
    self._adk_code_executor_instance = None # Clear any previously set instance
    return self
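
For instance, to opt into the placeholder executor while prototyping (sketch; builder is an existing EnhancedAgentBuilder):

builder.with_adk_code_executor("secure_placeholder")
# or, accepting the logged CRITICAL warning, for quick local experiments only:
builder.with_adk_code_executor("unsafe_simple")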
with_adk_code_executor_instance(executor)

Provides a pre-initialized ADK code executor instance (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_adk_code_executor_instance(self, executor: ADKBaseCodeExecutor) -> 'EnhancedAgentBuilder':
    """Provides a pre-initialized ADK code executor instance (transient)."""
    if not self._ensure_adk("Code Executor Instance"): return self
    if not isinstance(executor, ADKBaseCodeExecutor):
        raise TypeError(f"Expected ADKBaseCodeExecutor instance, got {type(executor)}")
    self._adk_code_executor_instance = executor
    self._config.adk.code_executor_config = "custom_instance" # Mark config
    logger.info(f"Using custom ADK code executor instance: {type(executor).__name__}")
    return self
with_adk_mcp_toolset(connection_type, **kwargs)

Configures an ADK MCP Toolset connection (saved in config).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_adk_mcp_toolset(self, connection_type: Literal["stdio", "sse"], **kwargs) -> 'EnhancedAgentBuilder':
    """Configures an ADK MCP Toolset connection (saved in config)."""
    if not self._ensure_adk("MCP Toolset"): return self
    if connection_type == "stdio":
        if "command" not in kwargs: raise ValueError("Stdio MCP toolset requires 'command' argument.")
        config = {"type": "stdio", "command": kwargs["command"], "args": kwargs.get("args", [])}
    elif connection_type == "sse":
        if "url" not in kwargs: raise ValueError("SSE MCP toolset requires 'url' argument.")
        config = {"type": "sse", "url": kwargs["url"]}
    else:
        raise ValueError(f"Unknown MCP toolset connection type: {connection_type}")
    self._config.adk.mcp_toolset_configs.append(config)
    logger.info(f"Configured ADK MCP Toolset: {config}")
    return self
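
Illustrative calls for both connection types (the command and URL below are placeholders, not endorsed defaults):

builder.with_adk_mcp_toolset("stdio", command="npx",
                             args=["@modelcontextprotocol/server-filesystem", "/data"])
builder.with_adk_mcp_toolset("sse", url="http://localhost:8080/sse")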
with_adk_tool_function(func, name=None, description=None)

Adds a callable function as an ADK tool (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_adk_tool_function(self, func: Callable, name: Optional[str] = None,
                           description: Optional[str] = None) -> 'EnhancedAgentBuilder':
    """Adds a callable function as an ADK tool (transient)."""
    if not self._ensure_adk("Tool Function"):
        return self
    if not callable(func):
        raise TypeError(f"Expected callable function for ADK tool, got {type(func)}")
    if name:
        func.__name__ = name
    if description:
        func.__doc__ = description
    tool = FunctionTool(func)
    self._adk_tools_transient.append(tool)
    return self
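
Because the optional name and description are written onto the function itself (__name__ and __doc__), a plain function suffices (sketch):

def get_weather(city: str) -> str:
    """Return a short weather summary for a city."""
    return f"Sunny in {city}"

builder.with_adk_tool_function(get_weather, name="weather_lookup",
                               description="Look up current weather for a city.")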
with_adk_tool_instance(tool)

Adds a pre-initialized ADK Tool instance (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_adk_tool_instance(self, tool: ADKBaseTool) -> 'EnhancedAgentBuilder':
    """Adds a pre-initialized ADK Tool instance (transient)."""
    if not self._ensure_adk("Tool Instance"): return self
    if not isinstance(tool, ADKBaseTool):
        raise TypeError(f"Expected ADK BaseTool instance, got {type(tool)}")
    self._adk_tools_transient.append(tool)
    return self
with_cost_tracker(tracker)

Provides a pre-initialized UserCostTracker instance (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_cost_tracker(self, tracker: UserCostTracker) -> 'EnhancedAgentBuilder':
    """Provides a pre-initialized UserCostTracker instance (transient)."""
    if not hasattr(tracker, "get_all_costs"): # Duck-typed check against the UserCostTracker protocol
         raise TypeError("Cost tracker must implement the UserCostTracker protocol.")
    self._user_cost_tracker_instance = tracker
    # Clear file config if instance is provided
    self._config.cost_tracker_config = {'type': 'custom_instance'}
    logger.info(f"Using custom UserCostTracker instance: {type(tracker).__name__}")
    return self
with_json_cost_tracker(filepath)

Configures the builder to use the JsonFileUserCostTracker (saved in config).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_json_cost_tracker(self, filepath: str | Path) -> 'EnhancedAgentBuilder':
    """Configures the builder to use the JsonFileUserCostTracker (saved in config)."""
    self._config.cost_tracker_config = {'type': 'json', 'filepath': str(filepath)}
    self._user_cost_tracker_instance = None # Clear any instance
    logger.info(f"Configured JsonFileUserCostTracker: {filepath}")
    return self
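
The two tracker entry points are mutually exclusive: setting one clears the other. A sketch:

builder.with_json_cost_tracker("costs/users.json")   # file-backed, persisted in config
# or, with your own implementation of the UserCostTracker protocol:
builder.with_cost_tracker(my_tracker)                # my_tracker is hypothetical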
with_litellm_budget_manager(manager)

Provides a pre-initialized LiteLLM BudgetManager instance (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_litellm_budget_manager(self, manager: BudgetManager) -> 'EnhancedAgentBuilder':
    """Provides a pre-initialized LiteLLM BudgetManager instance (transient)."""
    if not LITELLM_AVAILABLE:
         logger.warning("LiteLLM not available, cannot set BudgetManager.")
         return self
    if not isinstance(manager, BudgetManager):
        raise TypeError("Expected litellm.BudgetManager instance.")
    self._litellm_budget_manager_instance = manager
    return self
with_telemetry_provider_instance(provider)

Provides a pre-initialized OpenTelemetry TracerProvider instance (transient).

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
def with_telemetry_provider_instance(self, provider: TracerProvider) -> 'EnhancedAgentBuilder':
    """Provides a pre-initialized OpenTelemetry TracerProvider instance (transient)."""
    if not OTEL_AVAILABLE:
        logger.warning("OpenTelemetry SDK not available. Cannot set TracerProvider.")
        return self
    if not isinstance(provider, TracerProvider):
         raise TypeError("Expected opentelemetry.sdk.trace.TracerProvider instance.")
    self._otel_trace_provider_instance = provider
    # Mark telemetry as enabled, but using custom instance
    self._config.telemetry_config = {'enabled': True, 'type': 'custom_instance'}
    logger.info("Using custom OpenTelemetry TracerProvider instance.")
    return self
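
A console-exporting provider, mirroring the fallback that build() creates from config, can also be supplied explicitly (these are the same OpenTelemetry SDK classes imported in build() above):

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
builder.with_telemetry_provider_instance(provider)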
JsonFileUserCostTracker

Stores user costs persistently in a JSON file.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
class JsonFileUserCostTracker:
    """Stores user costs persistently in a JSON file."""
    def __init__(self, filepath: str | Path):
        self.filepath = Path(filepath)
        self._costs: dict[str, float] = {}
        self._lock = threading.Lock()
        self.load() # Load costs on initialization

    def get_cost(self, user_id: str) -> float:
        with self._lock:
            return self._costs.get(user_id, 0.0)

    def add_cost(self, user_id: str, cost: float) -> None:
        if not user_id:
            logger.warning("Cost tracking skipped: user_id is missing.")
            return
        if cost > 0:
            with self._lock:
                self._costs[user_id] = self._costs.get(user_id, 0.0) + cost
                logger.debug(f"Cost added for user '{user_id}': +{cost:.6f}. New total: {self._costs[user_id]:.6f}")
            # Optional: Auto-save periodically or based on number of updates
            # For simplicity, we rely on explicit save() or agent close

    def get_all_costs(self) -> dict[str, float]:
        with self._lock:
            return self._costs.copy()

    def save(self) -> None:
        with self._lock:
            try:
                self.filepath.parent.mkdir(parents=True, exist_ok=True)
                with open(self.filepath, 'w') as f:
                    json.dump(self._costs, f, indent=2)
                logger.info(f"User costs saved to {self.filepath}")
            except OSError as e:
                logger.error(f"Failed to save user costs to {self.filepath}: {e}")

    def load(self) -> None:
        with self._lock:
            if self.filepath.exists():
                try:
                    with open(self.filepath) as f:
                        self._costs = json.load(f)
                    logger.info(f"User costs loaded from {self.filepath}")
                except (OSError, json.JSONDecodeError) as e:
                    logger.error(f"Failed to load user costs from {self.filepath}: {e}. Starting fresh.")
                    self._costs = {}
            else:
                logger.info(f"User cost file not found ({self.filepath}). Starting fresh.")
                self._costs = {}

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.save()
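
Because __exit__ calls save(), the tracker can be used as a context manager to guarantee persistence (file path is illustrative):

with JsonFileUserCostTracker("costs/users.json") as tracker:
    tracker.add_cost("user_123", 0.0042)
    print(tracker.get_cost("user_123"))  # 0.0042
# costs are written to costs/users.json on exit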
UserCostTracker

Bases: Protocol

Protocol for tracking costs per user.

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
class UserCostTracker(Protocol):
    """Protocol for tracking costs per user."""
    def get_cost(self, user_id: str) -> float: ...
    def add_cost(self, user_id: str, cost: float) -> None: ...
    def get_all_costs(self) -> dict[str, float]: ...
    def save(self) -> None: ...
    def load(self) -> None: ...
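
Any object providing these five methods satisfies the protocol; a minimal in-memory sketch:

class InMemoryUserCostTracker:
    """Volatile tracker sketch; costs are lost when the process exits."""
    def __init__(self) -> None:
        self._costs: dict[str, float] = {}

    def get_cost(self, user_id: str) -> float:
        return self._costs.get(user_id, 0.0)

    def add_cost(self, user_id: str, cost: float) -> None:
        self._costs[user_id] = self._costs.get(user_id, 0.0) + cost

    def get_all_costs(self) -> dict[str, float]:
        return dict(self._costs)

    def save(self) -> None:  # no-op for the in-memory variant
        pass

    def load(self) -> None:
        pass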
config
A2AConfig

Bases: BaseModel

Configuration for A2A integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class A2AConfig(BaseModel):
    """Configuration for A2A integration."""
    server: dict[str, Any] | None = Field(default=None, description="Configuration to run an A2A server (host, port, etc.).")
    known_agents: dict[str, str] = Field(default_factory=dict, description="Named A2A agent URLs to interact with (e.g., {'weather_agent': 'http://weather:5000'}).")
    default_task_timeout: int = Field(default=120, description="Default timeout in seconds for waiting on A2A task results.")

    model_config = ConfigDict(arbitrary_types_allowed=True)
ADKConfig

Bases: BaseModel

Configuration for ADK integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class ADKConfig(BaseModel):
    """Configuration for ADK integration."""
    enabled: bool = Field(default=True, description="Enable ADK features if ADK is installed.")
    description: str | None = Field(default=None, description="ADK LlmAgent description.")
    instruction_override: str | None = Field(default=None, description="Override agent's system message for ADK.")
    # Tools added via builder or auto-discovery
    code_executor: str | BaseCodeExecutor | None = Field(default=None, description="Reference name or instance of ADK code executor.")
    planner: str | BasePlanner | None = Field(default=None, description="Reference name or instance of ADK planner.")
    examples: list[Example] | None = Field(default=None, description="Few-shot examples for ADK.")
    output_schema: type[BaseModel] | None = Field(default=None, description="Pydantic model for structured output.")
    # MCP Toolset config handled separately if ADK is enabled
    use_mcp_toolset: bool = Field(default=True, description="Use ADK's MCPToolset for MCP client connections if ADK is enabled.")
    # Runner config handled separately

    model_config = ConfigDict(arbitrary_types_allowed=True)
AgentConfig

Bases: BaseModel

Main configuration schema for an EnhancedAgent.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class AgentConfig(BaseModel):
    """Main configuration schema for an EnhancedAgent."""
    agent_name: str = Field(..., description="Unique name for this agent instance.")
    version: str = Field(default="0.1.0")

    agent_instruction: str = Field(default="You are a helpful AI assistant. Answer user questions to the best of your knowledge. Respond concisely. Use tools when needed.")
    agent_description: str = Field(default="A configurable, production-ready agent with integrated capabilities.")

    # Model Selection
    models: list[ModelConfig] = Field(..., description="List of available LLM configurations.")
    default_llm_model: str = Field(..., description="Name of the ModelConfig to use for general LLM calls.")
    formatter_llm_model: str | None = Field(default=None, description="Optional: Name of a faster/cheaper ModelConfig for a_format_class calls.")

    # Core Agent Settings
    world_model_initial_data: dict[str, Any] | None = Field(default=None)
    enable_streaming: bool = Field(default=False)
    verbose: bool = Field(default=False)
    log_level: str = Field(default="INFO", description="Logging level (DEBUG, INFO, WARNING, ERROR).")
    max_history_length: int = Field(default=20, description="Max conversation turns for LiteLLM history.")
    trim_strategy: Literal["litellm", "basic"] = Field(default="litellm")
    persist_history: bool = Field(default=True, description="Persist conversation history (requires persistent ChatSession).")
    user_id_default: str | None = Field(default=None, description="Default user ID for interactions.")

    # Secure Code Execution
    code_executor_type: Literal["restricted", "docker", "none"] = Field(default="restricted", description="Type of code executor to use.")
    code_executor_config: dict[str, Any] = Field(default_factory=dict, description="Configuration specific to the chosen code executor.")
    enable_adk_code_execution_tool: bool = Field(default=True, description="Expose code execution as an ADK tool if ADK is enabled.")

    # Framework Integrations
    adk: ADKConfig | None = Field(default_factory=ADKConfig if ADK_AVAILABLE_CONF else lambda: None)
    mcp: MCPConfig | None = Field(default_factory=MCPConfig if MCP_AVAILABLE_CONF else lambda: None)
    a2a: A2AConfig | None = Field(default_factory=A2AConfig if A2A_AVAILABLE_CONF else lambda: None)

    # Observability & Cost
    observability: ObservabilityConfig | None = Field(default_factory=ObservabilityConfig)
    budget_manager: BudgetManager | None = Field(default=None, description="Global LiteLLM budget manager instance.") # Needs to be passed in

    # Human-in-the-Loop
    enable_hitl: bool = Field(default=False, description="Enable basic Human-in-the-Loop hooks.")

    # Add other global settings as needed

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @model_validator(mode='after')
    def validate_model_references(self) -> 'AgentConfig':
        model_names = {m.name for m in self.models}
        if self.default_llm_model not in model_names:
            raise ValueError(f"default_llm_model '{self.default_llm_model}' not found in defined models.")
        if self.formatter_llm_model and self.formatter_llm_model not in model_names:
            raise ValueError(f"formatter_llm_model '{self.formatter_llm_model}' not found in defined models.")
        return self

    @model_validator(mode='after')
    def validate_framework_availability(self) -> 'AgentConfig':
        if self.adk and self.adk.enabled and not ADK_AVAILABLE_CONF:
            logger.warning("ADK configuration provided but ADK library not installed. Disabling ADK features.")
            self.adk.enabled = False
        if self.mcp and (self.mcp.server or self.mcp.client_connections) and not MCP_AVAILABLE_CONF:
             logger.warning("MCP configuration provided but MCP library not installed. Disabling MCP features.")
             self.mcp = None # Or disable specific parts
        if self.a2a and (self.a2a.server or self.a2a.known_agents) and not A2A_AVAILABLE_CONF:
             logger.warning("A2A configuration provided but A2A library not installed. Disabling A2A features.")
             self.a2a = None # Or disable specific parts
        return self

    @classmethod
    def load_from_yaml(cls, path: str | Path) -> 'AgentConfig':
        """Loads configuration from a YAML file."""
        file_path = Path(path)
        if not file_path.is_file():
            raise FileNotFoundError(f"Configuration file not found: {path}")
        with open(file_path) as f:
            config_data = yaml.safe_load(f)
        logger.info(f"Loaded agent configuration from {path}")
        return cls(**config_data)

    def save_to_yaml(self, path: str | Path):
        """Saves the current configuration to a YAML file."""
        file_path = Path(path)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, 'w') as f:
            # Use Pydantic's model_dump for clean serialization
            yaml.dump(self.model_dump(mode='python'), f, sort_keys=False)
        logger.info(f"Saved agent configuration to {path}")
load_from_yaml(path) classmethod

Loads configuration from a YAML file.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
@classmethod
def load_from_yaml(cls, path: str | Path) -> 'AgentConfig':
    """Loads configuration from a YAML file."""
    file_path = Path(path)
    if not file_path.is_file():
        raise FileNotFoundError(f"Configuration file not found: {path}")
    with open(file_path) as f:
        config_data = yaml.safe_load(f)
    logger.info(f"Loaded agent configuration from {path}")
    return cls(**config_data)
save_to_yaml(path)

Saves the current configuration to a YAML file.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
def save_to_yaml(self, path: str | Path):
    """Saves the current configuration to a YAML file."""
    file_path = Path(path)
    file_path.parent.mkdir(parents=True, exist_ok=True)
    with open(file_path, 'w') as f:
        # Use Pydantic's model_dump for clean serialization
        yaml.dump(self.model_dump(mode='python'), f, sort_keys=False)
    logger.info(f"Saved agent configuration to {path}")
MCPConfig

Bases: BaseModel

Configuration for MCP integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class MCPConfig(BaseModel):
    """Configuration for MCP integration."""
    server: dict[str, Any] | None = Field(default=None, description="Configuration to run an MCP server (host, port, etc.).")
    client_connections: dict[str, str] = Field(default_factory=dict, description="Named MCP server URLs to connect to as a client (e.g., {'files': 'stdio:npx @mcp/server-filesystem /data'}).")
    # ADK's MCPToolset handles client connections if ADKConfig.use_mcp_toolset is True

    model_config = ConfigDict(arbitrary_types_allowed=True)
ModelConfig

Bases: BaseModel

Configuration specific to an LLM model via LiteLLM.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class ModelConfig(BaseModel):
    """Configuration specific to an LLM model via LiteLLM."""
    # Used as key for model selection
    name: str = Field(..., description="Unique identifier/alias for this model configuration (e.g., 'fast_formatter', 'main_reasoner').")
    model: str = Field(..., description="LiteLLM model string (e.g., 'gemini/gemini-1.5-pro-latest', 'ollama/mistral').")
    provider: str | None = Field(default=None, description="LiteLLM provider override if needed.")
    api_key: str | None = Field(default=None, description="API Key (consider using environment variables).")
    api_base: str | None = Field(default=None, description="API Base URL (for local models, proxies).")
    api_version: str | None = Field(default=None, description="API Version (e.g., for Azure).")

    # Common LLM Parameters
    temperature: float | None = Field(default=0.7)
    top_p: float | None = Field(default=None)
    top_k: int | None = Field(default=None)
    max_tokens: int | None = Field(default=2048, description="Max tokens for generation.")
    max_input_tokens: int | None = Field(default=None, description="Max input context window (autodetected if None).")
    stop_sequence: list[str] | None = Field(default=None)
    presence_penalty: float | None = Field(default=None)
    frequency_penalty: float | None = Field(default=None)
    system_message: str | None = Field(default=None, description="Default system message for this model.")

    # LiteLLM Specific
    caching: bool = Field(default=True, description="Enable LiteLLM caching for this model.")
    # budget_manager: Optional[BudgetManager] = Field(default=None) # Budget manager applied globally or per-agent

    model_config = ConfigDict(arbitrary_types_allowed=True, extra='allow') # Allow extra LiteLLM params
ObservabilityConfig

Bases: BaseModel

Configuration for observability (OpenTelemetry).

Source code in toolboxv2/mods/isaa/base/Agent/config.py
class ObservabilityConfig(BaseModel):
    """Configuration for observability (OpenTelemetry)."""
    enabled: bool = Field(default=True)
    endpoint: str | None = Field(default=None, description="OTLP endpoint URL (e.g., http://jaeger:4317).")
    service_name: str | None = Field(default=None, description="Service name for traces/metrics (defaults to agent name).")
    # Add more OTel config options as needed (headers, certs, resource attributes)

    model_config = ConfigDict(arbitrary_types_allowed=True)
executors
DockerCodeExecutor

Bases: _BaseExecutorClass

Executes Python code in a sandboxed Docker container.

Requires Docker to be installed and running, and the 'docker' Python SDK.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
class DockerCodeExecutor(_BaseExecutorClass):
    """
    Executes Python code in a sandboxed Docker container.

    Requires Docker to be installed and running, and the 'docker' Python SDK.
    """
    DEFAULT_DOCKER_IMAGE = "python:3.10-slim" # Use a minimal image
    DEFAULT_TIMEOUT = 10 # Seconds
    DEFAULT_MEM_LIMIT = "128m"
    DEFAULT_CPUS = 0.5

    def __init__(self,
                 docker_image: str = DEFAULT_DOCKER_IMAGE,
                 timeout: int = DEFAULT_TIMEOUT,
                 mem_limit: str = DEFAULT_MEM_LIMIT,
                 cpus: float = DEFAULT_CPUS,
                 network_mode: str = "none", # Disable networking by default for security
                 docker_client_config: dict | None = None):
        if not DOCKER_AVAILABLE:
            raise ImportError("Docker SDK not installed ('pip install docker'). Cannot use DockerCodeExecutor.")

        self.docker_image = docker_image
        self.timeout = timeout
        self.mem_limit = mem_limit
        self.cpus = cpus
        self.network_mode = network_mode
        try:
            self.client = docker.from_env(**(docker_client_config or {}))
            self.client.ping() # Check connection
            # Ensure image exists locally or pull it
            try:
                self.client.images.get(self.docker_image)
                logger.info(f"Docker image '{self.docker_image}' found locally.")
            except ImageNotFound:
                logger.warning(f"Docker image '{self.docker_image}' not found locally. Attempting to pull...")
                try:
                    self.client.images.pull(self.docker_image)
                    logger.info(f"Successfully pulled Docker image '{self.docker_image}'.")
                except APIError as pull_err:
                    raise RuntimeError(f"Failed to pull Docker image '{self.docker_image}': {pull_err}") from pull_err
        except Exception as e:
            raise RuntimeError(f"Failed to connect to Docker daemon: {e}. Is Docker running?") from e
        logger.info(f"DockerCodeExecutor initialized (Image: {docker_image}, Timeout: {timeout}s, Network: {network_mode})")

    def _execute(self, code: str) -> dict[str, Any]:
        """Internal execution logic."""
        result = {"stdout": "", "stderr": "", "error": None, "exit_code": None}
        container = None

        try:
            logger.debug(f"Creating Docker container from image '{self.docker_image}'...")
            container = self.client.containers.run(
                image=self.docker_image,
                command=["python", "-c", code],
                detach=True,
                mem_limit=self.mem_limit,
                nano_cpus=int(self.cpus * 1e9),
                network_mode=self.network_mode,
                # Security considerations: Consider read-only filesystem, dropping capabilities
                read_only=True,
                # working_dir="/app", # Define a working dir if needed
                # volumes={...} # Mount volumes carefully if required
            )
            logger.debug(f"Container '{container.short_id}' started.")

            # Wait for container completion with timeout
            container_result = container.wait(timeout=self.timeout)
            result["exit_code"] = container_result.get("StatusCode", None)

            # Retrieve logs
            result["stdout"] = container.logs(stdout=True, stderr=False).decode('utf-8', errors='replace').strip()
            result["stderr"] = container.logs(stdout=False, stderr=True).decode('utf-8', errors='replace').strip()

            logger.debug(f"Container '{container.short_id}' finished with exit code {result['exit_code']}.")
            if result["exit_code"] != 0:
                 logger.warning(f"Container stderr: {result['stderr'][:500]}...") # Log stderr on failure

        except ContainerError as e:
            result["error"] = f"ContainerError: {e}"
            result["stderr"] = e.stderr.decode('utf-8', errors='replace').strip() if e.stderr else str(e)
            result["exit_code"] = e.exit_status
            logger.error(f"Container '{container.short_id if container else 'N/A'}' failed: {result['error']}\nStderr: {result['stderr']}")
        except APIError as e:
            result["error"] = f"Docker APIError: {e}"
            result["exit_code"] = -1
            logger.error(f"Docker API error during execution: {e}")
        except Exception as e:
            # Catch potential timeout errors from container.wait or other unexpected issues
            result["error"] = f"Unexpected execution error: {type(e).__name__}: {e}"
            result["exit_code"] = -1
            # Check if it looks like a timeout
            if isinstance(e, TimeoutError) or "Timeout" in str(e): # docker SDK might raise requests.exceptions.ReadTimeout
                result["stderr"] = f"Execution timed out after {self.timeout} seconds."
                logger.warning(f"Container execution timed out ({self.timeout}s).")
            else:
                logger.error(f"Unexpected error during Docker execution: {e}", exc_info=True)
        finally:
            if container:
                try:
                    logger.debug(f"Removing container '{container.short_id}'...")
                    container.remove(force=True)
                except APIError as rm_err:
                    logger.warning(f"Failed to remove container {container.short_id}: {rm_err}")

        return result

    # --- ADK Compatibility Method ---
    if ADK_EXEC_AVAILABLE:
        def execute_code(self, invocation_context: InvocationContext, code_input: CodeExecutionInput) -> CodeExecutionResult:
            logger.debug(f"DockerCodeExecutor executing ADK request (lang: {code_input.language}). Code: {code_input.code[:100]}...")
            if code_input.language.lower() != 'python':
                 return CodeExecutionResult(output=f"Error: Unsupported language '{code_input.language}'. Only Python is supported.", outcome="OUTCOME_FAILURE")

            exec_result = self._execute(code_input.code)

            output_str = ""
            if exec_result["stdout"]:
                output_str += f"Stdout:\n{exec_result['stdout']}\n"
            if exec_result["stderr"]:
                 output_str += f"Stderr:\n{exec_result['stderr']}\n"
            if not output_str and exec_result["exit_code"] == 0:
                 output_str = "Execution successful with no output."
            elif not output_str and exec_result["exit_code"] != 0:
                 output_str = f"Execution failed with no output (Exit code: {exec_result['exit_code']}). Error: {exec_result['error']}"

            outcome = "OUTCOME_SUCCESS" if exec_result["exit_code"] == 0 else "OUTCOME_FAILURE"

            return CodeExecutionResult(output=output_str.strip(), outcome=outcome)
    # --- End ADK Compatibility ---

    # --- Direct Call Method ---
    def execute(self, code: str) -> dict[str, Any]:
        """Directly execute code, returning detailed dictionary."""
        logger.debug(f"DockerCodeExecutor executing direct call. Code: {code[:100]}...")
        return self._execute(code)
execute(code)

Directly execute code, returning detailed dictionary.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
def execute(self, code: str) -> dict[str, Any]:
    """Directly execute code, returning detailed dictionary."""
    logger.debug(f"DockerCodeExecutor executing direct call. Code: {code[:100]}...")
    return self._execute(code)
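
Direct use requires a running Docker daemon; a sketch (constructor arguments match the signature above, the printed values assume a clean run):

executor = DockerCodeExecutor(timeout=5, mem_limit="64m")
result = executor.execute("print(2 ** 10)")
print(result["exit_code"], result["stdout"])  # 0 1024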
RestrictedPythonExecutor

Bases: _BaseExecutorClass

Executes Python code using restrictedpython.

Safer than exec() but NOT a full sandbox. Known vulnerabilities exist. Use with extreme caution and only with trusted code sources or for low-risk operations. Docker is strongly recommended for untrusted code.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
class RestrictedPythonExecutor(_BaseExecutorClass):
    """
    Executes Python code using restrictedpython.

    Safer than exec() but NOT a full sandbox. Known vulnerabilities exist.
    Use with extreme caution and only with trusted code sources or for
    low-risk operations. Docker is strongly recommended for untrusted code.
    """
    DEFAULT_ALLOWED_GLOBALS = {
        **safe_globals,
        '_print_': restrictedpython.PrintCollector,
        '_getattr_': restrictedpython.safe_getattr,
        '_getitem_': restrictedpython.safe_getitem,
        '_write_': restrictedpython.guarded_setattr, # Allows modifying specific safe objects if needed
        # Add other safe builtins or modules carefully
        'math': __import__('math'),
        'random': __import__('random'),
        'datetime': __import__('datetime'),
        'time': __import__('time'),
        # 'requests': None, # Example: Explicitly disallow
    }

    def __init__(self, allowed_globals: dict | None = None, max_execution_time: int = 5):
        if not RESTRICTEDPYTHON_AVAILABLE:
            raise ImportError("restrictedpython is not installed. Cannot use RestrictedPythonExecutor.")
        self.allowed_globals = allowed_globals or self.DEFAULT_ALLOWED_GLOBALS
        self.max_execution_time = max_execution_time # Basic timeout (not perfectly enforced by restrictedpython)
        logger.warning("Initialized RestrictedPythonExecutor. This provides LIMITED sandboxing. Use Docker for untrusted code.")

    def _execute(self, code: str) -> dict[str, Any]:
        """Internal execution logic."""
        start_time = time.monotonic()
        result = {"stdout": "", "stderr": "", "error": None, "exit_code": None}
        local_vars = {}
        stdout_capture = io.StringIO()
        stderr_capture = io.StringIO()

        try:
            # Basic timeout check (not preemptive)
            if time.monotonic() - start_time > self.max_execution_time:
                 raise TimeoutError(f"Execution exceeded max time of {self.max_execution_time}s (pre-check).")

            # Compile the code in restricted mode
            byte_code = compile_restricted(code, filename='<inline code>', mode='exec')

            # Add a print collector to capture output
            self.allowed_globals['_print_'] = restrictedpython.PrintCollector
            print_collector = self.allowed_globals['_print_']()
            exec_globals = {**self.allowed_globals, '_print': print_collector}

            # Execute the compiled code
            # Note: restrictedpython does not inherently support robust timeouts during exec
            exec(byte_code, exec_globals, local_vars)

            # Check execution time again
            duration = time.monotonic() - start_time
            if duration > self.max_execution_time:
                logger.warning(f"Execution finished but exceeded max time ({duration:.2f}s > {self.max_execution_time}s).")
                # Potentially treat as an error or partial success

            result["stdout"] = print_collector.printed_text # Access collected prints
            result["exit_code"] = 0 # Assume success if no exception

        except TimeoutError as e:
            result["stderr"] = f"TimeoutError: {e}"
            result["error"] = str(e)
            result["exit_code"] = -1 # Indicate timeout
        except SyntaxError as e:
            result["stderr"] = f"SyntaxError: {e}"
            result["error"] = str(e)
            result["exit_code"] = 1
        except Exception as e:
            # Capture other potential execution errors allowed by restrictedpython
            error_type = type(e).__name__
            error_msg = f"{error_type}: {e}"
            result["stderr"] = error_msg
            result["error"] = str(e)
            result["exit_code"] = 1
            logger.warning(f"RestrictedPython execution caught exception: {error_msg}", exc_info=False) # Avoid logging potentially sensitive details from code
        finally:
            stdout_capture.close() # Not used directly with PrintCollector
            stderr_capture.close()

        return result

    # --- ADK Compatibility Method ---
    if ADK_EXEC_AVAILABLE:
        def execute_code(self, invocation_context: InvocationContext, code_input: CodeExecutionInput) -> CodeExecutionResult:
            logger.debug(f"RestrictedPythonExecutor executing ADK request (lang: {code_input.language}). Code: {code_input.code[:100]}...")
            if code_input.language.lower() != 'python':
                 return CodeExecutionResult(output=f"Error: Unsupported language '{code_input.language}'. Only Python is supported.", outcome="OUTCOME_FAILURE")

            exec_result = self._execute(code_input.code)

            output_str = ""
            if exec_result["stdout"]:
                output_str += f"Stdout:\n{exec_result['stdout']}\n"
            if exec_result["stderr"]:
                 output_str += f"Stderr:\n{exec_result['stderr']}\n"
            if not output_str and exec_result["exit_code"] == 0:
                 output_str = "Execution successful with no output."
            elif not output_str and exec_result["exit_code"] != 0:
                 output_str = f"Execution failed with no output (Exit code: {exec_result['exit_code']}). Error: {exec_result['error']}"


            outcome = "OUTCOME_SUCCESS" if exec_result["exit_code"] == 0 else "OUTCOME_FAILURE"

            return CodeExecutionResult(output=output_str.strip(), outcome=outcome)
    # --- End ADK Compatibility ---

    # --- Direct Call Method ---
    def execute(self, code: str) -> dict[str, Any]:
        """Directly execute code, returning detailed dictionary."""
        logger.debug(f"RestrictedPythonExecutor executing direct call. Code: {code[:100]}...")
        return self._execute(code)
execute(code)

Directly execute code, returning detailed dictionary.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
def execute(self, code: str) -> dict[str, Any]:
    """Directly execute code, returning detailed dictionary."""
    logger.debug(f"RestrictedPythonExecutor executing direct call. Code: {code[:100]}...")
    return self._execute(code)
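
A minimal direct-call sketch. The result keys follow the code above; the zero-argument constructor and the snippet being run are assumptions for illustration:

from toolboxv2.mods.isaa.base.Agent.executors import RestrictedPythonExecutor

executor = RestrictedPythonExecutor()  # assumes the default limits are acceptable
result = executor.execute("x = 6 * 7\nprint(x)")

# The result dictionary carries "stdout", "stderr", "error" and "exit_code" (0 on success).
if result["exit_code"] == 0:
    print(result["stdout"])
else:
    print(result["stderr"])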
get_code_executor(config)

Creates a code executor instance based on configuration.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
def get_code_executor(config: 'AgentConfig') -> RestrictedPythonExecutor | DockerCodeExecutor | BaseCodeExecutor | None:
    """Creates a code executor instance based on configuration."""
    executor_type = config.code_executor_type
    executor_config = config.code_executor_config or {}

    if executor_type == "restricted":
        if not RESTRICTEDPYTHON_AVAILABLE:
            logger.error("RestrictedPython executor configured but library not installed. Code execution disabled.")
            return None
        return RestrictedPythonExecutor(**executor_config)
    elif executor_type == "docker":
        if not DOCKER_AVAILABLE:
            logger.error("Docker executor configured but library not installed or Docker not running. Code execution disabled.")
            return None
        try:
            return DockerCodeExecutor(**executor_config)
        except Exception as e:
            logger.error(f"Failed to initialize DockerCodeExecutor: {e}. Code execution disabled.")
            return None
    elif executor_type == "none":
        logger.info("Code execution explicitly disabled in configuration.")
        return None
    elif executor_type and ADK_EXEC_AVAILABLE and isinstance(executor_type, BaseCodeExecutor):
        # Allow passing a pre-configured ADK executor instance
        logger.info(f"Using pre-configured ADK code executor: {type(executor_type).__name__}")
        return executor_type
    else:
        logger.warning(f"Unknown or unsupported code_executor_type: '{executor_type}'. Code execution disabled.")
        return None
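
For orientation, a sketch of driving executor selection from configuration. SimpleNamespace stands in for AgentConfig here; only the two fields the factory reads are set, and the values shown are illustrative:

from types import SimpleNamespace

from toolboxv2.mods.isaa.base.Agent.executors import get_code_executor

# Illustrative stand-in for AgentConfig.
config = SimpleNamespace(
    code_executor_type="restricted",  # "restricted", "docker", "none", or a pre-built ADK executor
    code_executor_config={},          # kwargs forwarded to the chosen executor class
)

executor = get_code_executor(config)
if executor is None:
    print("Code execution is disabled or the required backend is unavailable.")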
utils
LLMMessage dataclass

Represents a message in a conversation with the LLM.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
@dataclass
class LLMMessage:
    """Represents a message in a conversation with the LLM."""
    role: str  # "user", "assistant", "system", "tool"
    # Content can be string or list (e.g., multimodal with text/image dicts)
    # Conforms to LiteLLM/OpenAI structure
    content: str | list[dict[str, Any]]
    tool_call_id: str | None = None  # For tool responses
    name: str | None = None  # For tool calls/responses (function name)

    def to_dict(self) -> dict:
        """Convert to dictionary, handling potential dataclass nuances."""
        d = {"role": self.role, "content": self.content}
        if self.tool_call_id:
            d["tool_call_id"] = self.tool_call_id
        if self.name:
            d["name"] = self.name
        return d
to_dict()

Convert to dictionary, handling potential dataclass nuances.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
def to_dict(self) -> dict:
    """Convert to dictionary, handling potential dataclass nuances."""
    d = {"role": self.role, "content": self.content}
    if self.tool_call_id:
        d["tool_call_id"] = self.tool_call_id
    if self.name:
        d["name"] = self.name
    return d
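
A short sketch of building messages and serializing them in the LiteLLM/OpenAI shape; the values are illustrative:

from toolboxv2.mods.isaa.base.Agent.utils import LLMMessage

user_msg = LLMMessage(role="user", content="What is 2 + 2?")
tool_msg = LLMMessage(role="tool", content="4", tool_call_id="call_1", name="calculator")

# to_dict() omits tool_call_id and name when they are unset.
print(user_msg.to_dict())  # {'role': 'user', 'content': 'What is 2 + 2?'}
print(tool_msg.to_dict())  # additionally carries tool_call_id and name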
WorldModel dataclass

Thread-safe representation of the agent's persistent understanding of the world.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
@dataclass
class WorldModel:
    """Thread-safe representation of the agent's persistent understanding of the world."""
    data: dict[str, Any] = dataclass_field(default_factory=dict)
    _lock: threading.Lock = dataclass_field(default_factory=threading.Lock)

    def get(self, key: str, default: Any = None) -> Any:
        with self._lock:
            return self.data.get(key, default)

    def set(self, key: str, value: Any):
        with self._lock:
            logger_wm.debug(f"WorldModel SET: {key} = {value}")
            self.data[key] = value

    def remove(self, key: str):
        with self._lock:
            if key in self.data:
                logger_wm.debug(f"WorldModel REMOVE: {key}")
                del self.data[key]

    def show(self) -> str:
        with self._lock:
            if not self.data:
                return "[empty]"
            try:
                items = [f"- {k}: {json.dumps(v, indent=None, ensure_ascii=False, default=str)}"
                         for k, v in self.data.items()]
                return "\n".join(items)
            except Exception:
                items = [f"- {k}: {str(v)}" for k, v in self.data.items()]
                return "\n".join(items)

    def to_dict(self) -> dict[str, Any]:
        with self._lock:
            # Deep copy might be needed if values are mutable and modified externally
            # For simplicity, shallow copy is used here.
            return self.data.copy()

    def update_from_dict(self, data_dict: dict[str, Any]):
        with self._lock:
            self.data.update(data_dict)
            logger_wm.debug(f"WorldModel updated from dict: {list(data_dict.keys())}")
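
A usage sketch; every call shown is one of the methods defined above, and all of them take the internal lock, so the instance can be shared across threads:

from toolboxv2.mods.isaa.base.Agent.utils import WorldModel

wm = WorldModel()
wm.set("user_name", "Ada")                 # locked write
print(wm.get("user_name", "unknown"))      # locked read
wm.update_from_dict({"task": "demo", "step": 1})
print(wm.show())                           # one "- key: value" line per entry
snapshot = wm.to_dict()                    # shallow copy of the underlying dict
wm.remove("step")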
AgentUtils
AISemanticMemory
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
class AISemanticMemory(metaclass=Singleton):
    def __init__(self,
                 base_path: str = "/semantic_memory",
                 default_model: str = os.getenv("DEFAULTMODELSUMMERY"),
                 default_embedding_model: str = os.getenv("DEFAULTMODELEMBEDDING"),
                 default_similarity_threshold: float = 0.61,
                 default_batch_size: int = 64,
                 default_n_clusters: int = 2,
                 default_deduplication_threshold: float = 0.85):
        """
        Initialize AISemanticMemory with KnowledgeBase integration

        Args:
            base_path: Root directory for memory storage
            default_model: Default model for text generation
            default_embedding_model: Default embedding model
            default_similarity_threshold: Default similarity threshold for retrieval
            default_batch_size: Default batch size for processing
            default_n_clusters: Default number of clusters for FAISS
            default_deduplication_threshold: Default threshold for deduplication
        """
        self.base_path = os.path.join(os.getcwd(), ".data", base_path)
        self.memories: dict[str, KnowledgeBase] = {}

        # Map of embedding models to their dimensions
        self.embedding_dims = {
            "text-embedding-3-small": 1536,
            "text-embedding-3-large": 3072,
            "nomic-embed-text": 768,
            "default": 768
        }

        self.default_config = {
            "embedding_model": default_embedding_model,
            "embedding_dim": self._get_embedding_dim(default_embedding_model),
            "similarity_threshold": default_similarity_threshold,
            "batch_size": default_batch_size,
            "n_clusters": default_n_clusters,
            "deduplication_threshold": default_deduplication_threshold,
            "model_name": default_model
        }

    def _get_embedding_dim(self, model_name: str) -> int:
        """Get embedding dimension for a model"""
        return self.embedding_dims.get(model_name, 768)

    @staticmethod
    def _sanitize_name(name: str) -> str:
        """Sanitize memory name for filesystem safety"""
        name = re.sub(r'[^a-zA-Z0-9_-]', '-', name)[:63].strip('-')
        if not name:
            raise ValueError("Invalid memory name")
        if len(name) < 3:
            name += "Z" * (3 - len(name))
        return name

    def create_memory(self,
                      name: str,
                      model_config: dict | None = None,
                      storage_config: dict | None = None) -> KnowledgeBase:
        """
        Create new memory store with KnowledgeBase

        Args:
            name: Unique name for the memory store
            model_config: Configuration for embedding model
            storage_config: Configuration for KnowledgeBase parameters
        """
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            raise ValueError(f"Memory '{name}' already exists")

        # Determine embedding model and dimension
        embedding_model = self.default_config["embedding_model"]
        model_name = self.default_config["model_name"]
        if model_config:
            embedding_model = model_config.get("embedding_model", embedding_model)
            model_name = model_config.get("model_name", model_name)
        embedding_dim = self._get_embedding_dim(embedding_model)

        # Get KnowledgeBase parameters
        kb_params = {
            "embedding_dim": embedding_dim,
            "embedding_model": embedding_model,
            "similarity_threshold": self.default_config["similarity_threshold"],
            "batch_size": self.default_config["batch_size"],
            "n_clusters": self.default_config["n_clusters"],
            "deduplication_threshold": self.default_config["deduplication_threshold"],
            "model_name": model_name,
        }

        if storage_config:
            kb_params.update({
                "similarity_threshold": storage_config.get("similarity_threshold", kb_params["similarity_threshold"]),
                "batch_size": storage_config.get("batch_size", kb_params["batch_size"]),
                "n_clusters": storage_config.get("n_clusters", kb_params["n_clusters"]),
                "model_name": storage_config.get("model_name", kb_params["model_name"]),
                "embedding_model": storage_config.get("embedding_model", kb_params["embedding_model"]),
                "deduplication_threshold": storage_config.get("deduplication_threshold",
                                                              kb_params["deduplication_threshold"]),
            })

        # Create KnowledgeBase instance
        self.memories[sanitized] = KnowledgeBase(**kb_params)
        return self.memories[sanitized]

    async def add_data(self,
                       memory_name: str,
                       data: str | list[str] | bytes | dict,
                       metadata: dict | None = None) -> bool:
        """
        Add data to memory store

        Args:
            memory_name: Target memory store
            data: Text, list of texts, binary file, or structured data
            metadata: Optional metadata
        """
        name = self._sanitize_name(memory_name)
        kb = self.memories.get(name)
        if not kb:
            kb = self.create_memory(name)

        # Process input data
        texts = []
        if isinstance(data, bytes):
            try:
                import textract
                text = textract.process(data).decode('utf-8')
                texts = [text.replace('\\t', '').replace('\t', '')]
            except Exception as e:
                raise ValueError(f"File processing failed: {str(e)}")
        elif isinstance(data, str):
            texts = [data.replace('\\t', '').replace('\t', '')]
        elif isinstance(data, list):
            texts = [d.replace('\\t', '').replace('\t', '') for d in data]
        elif isinstance(data, dict):
            # Custom KG not supported in current KnowledgeBase
            raise NotImplementedError("Custom knowledge graph insertion not supported")
        else:
            raise ValueError("Unsupported data type")

        # Add data to KnowledgeBase
        try:
            added, duplicates = await kb.add_data(texts, metadata)
            return added > 0
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            raise RuntimeError(f"Data addition failed: {str(e)}")

    def get(self, names):
        return [m for _, m in self._get_target_memories(names)]

    async def query(self,
                    query: str,
                    memory_names: str | list[str] | None = None,
                    query_params: dict | None = None,
                    to_str: bool = False,
                    unified_retrieve: bool = False) -> str | list[dict]:
        """
        Query memories using KnowledgeBase retrieval

        Args:
            query: Search query
            memory_names: Target memory names
            query_params: Query parameters
            to_str: Return string format
            unified_retrieve: Use unified retrieval (kb.unified_retrieve) instead of overview retrieval
        """
        targets = self._get_target_memories(memory_names)
        if not targets:
            return []

        results = []
        for name, kb in targets:
            # Use KnowledgeBase's retrieve_with_overview for comprehensive results
            result = await kb.retrieve_with_overview(
                query=query,
                k=query_params.get("k", 3) if query_params else 3,
                min_similarity=query_params.get("min_similarity", 0.2) if query_params else 0.2,
                cross_ref_depth=query_params.get("cross_ref_depth", 2) if query_params else 2,
                max_cross_refs=query_params.get("max_cross_refs", 2) if query_params else 2,
                max_sentences=query_params.get("max_sentences", 5) if query_params else 5
            ) if not unified_retrieve else await kb.unified_retrieve(
                query=query,
                k=query_params.get("k", 2) if query_params else 2,
                min_similarity=query_params.get("min_similarity", 0.2) if query_params else 0.2,
                cross_ref_depth=query_params.get("cross_ref_depth", 2) if query_params else 2,
                max_cross_refs=query_params.get("max_cross_refs", 6) if query_params else 6,
                max_sentences=query_params.get("max_sentences", 12) if query_params else 12
            )
            results.append({
                "memory": name,
                "result": result
            })
        if to_str:
            if not unified_retrieve:
                str_res = [
                    f"{x['memory']} - {json.dumps(x['result'].overview)}\n - {[c.text for c in x['result'].details]}\n - {[(k, [c.text for c in v]) for k, v in x['result'].cross_references.items()]}"
                    for x in results]
            else:
                str_res = json.dumps(results)
            return str_res
        return results

    def _get_target_memories(self, memory_names: str | list[str] | None) -> list[tuple[str, KnowledgeBase]]:
        """Get target memories for query"""
        if not memory_names:
            return list(self.memories.items())

        names = [memory_names] if isinstance(memory_names, str) else memory_names

        targets = []
        for name in names:
            sanitized = self._sanitize_name(name)
            if kb := self.memories.get(sanitized):
                targets.append((sanitized, kb))
        return targets

    def list_memories(self) -> list[str]:
        """List all available memories"""
        return list(self.memories.keys())

    async def delete_memory(self, name: str) -> bool:
        """Delete a memory store"""
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            del self.memories[sanitized]
            return True
        return False

    def save_memory(self, name: str, path: str) -> bool | bytes:
        """Save a memory store to disk"""
        sanitized = self._sanitize_name(name)
        if kb := self.memories.get(sanitized):
            try:
                return kb.save(path)
            except Exception as e:
                print(f"Error saving memory: {str(e)}")
                return False
        return False

    def save_all_memories(self, path: str) -> bool:
        """Save all memory stores to disk"""
        for name, kb in self.memories.items():
            try:
                kb.save(os.path.join(path, f"{name}.pkl"))
            except Exception as e:
                print(f"Error saving memory: {str(e)}")
                return False
        return True

    def load_all_memories(self, path: str) -> bool:
        """Load all memory stores from disk"""
        for file in os.listdir(path):
            if file.endswith(".pkl"):
                try:
                    self.memories[file[:-4]] = KnowledgeBase.load(os.path.join(path, file))
                except Exception as e:
                    print(f"Error loading memory: {str(e)}")
                    return False
        return True

    def load_memory(self, name: str, path: str | bytes) -> bool:
        """Load a memory store from disk"""
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            return False
        try:
            self.memories[sanitized] = KnowledgeBase.load(path)
            return True
        except Exception:
            # print(f"Error loading memory: {str(e)}")
            return False
__init__(base_path='/semantic_memory', default_model=os.getenv('DEFAULTMODELSUMMERY'), default_embedding_model=os.getenv('DEFAULTMODELEMBEDDING'), default_similarity_threshold=0.61, default_batch_size=64, default_n_clusters=2, default_deduplication_threshold=0.85)

Initialize AISemanticMemory with KnowledgeBase integration

Parameters:

base_path (str): Root directory for memory storage. Default: '/semantic_memory'
default_model (str): Default model for text generation. Default: getenv('DEFAULTMODELSUMMERY')
default_embedding_model (str): Default embedding model. Default: getenv('DEFAULTMODELEMBEDDING')
default_similarity_threshold (float): Default similarity threshold for retrieval. Default: 0.61
default_batch_size (int): Default batch size for processing. Default: 64
default_n_clusters (int): Default number of clusters for FAISS. Default: 2
default_deduplication_threshold (float): Default threshold for deduplication. Default: 0.85
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def __init__(self,
             base_path: str = "/semantic_memory",
             default_model: str = os.getenv("DEFAULTMODELSUMMERY"),
             default_embedding_model: str = os.getenv("DEFAULTMODELEMBEDDING"),
             default_similarity_threshold: float = 0.61,
             default_batch_size: int = 64,
             default_n_clusters: int = 2,
             default_deduplication_threshold: float = 0.85):
    """
    Initialize AISemanticMemory with KnowledgeBase integration

    Args:
        base_path: Root directory for memory storage
        default_model: Default model for text generation
        default_embedding_model: Default embedding model
        default_similarity_threshold: Default similarity threshold for retrieval
        default_batch_size: Default batch size for processing
        default_n_clusters: Default number of clusters for FAISS
        default_deduplication_threshold: Default threshold for deduplication
    """
    self.base_path = os.path.join(os.getcwd(), ".data", base_path)
    self.memories: dict[str, KnowledgeBase] = {}

    # Map of embedding models to their dimensions
    self.embedding_dims = {
        "text-embedding-3-small": 1536,
        "text-embedding-3-large": 3072,
        "nomic-embed-text": 768,
        "default": 768
    }

    self.default_config = {
        "embedding_model": default_embedding_model,
        "embedding_dim": self._get_embedding_dim(default_embedding_model),
        "similarity_threshold": default_similarity_threshold,
        "batch_size": default_batch_size,
        "n_clusters": default_n_clusters,
        "deduplication_threshold": default_deduplication_threshold,
        "model_name": default_model
    }
add_data(memory_name, data, metadata=None) async

Add data to memory store

Parameters:

memory_name (str): Target memory store. Required.
data (str | list[str] | bytes | dict): Text, list of texts, binary file, or structured data. Required.
metadata (dict | None): Optional metadata. Default: None
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
async def add_data(self,
                   memory_name: str,
                   data: str | list[str] | bytes | dict,
                   metadata: dict | None = None) -> bool:
    """
    Add data to memory store

    Args:
        memory_name: Target memory store
        data: Text, list of texts, binary file, or structured data
        metadata: Optional metadata
    """
    name = self._sanitize_name(memory_name)
    kb = self.memories.get(name)
    if not kb:
        kb = self.create_memory(name)

    # Process input data
    texts = []
    if isinstance(data, bytes):
        try:
            import textract
            text = textract.process(data).decode('utf-8')
            texts = [text.replace('\\t', '').replace('\t', '')]
        except Exception as e:
            raise ValueError(f"File processing failed: {str(e)}")
    elif isinstance(data, str):
        texts = [data.replace('\\t', '').replace('\t', '')]
    elif isinstance(data, list):
        texts = [d.replace('\\t', '').replace('\t', '') for d in data]
    elif isinstance(data, dict):
        # Custom KG not supported in current KnowledgeBase
        raise NotImplementedError("Custom knowledge graph insertion not supported")
    else:
        raise ValueError("Unsupported data type")

    # Add data to KnowledgeBase
    try:
        added, duplicates = await kb.add_data(texts, metadata)
        return added > 0
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        raise RuntimeError(f"Data addition failed: {str(e)}")
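
A minimal storage sketch. It assumes the DEFAULTMODELSUMMERY and DEFAULTMODELEMBEDDING environment variables are set so the auto-created KnowledgeBase is usable; the store name and text are illustrative:

import asyncio

from toolboxv2.mods.isaa.base.AgentUtils import AISemanticMemory

async def main():
    memory = AISemanticMemory()  # Singleton instance
    # The store is created on first use if it does not exist yet.
    stored = await memory.add_data(
        "project-notes",
        "ToolboxV2 agents can run code in a restricted sandbox.",
        metadata={"source": "docs"},
    )
    print("stored:", stored)

asyncio.run(main())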
create_memory(name, model_config=None, storage_config=None)

Create new memory store with KnowledgeBase

Parameters:

name (str): Unique name for the memory store. Required.
model_config (dict | None): Configuration for embedding model. Default: None
storage_config (dict | None): Configuration for KnowledgeBase parameters. Default: None
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def create_memory(self,
                  name: str,
                  model_config: dict | None = None,
                  storage_config: dict | None = None) -> KnowledgeBase:
    """
    Create new memory store with KnowledgeBase

    Args:
        name: Unique name for the memory store
        model_config: Configuration for embedding model
        storage_config: Configuration for KnowledgeBase parameters
    """
    sanitized = self._sanitize_name(name)
    if sanitized in self.memories:
        raise ValueError(f"Memory '{name}' already exists")

    # Determine embedding model and dimension
    embedding_model = self.default_config["embedding_model"]
    model_name = self.default_config["model_name"]
    if model_config:
        embedding_model = model_config.get("embedding_model", embedding_model)
        model_name = model_config.get("model_name", model_name)
    embedding_dim = self._get_embedding_dim(embedding_model)

    # Get KnowledgeBase parameters
    kb_params = {
        "embedding_dim": embedding_dim,
        "embedding_model": embedding_model,
        "similarity_threshold": self.default_config["similarity_threshold"],
        "batch_size": self.default_config["batch_size"],
        "n_clusters": self.default_config["n_clusters"],
        "deduplication_threshold": self.default_config["deduplication_threshold"],
        "model_name": model_name,
    }

    if storage_config:
        kb_params.update({
            "similarity_threshold": storage_config.get("similarity_threshold", kb_params["similarity_threshold"]),
            "batch_size": storage_config.get("batch_size", kb_params["batch_size"]),
            "n_clusters": storage_config.get("n_clusters", kb_params["n_clusters"]),
            "model_name": storage_config.get("model_name", kb_params["model_name"]),
            "embedding_model": storage_config.get("embedding_model", kb_params["embedding_model"]),
            "deduplication_threshold": storage_config.get("deduplication_threshold",
                                                          kb_params["deduplication_threshold"]),
        })

    # Create KnowledgeBase instance
    self.memories[sanitized] = KnowledgeBase(**kb_params)
    return self.memories[sanitized]
delete_memory(name) async

Delete a memory store

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
async def delete_memory(self, name: str) -> bool:
    """Delete a memory store"""
    sanitized = self._sanitize_name(name)
    if sanitized in self.memories:
        del self.memories[sanitized]
        return True
    return False
list_memories()

List all available memories

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def list_memories(self) -> list[str]:
    """List all available memories"""
    return list(self.memories.keys())
load_all_memories(path)

Load all memory stores from disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def load_all_memories(self, path: str) -> bool:
    """Load all memory stores from disk"""
    for file in os.listdir(path):
        if file.endswith(".pkl"):
            try:
                self.memories[file[:-4]] = KnowledgeBase.load(os.path.join(path, file))
            except Exception as e:
                print(f"Error loading memory: {str(e)}")
                return False
    return True
load_memory(name, path)

Load a memory store from disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def load_memory(self, name: str, path: str | bytes) -> bool:
    """Load a memory store from disk"""
    sanitized = self._sanitize_name(name)
    if sanitized in self.memories:
        return False
    try:
        self.memories[sanitized] = KnowledgeBase.load(path)
        return True
    except Exception:
        # print(f"Error loading memory: {str(e)}")
        return False
query(query, memory_names=None, query_params=None, to_str=False, unified_retrieve=False) async

Query memories using KnowledgeBase retrieval

Parameters:

query (str): Search query. Required.
memory_names (str | list[str] | None): Target memory names. Default: None
query_params (dict | None): Query parameters (k, min_similarity, cross_ref_depth, max_cross_refs, max_sentences). Default: None
to_str (bool): Return a formatted string instead of result dicts. Default: False
unified_retrieve (bool): Use unified retrieval (kb.unified_retrieve) instead of overview retrieval. Default: False
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
async def query(self,
                query: str,
                memory_names: str | list[str] | None = None,
                query_params: dict | None = None,
                to_str: bool = False,
                unified_retrieve: bool = False) -> str | list[dict]:
    """
    Query memories using KnowledgeBase retrieval

    Args:
        query: Search query
        memory_names: Target memory names
        query_params: Query parameters
        to_str: Return string format
        unified_retrieve: Use unified retrieval (kb.unified_retrieve) instead of overview retrieval
    """
    targets = self._get_target_memories(memory_names)
    if not targets:
        return []

    results = []
    for name, kb in targets:
        # Use KnowledgeBase's retrieve_with_overview for comprehensive results
        result = await kb.retrieve_with_overview(
            query=query,
            k=query_params.get("k", 3) if query_params else 3,
            min_similarity=query_params.get("min_similarity", 0.2) if query_params else 0.2,
            cross_ref_depth=query_params.get("cross_ref_depth", 2) if query_params else 2,
            max_cross_refs=query_params.get("max_cross_refs", 2) if query_params else 2,
            max_sentences=query_params.get("max_sentences", 5) if query_params else 5
        ) if not unified_retrieve else await kb.unified_retrieve(
            query=query,
            k=query_params.get("k", 2) if query_params else 2,
            min_similarity=query_params.get("min_similarity", 0.2) if query_params else 0.2,
            cross_ref_depth=query_params.get("cross_ref_depth", 2) if query_params else 2,
            max_cross_refs=query_params.get("max_cross_refs", 6) if query_params else 6,
            max_sentences=query_params.get("max_sentences", 12) if query_params else 12
        )
        results.append({
            "memory": name,
            "result": result
        })
    if to_str:
        if not unified_retrieve:
            str_res = [
                f"{x['memory']} - {json.dumps(x['result'].overview)}\n - {[c.text for c in x['result'].details]}\n - {[(k, [c.text for c in v]) for k, v in x['result'].cross_references.items()]}"
                for x in results]
        else:
            str_res = json.dumps(results)
        return str_res
    return results
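
Continuing the add_data sketch above, a query against a single store; the parameter values mirror the defaults in the code, and the store name is illustrative:

import asyncio

from toolboxv2.mods.isaa.base.AgentUtils import AISemanticMemory

async def main():
    memory = AISemanticMemory()
    answer = await memory.query(
        "code execution",
        memory_names="project-notes",                  # str, list[str], or None for all stores
        query_params={"k": 3, "min_similarity": 0.2},  # missing keys fall back to defaults
        to_str=True,                                   # formatted string instead of result dicts
    )
    print(answer)

asyncio.run(main())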
save_all_memories(path)

Save all memory stores to disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def save_all_memories(self, path: str) -> bool:
    """Save all memory stores to disk"""
    for name, kb in self.memories.items():
        try:
            kb.save(os.path.join(path, f"{name}.pkl"))
        except Exception as e:
            print(f"Error saving memory: {str(e)}")
            return False
    return True
save_memory(name, path)

Save a memory store to disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def save_memory(self, name: str, path: str) -> bool | bytes:
    """Save a memory store to disk"""
    sanitized = self._sanitize_name(name)
    if kb := self.memories.get(sanitized):
        try:
            return kb.save(path)
        except Exception as e:
            print(f"Error saving memory: {str(e)}")
            return False
    return False
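
A save/load round-trip sketch; the paths are illustrative, the backup directory is assumed to exist, and save_memory simply delegates to KnowledgeBase.save(path):

from toolboxv2.mods.isaa.base.AgentUtils import AISemanticMemory

memory = AISemanticMemory()
memory.save_all_memories("./backups")  # writes one <name>.pkl per store
memory.save_memory("project-notes", "./backups/project-notes.pkl")

memory.load_memory("restored-notes", "./backups/project-notes.pkl")
memory.load_all_memories("./backups")  # loads every *.pkl in the directory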
PyEnvEval
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
class PyEnvEval:
    def __init__(self):
        self.local_env = locals().copy()
        self.global_env = {'local_env': self.local_env}  # globals().copy()

    def eval_code(self, code):
        try:
            exec(code, self.global_env, self.local_env)
            result = eval(code, self.global_env, self.local_env)
            return self.format_output(result)
        except Exception as e:
            return self.format_output(str(e))

    def get_env(self):
        local_env_str = self.format_env(self.local_env)
        return f'Locals:\n{local_env_str}'

    @staticmethod
    def format_output(output):
        return f'Result: {output}'

    @staticmethod
    def format_env(env):
        return '\n'.join(f'{key}: {value}' for key, value in env.items())

    def run_and_display(self, python_code):
        """function to eval python code"""
        start = f'Start-state:\n{self.get_env()}'
        result = self.eval_code(python_code)
        end = f'End-state:\n{self.get_env()}'
        return f'{start}\nResult:\n{result}\n{end}'

    def tool(self):
        return {"PythonEval": {"func": self.run_and_display, "description": "Use Python Code to Get to an Persis Answer! input must be valid python code all non code parts must be comments!"}}
run_and_display(python_code)

function to eval python code

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def run_and_display(self, python_code):
    """function to eval python code"""
    start = f'Start-state:\n{self.get_env()}'
    result = self.eval_code(python_code)
    end = f'End-state:\n{self.get_env()}'
    return f'{start}\nResult:\n{result}\n{end}'
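
A quick sketch of the evaluation helper. Note that eval_code() both exec()s and eval()s the input, so plain expressions work best; statements are executed, but their portion of the report shows the eval() error:

from toolboxv2.mods.isaa.base.AgentUtils import PyEnvEval

env = PyEnvEval()
report = env.run_and_display("1 + 2")
print(report)  # embeds the evaluated result (3) between the start and end environment states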
anything_from_str_to_dict(data, expected_keys=None, mini_task=lambda x: '')

Attempts to convert a string into one or more dictionaries, taking the expected keys and their default values into account.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def anything_from_str_to_dict(data: str, expected_keys: dict = None, mini_task=lambda x: ''):
    """
    Attempts to convert a string into one or more dictionaries,
    taking the expected keys and their default values into account.
    """
    if len(data) < 4:
        return []

    if expected_keys is None:
        expected_keys = {}

    result = []
    json_objects = find_json_objects_in_str(data)
    if not json_objects and data.startswith('[') and data.endswith(']'):
        json_objects = eval(data)
    if json_objects and len(json_objects) > 0 and isinstance(json_objects[0], dict):
        result.extend([{**expected_keys, **ob} for ob in json_objects])
    if not result:
        completed_object = complete_json_object(data, mini_task)
        if completed_object is not None:
            result.append(completed_object)
    if len(result) == 0 and expected_keys:
        result = [{list(expected_keys.keys())[0]: data}]
    for res in result:
        if isinstance(res, list) and len(res) > 0:
            res = res[0]
        for key, value in expected_keys.items():
            if key not in res:
                res[key] = value

    if len(result) == 0:
        fixed = fix_json(data)
        if fixed:
            result.append(fixed)

    return result
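
An illustrative call; the expected_keys mapping seeds defaults and backfills missing keys, so every returned dictionary carries at least those keys:

from toolboxv2.mods.isaa.base.AgentUtils import anything_from_str_to_dict

raw = 'Model output: {"action": "search", "query": "toolboxv2"} trailing chatter'
parsed = anything_from_str_to_dict(raw, expected_keys={"action": "", "query": "", "limit": 10})
# Expected (illustrative): [{'action': 'search', 'query': 'toolboxv2', 'limit': 10}]
print(parsed)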
complete_json_object(data, mini_task)

Calls a helper function to bring a string into the correct format. Returns the resulting JSON object if the call succeeds, otherwise None.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def complete_json_object(data: str, mini_task):
    """
    Calls a helper function to bring a string into the correct format.
    Returns the resulting JSON object if the call succeeds, otherwise None.
    """
    ret = mini_task(
        f"Vervollständige das Json Object. Und bringe den string in das Richtige format. data={data}\nJson=")
    if ret:
        return anything_from_str_to_dict(ret)
    return None
find_json_objects_in_str(data)

Searches for JSON objects within a string. Returns a list of the JSON objects found in the string.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def find_json_objects_in_str(data: str):
    """
    Searches for JSON objects within a string.
    Returns a list of the JSON objects found in the string.
    """
    json_objects = extract_json_objects(data)
    if not isinstance(json_objects, list):
        json_objects = [json_objects]
    return [get_json_from_json_str(ob, 10) for ob in json_objects if get_json_from_json_str(ob, 10) is not None]
get_json_from_json_str(json_str, repeat=1)

Attempts to convert a JSON string into a Python object.

If an error occurs during parsing, the function tries to repair the problem by replacing the character at the error position with an escape character. This process is repeated up to `repeat` times.

Parameters:

json_str (str or list or dict): The JSON string to parse. Required.
repeat (int): The number of parsing attempts. Default: 1

Returns:

dict or None: The resulting Python object.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def get_json_from_json_str(json_str: str or list or dict, repeat: int = 1) -> dict or None:
    """Versucht, einen JSON-String in ein Python-Objekt umzuwandeln.

    Wenn beim Parsen ein Fehler auftritt, versucht die Funktion, das Problem zu beheben,
    indem sie das Zeichen an der Position des Fehlers durch ein Escape-Zeichen ersetzt.
    Dieser Vorgang wird bis zu `repeat`-mal wiederholt.

    Args:
        json_str: Der JSON-String, der geparst werden soll.
        repeat: Die Anzahl der Versuche, das Parsen durchzuführen.

    Returns:
        Das resultierende Python-Objekt.
    """
    for _ in range(repeat):
        try:
            return parse_json_with_auto_detection(json_str)
        except json.JSONDecodeError as e:
            unexp = int(re.findall(r'\(char (\d+)\)', str(e))[0])
            unesc = json_str.rfind(r'"', 0, unexp)
            json_str = json_str[:unesc] + r'\"' + json_str[unesc + 1:]
            closg = json_str.find(r'"', unesc + 2)
            json_str = json_str[:closg] + r'\"' + json_str[closg + 1:]
        new = fix_json_object(json_str)
        if new is not None:
            json_str = new
    get_logger().info(f"Unable to parse JSON string after {repeat} attempt(s): {json_str}")
    return None
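
A minimal sketch; parse_json_with_auto_detection does the actual parsing, and `repeat` bounds the escape-and-retry repair rounds on decode errors:

from toolboxv2.mods.isaa.base.AgentUtils import get_json_from_json_str

data = get_json_from_json_str('{"key": "value"}', repeat=2)
print(data)  # {'key': 'value'}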
parse_json_with_auto_detection(json_data)

Parses JSON data, automatically detecting if a value is a JSON string and parsing it accordingly. If a value cannot be parsed as JSON, it is returned as is.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
def parse_json_with_auto_detection(json_data):
    """
    Parses JSON data, automatically detecting if a value is a JSON string and parsing it accordingly.
    If a value cannot be parsed as JSON, it is returned as is.
    """

    def try_parse_json(value):
        """
        Tries to parse a value as JSON. If the parsing fails, the original value is returned.
        """
        try:
            # print("parse_json_with_auto_detection:", type(value), value)
            parsed_value = json.loads(value)
            # print("parsed_value:", type(parsed_value), parsed_value)
            # If the parsed value is a string, it might be a JSON string, so we try to parse it again
            if isinstance(parsed_value, str):
                return eval(parsed_value)
            else:
                return parsed_value
        except Exception:
            # logging.warning(f"Failed to parse value as JSON: {value}. Exception: {e}")
            return value

    get_logger()

    if isinstance(json_data, dict):
        return {key: parse_json_with_auto_detection(value) for key, value in json_data.items()}
    elif isinstance(json_data, list):
        return [parse_json_with_auto_detection(item) for item in json_data]
    else:
        return try_parse_json(json_data)
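
An illustrative call showing the recursive detection of JSON-encoded string values:

from toolboxv2.mods.isaa.base.AgentUtils import parse_json_with_auto_detection

nested = {"payload": '{"inner": 1}', "plain": "hello", "items": ["[1, 2]", 3]}
print(parse_json_with_auto_detection(nested))
# {'payload': {'inner': 1}, 'plain': 'hello', 'items': [[1, 2], 3]}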
KnowledgeBase
Chunk dataclass

Represents a chunk of text with its embedding and metadata

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
@dataclass(slots=True)
class Chunk:
    """Represents a chunk of text with its embedding and metadata"""
    text: str
    embedding: np.ndarray
    metadata: dict[str, Any]
    content_hash: str
    cluster_id: int | None = None
ConceptAnalysis

Bases: BaseModel

Represents the analysis of key concepts.

Attributes:

key_concepts (list[str]): A list of primary key concepts identified.
relationships (list[str]): A list of relationships between the identified key concepts.
importance_hierarchy (list[str]): A list that represents the hierarchical importance of the key concepts.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class ConceptAnalysis(BaseModel):
    """
    Represents the analysis of key concepts.

    Attributes:
        key_concepts (list[str]): A list of primary key concepts identified.
        relationships (list[str]): A list of relationships between the identified key concepts.
        importance_hierarchy (list[str]): A list that represents the hierarchical importance of the key concepts.
    """
    key_concepts: list[str]
    relationships: list[str]
    importance_hierarchy: list[str]
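
A construction sketch; ConceptAnalysis is a Pydantic model, and the field values here are illustrative:

from toolboxv2.mods.isaa.base.KnowledgeBase import ConceptAnalysis

analysis = ConceptAnalysis(
    key_concepts=["embeddings", "clustering"],
    relationships=["clustering groups embeddings by similarity"],
    importance_hierarchy=["embeddings", "clustering"],
)
print(analysis.model_dump())  # use .dict() instead on Pydantic v1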
ConceptExtractor

Handles extraction of concepts and relationships from text

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class ConceptExtractor:
    """Handles extraction of concepts and relationships from text"""

    def __init__(self, knowledge_base, requests_per_second = 85.):
        self.kb = knowledge_base
        self.concept_graph = ConceptGraph()
        self.requests_per_second = requests_per_second

    async def extract_concepts(self, texts: list[str], metadatas: list[dict[str, Any]]) -> list[list[Concept]]:
        """
        Extract concepts from texts using concurrent processing with rate limiting.
        Requests are made at the specified rate while responses are processed asynchronously.
        """
        # Ensure metadatas list matches texts length
        metadatas = metadatas + [{}] * (len(texts) - len(metadatas))

        # Initialize rate limiter
        rate_limiter = DynamicRateLimiter()

        system_prompt = (
            "Analyze the given text and extract key concepts and their relationships. For each concept:\n"
            "1. Identify the concept name and category (technical, domain, method, property, ...)\n"
            "2. Determine relationships with other concepts (uses, part_of, similar_to, depends_on, ...)\n"
            "3. Assess importance (0-1 score) based on centrality to the text\n"
            "4. Extract relevant context snippets\n"
            "5. Max 5 Concepts!\n"
            "only return in json format!\n"
            """{"concepts": [{
                "name": "concept_name",
                "category": "category_name",
                "relationships": {
                    "relationship_type": ["related_concept1", "related_concept2"]
                },
                "importance_score": 0.0,
                "context_snippets": ["relevant text snippet"]
            }]}\n"""
        )

        # Prepare all requests
        requests = [
            (idx, f"Text to Convert in to JSON structure:\n{text}", system_prompt, metadata)
            for idx, (text, metadata) in enumerate(zip(texts, metadatas, strict=False))
        ]

        async def process_single_request(idx: int, prompt: str, system_prompt: str, metadata: dict[str, Any]):
            """Process a single request with rate limiting"""
            try:
                # Wait for rate limit
                await rate_limiter.acquire()
                i__[1] += 1
                # Make API call without awaiting the response
                response_future = litellm_complete(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    response_format=Concepts,
                    model_name=self.kb.model_name,
                    fallbacks=["groq/gemma2-9b-it"] +
                              [m for m in os.getenv("FALLBACKS_MODELS_PREM", '').split(',') if m]
                )

                return idx, response_future

            except Exception as e:
                print(f"Error initiating request {idx}: {str(e)}")
                return idx, None

        async def process_response(idx: int, response_future) -> list[Concept]:
            """Process the response once it's ready"""
            try:
                if response_future is None:
                    return []

                response = await response_future
                return await self._process_response(response, metadatas[idx])

            except Exception as e:
                print(f"Error processing response {idx}: {str(e)}")
                return []

        # Create tasks for all requests
        request_tasks = []
        batch_size = self.kb.batch_size

        rate_limiter.update_rate(self.requests_per_second)

        for batch_start in range(0, len(requests), batch_size):
            batch = requests[batch_start:batch_start + batch_size]

            # Create tasks for the batch
            batch_tasks = [
                process_single_request(idx, prompt, sys_prompt, meta)
                for idx, prompt, sys_prompt, meta in batch
            ]
            request_tasks.extend(batch_tasks)

        # Execute all requests with rate limiting
        request_results = await asyncio.gather(*request_tasks)

        # Process responses as they complete
        response_tasks = [
            process_response(idx, response_future)
            for idx, response_future in request_results
        ]

        # Gather all results
        all_results = await asyncio.gather(*response_tasks)

        # Sort results by original index
        sorted_results = [[] for _ in texts]
        for idx, concepts in enumerate(all_results):
            sorted_results[idx] = concepts

        return sorted_results

    async def _process_response(self, response: Any, metadata: dict[str, Any]) -> list[Concept]:
        """Helper method to process a single response and convert it to Concepts"""
        try:
            # Extract content from response
            if hasattr(response, 'choices'):
                content = response.choices[0].message.content
                if content is None:
                    content = response.choices[0].message.tool_calls[0].function.arguments
                if content is None:
                    return []
            elif isinstance(response, str):
                content = response
            else:
                print(f"Unexpected response type: {type(response)}")
                return []

            # Parse JSON and create concepts
            concept_data = after_format(content)
            concepts = []

            for concept_info in concept_data.get("concepts", []):
                concept = Concept(
                    name=concept_info["name"],
                    category=concept_info.get("category", "N/A"),
                    relationships={k: set(v) for k, v in concept_info.get("relationships", {}).items()},
                    importance_score=concept_info.get("importance_score", 0.1),
                    context_snippets=concept_info.get("context_snippets", "N/A"),
                    metadata=metadata
                )
                concepts.append(concept)
                self.concept_graph.add_concept(concept)

            return concepts

        except Exception:
            i__[2] +=1
            return []

    async def process_chunks(self, chunks: list[Chunk]) -> None:
        """
        Process all chunks in batch to extract and store concepts.
        Each chunk's metadata will be updated with the concept names and relationships.
        """
        # Gather all texts from the chunks.
        texts = [chunk.text for chunk in chunks]
        # Call extract_concepts once with all texts.
        all_concepts = await self.extract_concepts(texts, [chunk.metadata for chunk in chunks])

        # Update each chunk's metadata with its corresponding concepts.
        for chunk, concepts in zip(chunks, all_concepts, strict=False):
            chunk.metadata["concepts"] = [c.name for c in concepts]
            chunk.metadata["concept_relationships"] = {
                c.name: {k: list(v) for k, v in c.relationships.items()}
                for c in concepts
            }

    async def query_concepts(self, query: str) -> dict[str, any]:
        """Query the concept graph based on natural language query"""

        system_prompt = """
        Convert the natural language query about concepts into a structured format that specifies:
        1. Main concepts of interest
        2. Desired relationship types
        3. Any category filters
        4. Importance threshold

        Format as JSON.
        """

        prompt = f"""
        Query: {query}

        Convert to this JSON structure:
        {{
            "target_concepts": ["concept1", "concept2"],
            "relationship_types": ["type1", "type2"],
            "categories": ["category1", "category2"],
            "min_importance": 0.0
        }}
        """

        try:
            response = await litellm_complete(
                model_name=self.kb.model_name,
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=TConcept
            )

            query_params = json.loads(response)

            results = {
                "concepts": {},
                "relationships": [],
                "groups": []
            }

            # Find matching concepts
            for concept_name in query_params["target_concepts"]:
                if concept_name in self.concept_graph.concepts:
                    concept = self.concept_graph.concepts[concept_name]
                    if concept.importance_score >= query_params["min_importance"]:
                        results["concepts"][concept_name] = {
                            "category": concept.category,
                            "importance": concept.importance_score,
                            "context": concept.context_snippets
                        }

                        # Get relationships
                        for rel_type in query_params["relationship_types"]:
                            related = self.concept_graph.get_related_concepts(
                                concept_name, rel_type
                            )
                            for related_concept in related:
                                results["relationships"].append({
                                    "from": concept_name,
                                    "to": related_concept,
                                    "type": rel_type
                                })

            # Group concepts by category
            category_groups = defaultdict(list)
            for concept_name, concept_info in results["concepts"].items():
                category_groups[concept_info["category"]].append(concept_name)
            results["groups"] = [
                {"category": cat, "concepts": concepts}
                for cat, concepts in category_groups.items()
            ]

            return results

        except Exception as e:
            print(f"Error querying concepts: {str(e)}")
            return {"concepts": {}, "relationships": [], "groups": []}
extract_concepts(texts, metadatas) async

Extract concepts from texts using concurrent processing with rate limiting. Requests are made at the specified rate while responses are processed asynchronously.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def extract_concepts(self, texts: list[str], metadatas: list[dict[str, Any]]) -> list[list[Concept]]:
    """
    Extract concepts from texts using concurrent processing with rate limiting.
    Requests are made at the specified rate while responses are processed asynchronously.
    """
    # Ensure metadatas list matches texts length
    metadatas = metadatas + [{}] * (len(texts) - len(metadatas))

    # Initialize rate limiter
    rate_limiter = DynamicRateLimiter()

    system_prompt = (
        "Analyze the given text and extract key concepts and their relationships. For each concept:\n"
        "1. Identify the concept name and category (technical, domain, method, property, ...)\n"
        "2. Determine relationships with other concepts (uses, part_of, similar_to, depends_on, ...)\n"
        "3. Assess importance (0-1 score) based on centrality to the text\n"
        "4. Extract relevant context snippets\n"
        "5. Max 5 Concepts!\n"
        "only return in json format!\n"
        """{"concepts": [{
            "name": "concept_name",
            "category": "category_name",
            "relationships": {
                "relationship_type": ["related_concept1", "related_concept2"]
            },
            "importance_score": 0.0,
            "context_snippets": ["relevant text snippet"]
        }]}\n"""
    )

    # Prepare all requests
    requests = [
        (idx, f"Text to Convert in to JSON structure:\n{text}", system_prompt, metadata)
        for idx, (text, metadata) in enumerate(zip(texts, metadatas, strict=False))
    ]

    async def process_single_request(idx: int, prompt: str, system_prompt: str, metadata: dict[str, Any]):
        """Process a single request with rate limiting"""
        try:
            # Wait for rate limit
            await rate_limiter.acquire()
            i__[1] += 1
            # Make API call without awaiting the response
            response_future = litellm_complete(
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=Concepts,
                model_name=self.kb.model_name,
                fallbacks=["groq/gemma2-9b-it"] +
                          [m for m in os.getenv("FALLBACKS_MODELS_PREM", '').split(',') if m]
            )

            return idx, response_future

        except Exception as e:
            print(f"Error initiating request {idx}: {str(e)}")
            return idx, None

    async def process_response(idx: int, response_future) -> list[Concept]:
        """Process the response once it's ready"""
        try:
            if response_future is None:
                return []

            response = await response_future
            return await self._process_response(response, metadatas[idx])

        except Exception as e:
            print(f"Error processing response {idx}: {str(e)}")
            return []

    # Create tasks for all requests
    request_tasks = []
    batch_size = self.kb.batch_size

    rate_limiter.update_rate(self.requests_per_second)

    for batch_start in range(0, len(requests), batch_size):
        batch = requests[batch_start:batch_start + batch_size]

        # Create tasks for the batch
        batch_tasks = [
            process_single_request(idx, prompt, sys_prompt, meta)
            for idx, prompt, sys_prompt, meta in batch
        ]
        request_tasks.extend(batch_tasks)

    # Execute all requests with rate limiting
    request_results = await asyncio.gather(*request_tasks)

    # Process responses as they complete
    response_tasks = [
        process_response(idx, response_future)
        for idx, response_future in request_results
    ]

    # Gather all results
    all_results = await asyncio.gather(*response_tasks)

    # Sort results by original index
    sorted_results = [[] for _ in texts]
    for idx, concepts in enumerate(all_results):
        sorted_results[idx] = concepts

    return sorted_results
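A usage sketch (not part of the library source): it assumes an initialized KnowledgeBase named kb, whose concept_extractor is created in the constructor shown further below, and LLM credentials configured via the usual environment variables.

import asyncio

async def demo(kb):
    texts = [
        "FAISS builds an index over dense vectors for similarity search.",
        "HDBSCAN clusters points by density and labels outliers as noise.",
    ]
    # One metadata dict per text; a shorter list is padded with empty dicts
    per_text = await kb.concept_extractor.extract_concepts(texts, [{}, {}])
    for text, concepts in zip(texts, per_text):
        print(text[:40], "->", [c.name for c in concepts])

# asyncio.run(demo(kb))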
process_chunks(chunks) async

Process all chunks in batch to extract and store concepts. Each chunk's metadata will be updated with the concept names and relationships.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 430-446
async def process_chunks(self, chunks: list[Chunk]) -> None:
    """
    Process all chunks in batch to extract and store concepts.
    Each chunk's metadata will be updated with the concept names and relationships.
    """
    # Gather all texts from the chunks.
    texts = [chunk.text for chunk in chunks]
    # Call extract_concepts once with all texts.
    all_concepts = await self.extract_concepts(texts, [chunk.metadata for chunk in chunks])

    # Update each chunk's metadata with its corresponding concepts.
    for chunk, concepts in zip(chunks, all_concepts, strict=False):
        chunk.metadata["concepts"] = [c.name for c in concepts]
        chunk.metadata["concept_relationships"] = {
            c.name: {k: list(v) for k, v in c.relationships.items()}
            for c in concepts
        }
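A sketch of tagging pre-built chunks; the Chunk constructor keywords mirror the call in KnowledgeBase._add_data further below, and the zero vector is only a placeholder embedding (process_chunks reads text and metadata, not embeddings).

import asyncio
import numpy as np

from toolboxv2.mods.isaa.base.KnowledgeBase import Chunk, KnowledgeBase

async def tag_chunks(kb: KnowledgeBase, raw_texts: list[str]):
    chunks = [
        Chunk(text=t,
              embedding=np.zeros(kb.embedding_dim, dtype=np.float32),
              metadata={},
              content_hash=KnowledgeBase.compute_hash(t))
        for t in raw_texts
    ]
    await kb.concept_extractor.process_chunks(chunks)
    # Each chunk now carries its concept names and relationship map
    return [(c.metadata["concepts"], c.metadata["concept_relationships"]) for c in chunks]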
query_concepts(query) async

Query the concept graph based on a natural language query

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 448-525
async def query_concepts(self, query: str) -> dict[str, any]:
    """Query the concept graph based on natural language query"""

    system_prompt = """
    Convert the natural language query about concepts into a structured format that specifies:
    1. Main concepts of interest
    2. Desired relationship types
    3. Any category filters
    4. Importance threshold

    Format as JSON.
    """

    prompt = f"""
    Query: {query}

    Convert to this JSON structure:
    {{
        "target_concepts": ["concept1", "concept2"],
        "relationship_types": ["type1", "type2"],
        "categories": ["category1", "category2"],
        "min_importance": 0.0
    }}
    """

    try:
        response = await litellm_complete(
            model_name=self.kb.model_name,
            prompt=prompt,
            system_prompt=system_prompt,
            response_format=TConcept
        )

        query_params = json.loads(response)

        results = {
            "concepts": {},
            "relationships": [],
            "groups": []
        }

        # Find matching concepts
        for concept_name in query_params["target_concepts"]:
            if concept_name in self.concept_graph.concepts:
                concept = self.concept_graph.concepts[concept_name]
                if concept.importance_score >= query_params["min_importance"]:
                    results["concepts"][concept_name] = {
                        "category": concept.category,
                        "importance": concept.importance_score,
                        "context": concept.context_snippets
                    }

                    # Get relationships
                    for rel_type in query_params["relationship_types"]:
                        related = self.concept_graph.get_related_concepts(
                            concept_name, rel_type
                        )
                        for related_concept in related:
                            results["relationships"].append({
                                "from": concept_name,
                                "to": related_concept,
                                "type": rel_type
                            })

        # Group concepts by category
        category_groups = defaultdict(list)
        for concept_name, concept_info in results["concepts"].items():
            category_groups[concept_info["category"]].append(concept_name)
        results["groups"] = [
            {"category": cat, "concepts": concepts}
            for cat, concepts in category_groups.items()
        ]

        return results

    except Exception as e:
        print(f"Error querying concepts: {str(e)}")
        return {"concepts": {}, "relationships": [], "groups": []}
ConceptGraph

Manages concept relationships and hierarchies

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 169-232
class ConceptGraph:
    """Manages concept relationships and hierarchies"""

    def __init__(self):
        self.concepts: dict[str, Concept] = {}

    def add_concept(self, concept: Concept):
        """Add or update a concept in the graph"""
        if concept.name.lower() in self.concepts:
            # Merge relationships and context
            existing = self.concepts[concept.name.lower()]
            for rel_type, related in concept.relationships.items():
                if rel_type not in existing.relationships:
                    existing.relationships[rel_type] = set()
                existing.relationships[rel_type].update(related)
            existing.context_snippets.extend(concept.context_snippets)
            # Update importance score with rolling average
            existing.importance_score = (existing.importance_score + concept.importance_score) / 2
        else:
            self.concepts[concept.name.lower()] = concept

    def get_related_concepts(self, concept_name: str, relationship_type: str | None = None) -> set[str]:
        """Get related concepts, optionally filtered by relationship type"""
        # Concepts are keyed by lowercase name (see add_concept), so normalize first
        if concept_name.lower() not in self.concepts:
            return set()

        concept = self.concepts[concept_name.lower()]
        if relationship_type:
            return concept.relationships.get(relationship_type, set())

        related = set()
        for relations in concept.relationships.values():
            related.update(relations)
        return related


    def convert_to_networkx(self) -> nx.DiGraph:
        """Convert ConceptGraph to NetworkX graph with layout"""
        print(f"Converting to NetworkX graph with {len(self.concepts.values())} concepts")

        G = nx.DiGraph()

        if len(self.concepts.values()) == 0:
            return G

        for concept in self.concepts.values():
            cks = '\n - '.join(concept.context_snippets[:4])
            G.add_node(
                concept.name,
                size=concept.importance_score * 10,
                group=concept.category,
                title=f"""
                    {concept.name}
                    Category: {concept.category}
                    Importance: {concept.importance_score:.2f}
                    Context: \n - {cks}
                    """
            )

            for rel_type, targets in concept.relationships.items():
                for target in targets:
                    G.add_edge(concept.name, target, label=rel_type, title=rel_type)

        return G
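A small sketch of the merge behavior: adding a concept whose lowercased name already exists unions the relationship sets, extends the context snippets, and averages the importance scores. The Concept constructor keywords below mirror the fields the class accesses; the exact signature is an assumption.

from toolboxv2.mods.isaa.base.KnowledgeBase import Concept, ConceptGraph

graph = ConceptGraph()
graph.add_concept(Concept(name="FAISS", category="technical",
                          relationships={"uses": {"vectors"}},
                          importance_score=0.8, context_snippets=["snippet a"]))
graph.add_concept(Concept(name="faiss", category="technical",
                          relationships={"uses": {"indexing"}},
                          importance_score=0.4, context_snippets=["snippet b"]))

print(graph.get_related_concepts("FAISS"))       # {'vectors', 'indexing'}
print(graph.concepts["faiss"].importance_score)  # 0.6 (rolling average)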
add_concept(concept)

Add or update a concept in the graph

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 175-188
def add_concept(self, concept: Concept):
    """Add or update a concept in the graph"""
    if concept.name.lower() in self.concepts:
        # Merge relationships and context
        existing = self.concepts[concept.name.lower()]
        for rel_type, related in concept.relationships.items():
            if rel_type not in existing.relationships:
                existing.relationships[rel_type] = set()
            existing.relationships[rel_type].update(related)
        existing.context_snippets.extend(concept.context_snippets)
        # Update importance score with rolling average
        existing.importance_score = (existing.importance_score + concept.importance_score) / 2
    else:
        self.concepts[concept.name.lower()] = concept
convert_to_networkx()

Convert ConceptGraph to NetworkX graph with layout

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 205-232
def convert_to_networkx(self) -> nx.DiGraph:
    """Convert ConceptGraph to NetworkX graph with layout"""
    print(f"Converting to NetworkX graph with {len(self.concepts.values())} concepts")

    G = nx.DiGraph()

    if len(self.concepts.values()) == 0:
        return G

    for concept in self.concepts.values():
        cks = '\n - '.join(concept.context_snippets[:4])
        G.add_node(
            concept.name,
            size=concept.importance_score * 10,
            group=concept.category,
            title=f"""
                {concept.name}
                Category: {concept.category}
                Importance: {concept.importance_score:.2f}
                Context: \n - {cks}
                """
        )

        for rel_type, targets in concept.relationships.items():
            for target in targets:
                G.add_edge(concept.name, target, label=rel_type, title=rel_type)

    return G
get_related_concepts(concept_name, relationship_type=None)

Get related concepts, optionally filtered by relationship type

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 190-202
def get_related_concepts(self, concept_name: str, relationship_type: str | None = None) -> set[str]:
    """Get related concepts, optionally filtered by relationship type"""
    # Concepts are keyed by lowercase name (see add_concept), so normalize first
    if concept_name.lower() not in self.concepts:
        return set()

    concept = self.concepts[concept_name.lower()]
    if relationship_type:
        return concept.relationships.get(relationship_type, set())

    related = set()
    for relations in concept.relationships.values():
        related.update(relations)
    return related
Concepts

Bases: BaseModel

Represents a collection of key concepts.

Attributes:

Name      Type            Description
concepts  List[rConcept]  A list of Concept instances, each representing an individual key concept.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 103-110
class Concepts(BaseModel):
    """
    Represents a collection of key concepts.

    Attributes:
        concepts (List[rConcept]): A list of Concept instances, each representing an individual key concept.
    """
    concepts: list[rConcept]
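Concepts is the response_format passed to litellm_complete in ConceptExtractor.extract_concepts above, so the model output is parsed against this schema.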
DataModel

Bases: BaseModel

The main data model that encapsulates the overall analysis.

Attributes:

Name                  Type                 Description
main_summary          str                  A detailed overview summarizing the key findings and relations, formatted as a Markdown string.
concept_analysis      ConceptAnalysis      An instance containing the analysis of key concepts.
topic_insights        TopicInsights        An instance containing insights regarding the topics.
relevance_assessment  RelevanceAssessment  An instance assessing the relevance and alignment of the query.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 154-167
class DataModel(BaseModel):
    """
    The main data model that encapsulates the overall analysis.

    Attributes:
        main_summary (str): A detailed overview summarizing the key findings and relations, formatted as a Markdown string.
        concept_analysis (ConceptAnalysis): An instance containing the analysis of key concepts.
        topic_insights (TopicInsights): An instance containing insights regarding the topics.
        relevance_assessment (RelevanceAssessment): An instance assessing the relevance and alignment of the query.
    """
    main_summary: str
    concept_analysis: ConceptAnalysis
    topic_insights: TopicInsights
    relevance_assessment: RelevanceAssessment
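DataModel is the response_format for the summary call in KnowledgeBase.unified_retrieve further below, constraining the generated analysis to this schema.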
DynamicRateLimiter
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 258-275
class DynamicRateLimiter:
    def __init__(self):
        self.last_request_time = 0.0
        self.min_interval = 0.0  # no throttling until update_rate() is called
        self._lock = asyncio.Lock()

    def update_rate(self, requests_per_second: float):
        """Update rate limit dynamically"""
        self.min_interval = 1.0 / requests_per_second if requests_per_second > 0 else float('inf')

    async def acquire(self):
        """Acquire permission to make a request"""
        async with self._lock:
            current_time = time.time()
            time_since_last = current_time - self.last_request_time
            if time_since_last < self.min_interval:
                wait_time = self.min_interval - time_since_last
                await asyncio.sleep(wait_time)
            self.last_request_time = time.time()
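A minimal, self-contained sketch of the intended usage. Note that update_rate should be called before the first acquire so that min_interval is meaningful.

import asyncio
import time

from toolboxv2.mods.isaa.base.KnowledgeBase import DynamicRateLimiter

async def fire_requests():
    limiter = DynamicRateLimiter()
    limiter.update_rate(2.0)  # allow roughly two acquisitions per second
    start = time.time()
    for i in range(5):
        await limiter.acquire()
        print(f"request {i} at t={time.time() - start:.2f}s")

asyncio.run(fire_requests())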
acquire() async

Acquire permission to make a request

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 267-275
async def acquire(self):
    """Acquire permission to make a request"""
    async with self._lock:
        current_time = time.time()
        time_since_last = current_time - self.last_request_time
        if time_since_last < self.min_interval:
            wait_time = self.min_interval - time_since_last
            await asyncio.sleep(wait_time)
        self.last_request_time = time.time()
update_rate(requests_per_second)

Update rate limit dynamically

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 263-265
def update_rate(self, requests_per_second: float):
    """Update rate limit dynamically"""
    self.min_interval = 1.0 / requests_per_second if requests_per_second > 0 else float('inf')
GraphVisualizer
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 234-255
class GraphVisualizer:
    @staticmethod
    def visualize(nx_graph: nx.DiGraph, output_file: str = "concept_graph.html", get_output=False):
        """Create interactive visualization using PyVis"""
        from pyvis.network import Network
        net = Network(
            height="800px",
            width="100%",
            notebook=False,
            directed=True,
            bgcolor="#1a1a1a",
            font_color="white"
        )

        net.from_nx(nx_graph)

        net.save_graph(output_file)
        print(f"Graph saved to {output_file} Open in browser to view.", len(nx_graph))
        if get_output:
            with open(output_file, encoding="utf-8") as f:
                c = f.read()
            os.remove(output_file)
            return c
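A usage sketch, assuming an initialized KnowledgeBase named kb with data already added (pyvis must be installed):

from toolboxv2.mods.isaa.base.KnowledgeBase import GraphVisualizer

nx_graph = kb.concept_extractor.concept_graph.convert_to_networkx()

# Write a standalone HTML file next to the script ...
GraphVisualizer.visualize(nx_graph, output_file="concept_graph.html")

# ... or capture the HTML as a string; this variant deletes the temporary file
html = GraphVisualizer.visualize(nx_graph, output_file="tmp_graph.html", get_output=True)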
visualize(nx_graph, output_file='concept_graph.html', get_output=False) staticmethod

Create interactive visualization using PyVis

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 235-255
@staticmethod
def visualize(nx_graph: nx.DiGraph, output_file: str = "concept_graph.html", get_output=False):
    """Create interactive visualization using PyVis"""
    from pyvis.network import Network
    net = Network(
        height="800px",
        width="100%",
        notebook=False,
        directed=True,
        bgcolor="#1a1a1a",
        font_color="white"
    )

    net.from_nx(nx_graph)

    net.save_graph(output_file)
    print(f"Graph saved to {output_file} Open in browser to view.", len(nx_graph))
    if get_output:
        with open(output_file, encoding="utf-8") as f:
            c = f.read()
        os.remove(output_file)
        return c
KnowledgeBase
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py, lines 605-1766
class KnowledgeBase:
    def __init__(self, embedding_dim: int = 768, similarity_threshold: float = 0.61, batch_size: int = 64,
                 n_clusters: int = 4, deduplication_threshold: float = 0.85, model_name=os.getenv("DEFAULTMODELSUMMERY"),
                 embedding_model=os.getenv("DEFAULTMODELEMBEDDING"),
                 vis_class:str | None = "FaissVectorStore",
                 vis_kwargs:dict[str, Any] | None=None,
                 requests_per_second=85.,
                 chunk_size: int = 3600,
                 chunk_overlap: int = 130,
                 separator: str = "\n"
                 ):
        """Initialize the knowledge base with given parameters"""

        self.existing_hashes: set[str] = set()
        self.embedding_model = embedding_model
        self.embedding_dim = embedding_dim
        self.similarity_threshold = similarity_threshold
        self.deduplication_threshold = deduplication_threshold
        if model_name == "openrouter/mistralai/mistral-nemo":
            batch_size = 9
            requests_per_second = 1.5
        self.batch_size = batch_size
        self.n_clusters = n_clusters
        self.model_name = model_name
        self.sto: list = []

        self.text_splitter = TextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap, separator=separator)
        self.similarity_graph = {}
        self.concept_extractor = ConceptExtractor(self, requests_per_second)

        self.vis_class = None
        self.vis_kwargs = None
        self.vdb = None
        self.init_vis(vis_class, vis_kwargs)

    def init_vis(self, vis_class, vis_kwargs):
        if vis_class is None:
            vis_class = "FaissVectorStore"
        if vis_class == "FaissVectorStore":
            if vis_kwargs is None:
                vis_kwargs = {
                    "dimension": self.embedding_dim
                }
            self.vdb = FaissVectorStore(**vis_kwargs)
        else:
            from toolboxv2.mods.isaa.base.VectorStores.taichiNumpyNumbaVectorStores import (
                EnhancedVectorStore,
                FastVectorStore1,
                FastVectorStoreO,
                NumpyVectorStore,
                VectorStoreConfig,
            )
        if vis_class == "FastVectorStoreO":
            if vis_kwargs is None:
                vis_kwargs = {
                    "embedding_size": self.embedding_dim
                }
            self.vdb = FastVectorStoreO(**vis_kwargs)
        if vis_class == "EnhancedVectorStore":
            if vis_kwargs is None:
                vis_kwargs = {
                    "dimension": self.embedding_dim
                }
            vis_kwargs = VectorStoreConfig(**vis_kwargs)
            self.vdb = EnhancedVectorStore(vis_kwargs)
        if vis_class == "FastVectorStore1":
            self.vdb = FastVectorStore1()
        if vis_class == "NumpyVectorStore":
            self.vdb = NumpyVectorStore()

        self.vis_class = vis_class
        self.vis_kwargs = vis_kwargs


    @staticmethod
    def compute_hash(text: str) -> str:
        """Compute SHA-256 hash of text"""
        return hashlib.sha256(text.encode('utf-8', errors='ignore')).hexdigest()

    async def _get_embeddings(self, texts: list[str]) -> np.ndarray:
        """Get normalized embeddings in batches"""
        try:
            async def process_batch(batch: list[str]) -> np.ndarray:
                from toolboxv2.mods.isaa.extras.adapter import litellm_embed
                # print("Processing", batch)
                embeddings = await litellm_embed(texts=batch, model=self.embedding_model)
                return normalize_vectors(embeddings)

            tasks = []
            for i in range(0, len(texts), self.batch_size):
                batch = texts[i:i + self.batch_size]
                tasks.append(process_batch(batch))

            embeddings = await asyncio.gather(*tasks)
            i__[0] += len(texts)
            return np.vstack(embeddings)
        except Exception as e:
            get_logger().error(f"Error generating embeddings: {str(e)}")
            raise



    def _remove_similar_chunks(self, threshold: float = None) -> int:
        """Remove chunks that are too similar to each other"""
        if len(self.vdb.chunks) < 2:
            return 0

        if threshold is None:
            threshold = self.deduplication_threshold

        try:
            # Get all embeddings
            embeddings = np.vstack([c.embedding for c in self.vdb.chunks])
            n = len(embeddings)

            # Compute similarity matrix
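            # (embeddings are L2-normalized in _get_embeddings, so dot products are cosine similarities)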
            similarities = np.dot(embeddings, embeddings.T)

            # Create mask for chunks to keep
            keep_mask = np.ones(n, dtype=bool)

            # Iterate through chunks
            for i in range(n):
                if not keep_mask[i]:
                    continue

                # Find chunks that are too similar to current chunk
                similar_indices = similarities[i] >= threshold
                similar_indices[i] = False  # Don't count self-similarity

                # Mark similar chunks for removal
                keep_mask[similar_indices] = False

            # Keep only unique chunks
            unique_chunks = [chunk for chunk, keep in zip(self.vdb.chunks, keep_mask, strict=False) if keep]
            removed_count = len(self.vdb.chunks) - len(unique_chunks)

            # Update chunks and hashes
            self.vdb.chunks = unique_chunks
            self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}

            # Rebuild index if chunks were removed
            if removed_count > 0:
                self.vdb.rebuild_index()


            return removed_count

        except Exception as e:
            get_logger().error(f"Error removing similar chunks: {str(e)}")
            raise

    async def _add_data(
        self,
        texts: list[str],
        metadata: list[dict[str, Any]] | None= None,
    ) -> tuple[int, int]:
        """
        Process and add new data to the knowledge base
        Returns: Tuple of (added_count, duplicate_count)
        """
        if len(texts) == 0:
            return -1, -1
        try:
            # Compute hashes and filter exact duplicates
            hashes = [self.compute_hash(text) for text in texts]
            unique_data = []
            for t, m, h in zip(texts, metadata, hashes, strict=False):
                if h in self.existing_hashes:
                    continue
                # Update existing hashes
                self.existing_hashes.add(h)
                unique_data.append((t, m, h))

            if not unique_data:
                return 0, len(texts)

            # Get embeddings for the unique texts only, so indices align with unique_data
            embeddings = await self._get_embeddings([t for t, _, _ in unique_data])

            texts = []
            metadata = []
            hashes = []
            embeddings_final = []
            if len(self.vdb.chunks):
                for i, d in enumerate(unique_data):
                    c = self.vdb.search(embeddings[i], 5, self.deduplication_threshold)
                    if len(c) > 2:
                        continue
                    t, m, h = d
                    texts.append(t)
                    metadata.append(m)
                    hashes.append(h)
                    embeddings_final.append(embeddings[i])

            else:
                texts , metadata, hashes = zip(*unique_data, strict=False)
                embeddings_final = embeddings

            if not texts:  # All were similar to existing chunks
                return 0, len(unique_data)

            # Create and add new chunks
            new_chunks = [
                Chunk(text=t, embedding=e, metadata=m, content_hash=h)
                for t, e, m, h in zip(texts, embeddings_final, metadata, hashes, strict=False)
            ]

            # Add new chunks
            # Update index
            if new_chunks:
                all_embeddings = np.vstack([c.embedding for c in new_chunks])
                self.vdb.add_embeddings(all_embeddings, new_chunks)

            # Remove similar chunks from the entire collection
            removed = self._remove_similar_chunks()
            get_logger().info(f"Removed {removed} similar chunks during deduplication")
            # Invalidate visualization cache

            if len(new_chunks) - removed > 0:
                # Process new chunks for concepts
                await self.concept_extractor.process_chunks(new_chunks)
            print("[total, calls, errors]", i__)

            return len(new_chunks) - removed, len(texts) - len(new_chunks) + removed

        except Exception as e:
            get_logger().error(f"Error adding data: {str(e)}")
            raise


    async def add_data(
        self,
        texts: list[str],
        metadata: list[dict[str, Any]] | None = None,
    ) -> tuple[int, int]:
        """Enhanced version with smart splitting and clustering"""
        if isinstance(texts, str):
            texts = [texts]
        if metadata is None:
            metadata = [{}] * len(texts)
        if isinstance(metadata, dict):
            metadata = [metadata]
        if len(texts) != len(metadata):
            raise ValueError("Length of texts and metadata must match")
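        # Small single texts are buffered in self.sto and flushed as one batch
        # once batch_size entries have accumulated (handled by the checks below).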
        if len(texts) == 1 and len(texts[0]) < 10_000:
            if len(self.sto) < self.batch_size and len(texts) == 1:
                self.sto.append((texts[0], metadata[0]))
                return -1, -1
            if len(self.sto) >= self.batch_size:
            for t, m in self.sto:
                texts.append(t)
                metadata.append([m])
            self.sto = []

        # Split large texts
        split_texts = []
        split_metadata = []

        with Spinner("Saving Data to Memory", symbols='t'):

            for idx, text in enumerate(texts):
                chunks = self.text_splitter.split_text(text)
                split_texts.extend(chunks)

                # Adjust metadata for splits
                meta = metadata[idx] if metadata else {}
                if isinstance(meta, list):
                    meta = meta[0]
                for i, _chunk in enumerate(chunks):
                    chunk_meta = meta.copy()
                    chunk_meta.update({
                        'chunk_index': i,
                        'total_chunks': len(chunks),
                        'original_text_id': idx
                    })
                    split_metadata.append(chunk_meta)

            return await self._add_data(split_texts, split_metadata)

    def _update_similarity_graph(self, embeddings: np.ndarray, chunk_ids: list[int]):
        """Update similarity graph for connected information detection"""
        similarities = np.dot(embeddings, embeddings.T)

        for i in range(len(chunk_ids)):
            for j in range(i + 1, len(chunk_ids)):
                if similarities[i, j] >= self.similarity_threshold:
                    id1, id2 = chunk_ids[i], chunk_ids[j]
                    if id1 not in self.similarity_graph:
                        self.similarity_graph[id1] = set()
                    if id2 not in self.similarity_graph:
                        self.similarity_graph[id2] = set()
                    self.similarity_graph[id1].add(id2)
                    self.similarity_graph[id2].add(id1)

    async def retrieve(
        self,
        query: str="",
        query_embedding: np.ndarray | None = None,
        k: int = 5,
        min_similarity: float = 0.2,
        include_connected: bool = True
    ) -> list[Chunk]:
        """Enhanced retrieval with connected information"""
        if query_embedding is None:
            query_embedding = (await self._get_embeddings([query]))[0]
        k = min(k, len(self.vdb.chunks)-1)
        if k <= 0:
            return []
        initial_results = self.vdb.search(query_embedding, k, min_similarity)

        if not include_connected or not initial_results:
            return initial_results

        # Find connected chunks
        connected_chunks = set()
        for chunk in initial_results:
            chunk_id = self.vdb.chunks.index(chunk)
            if chunk_id in self.similarity_graph:
                connected_chunks.update(self.similarity_graph[chunk_id])

        # Add connected chunks to results
        all_chunks = self.vdb.chunks
        additional_results = [all_chunks[i] for i in connected_chunks
                              if all_chunks[i] not in initial_results]

        # Sort by similarity to query
        all_results = initial_results + additional_results

        return sorted(
            all_results,
            key=lambda x: np.dot(x.embedding, query_embedding),
            reverse=True
        )[:k * 2]  # Return more results when including connected information

    async def forget_irrelevant(self, irrelevant_concepts: list[str], similarity_threshold: float | None=None) -> int:
        """
        Remove chunks similar to irrelevant concepts
        Returns: Number of chunks removed
        """
        if not irrelevant_concepts:
            return 0

        if similarity_threshold is None:
            similarity_threshold = self.similarity_threshold

        try:
            irrelevant_embeddings = await self._get_embeddings(irrelevant_concepts)
            initial_count = len(self.vdb.chunks)

            def is_relevant(chunk: Chunk) -> bool:
                similarities = np.dot(chunk.embedding, irrelevant_embeddings.T)
                do_keep = np.max(similarities) < similarity_threshold
                if do_keep:
                    return True
                for c in chunk.metadata.get("concepts", []):
                    if c in self.concept_extractor.concept_graph.concepts:
                        del self.concept_extractor.concept_graph.concepts[c]
                return False

            relevant_chunks = [chunk for chunk in self.vdb.chunks if is_relevant(chunk)]
            self.vdb.chunks = relevant_chunks
            self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}
            self.vdb.rebuild_index()


            return initial_count - len(self.vdb.chunks)

        except Exception as e:
            get_logger().error(f"Error forgetting irrelevant concepts: {str(e)}")
            raise

    ## ----------------------------------------------------------------

    def _cluster_chunks(
        self,
        chunks: list[Chunk],
        query_embedding: np.ndarray | None = None,
        min_cluster_size: int = 2,
        min_samples: int = 1,
        max_clusters: int = 10
    ) -> dict[int, list[Chunk]]:
        """
        Enhanced clustering of chunks into topics with query awareness
        and dynamic parameter adjustment
        """
        if len(chunks) < 2:
            return {0: chunks}

        embeddings = np.vstack([chunk.embedding for chunk in chunks])

        # Normalize embeddings for cosine similarity
        embeddings = normalize_vectors(embeddings)

        # If query is provided, weight embeddings by query relevance
        if query_embedding is not None:
            query_similarities = np.dot(embeddings, query_embedding)
            # Apply soft weighting to maintain structure while considering query relevance
            embeddings = embeddings * query_similarities[:, np.newaxis]
            embeddings = normalize_vectors(embeddings)

        # Dynamic parameter adjustment based on dataset size
        adjusted_min_cluster_size = max(
            min_cluster_size,
            min(len(chunks) // 10, 5)  # Scale with data size, max 5
        )

        adjusted_min_samples = max(
            min_samples,
            adjusted_min_cluster_size // 2
        )

        # Try different parameter combinations for optimal clustering
        best_clusters = None
        best_score = float('-inf')

        epsilon_range = [0.2, 0.3, 0.4]

        for epsilon in epsilon_range:
            clusterer = HDBSCAN(
                min_cluster_size=adjusted_min_cluster_size,
                min_samples=adjusted_min_samples,
                metric='cosine',
                cluster_selection_epsilon=epsilon
            )

            cluster_labels = clusterer.fit_predict(embeddings)

            # Skip if all points are noise
            if len(set(cluster_labels)) <= 1:
                continue

            # Calculate clustering quality metrics
            score = self._evaluate_clustering(
                embeddings,
                cluster_labels,
                query_embedding
            )

            if score > best_score:
                best_score = score
                best_clusters = cluster_labels

        # If no good clustering found, fall back to simpler approach
        if best_clusters is None:
            return self._fallback_clustering(chunks, query_embedding)

        # Organize chunks by cluster
        clusters: dict[int, list[Chunk]] = {}

        # Sort clusters by size and relevance
        cluster_scores = []

        for label in set(best_clusters):
            if label == -1:  # Handle noise points separately
                continue

            # Fixed: Use boolean mask to select chunks for current cluster
            cluster_mask = best_clusters == label
            cluster_chunks = [chunk for chunk, is_in_cluster in zip(chunks, cluster_mask, strict=False) if is_in_cluster]

            # Skip empty clusters
            if not cluster_chunks:
                continue

            # Calculate cluster score based on size and query relevance
            score = len(cluster_chunks)
            if query_embedding is not None:
                cluster_embeddings = np.vstack([c.embedding for c in cluster_chunks])
                query_relevance = np.mean(np.dot(cluster_embeddings, query_embedding))
                score = score * (1 + query_relevance)  # Boost by relevance

            cluster_scores.append((label, score, cluster_chunks))

        # Sort clusters by score and limit to max_clusters
        cluster_scores.sort(key=lambda x: x[1], reverse=True)

        # Assign cleaned clusters
        for i, (_, _, cluster_chunks) in enumerate(cluster_scores[:max_clusters]):
            clusters[i] = cluster_chunks

        # Handle noise points by assigning to nearest cluster
        noise_chunks = [chunk for chunk, label in zip(chunks, best_clusters, strict=False) if label == -1]
        if noise_chunks:
            self._assign_noise_points(noise_chunks, clusters, query_embedding)

        return clusters

    @staticmethod
    def _evaluate_clustering(
        embeddings: np.ndarray,
        labels: np.ndarray,
        query_embedding: np.ndarray | None = None
    ) -> float:
        """
        Evaluate clustering quality using multiple metrics
        """
        if len(set(labels)) <= 1:
            return float('-inf')

        # Calculate silhouette score for cluster cohesion
        from sklearn.metrics import silhouette_score
        try:
            sil_score = silhouette_score(embeddings, labels, metric='cosine')
        except Exception:
            sil_score = -1

        # Calculate Davies-Bouldin score for cluster separation
        from sklearn.metrics import davies_bouldin_score
        try:
            db_score = -davies_bouldin_score(embeddings, labels)  # Negated as lower is better
        except Exception:
            db_score = -1

        # Calculate query relevance if provided
        query_score = 0
        if query_embedding is not None:
            unique_labels = set(labels) - {-1}
            if unique_labels:
                query_sims = []
                for label in unique_labels:
                    cluster_mask = labels == label
                    cluster_embeddings = embeddings[cluster_mask]
                    cluster_centroid = np.mean(cluster_embeddings, axis=0)
                    query_sims.append(np.dot(cluster_centroid, query_embedding))
                query_score = np.mean(query_sims)

        # Combine scores with weights
        combined_score = (
            0.4 * sil_score +
            0.3 * db_score +
            0.3 * query_score
        )

        return combined_score

    @staticmethod
    def _fallback_clustering(
        chunks: list[Chunk],
        query_embedding: np.ndarray | None = None
    ) -> dict[int, list[Chunk]]:
        """
        Simple fallback clustering when HDBSCAN fails
        """
        if query_embedding is not None:
            # Sort by query relevance
            chunks_with_scores = [
                (chunk, np.dot(chunk.embedding, query_embedding))
                for chunk in chunks
            ]
            chunks_with_scores.sort(key=lambda x: x[1], reverse=True)
            chunks = [c for c, _ in chunks_with_scores]

        # Create fixed-size clusters
        clusters = {}
        cluster_size = max(2, len(chunks) // 5)

        for i in range(0, len(chunks), cluster_size):
            clusters[len(clusters)] = chunks[i:i + cluster_size]

        return clusters

    @staticmethod
    def _assign_noise_points(
        noise_chunks: list[Chunk],
        clusters: dict[int, list[Chunk]],
        query_embedding: np.ndarray | None = None
    ) -> None:
        """
        Assign noise points to nearest clusters
        """
        if not clusters:
            clusters[0] = noise_chunks
            return

        for chunk in noise_chunks:
            best_cluster = None
            best_similarity = float('-inf')

            for cluster_id, cluster_chunks in clusters.items():
                cluster_embeddings = np.vstack([c.embedding for c in cluster_chunks])
                cluster_centroid = np.mean(cluster_embeddings, axis=0)

                similarity = np.dot(chunk.embedding, cluster_centroid)

                # Consider query relevance in assignment if available
                if query_embedding is not None:
                    query_sim = np.dot(chunk.embedding, query_embedding)
                    similarity = 0.7 * similarity + 0.3 * query_sim

                if similarity > best_similarity:
                    best_similarity = similarity
                    best_cluster = cluster_id

            if best_cluster is not None:
                clusters[best_cluster].append(chunk)

    @staticmethod
    def _generate_topic_summary(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
        max_sentences=3
    ) -> str:
        """Generate a summary for a topic using most representative chunks"""
        if not chunks:
            return ""

        # Find chunks most similar to cluster centroid
        embeddings = np.vstack([chunk.embedding for chunk in chunks])
        centroid = embeddings.mean(axis=0)

        # Calculate similarities to both centroid and query
        centroid_sims = np.dot(embeddings, centroid)
        query_sims = np.dot(embeddings, query_embedding)

        # Combine both similarities
        combined_sims = 0.7 * centroid_sims + 0.3 * query_sims

        # Select top sentences from most representative chunks
        top_indices = np.argsort(combined_sims)[-max_sentences:]
        summary_chunks = [chunks[i] for i in top_indices]

        # Extract key sentences
        sentences = []
        for chunk in summary_chunks:
            sentences.extend(sent.strip() for sent in chunk.text.split('.') if sent.strip())

        return '. '.join(sentences[:max_sentences]) + '.'

    async def retrieve_with_overview(
        self,
        query: str,
        query_embedding=None,
        k: int = 5,
        min_similarity: float = 0.2,
        max_sentences: int = 5,
        cross_ref_depth: int = 2,
        max_cross_refs: int = 10  # New parameter to control cross-reference count
    ) -> RetrievalResult:
        """Enhanced retrieval with better cross-reference handling"""
        # Get initial results with query embedding
        if query_embedding is None:
            query_embedding = (await self._get_embeddings([query]))[0]
        initial_results = await self.retrieve(query_embedding=query_embedding, k=k, min_similarity=min_similarity)

        if not initial_results:
            return RetrievalResult([], [], {})

        # Find cross-references with similarity scoring
        initial_ids = {self.vdb.chunks.index(chunk) for chunk in initial_results}
        related_ids = self._find_cross_references(
            initial_ids,
            depth=cross_ref_depth,
            query_embedding=query_embedding  # Pass query embedding for relevance scoring
        )

        # Get all relevant chunks with smarter filtering
        all_chunks = self.vdb.chunks
        all_relevant_chunks = initial_results + [
            chunk for i, chunk in enumerate(all_chunks)
            if i in related_ids and self._is_relevant_cross_ref(
                chunk,
                query_embedding,
                initial_results
            )
        ]

        # Enhanced clustering with dynamic cluster size
        clusters = self._cluster_chunks(
            all_relevant_chunks,
            query_embedding=query_embedding
        )

        # Fallback: If no clusters are found, treat all relevant chunks as a single cluster.
        if not clusters:
            print("No clusters found. Falling back to using all relevant chunks as a single cluster.")
            clusters = {0: all_relevant_chunks}

        # Generate summaries and organize results
        overview = []
        cross_references = {}

        for cluster_id, cluster_chunks in clusters.items():
            summary = self._generate_topic_summary(
                cluster_chunks,
                query_embedding,
                max_sentences=max_sentences  # Increased for more context
            )

            # Enhanced chunk sorting with combined scoring
            sorted_chunks = self._sort_chunks_by_relevance(
                cluster_chunks,
                query_embedding,
                initial_results
            )

            # Separate direct matches and cross-references
            direct_matches_ = [{'text':c.text, 'metadata':c.metadata} for c in sorted_chunks if c in initial_results]
            direct_matches = []
            for match in direct_matches_:
                if match in direct_matches:
                    continue
                direct_matches.append(match)
            cross_refs_ = [c for c in sorted_chunks if c not in initial_results]
            cross_refs = []
            for match in cross_refs_:
                if match in cross_refs:
                    continue
                cross_refs.append(match)
            # Limit cross-references while maintaining diversity
            selected_cross_refs = self._select_diverse_cross_refs(
                cross_refs,
                max_cross_refs,
                query_embedding
            )

            topic_info = {
                'topic_id': cluster_id,
                'summary': summary,
                'main_chunks': [x for x in direct_matches[:3]],
                'chunk_count': len(cluster_chunks),
                'relevance_score': self._calculate_topic_relevance(
                    cluster_chunks,
                    query_embedding
                )
            }
            overview.append(topic_info)

            if selected_cross_refs:
                cross_references[f"topic_{cluster_id}"] = selected_cross_refs

        # Sort overview by relevance score
        overview.sort(key=lambda x: x['relevance_score'], reverse=True)

        return RetrievalResult(
            overview=overview,
            details=initial_results,
            cross_references=cross_references
        )

    def _find_cross_references(
        self,
        chunk_ids: set[int],
        depth: int,
        query_embedding: np.ndarray
    ) -> set[int]:
        """Enhanced cross-reference finding with relevance scoring"""
        related_ids = set(chunk_ids)
        current_depth = 0
        frontier = set(chunk_ids)

        while current_depth < depth and frontier:
            new_frontier = set()
            for chunk_id in frontier:
                if chunk_id in self.similarity_graph:
                    # Score potential cross-references by relevance
                    candidates = self.similarity_graph[chunk_id] - related_ids
                    scored_candidates = [
                        (cid, self._calculate_topic_relevance(
                            [self.vdb.chunks[cid]],
                            query_embedding
                        ))
                        for cid in candidates
                    ]

                    # Filter by relevance threshold
                    relevant_candidates = {
                        cid for cid, score in scored_candidates
                        if score > 0.5  # Adjustable threshold
                    }
                    new_frontier.update(relevant_candidates)

            related_ids.update(new_frontier)
            frontier = new_frontier
            current_depth += 1

        return related_ids

    @staticmethod
    def _is_relevant_cross_ref(
        chunk: Chunk,
        query_embedding: np.ndarray,
        initial_results: list[Chunk]
    ) -> bool:
        """Determine if a cross-reference is relevant enough to include"""
        # Calculate similarity to query
        query_similarity = np.dot(chunk.embedding, query_embedding)

        # Calculate similarity to initial results
        initial_similarities = [
            np.dot(chunk.embedding, r.embedding) for r in initial_results
        ]
        max_initial_similarity = max(initial_similarities)

        # Combined relevance score
        relevance_score = 0.7 * query_similarity + 0.3 * max_initial_similarity

        return relevance_score > 0.6  # Adjustable threshold

    @staticmethod
    def _select_diverse_cross_refs(
        cross_refs: list[Chunk],
        max_count: int,
        query_embedding: np.ndarray
    ) -> list[Chunk]:
        """Select diverse and relevant cross-references"""
        if not cross_refs or len(cross_refs) <= max_count:
            return cross_refs

        # Calculate diversity scores
        embeddings = np.vstack([c.embedding for c in cross_refs])
        similarities = np.dot(embeddings, embeddings.T)

        selected = []
        remaining = list(enumerate(cross_refs))

        while len(selected) < max_count and remaining:
            # Score remaining chunks by relevance and diversity
            scores = []
            for idx, chunk in remaining:
                relevance = np.dot(chunk.embedding, query_embedding)
                diversity = 1.0
                if selected:
                    # Calculate diversity penalty based on similarity to selected chunks
                    selected_similarities = [
                        similarities[idx][list(cross_refs).index(s)]
                        for s in selected
                    ]
                    diversity = 1.0 - max(selected_similarities)

                combined_score = 0.7 * relevance + 0.3 * diversity
                scores.append((combined_score, idx, chunk))

            # Select the highest scoring chunk
            scores.sort(reverse=True)
            _, idx, chunk = scores[0]
            selected.append(chunk)
            remaining = [(i, c) for i, c in remaining if i != idx]

        return selected

    @staticmethod
    def _calculate_topic_relevance(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
    ) -> float:
        """Calculate overall topic relevance score"""
        if not chunks:
            return 0.0

        similarities = [
            np.dot(chunk.embedding, query_embedding) for chunk in chunks
        ]
        return np.mean(similarities)

    @staticmethod
    def _sort_chunks_by_relevance(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
        initial_results: list[Chunk]
    ) -> list[Chunk]:
        """Sort chunks by combined relevance score"""
        scored_chunks = []
        for chunk in chunks:
            query_similarity = np.dot(chunk.embedding, query_embedding)
            initial_similarities = [
                np.dot(chunk.embedding, r.embedding)
                for r in initial_results
            ]
            max_initial_similarity = max(initial_similarities) if initial_similarities else 0

            # Combined score favoring query relevance
            combined_score = 0.7 * query_similarity + 0.3 * max_initial_similarity
            scored_chunks.append((combined_score, chunk))

        scored_chunks.sort(key=lambda x: x[0], reverse=True)
        return [chunk for _, chunk in scored_chunks]

    async def query_concepts(self, query: str) -> dict[str, any]:
        """Query concepts extracted from the knowledge base"""
        return await self.concept_extractor.query_concepts(query)

    async def unified_retrieve(
        self,
        query: str,
        k: int = 5,
        min_similarity: float = 0.2,
        cross_ref_depth: int = 2,
        max_cross_refs: int = 10,
        max_sentences: int = 10
    ) -> dict[str, Any]:
        """
        Unified retrieval function that combines concept querying, retrieval with overview,
        and basic retrieval, then generates a comprehensive summary using LLM.

        Args:
            query: Search query string
            k: Number of primary results to retrieve
            min_similarity: Minimum similarity threshold for retrieval
            cross_ref_depth: Depth for cross-reference search
            max_cross_refs: Maximum number of cross-references per topic
            max_sentences: Maximum number of sentences in the main summary text

        Returns:
            Dictionary containing comprehensive results including summary and details
        """
        # Get concept information
        concept_results = await self.concept_extractor.query_concepts(query)

        # Get retrieval overview

        query_embedding = (await self._get_embeddings([query]))[0]
        overview_results = await self.retrieve_with_overview(
            query=query,
            query_embedding=query_embedding,
            k=k,
            min_similarity=min_similarity,
            cross_ref_depth=cross_ref_depth,
            max_cross_refs=max_cross_refs,
            max_sentences=max_sentences
        )

        # Get basic retrieval results
        basic_results = await self.retrieve(
            query_embedding=query_embedding,
            k=k,
            min_similarity=min_similarity
        )
        if len(basic_results) == 0:
            return {}
        if len(basic_results) == 1 and isinstance(basic_results[0], str) and basic_results[0].endswith('[]\n - []\n - []'):
            return {}

        # Prepare context for LLM summary
        context = {
            "concepts": {
                "main_concepts": concept_results.get("concepts", {}),
                "relationships": concept_results.get("relationships", []),
                "concept_groups": concept_results.get("groups", [])
            },
            "topics": [
                {
                    "id": topic["topic_id"],
                    "summary": topic["summary"],
                    "relevance": topic["relevance_score"],
                    "chunk_count": topic["chunk_count"]
                }
                for topic in overview_results.overview
            ],
            "key_chunks": [
                {
                    "text": chunk.text,
                    "metadata": chunk.metadata
                }
                for chunk in basic_results
            ]
        }

        # Generate comprehensive summary using LLM
        system_prompt = """
        Analyze the provided search results and generate a comprehensive summary
        that includes:
        1. Main concepts and their relationships
        2. Key topics and their relevance
        3. Most important findings and insights
        4. Cross-references and connections between topics
        5. Potential gaps or areas for further investigation

        Format the response as a JSON object with these sections.
        """

        prompt = f"""
        Query: {query}

        Context:
        {json.dumps(context, indent=2)}

        Generate a comprehensive analysis and summary following the structure:
        """

        try:
            await asyncio.sleep(0.25)
            llm_response = await litellm_complete(
                model_name=self.model_name,
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=DataModel,
            )
            summary_analysis = json.loads(llm_response)
        except Exception as e:
            get_logger().error(f"Error generating summary: {str(e)}")
            summary_analysis = {
                "main_summary": "Error generating summary",
                "error": str(e)
            }

        # Compile final results
        return {
            "summary": summary_analysis,
            "raw_results": {
                "concepts": concept_results,
                "overview": {
                    "topics": overview_results.overview,
                    "cross_references": overview_results.cross_references
                },
                "relevant_chunks": [
                    {
                        "text": chunk.text,
                        "metadata": chunk.metadata,
                        "cluster_id": chunk.cluster_id
                    }
                    for chunk in basic_results
                ]
            },
            "metadata": {
                "query": query,
                "timestamp": time.time(),
                "retrieval_params": {
                    "k": k,
                    "min_similarity": min_similarity,
                    "cross_ref_depth": cross_ref_depth,
                    "max_cross_refs": max_cross_refs
                }
            }
        }

    def save(self, path: str) -> bytes | None:
        """
        Save the complete knowledge base to disk, including all sub-components

        Args:
            path (str): Path where the knowledge base will be saved.
                If None, the serialized knowledge base is returned as bytes instead.
        """
        try:
            data = {
                # Core components
                'vdb': self.vdb.save(),
                'vis_kwargs': self.vis_kwargs,
                'vis_class': self.vis_class,
                'existing_hashes': self.existing_hashes,

                # Configuration parameters
                'embedding_dim': self.embedding_dim,
                'similarity_threshold': self.similarity_threshold,
                'batch_size': self.batch_size,
                'n_clusters': self.n_clusters,
                'deduplication_threshold': self.deduplication_threshold,
                'model_name': self.model_name,
                'embedding_model': self.embedding_model,

                # Cache and graph data
                'similarity_graph': self.similarity_graph,
                'sto': self.sto,

                # Text splitter configuration
                'text_splitter_config': {
                    'chunk_size': self.text_splitter.chunk_size,
                    'chunk_overlap': self.text_splitter.chunk_overlap,
                    'separator': self.text_splitter.separator
                },

                # Concept extractor data
                'concept_graph': {
                    'concepts': {
                        name: {
                            'name': concept.name,
                            'category': concept.category,
                            'relationships': {k: list(v) for k, v in concept.relationships.items()},
                            'importance_score': concept.importance_score,
                            'context_snippets': concept.context_snippets,
                            'metadata': concept.metadata
                        }
                        for name, concept in self.concept_extractor.concept_graph.concepts.items()
                    }
                }
            }
            if path is None:
                return pickle.dumps(data)
            # Save to disk using pickle
            with open(path, 'wb') as f:
                pickle.dump(data, f)
            print(f"Knowledge base successfully saved to {path} with {len(self.concept_extractor.concept_graph.concepts.items())} concepts")

        except Exception as e:
            print(f"Error saving knowledge base: {str(e)}")
            raise

    def init_vdb(self, db: AbstractVectorStore = AbstractVectorStore):
        pass

    @classmethod
    def load(cls, path: str | bytes) -> 'KnowledgeBase':
        """
        Load a complete knowledge base from disk, including all sub-components

        Args:
            path (str | bytes): Filesystem path, or raw bytes produced by save(None)

        Returns:
            KnowledgeBase: A fully restored knowledge base instance
        """
        try:
            if isinstance(path, str):
                # Load data from disk
                with open(path, 'rb') as f:
                    data = pickle.load(f)
            elif isinstance(path, bytes):
                data = pickle.loads(path)
            else:
                raise ValueError("Invalid path type")

            # Create new knowledge base instance with saved configuration
            kb = cls(
                embedding_dim=data['embedding_dim'],
                similarity_threshold=data['similarity_threshold'],
                batch_size=data['batch_size'],
                n_clusters=data['n_clusters'],
                deduplication_threshold=data['deduplication_threshold'],
                model_name=data['model_name'],
                embedding_model=data['embedding_model']
            )

            # Restore core components
            kb.init_vis(data.get('vis_class'), data.get('vis_kwargs'))
            kb.existing_hashes = data['existing_hashes']

            # Restore cache and graph data
            kb.similarity_graph = data.get('similarity_graph', {})
            kb.sto = data.get('sto', [])

            # Restore text splitter configuration
            splitter_config = data.get('text_splitter_config', {})
            kb.text_splitter = TextSplitter(
                chunk_size=splitter_config.get('chunk_size', 12_000),
                chunk_overlap=splitter_config.get('chunk_overlap', 200),
                separator=splitter_config.get('separator', '\n')
            )

            # Restore concept graph
            concept_data = data.get('concept_graph', {}).get('concepts', {})
            for concept_info in concept_data.values():
                concept = Concept(
                    name=concept_info['name'],
                    category=concept_info['category'],
                    relationships={k: set(v) for k, v in concept_info['relationships'].items()},
                    importance_score=concept_info['importance_score'],
                    context_snippets=concept_info['context_snippets'],
                    metadata=concept_info['metadata']
                )
                kb.concept_extractor.concept_graph.add_concept(concept)

            print(f"Knowledge base successfully loaded from {path} with {len(concept_data)} concepts")
            return kb

        except Exception as e:
            print(f"Error loading knowledge base: {str(e)}")
            raise

    def vis(self, output_file: str = "concept_graph.html", get_output_html=False, get_output_net=False):
        if not self.concept_extractor.concept_graph.concepts:
            print("NO Concepts defined")
            return None
        net = self.concept_extractor.concept_graph.convert_to_networkx()
        if get_output_net:
            return net
        return GraphVisualizer.visualize(net, output_file=output_file, get_output=get_output_html)
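
Example (illustrative sketch, not part of the source): a typical end-to-end round trip. The model environment variables and litellm credentials are assumed to be configured, and the text is made long enough to bypass the small-text batch buffer described under add_data.

import asyncio
from toolboxv2.mods.isaa.base.KnowledgeBase import KnowledgeBase

async def main():
    kb = KnowledgeBase(n_clusters=2)
    long_text = "Cats are small domesticated felines. " * 400  # >10k chars bypasses the batch buffer
    await kb.add_data([long_text], [{"source": "demo"}])
    results = await kb.unified_retrieve("What are cats?", k=3)
    print(results.get("summary"))

asyncio.run(main())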
__init__(embedding_dim=768, similarity_threshold=0.61, batch_size=64, n_clusters=4, deduplication_threshold=0.85, model_name=os.getenv('DEFAULTMODELSUMMERY'), embedding_model=os.getenv('DEFAULTMODELEMBEDDING'), vis_class='FaissVectorStore', vis_kwargs=None, requests_per_second=85.0, chunk_size=3600, chunk_overlap=130, separator='\n')

Initialize the knowledge base with given parameters

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
def __init__(self, embedding_dim: int = 768, similarity_threshold: float = 0.61, batch_size: int = 64,
             n_clusters: int = 4, deduplication_threshold: float = 0.85, model_name=os.getenv("DEFAULTMODELSUMMERY"),
             embedding_model=os.getenv("DEFAULTMODELEMBEDDING"),
             vis_class:str | None = "FaissVectorStore",
             vis_kwargs:dict[str, Any] | None=None,
             requests_per_second=85.,
             chunk_size: int = 3600,
             chunk_overlap: int = 130,
             separator: str = "\n"
             ):
    """Initialize the knowledge base with given parameters"""

    self.existing_hashes: set[str] = set()
    self.embedding_model = embedding_model
    self.embedding_dim = embedding_dim
    self.similarity_threshold = similarity_threshold
    self.deduplication_threshold = deduplication_threshold
    if model_name == "openrouter/mistralai/mistral-nemo":
        batch_size = 9
        requests_per_second = 1.5
    self.batch_size = batch_size
    self.n_clusters = n_clusters
    self.model_name = model_name
    self.sto: list = []

    self.text_splitter = TextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap, separator=separator)
    self.similarity_graph = {}
    self.concept_extractor = ConceptExtractor(self, requests_per_second)

    self.vis_class = None
    self.vis_kwargs = None
    self.vdb = None
    self.init_vis(vis_class, vis_kwargs)
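
Example (illustrative; the model identifiers below are placeholders, not recommendations). Note that passing model_name="openrouter/mistralai/mistral-nemo" silently overrides batch_size and requests_per_second, as the code above shows:

kb = KnowledgeBase(
    embedding_dim=768,
    similarity_threshold=0.61,
    model_name="openrouter/mistralai/mistral-nemo",  # triggers batch_size=9, requests_per_second=1.5
    embedding_model="gemini/text-embedding-004",     # hypothetical embedding model id
    vis_class="FaissVectorStore",
)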
add_data(texts, metadata=None) async

Enhanced version with smart splitting and clustering

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def add_data(
    self,
    texts: list[str],
    metadata: list[dict[str, Any]] | None = None,
) -> tuple[int, int]:
    """Enhanced version with smart splitting and clustering"""
    if isinstance(texts, str):
        texts = [texts]
    if metadata is None:
        metadata = [{}] * len(texts)
    if isinstance(metadata, dict):
        metadata = [metadata]
    if len(texts) != len(metadata):
        raise ValueError("Length of texts and metadata must match")
    if len(texts) == 1 and len(texts[0]) < 10_000:
        if len(self.sto) < self.batch_size and len(texts) == 1:
            self.sto.append((texts[0], metadata[0]))
            return -1, -1
        if len(self.sto) >= self.batch_size:
            _ = [texts.append(t) or metadata.append([m]) for (t, m) in self.sto]
            self.sto = []

    # Split large texts
    split_texts = []
    split_metadata = []

    with Spinner("Saving Data to Memory", symbols='t'):

        for idx, text in enumerate(texts):
            chunks = self.text_splitter.split_text(text)
            split_texts.extend(chunks)

            # Adjust metadata for splits
            meta = metadata[idx] if metadata else {}
            if isinstance(meta, list):
                meta = meta[0]
            for i, _chunk in enumerate(chunks):
                chunk_meta = meta.copy()
                chunk_meta.update({
                    'chunk_index': i,
                    'total_chunks': len(chunks),
                    'original_text_id': idx
                })
                split_metadata.append(chunk_meta)

        return await self._add_data(split_texts, split_metadata)
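
The buffering branch above means a short single text may not be indexed immediately. A sketch of the contract a caller observes (assumes an existing kb, inside an async context):

added = await kb.add_data(["short note"], [{"source": "inbox"}])
if added == (-1, -1):
    # The text was only buffered in kb.sto; it is flushed into the store
    # once batch_size buffered texts have accumulated.
    ...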
compute_hash(text) staticmethod

Compute SHA-256 hash of text

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
@staticmethod
def compute_hash(text: str) -> str:
    """Compute SHA-256 hash of text"""
    return hashlib.sha256(text.encode('utf-8', errors='ignore')).hexdigest()
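
For example:

h = KnowledgeBase.compute_hash("hello world")
print(h)  # 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
# Identical text always yields the identical digest, which is what
# existing_hashes relies on for deduplication.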
forget_irrelevant(irrelevant_concepts, similarity_threshold=None) async

Remove chunks similar to irrelevant concepts. Returns the number of chunks removed.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def forget_irrelevant(self, irrelevant_concepts: list[str], similarity_threshold: float | None=None) -> int:
    """
    Remove chunks similar to irrelevant concepts
    Returns: Number of chunks removed
    """
    if not irrelevant_concepts:
        return 0

    if similarity_threshold is None:
        similarity_threshold = self.similarity_threshold

    try:
        irrelevant_embeddings = await self._get_embeddings(irrelevant_concepts)
        initial_count = len(self.vdb.chunks)

        def is_relevant(chunk: Chunk) -> bool:
            similarities = np.dot(chunk.embedding, irrelevant_embeddings.T)
            do_keep = np.max(similarities) < similarity_threshold
            if do_keep:
                return True
            for c in chunk.metadata.get("concepts", []):
                if c in self.concept_extractor.concept_graph.concepts:
                    del self.concept_extractor.concept_graph.concepts[c]
            return False

        relevant_chunks = [chunk for chunk in self.vdb.chunks if is_relevant(chunk)]
        self.vdb.chunks = relevant_chunks
        self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}
        self.vdb.rebuild_index()


        return initial_count - len(self.vdb.chunks)

    except Exception as e:
        get_logger().error(f"Error forgetting irrelevant concepts: {str(e)}")
        raise
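
Example (illustrative sketch; assumes an existing kb, inside an async context):

removed = await kb.forget_irrelevant(["advertising spam", "lorem ipsum filler"],
                                     similarity_threshold=0.75)
print(f"Removed {removed} chunks")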
load(path) classmethod

Load a complete knowledge base from disk, including all sub-components

Parameters:

Name Type Description Default
path str | bytes

Path from where to load the knowledge base

required

Returns:

Name Type Description
KnowledgeBase KnowledgeBase

A fully restored knowledge base instance

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
@classmethod
def load(cls, path: str | bytes) -> 'KnowledgeBase':
    """
    Load a complete knowledge base from disk, including all sub-components

    Args:
        path (str | bytes): Filesystem path, or raw bytes produced by save(None)

    Returns:
        KnowledgeBase: A fully restored knowledge base instance
    """
    try:
        if isinstance(path, str):
            # Load data from disk
            with open(path, 'rb') as f:
                data = pickle.load(f)
        elif isinstance(path, bytes):
            data = pickle.loads(path)
        else:
            raise ValueError("Invalid path type")

        # Create new knowledge base instance with saved configuration
        kb = cls(
            embedding_dim=data['embedding_dim'],
            similarity_threshold=data['similarity_threshold'],
            batch_size=data['batch_size'],
            n_clusters=data['n_clusters'],
            deduplication_threshold=data['deduplication_threshold'],
            model_name=data['model_name'],
            embedding_model=data['embedding_model']
        )

        # Restore core components
        kb.init_vis(data.get('vis_class'), data.get('vis_kwargs'))
        kb.existing_hashes = data['existing_hashes']

        # Restore cache and graph data
        kb.similarity_graph = data.get('similarity_graph', {})
        kb.sto = data.get('sto', [])

        # Restore text splitter configuration
        splitter_config = data.get('text_splitter_config', {})
        kb.text_splitter = TextSplitter(
            chunk_size=splitter_config.get('chunk_size', 12_000),
            chunk_overlap=splitter_config.get('chunk_overlap', 200),
            separator=splitter_config.get('separator', '\n')
        )

        # Restore concept graph
        concept_data = data.get('concept_graph', {}).get('concepts', {})
        for concept_info in concept_data.values():
            concept = Concept(
                name=concept_info['name'],
                category=concept_info['category'],
                relationships={k: set(v) for k, v in concept_info['relationships'].items()},
                importance_score=concept_info['importance_score'],
                context_snippets=concept_info['context_snippets'],
                metadata=concept_info['metadata']
            )
            kb.concept_extractor.concept_graph.add_concept(concept)

        print(f"Knowledge base successfully loaded from {path} with {len(concept_data)} concepts")
        return kb

    except Exception as e:
        print(f"Error loading knowledge base: {str(e)}")
        raise
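
Example (illustrative round trip; save(None) returns bytes, save(path) writes a pickle file):

blob = kb.save(None)
kb_from_bytes = KnowledgeBase.load(blob)    # restore from bytes
kb.save("kb.pkl")
kb_from_disk = KnowledgeBase.load("kb.pkl") # restore from disk

Since persistence goes through pickle, only load data you produced yourself or otherwise trust.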
query_concepts(query) async

Query concepts extracted from the knowledge base

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def query_concepts(self, query: str) -> dict[str, Any]:
    """Query concepts extracted from the knowledge base"""
    return await self.concept_extractor.query_concepts(query)
retrieve(query='', query_embedding=None, k=5, min_similarity=0.2, include_connected=True) async

Enhanced retrieval with connected information

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def retrieve(
    self,
    query: str="",
    query_embedding: np.ndarray | None = None,
    k: int = 5,
    min_similarity: float = 0.2,
    include_connected: bool = True
) -> list[Chunk]:
    """Enhanced retrieval with connected information"""
    if query_embedding is None:
        query_embedding = (await self._get_embeddings([query]))[0]
    k = min(k, len(self.vdb.chunks)-1)
    if k <= 0:
        return []
    initial_results = self.vdb.search(query_embedding, k, min_similarity)

    if not include_connected or not initial_results:
        return initial_results

    # Find connected chunks
    connected_chunks = set()
    for chunk in initial_results:
        chunk_id = self.vdb.chunks.index(chunk)
        if chunk_id in self.similarity_graph:
            connected_chunks.update(self.similarity_graph[chunk_id])

    # Add connected chunks to results
    all_chunks = self.vdb.chunks
    additional_results = [all_chunks[i] for i in connected_chunks
                          if all_chunks[i] not in initial_results]

    # Sort by similarity to query
    all_results = initial_results + additional_results

    return sorted(
        all_results,
        key=lambda x: np.dot(x.embedding, query_embedding),
        reverse=True
    )[:k * 2]  # Return more results when including connected information
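
Example (illustrative sketch; assumes an existing kb, inside an async context):

chunks = await kb.retrieve("vector databases", k=5, min_similarity=0.3)
for c in chunks:
    print(c.metadata.get("source"), c.text[:80])
# Passing query_embedding= directly skips the embedding call,
# which is how unified_retrieve reuses one embedding across calls.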
retrieve_with_overview(query, query_embedding=None, k=5, min_similarity=0.2, max_sentences=5, cross_ref_depth=2, max_cross_refs=10) async

Enhanced retrieval with better cross-reference handling

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def retrieve_with_overview(
    self,
    query: str,
    query_embedding=None,
    k: int = 5,
    min_similarity: float = 0.2,
    max_sentences: int = 5,
    cross_ref_depth: int = 2,
    max_cross_refs: int = 10  # New parameter to control cross-reference count
) -> RetrievalResult:
    """Enhanced retrieval with better cross-reference handling"""
    # Get initial results with query embedding
    if query_embedding is None:
        query_embedding = (await self._get_embeddings([query]))[0]
    initial_results = await self.retrieve(query_embedding=query_embedding, k=k, min_similarity=min_similarity)

    if not initial_results:
        return RetrievalResult([], [], {})

    # Find cross-references with similarity scoring
    initial_ids = {self.vdb.chunks.index(chunk) for chunk in initial_results}
    related_ids = self._find_cross_references(
        initial_ids,
        depth=cross_ref_depth,
        query_embedding=query_embedding  # Pass query embedding for relevance scoring
    )

    # Get all relevant chunks with smarter filtering
    all_chunks = self.vdb.chunks
    all_relevant_chunks = initial_results + [
        chunk for i, chunk in enumerate(all_chunks)
        if i in related_ids and self._is_relevant_cross_ref(
            chunk,
            query_embedding,
            initial_results
        )
    ]

    # Enhanced clustering with dynamic cluster size
    clusters = self._cluster_chunks(
        all_relevant_chunks,
        query_embedding=query_embedding
    )

    # Fallback: If no clusters are found, treat all relevant chunks as a single cluster.
    if not clusters:
        print("No clusters found. Falling back to using all relevant chunks as a single cluster.")
        clusters = {0: all_relevant_chunks}

    # Generate summaries and organize results
    overview = []
    cross_references = {}

    for cluster_id, cluster_chunks in clusters.items():
        summary = self._generate_topic_summary(
            cluster_chunks,
            query_embedding,
            max_sentences=max_sentences  # Increased for more context
        )

        # Enhanced chunk sorting with combined scoring
        sorted_chunks = self._sort_chunks_by_relevance(
            cluster_chunks,
            query_embedding,
            initial_results
        )

        # Separate direct matches and cross-references
        direct_matches_ = [{'text':c.text, 'metadata':c.metadata} for c in sorted_chunks if c in initial_results]
        direct_matches = []
        for match in direct_matches_:
            if match in direct_matches:
                continue
            direct_matches.append(match)
        cross_refs_ = [c for c in sorted_chunks if c not in initial_results]
        cross_refs = []
        for match in cross_refs_:
            if match in cross_refs:
                continue
            cross_refs.append(match)
        # Limit cross-references while maintaining diversity
        selected_cross_refs = self._select_diverse_cross_refs(
            cross_refs,
            max_cross_refs,
            query_embedding
        )

        topic_info = {
            'topic_id': cluster_id,
            'summary': summary,
            'main_chunks': [x for x in direct_matches[:3]],
            'chunk_count': len(cluster_chunks),
            'relevance_score': self._calculate_topic_relevance(
                cluster_chunks,
                query_embedding
            )
        }
        overview.append(topic_info)

        if selected_cross_refs:
            cross_references[f"topic_{cluster_id}"] = selected_cross_refs

    # Sort overview by relevance score
    overview.sort(key=lambda x: x['relevance_score'], reverse=True)

    return RetrievalResult(
        overview=overview,
        details=initial_results,
        cross_references=cross_references
    )
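
Example of consuming the returned RetrievalResult (illustrative; assumes an existing kb, inside an async context):

res = await kb.retrieve_with_overview("graph algorithms", k=5, max_cross_refs=5)
for topic in res.overview:  # already sorted by relevance_score
    print(topic["topic_id"], round(topic["relevance_score"], 3), topic["summary"][:80])
for topic_key, refs in res.cross_references.items():
    print(topic_key, "->", len(refs), "cross-references")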
save(path)

Save the complete knowledge base to disk, including all sub-components

Parameters:

Name Type Description Default
path str

Path where the knowledge base will be saved

required
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
def save(self, path: str) -> bytes | None:
    """
    Save the complete knowledge base to disk, including all sub-components

    Args:
        path (str): Path where the knowledge base will be saved.
            If None, the serialized knowledge base is returned as bytes instead.
    """
    try:
        data = {
            # Core components
            'vdb': self.vdb.save(),
            'vis_kwargs': self.vis_kwargs,
            'vis_class': self.vis_class,
            'existing_hashes': self.existing_hashes,

            # Configuration parameters
            'embedding_dim': self.embedding_dim,
            'similarity_threshold': self.similarity_threshold,
            'batch_size': self.batch_size,
            'n_clusters': self.n_clusters,
            'deduplication_threshold': self.deduplication_threshold,
            'model_name': self.model_name,
            'embedding_model': self.embedding_model,

            # Cache and graph data
            'similarity_graph': self.similarity_graph,
            'sto': self.sto,

            # Text splitter configuration
            'text_splitter_config': {
                'chunk_size': self.text_splitter.chunk_size,
                'chunk_overlap': self.text_splitter.chunk_overlap,
                'separator': self.text_splitter.separator
            },

            # Concept extractor data
            'concept_graph': {
                'concepts': {
                    name: {
                        'name': concept.name,
                        'category': concept.category,
                        'relationships': {k: list(v) for k, v in concept.relationships.items()},
                        'importance_score': concept.importance_score,
                        'context_snippets': concept.context_snippets,
                        'metadata': concept.metadata
                    }
                    for name, concept in self.concept_extractor.concept_graph.concepts.items()
                }
            }
        }
        if path is None:
            return pickle.dumps(data)
        # Save to disk using pickle
        with open(path, 'wb') as f:
            pickle.dump(data, f)
        print(f"Knowledge base successfully saved to {path} with {len(self.concept_extractor.concept_graph.concepts.items())} concepts")

    except Exception as e:
        print(f"Error saving knowledge base: {str(e)}")
        raise
unified_retrieve(query, k=5, min_similarity=0.2, cross_ref_depth=2, max_cross_refs=10, max_sentences=10) async

Unified retrieval function that combines concept querying, retrieval with overview, and basic retrieval, then generates a comprehensive summary using LLM.

Parameters:

Name Type Description Default
query str

Search query string

required
k int

Number of primary results to retrieve

5
min_similarity float

Minimum similarity threshold for retrieval

0.2
cross_ref_depth int

Depth for cross-reference search

2
max_cross_refs int

Maximum number of cross-references per topic

10
max_sentences int

Maximum number of sentences in the main summary text

10

Returns:

Type Description
dict[str, Any]

Dictionary containing comprehensive results including summary and details

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
async def unified_retrieve(
    self,
    query: str,
    k: int = 5,
    min_similarity: float = 0.2,
    cross_ref_depth: int = 2,
    max_cross_refs: int = 10,
    max_sentences: int = 10
) -> dict[str, Any]:
    """
    Unified retrieval function that combines concept querying, retrieval with overview,
    and basic retrieval, then generates a comprehensive summary using LLM.

    Args:
        query: Search query string
        k: Number of primary results to retrieve
        min_similarity: Minimum similarity threshold for retrieval
        cross_ref_depth: Depth for cross-reference search
        max_cross_refs: Maximum number of cross-references per topic
        max_sentences: Maximum number of sentences in the main summary text

    Returns:
        Dictionary containing comprehensive results including summary and details
    """
    # Get concept information
    concept_results = await self.concept_extractor.query_concepts(query)

    # Get retrieval overview

    query_embedding = (await self._get_embeddings([query]))[0]
    overview_results = await self.retrieve_with_overview(
        query=query,
        query_embedding=query_embedding,
        k=k,
        min_similarity=min_similarity,
        cross_ref_depth=cross_ref_depth,
        max_cross_refs=max_cross_refs,
        max_sentences=max_sentences
    )

    # Get basic retrieval results
    basic_results = await self.retrieve(
        query_embedding=query_embedding,
        k=k,
        min_similarity=min_similarity
    )
    if len(basic_results) == 0:
        return {}
    if len(basic_results) == 1 and isinstance(basic_results[0], str) and basic_results[0].endswith('[]\n - []\n - []'):
        return {}

    # Prepare context for LLM summary
    context = {
        "concepts": {
            "main_concepts": concept_results.get("concepts", {}),
            "relationships": concept_results.get("relationships", []),
            "concept_groups": concept_results.get("groups", [])
        },
        "topics": [
            {
                "id": topic["topic_id"],
                "summary": topic["summary"],
                "relevance": topic["relevance_score"],
                "chunk_count": topic["chunk_count"]
            }
            for topic in overview_results.overview
        ],
        "key_chunks": [
            {
                "text": chunk.text,
                "metadata": chunk.metadata
            }
            for chunk in basic_results
        ]
    }

    # Generate comprehensive summary using LLM
    system_prompt = """
    Analyze the provided search results and generate a comprehensive summary
    that includes:
    1. Main concepts and their relationships
    2. Key topics and their relevance
    3. Most important findings and insights
    4. Cross-references and connections between topics
    5. Potential gaps or areas for further investigation

    Format the response as a JSON object with these sections.
    """

    prompt = f"""
    Query: {query}

    Context:
    {json.dumps(context, indent=2)}

    Generate a comprehensive analysis and summary following the structure:
    """

    try:
        await asyncio.sleep(0.25)
        llm_response = await litellm_complete(
            model_name=self.model_name,
            prompt=prompt,
            system_prompt=system_prompt,
            response_format=DataModel,
        )
        summary_analysis = json.loads(llm_response)
    except Exception as e:
        get_logger().error(f"Error generating summary: {str(e)}")
        summary_analysis = {
            "main_summary": "Error generating summary",
            "error": str(e)
        }

    # Compile final results
    return {
        "summary": summary_analysis,
        "raw_results": {
            "concepts": concept_results,
            "overview": {
                "topics": overview_results.overview,
                "cross_references": overview_results.cross_references
            },
            "relevant_chunks": [
                {
                    "text": chunk.text,
                    "metadata": chunk.metadata,
                    "cluster_id": chunk.cluster_id
                }
                for chunk in basic_results
            ]
        },
        "metadata": {
            "query": query,
            "timestamp": time.time(),
            "retrieval_params": {
                "k": k,
                "min_similarity": min_similarity,
                "cross_ref_depth": cross_ref_depth,
                "max_cross_refs": max_cross_refs
            }
        }
    }
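
Example of reading the result (keys as compiled in the code above; assumes an existing kb, inside an async context):

out = await kb.unified_retrieve("graph algorithms", k=3)
if out:  # an empty dict means nothing was retrieved
    print(out["summary"])                              # LLM analysis, or error info
    print(len(out["raw_results"]["relevant_chunks"]))  # chunk payloads
    print(out["metadata"]["retrieval_params"])         # echo of the call parameters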
RelevanceAssessment

Bases: BaseModel

Represents an assessment of the relevance of the data in relation to a specific query.

Attributes:

Name Type Description
query_alignment float

A float representing the alignment between the query and the data.

confidence_score float

A float indicating the confidence level in the alignment.

coverage_analysis str

A textual description analyzing the data coverage.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class RelevanceAssessment(BaseModel):
    """
    Represents an assessment of the relevance of the data in relation to a specific query.

    Attributes:
        query_alignment (float): A float representing the alignment between the query and the data.
        confidence_score (float): A float indicating the confidence level in the alignment.
        coverage_analysis (str): A textual description analyzing the data coverage.
    """
    query_alignment: float
    confidence_score: float
    coverage_analysis: str
RetrievalResult dataclass

Structure for organizing retrieval results

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
@dataclass
class RetrievalResult:
    """Structure for organizing retrieval results"""
    overview: list[dict[str, Any]]  # List of topic summaries
    details: list[Chunk]  # Detailed chunks
    cross_references: dict[str, list[Chunk]]  # Related chunks by topic
TConcept

Bases: BaseModel

Represents the criteria or target parameters for concept selection and filtering.

Attributes:

Name Type Description
min_importance float

The minimum importance score a concept must have to be considered.

target_concepts List[str]

A list of names of target concepts to focus on.

relationship_types List[str]

A list of relationship types to be considered in the analysis.

categories List[str]

A list of concept categories to filter or group the concepts.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class TConcept(BaseModel):
    """
    Represents the criteria or target parameters for concept selection and filtering.

    Attributes:
        min_importance (float): The minimum importance score a concept must have to be considered.
        target_concepts (List[str]): A list of names of target concepts to focus on.
        relationship_types (List[str]): A list of relationship types to be considered in the analysis.
        categories (List[str]): A list of concept categories to filter or group the concepts.
    """
    min_importance: float
    target_concepts: list[str]
    relationship_types: list[str]
    categories: list[str]
TextSplitter
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class TextSplitter:
    def __init__(
        self,
        chunk_size: int = 3600,
        chunk_overlap: int = 130,
        separator: str = "\n"
    ):
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.separator = separator

    def approximate(self, text_len: int) -> tuple[float, float]:
        """
        Approximate the number of chunks and average chunk size for a given text length

        Args:
            text_len (int): Length of the text to be split

        Returns:
            tuple[float, float]: (number_of_chunks, approximate_chunk_size)
        """
        if text_len <= self.chunk_size:
            return 1, text_len

        # Handle extreme overlap cases
        if self.chunk_overlap >= self.chunk_size:
            estimated_chunks = text_len
            return estimated_chunks, 1

        # Calculate based on overlap ratio
        overlap_ratio = self.chunk_overlap / self.chunk_size
        base_chunks = text_len / self.chunk_size
        estimated_chunks = base_chunks * 2 / (overlap_ratio if overlap_ratio > 0 else 1)

        # print('#',estimated_chunks, base_chunks, overlap_ratio)
        # Calculate average chunk size
        avg_chunk_size = max(1, text_len / estimated_chunks)

        return estimated_chunks, avg_chunk_size

    def split_text(self, text: str) -> list[str]:
        """Split text into chunks with overlap"""
        # Clean and normalize text
        text = re.sub(r'\s+', ' ', text).strip()

        # If text is shorter than chunk_size, return as is
        if len(text) <= self.chunk_size:
            return [text]

        chunks = []
        start = 0

        while start < len(text):
            # Find end of chunk
            end = start + self.chunk_size

            if end >= len(text):
                chunks.append(text[start:])
                break

            # Try to find a natural break point
            last_separator = text.rfind(self.separator, start, end)
            if last_separator != -1:
                end = last_separator

            # Add chunk
            chunks.append(text[start:end])

            # Calculate allowed overlap for this chunk
            chunk_length = end - start
            allowed_overlap = min(self.chunk_overlap, chunk_length - 1)

            # Move start position considering adjusted overlap
            start = end - allowed_overlap

        return chunks
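
A standalone, runnable illustration of chunking with overlap:

from toolboxv2.mods.isaa.base.KnowledgeBase import TextSplitter

splitter = TextSplitter(chunk_size=50, chunk_overlap=10, separator=" ")
chunks = splitter.split_text("word " * 40)
print(len(chunks), [len(c) for c in chunks])
# Consecutive chunks repeat up to 10 trailing characters of the previous chunk,
# and splits prefer the last separator before the chunk_size boundary.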
approximate(text_len)

Approximate the number of chunks and average chunk size for a given text length

Parameters:

Name Type Description Default
text_len int

Length of the text to be split

required

Returns:

Type Description
tuple[float, float]

(number_of_chunks, approximate_chunk_size)

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
def approximate(self, text_len: int) -> tuple[float, float]:
    """
    Approximate the number of chunks and average chunk size for a given text length

    Args:
        text_len (int): Length of the text to be split

    Returns:
        tuple[float, float]: (number_of_chunks, approximate_chunk_size)
    """
    if text_len <= self.chunk_size:
        return 1, text_len

    # Handle extreme overlap cases
    if self.chunk_overlap >= self.chunk_size:
        estimated_chunks = text_len
        return estimated_chunks, 1

    # Calculate based on overlap ratio
    overlap_ratio = self.chunk_overlap / self.chunk_size
    base_chunks = text_len / self.chunk_size
    estimated_chunks = base_chunks * 2 / (overlap_ratio if overlap_ratio > 0 else 1)

    # print('#',estimated_chunks, base_chunks, overlap_ratio)
    # Calculate average chunk size
    avg_chunk_size = max(1, text_len / estimated_chunks)

    return estimated_chunks, avg_chunk_size
split_text(text)

Split text into chunks with overlap

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
def split_text(self, text: str) -> list[str]:
    """Split text into chunks with overlap"""
    # Clean and normalize text
    text = re.sub(r'\s+', ' ', text).strip()

    # If text is shorter than chunk_size, return as is
    if len(text) <= self.chunk_size:
        return [text]

    chunks = []
    start = 0

    while start < len(text):
        # Find end of chunk
        end = start + self.chunk_size

        if end >= len(text):
            chunks.append(text[start:])
            break

        # Try to find a natural break point
        last_separator = text.rfind(self.separator, start, end)
        if last_separator != -1:
            end = last_separator

        # Add chunk
        chunks.append(text[start:end])

        # Calculate allowed overlap for this chunk
        chunk_length = end - start
        allowed_overlap = min(self.chunk_overlap, chunk_length - 1)

        # Move start position considering adjusted overlap
        start = end - allowed_overlap

    return chunks
TopicInsights

Bases: BaseModel

Represents insights related to various topics.

Attributes:

Name Type Description
primary_topics list[str]

A list of main topics addressed.

cross_references list[str]

A list of cross-references that connect different topics.

knowledge_gaps list[str]

A list of identified gaps in the current knowledge.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class TopicInsights(BaseModel):
    """
    Represents insights related to various topics.

    Attributes:
        primary_topics (list[str]): A list of main topics addressed.
        cross_references (list[str]): A list of cross-references that connect different topics.
        knowledge_gaps (list[str]): A list of identified gaps in the current knowledge.
    """
    primary_topics: list[str]
    cross_references: list[str]
    knowledge_gaps: list[str]
rConcept

Bases: BaseModel

Represents a key concept with its relationships and associated metadata.

Attributes:

Name Type Description
name str

The name of the concept.

category str

The category of the concept (e.g., 'technical', 'domain', 'method', etc.).

relationships Dict[str, List[str]]

A mapping where each key is a type of relationship and the value is a list of related concept names.

importance_score float

A numerical score representing the importance or relevance of the concept.

context_snippets List[str]

A list of text snippets providing context where the concept appears.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
class rConcept(BaseModel):
    """
    Represents a key concept with its relationships and associated metadata.

    Attributes:
        name (str): The name of the concept.
        category (str): The category of the concept (e.g., 'technical', 'domain', 'method', etc.).
        relationships (Dict[str, List[str]]): A mapping where each key is a type of relationship and the
            value is a list of related concept names.
        importance_score (float): A numerical score representing the importance or relevance of the concept.
        context_snippets (List[str]): A list of text snippets providing context where the concept appears.
    """
    name: str
    category: str
    relationships: dict[str, list[str]]
    importance_score: float
    context_snippets: list[str]
normalize_vectors(vectors)

Normalize vectors to unit length

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
def normalize_vectors(vectors: np.ndarray) -> np.ndarray:
    """Normalize vectors to unit length"""
    norms = np.linalg.norm(vectors, axis=1, keepdims=True)
    return np.divide(vectors, norms, where=norms != 0)
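
For example:

import numpy as np
from toolboxv2.mods.isaa.base.KnowledgeBase import normalize_vectors

vecs = np.array([[3.0, 4.0], [1.0, 0.0]])
print(normalize_vectors(vecs))  # [[0.6 0.8]
                                #  [1.  0. ]]

Note that rows with zero norm are skipped by the where= mask; because no out= array is supplied, the returned values for such rows are unspecified, so callers should avoid passing all-zero vectors.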
VectorStores

Vector store implementations for the toolboxv2 system.

taichiNumpyNumbaVectorStores
NumpyVectorStore

Bases: AbstractVectorStore

Source code in toolboxv2/mods/isaa/base/VectorStores/taichiNumpyNumbaVectorStores.py
class NumpyVectorStore(AbstractVectorStore):
    def __init__(self, use_gpu=False):
        self.embeddings = np.empty((0, 0))
        self.chunks = []
        # Initialize Taichi
        import taichi as ti
        ti.init(arch=ti.gpu if use_gpu else ti.cpu)
        self.normalized_embeddings = None

    def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
        if len(embeddings.shape) != 2:
            raise ValueError("Embeddings must be 2D array")
        if len(chunks) != embeddings.shape[0]:
            raise ValueError("Mismatch between embeddings and chunks count")

        if self.embeddings.size == 0:
            self.embeddings = embeddings
        else:
            if embeddings.shape[1] != self.embeddings.shape[1]:
                raise ValueError("Embedding dimensions must match")
            self.embeddings = np.vstack([self.embeddings, embeddings])
        self.chunks.extend(chunks)
        # Reset normalized embeddings cache
        self.normalized_embeddings = None

    def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
        if self.embeddings.size == 0:
            return []

        # Pre-compute normalized embeddings if not cached
        if self.normalized_embeddings is None:
            self._precompute_normalized_embeddings()

        # Normalize query
        query_norm = self._normalize_vector(query_embedding)

        # Enhanced Taichi kernel for similarity computation
        n = len(self.chunks)
        similarities = np.zeros(n, dtype=np.float32)
        import taichi as ti
        @ti.kernel
        def compute_similarities_optimized(
            query: ti.types.ndarray(dtype=ti.f32),
            embeddings: ti.types.ndarray(dtype=ti.f32),
            similarities: ti.types.ndarray(dtype=ti.f32),
            n: ti.i32,
            dim: ti.i32
        ):
            ti.loop_config(block_dim=256)
            for i in range(n):
                dot_product = 0.0
                # Vectorized dot product computation
                for j in range(dim):
                    dot_product += embeddings[i, j] * query[j]
                similarities[i] = dot_product

        # Alternative optimized kernel using tile-based computation
        @ti.kernel
        def compute_similarities_tiled(
            query: ti.types.ndarray(dtype=ti.f32),
            embeddings: ti.types.ndarray(dtype=ti.f32),
            similarities: ti.types.ndarray(dtype=ti.f32),
            n: ti.i32,
            dim: ti.i32
        ):
            tile_size = 16  # Adjust based on hardware
            for i in range(n):
                dot_product = 0.0
                # Process in tiles for better cache utilization
                for jt in range(0, dim):
                    if jt % tile_size != 0:
                        continue
                    tile_sum = 0.0
                    for j in range(jt, ti.min(jt + tile_size, dim)):
                        tile_sum += embeddings[i, j] * query[j]
                    dot_product += tile_sum
                similarities[i] = dot_product

        # Choose the appropriate kernel based on dimension size
        if query_embedding.shape[0] >= 256:
            compute_similarities_tiled(
                query_norm.astype(np.float32),
                self.normalized_embeddings,
                similarities,
                n,
                query_embedding.shape[0]
            )
        else:
            compute_similarities_optimized(
                query_norm.astype(np.float32),
                self.normalized_embeddings,
                similarities,
                n,
                query_embedding.shape[0]
            )

        # Optimize top-k selection
        if k >= n:
            indices = np.argsort(-similarities)
        else:
            # Use partial sort for better performance when k < n
            indices = np.argpartition(-similarities, k)[:k]
            indices = indices[np.argsort(-similarities[indices])]

        # Filter results efficiently using vectorized operations
        mask = similarities[indices] >= min_similarity
        filtered_indices = indices[mask]
        return [self.chunks[idx] for idx in filtered_indices[:k]]

    def save(self) -> bytes:
        return pickle.dumps({
            'embeddings': self.embeddings,
            'chunks': self.chunks
        })

    def load(self, data: bytes) -> 'NumpyVectorStore':
        loaded = pickle.loads(data)
        self.embeddings = loaded['embeddings']
        self.chunks = loaded['chunks']
        return self

    def clear(self) -> None:
        self.embeddings = np.empty((0, 0))
        self.chunks = []
        self.normalized_embeddings = None

    def rebuild_index(self) -> None:
        pass  # No index to rebuild for numpy implementation

    def _normalize_vector(self, vector: np.ndarray) -> np.ndarray:
        """Normalize a single vector efficiently."""
        return vector / (np.linalg.norm(vector) + 1e-8)

    def _precompute_normalized_embeddings(self) -> None:
        """Pre-compute and cache normalized embeddings."""
        # Allocate output array
        self.normalized_embeddings = np.empty_like(self.embeddings, dtype=np.float32)

        # Normalize embeddings using Taichi
        batch_normalize(
            self.embeddings.astype(np.float32),
            self.normalized_embeddings,
            self.embeddings.shape[0],
            self.embeddings.shape[1]
        )
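
Example of a minimal store round trip (illustrative; assumes taichi is installed, since __init__ calls ti.init()):

import numpy as np
from toolboxv2.mods.isaa.base.VectorStores.taichiNumpyNumbaVectorStores import NumpyVectorStore
from toolboxv2.mods.isaa.base.VectorStores.types import Chunk

store = NumpyVectorStore(use_gpu=False)
emb = np.random.rand(3, 8).astype(np.float32)
chunks = [Chunk(text=f"chunk {i}", embedding=emb[i], metadata={}, content_hash=str(i))
          for i in range(3)]
store.add_embeddings(emb, chunks)
hits = store.search(emb[0], k=2, min_similarity=0.1)
print([c.text for c in hits])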
types
AbstractVectorStore

Bases: ABC

Abstract base class for vector stores

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
class AbstractVectorStore(ABC):
    """Abstract base class for vector stores"""

    @abstractmethod
    def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
        """Add embeddings and their corresponding chunks to the store"""
        pass

    @abstractmethod
    def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
        """Search for similar vectors"""
        pass

    @abstractmethod
    def save(self) -> bytes:
        """Save the vector store to disk"""
        pass

    @abstractmethod
    def load(self, data: bytes) -> 'AbstractVectorStore':
        """Load the vector store from disk"""
        pass

    @abstractmethod
    def clear(self) -> None:
        """Clear all data from the store"""
        pass

    @abstractmethod
    def rebuild_index(self) -> None:
        """Optional for faster searches"""
        pass
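
A minimal conforming subclass, sketched to show the contract (brute-force cosine search over pre-normalized vectors; not the library's own implementation):

import pickle
import numpy as np
from toolboxv2.mods.isaa.base.VectorStores.types import AbstractVectorStore, Chunk

class InMemoryStore(AbstractVectorStore):
    """Brute-force similarity store; a sketch of the ABC contract."""

    def __init__(self):
        self.embeddings = np.empty((0, 0), dtype=np.float32)
        self.chunks: list[Chunk] = []

    def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
        self.embeddings = embeddings if self.embeddings.size == 0 \
            else np.vstack([self.embeddings, embeddings])
        self.chunks.extend(chunks)

    def search(self, query_embedding: np.ndarray, k: int = 5,
               min_similarity: float = 0.7) -> list[Chunk]:
        if not self.chunks:
            return []
        sims = self.embeddings @ query_embedding  # assumes unit-length vectors
        order = np.argsort(-sims)[:k]
        return [self.chunks[i] for i in order if sims[i] >= min_similarity]

    def save(self) -> bytes:
        return pickle.dumps((self.embeddings, self.chunks))

    def load(self, data: bytes) -> 'InMemoryStore':
        self.embeddings, self.chunks = pickle.loads(data)
        return self

    def clear(self) -> None:
        self.embeddings = np.empty((0, 0), dtype=np.float32)
        self.chunks = []

    def rebuild_index(self) -> None:
        pass  # nothing to rebuild for brute force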
add_embeddings(embeddings, chunks) abstractmethod

Add embeddings and their corresponding chunks to the store

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
    """Add embeddings and their corresponding chunks to the store"""
    pass
clear() abstractmethod

Clear all data from the store

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def clear(self) -> None:
    """Clear all data from the store"""
    pass
load(data) abstractmethod

Load the vector store from disk

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def load(self, data: bytes) -> 'AbstractVectorStore':
    """Load the vector store from disk"""
    pass
rebuild_index() abstractmethod

Optional for faster searches

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def rebuild_index(self) -> None:
    """Optional for faster searches"""
    pass
save() abstractmethod

Save the vector store to disk

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def save(self) -> bytes:
    """Save the vector store to disk"""
    pass
search(query_embedding, k=5, min_similarity=0.7) abstractmethod

Search for similar vectors

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@abstractmethod
def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
    """Search for similar vectors"""
    pass
Chunk dataclass

Represents a chunk of text with its embedding and metadata

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
@dataclass(slots=True)
class Chunk:
    """Represents a chunk of text with its embedding and metadata"""
    text: str
    embedding: np.ndarray
    metadata: dict[str, Any]
    content_hash: str
    cluster_id: int | None = None
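
Example of constructing a Chunk directly (illustrative; a slots dataclass, so only the declared fields exist):

import hashlib
import numpy as np
from toolboxv2.mods.isaa.base.VectorStores.types import Chunk

c = Chunk(
    text="example text",
    embedding=np.zeros(768, dtype=np.float32),
    metadata={"source": "demo"},
    content_hash=hashlib.sha256(b"example text").hexdigest(),
)  # cluster_id defaults to None until clustering assigns one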

chainUi

delete_task_chain(app, request=None) async

Deletes a task chain.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, request_as_kwarg=True, api_methods=['DELETE'])
async def delete_task_chain(app: App, request: Optional[RequestData] = None):
    """Deletes a task chain."""
    chain_name = request.query_params.get("chain_name") if request and request.query_params else None
    if not chain_name:
        return Result.default_user_error(info="Chain name is required for deletion.", exec_code=400)

    isaa = get_isaa_instance(app)
    try:
        isaa.remove_task(chain_name)  # Removes from memory
        isaa.save_task()  # Saves all chains, effectively removing the deleted one from file
        # This also deletes the .chain.json file

        # Delete associated Drawflow file if it exists
        drawflow_file_path = isaa.agent_chain.directory / f"{chain_name}.drawflow.json"
        if drawflow_file_path.exists():
            try:
                drawflow_file_path.unlink()
                app.logger.info(f"Deleted Drawflow data for chain '{chain_name}'.")
            except Exception as e:
                app.logger.warning(f"Could not delete Drawflow data file for chain '{chain_name}': {e}")

        return Result.ok(info=f"Task chain '{chain_name}' deleted successfully.")
    except KeyError:  # If isaa.remove_task raises KeyError for non-existent chain
        return Result.default_user_error(info=f"Task chain '{chain_name}' not found.", exec_code=404)
    except Exception as e:
        app.logger.error(f"Error deleting task chain '{chain_name}': {e}", exc_info=True)
        return Result.custom_error(info=f"Failed to delete task chain: {str(e)}", exec_code=500)
get_task_chain_definition(app, request=None) async

Gets the definition of a specific task chain, including its Drawflow export if available.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, request_as_kwarg=True, api_methods=['GET'])
async def get_task_chain_definition(app: App, request: Optional[RequestData] = None):
    """Gets the definition of a specific task chain, including its Drawflow export if available."""
    chain_name = request.query_params.get("chain_name") if request and request.query_params else None
    if not chain_name:
        return Result.default_user_error(info="Chain name is required.", exec_code=400)

    isaa = get_isaa_instance(app)
    try:
        # Get logical task definition
        tasks_list_dicts = isaa.get_task(chain_name)
        if tasks_list_dicts is None:  # Check if chain exists
            return Result.default_user_error(info=f"Task chain '{chain_name}' not found.", exec_code=404)

        description = isaa.agent_chain.get_discr(chain_name) or ""

        # Attempt to load Drawflow specific data if it exists
        # This assumes Drawflow data is saved in a parallel file or embedded
        drawflow_data = None
        drawflow_file_path = isaa.agent_chain.directory / f"{chain_name}.drawflow.json"
        if drawflow_file_path.exists():
            try:
                with open(drawflow_file_path, 'r') as f:
                    drawflow_data = json.load(f)
            except Exception as e:
                app.logger.warning(f"Could not load Drawflow data for chain '{chain_name}': {e}")

        chain_pydantic_tasks = [ISAAPydanticTask(**task_dict) for task_dict in tasks_list_dicts]

        response_data = ISAAPydanticTaskChain(
            name=chain_name,
            description=description,
            tasks=chain_pydantic_tasks
        ).model_dump()

        if drawflow_data:
            response_data["drawflow_export"] = drawflow_data  # Embed Drawflow data

        return Result.json(data=response_data)

    except Exception as e:
        app.logger.error(f"Error getting task chain definition for '{chain_name}': {e}", exc_info=True)
        return Result.custom_error(info=f"Failed to get task chain definition: {str(e)}", exec_code=500)
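On success the endpoint returns the chain as JSON; the shape below is illustrative, with values elided where the source does not pin them down:

{
  "name": "my_chain",
  "description": "…",
  "tasks": [ ... ],           // serialized ISAAPydanticTask objects
  "drawflow_export": { ... }  // present only if <name>.drawflow.json exists
}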
get_task_chain_editor_page_drawflow(app, request=None) async

Serves the HTML page for the Drawflow-based Task Chain Editor.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="task_chain_editor_drawflow", api_methods=['GET'])
async def get_task_chain_editor_page_drawflow(app: App, request: Optional[RequestData] = None):
    """Serves the HTML page for the Drawflow-based Task Chain Editor."""
    if app is None:  # Should not happen if called via export
        app = get_app()

    # The Drawflow HTML and JS will be substantial.
    # It's better to load it from a separate .html file for maintainability.
    # For this example, I'll provide a condensed version here.
    # In a real setup, use:
    #   ui_file_path = Path(__file__).parent / "task_chain_editor_drawflow.html"
    #   with open(ui_file_path, "r") as f:
    #       html_content = f.read()
    # And then inject app.web_context() if needed, or ensure tb.js handles it.

    html_content = DRAWFLOW_TASK_CHAIN_EDITOR_HTML_TEMPLATE  # Defined below
    return Result.html(data=app.web_context() + html_content)
get_task_chain_list(app, request=None) async

Lists all available global task chains.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, request_as_kwarg=True, api_methods=['GET'])
async def get_task_chain_list(app: App, request: Optional[RequestData] = None):
    """Lists all available global task chains."""
    isaa = get_isaa_instance(app)
    try:
        chain_names = list(isaa.agent_chain.chains.keys())  # This should return list[str]
        return Result.json(data=chain_names)
    except Exception as e:
        app.logger.error(f"Error listing task chains: {e}", exc_info=True)
        return Result.custom_error(info=f"Failed to list task chains: {str(e)}", exec_code=500)
initialize_module(app)

Initializes the ISAA ChainUI module and registers its UI with CloudM.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, version=VERSION)
def initialize_module(app: App):
    """Initializes the ISAA ChainUI module and registers its UI with CloudM."""
    print(f"ISAA Drawflow ChainUI Modul ({MOD_NAME} v{VERSION}) initialisiert.")
    if app is None:
        app = get_app()

    # Register the new Drawflow-based Task Chain Editor UI
    app.run_any(("CloudM", "add_ui"),
                name=f"{Name}_TaskChainEditorDrawflow",  # Unique name
                title="Task Chain Editor (Drawflow)",
                path=f"/api/{Name}/task_chain_editor_drawflow",  # Unique path
                description="Visual editor for ISAA Task Chains using Drawflow.",
                auth=True
                )
    return Result.ok(info="ISAA Drawflow ChainUI Modul und Editor UI bereit.")
run_chain_visualized(app, request=None, data=None) async

Executes a specified task chain with the given input.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, request_as_kwarg=True, api_methods=['POST'])
async def run_chain_visualized(app: App, request: Optional[RequestData] = None, data: RunChainRequest = None):
    """Executes a specified task chain with the given input."""
    if not data:  # Compatibility
        if request and request.body and isinstance(request.body, dict):
            try:
                data = RunChainRequest(**request.body)
            except Exception as e:
                return Result.default_user_error(info=f"Invalid run chain data: {e}", exec_code=400)
        else:
            return Result.default_user_error(info="No run chain data provided.", exec_code=400)
    elif isinstance(data, dict):
        data = RunChainRequest(**data)

    if not data.chain_name:
        return Result.default_user_error(info="Chain name is required for execution.", exec_code=400)
    if data.task_input is None:  # Allow empty string as input
        return Result.default_user_error(info="Task input is required.", exec_code=400)

    isaa = get_isaa_instance(app)

    # TODO: Add SSE streaming for execution progress if desired in the future.
    # For now, simple blocking execution.

    try:
        # If chain_definition is provided, use it directly (for running unsaved chains)
        if data.chain_definition:
            app.logger.info(
                f"Executing unsaved chain definition for '{data.chain_name}' with input: {data.task_input[:50]}...")
            # Temporarily add this chain definition to isaa.agent_chain without saving to file
            # This requires isaa.agent_chain to support in-memory, non-persistent additions or direct execution
            # For simplicity, let's assume isaa.run_task can accept a task list directly if AgentChain is adapted.
            # If not, we'd save it temporarily or find another way.
            # For now, assuming run_task primarily uses named, saved chains.
            # A more robust solution would be to modify `isaa.run_task` or `ChainTreeExecutor`
            # to accept a raw list of task dictionaries.
            # This example will proceed assuming the chain must be saved first if not already.
            # Let's add a note that this feature (running unsaved chains from UI) needs more work on ISAA core.
            app.logger.warning(
                "Running unsaved chain definitions directly is not fully supported by this endpoint version. The chain should be saved first.")
            # Fallback to trying to run by name, assuming it was saved.

        app.logger.info(f"Executing chain '{data.chain_name}' with input: {data.task_input[:50]}...")
        # `isaa.run_task` is already async
        execution_result = await isaa.run_task(task_input=data.task_input, chain_name=data.chain_name)

        # `execution_result` structure depends on `ChainTreeExecutor.execute`
        # It's usually a dictionary of results.
        return Result.json(data={"output": execution_result, "final_message": "Chain execution completed."})

    except Exception as e:
        app.logger.error(f"Error executing task chain '{data.chain_name}': {e}", exc_info=True)
        return Result.custom_error(info=f"Chain execution failed: {str(e)}", exec_code=500)
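A minimal POST body for this endpoint, restricted to the fields the handler actually reads (values are illustrative). chain_definition may also be supplied, but as the handler notes, executing unsaved definitions is not yet fully supported:

{
  "chain_name": "my_chain",
  "task_input": "Summarize the quarterly report"
}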
save_task_chain_definition(app, request=None, data=None) async

Saves a task chain definition. Expects logical tasks and optional Drawflow export.

Source code in toolboxv2/mods/isaa/chainUi.py
@export(mod_name=MOD_NAME, api=True, version=VERSION, request_as_kwarg=True, api_methods=['POST'])
async def save_task_chain_definition(app: App, request: Optional[RequestData] = None,
                                     data: SaveTaskChainRequest = None):
    """Saves a task chain definition. Expects logical tasks and optional Drawflow export."""
    if not data:  # Compatibility for direct data passthrough if decorator doesn't parse body for Pydantic model
        if request and request.body and isinstance(request.body, dict):
            try:
                data = SaveTaskChainRequest(**request.body)
            except Exception as e:
                return Result.default_user_error(info=f"Invalid chain data provided: {e}", exec_code=400)
        else:
            return Result.default_user_error(info="No chain data provided.", exec_code=400)
    elif isinstance(data, dict):
        data = SaveTaskChainRequest(**data)

    if not data.name:
        return Result.default_user_error(info="Chain name cannot be empty.", exec_code=400)

    isaa = get_isaa_instance(app)
    try:
        # Save logical tasks to ISAA's AgentChain
        task_dicts = [task.model_dump() for task in data.tasks]
        isaa.add_task(data.name, task_dicts)  # add_task replaces if exists
        if data.description is not None:  # Allow empty description
            isaa.agent_chain.add_discr(data.name, data.description)
        isaa.save_task(data.name)  # Persists the .chain.json file

        # Save Drawflow specific data if provided
        if data.drawflow_export:
            drawflow_file_path = Path(isaa.agent_chain.directory) / f"{data.name}.drawflow.json"
            try:
                with open(drawflow_file_path, 'w') as f:
                    json.dump(data.drawflow_export, f, indent=2)
                app.logger.info(f"Saved Drawflow data for chain '{data.name}' to {drawflow_file_path}")
            except Exception as e:
                app.logger.error(f"Failed to save Drawflow data for chain '{data.name}': {e}")
                # Optionally, inform client that logical save succeeded but visual save failed
                return Result.ok(
                    info=f"Task chain '{data.name}' saved (logical part), but Drawflow visual data failed to save.")

        return Result.ok(info=f"Task chain '{data.name}' saved successfully.")

    except Exception as e:
        app.logger.error(f"Error saving task chain '{data.name}': {e}", exc_info=True)
        return Result.custom_error(info=f"Failed to save task chain: {str(e)}", exec_code=500)
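
A sketch of the expected POST body (SaveTaskChainRequest); task entries are elided since their exact schema is defined by ISAAPydanticTask:

{
  "name": "my_chain",
  "description": "Summarize then translate",
  "tasks": [ ... ],
  "drawflow_export": { ... }  // optional; persisted to <name>.drawflow.json
}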

extras

adapter
LiteLLM LLM Interface Module

This module provides interfaces for interacting with LiteLLM's language models, including text generation and embedding capabilities.

Author: Lightrag Team
Created: 2025-02-04
License: MIT License
Version: 1.0.0

Change Log:
  • 1.0.0 (2025-02-04): Initial LiteLLM release
    * Ported OpenAI logic to use the litellm async client
    * Updated error types and environment variable names
    * Preserved streaming and embedding support

Dependencies
  • litellm
  • numpy
  • pipmaster
  • Python >= 3.10
Usage

from llm_interfaces.litellm import litellm_complete, litellm_embed

litellm_complete(prompt, system_prompt=None, history_messages=None, keyword_extraction=False, model_name='groq/gemma2-9b-it', **kwargs) async

Public completion interface using the model name specified in the global configuration. Optionally extracts keywords if requested.

Source code in toolboxv2/mods/isaa/extras/adapter.py
async def litellm_complete(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, model_name="groq/gemma2-9b-it", **kwargs
) -> str | AsyncIterator[str]:
    """
    Public completion interface using the model name specified in the global configuration.
    Optionally extracts keywords if requested.
    """
    if history_messages is None:
        history_messages = []
    # Request a JSON response format when keyword extraction is enabled;
    # accept the flag either as the named parameter or via kwargs.
    keyword_extraction_flag = kwargs.pop("keyword_extraction", None) or keyword_extraction
    if keyword_extraction_flag:
        kwargs["response_format"] = "json"

    return await litellm_complete_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
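A usage sketch, assuming provider credentials for the default model (e.g. GROQ_API_KEY) are set in the environment; stream=True is passed through **kwargs to LiteLLM, turning the return value into an async iterator instead of a full string:

import asyncio

from toolboxv2.mods.isaa.extras.adapter import litellm_complete


async def main():
    # Non-streaming call: returns the complete response string.
    answer = await litellm_complete("Explain vector stores in one sentence.")
    print(answer)

    # Streaming call: consume the async iterator of text chunks.
    stream = await litellm_complete("Count to five.", stream=True)
    async for token in stream:
        print(token, end="", flush=True)


asyncio.run(main())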
litellm_complete_if_cache(model, prompt, system_prompt=None, history_messages=None, base_url=None, api_key=None, **kwargs) async

Core function to query the LiteLLM model. It builds the message context, invokes the completion API, and returns either a complete result string or an async iterator for streaming responses.

Source code in toolboxv2/mods/isaa/extras/adapter.py
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, Timeout, APIConnectionError)),
)
async def litellm_complete_if_cache(
    model,
    prompt,
    system_prompt=None,
    history_messages=None,
    base_url=None,
    api_key=None,
    **kwargs,
) -> str | AsyncIterator[str]:
    """
    Core function to query the LiteLLM model. It builds the message context,
    invokes the completion API, and returns either a complete result string or
    an async iterator for streaming responses.
    """
    # Set the API key if provided
    if api_key:
        os.environ["LITELLM_API_KEY"] = api_key

    # Remove internal keys not needed for the client call
    kwargs.pop("hashing_kv", None)
    kwargs.pop("keyword_extraction", None)

    # Merge explicit fallbacks with any models configured via the
    # FALLBACKS_MODELS env var, dropping empty entries from an unset variable.
    fallbacks_ = kwargs.pop("fallbacks", [])
    env_fallbacks = [m for m in os.getenv("FALLBACKS_MODELS", "").split(",") if m]
    fallbacks = fallbacks_ + env_fallbacks

    # Build the messages list from system prompt, conversation history, and the new prompt
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages is not None:
        messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})

    try:
        response = await acompletion(
            model=model, messages=messages,
            fallbacks=fallbacks,
            **kwargs
        )
    except Exception as e:
        get_logger().error(f"LiteLLM completion failed: {e}")
        return ""

    # Check if the response is a streaming response (i.e. an async iterator)
    if hasattr(response, "__aiter__"):

        async def inner():
            async for chunk in response:
                # Assume LiteLLM response structure is similar to OpenAI's
                content = chunk.choices[0].delta.content
                if content is None:
                    continue
                yield content

        return inner()
    else:
        # Non-streaming: extract and return the full content string

        content = response.choices[0].message.content
        if content is None:
            content = response.choices[0].message.tool_calls[0].function.arguments
        return content
litellm_embed(texts, model='gemini/text-embedding-004', base_url=None, api_key=None) async

Generates embeddings for the given list of texts using LiteLLM.

Source code in toolboxv2/mods/isaa/extras/adapter.py
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=60),
    retry=retry_if_exception_type((RateLimitError, Timeout, APIConnectionError)),
)
async def litellm_embed(
    texts: list[str],
    model: str = "gemini/text-embedding-004",
    base_url: str = None,
    api_key: str = None,
) -> np.ndarray:
    """
    Generates embeddings for the given list of texts using LiteLLM.
    """
    response = await litellm.aembedding(
        model=model, input=texts,
        # encoding_format="float"
    )
    return np.array([dp.embedding for dp in response.data])
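A quick usage sketch; the embedding dimension depends on the model, and the default text-embedding-004 requires Gemini credentials (e.g. GEMINI_API_KEY) in the environment:

import asyncio

from toolboxv2.mods.isaa.extras.adapter import litellm_embed


async def main():
    vectors = await litellm_embed(["first document", "second document"])
    print(vectors.shape)  # (2, embedding_dim)


asyncio.run(main())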
filter
filter_relevant_texts(query, texts, fuzzy_threshold=70, semantic_threshold=0.75, model=None)

Filters a list of texts based on their relevance to the query. It first uses a fuzzy matching score and, if that score is below the threshold, it then checks the semantic similarity.

:param query: The query string.
:param texts: List of page texts.
:param fuzzy_threshold: Fuzzy matching score threshold (0-100).
:param semantic_threshold: Semantic similarity threshold (0.0-1.0).
:param model: A preloaded SentenceTransformer model (if None, one will be loaded).
:return: Filtered list of texts deemed relevant.

Source code in toolboxv2/mods/isaa/extras/filter.py
def filter_relevant_texts(query: str,
                          texts: list[str],
                          fuzzy_threshold: int = 70,
                          semantic_threshold: float = 0.75,
                          model = None) -> list[str]:
    """
    Filters a list of texts based on their relevance to the query.
    It first uses a fuzzy matching score and, if that score is below the threshold,
    it then checks the semantic similarity.

    :param query: The query string.
    :param texts: List of page texts.
    :param fuzzy_threshold: Fuzzy matching score threshold (0-100).
    :param semantic_threshold: Semantic similarity threshold (0.0-1.0).
    :param model: A preloaded SentenceTransformer model (if None, one will be loaded).
    :return: Filtered list of texts deemed relevant.
    """
    try:
        from rapidfuzz import fuzz
    except ImportError:
        # os.system takes a command string, not a list; use subprocess instead.
        import subprocess
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'rapidfuzz'])
        from rapidfuzz import fuzz
    try:
        from sentence_transformers import SentenceTransformer, util
    except ImportError:
        import subprocess
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'sentence-transformers'])
        from sentence_transformers import SentenceTransformer, util

    if model is None:
        # For efficiency, consider pre-loading this model outside the function.
        model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

    # Pre-compute query embedding for the semantic check:
    query_embedding = model.encode(query, convert_to_tensor=True)

    relevant_texts = []
    for text in texts:
        # --- Fuzzy Keyword Filtering ---
        fuzzy_score = fuzz.partial_ratio(query.lower(), text.lower())
        if fuzzy_score >= fuzzy_threshold:
            relevant_texts.append(text)
        else:
            # --- Semantic Similarity Filtering ---
            text_embedding = model.encode(text, convert_to_tensor=True)
            similarity = util.pytorch_cos_sim(query_embedding, text_embedding).item()
            if similarity >= semantic_threshold:
                relevant_texts.append(text)
    return relevant_texts
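A small usage sketch; the first call downloads the paraphrase-MiniLM-L6-v2 model unless a preloaded one is passed via model=:

texts = [
    "Quarterly revenue grew by 12 percent.",
    "The office dog learned a new trick.",
    "Revenue growth accelerated thanks to new contracts.",
]
relevant = filter_relevant_texts("revenue growth", texts)
print(relevant)  # expected: the two revenue-related passages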
modes
generate_prompt(subject, context='', additional_requirements=None)

Generates a prompt based on the given subject, with optional context and additional requirements.

Parameters:
  • subject (str): The main subject for the prompt.
  • context (str): Optional additional context to tailor the prompt.
  • additional_requirements (Dict[str, Any]): Optional additional parameters or requirements for the prompt.

Returns:
  • str: A crafted prompt.

Source code in toolboxv2/mods/isaa/extras/modes.py
def generate_prompt(subject: str, context: str = "", additional_requirements: dict[str, Any] = None) -> str:
    """
    Generates a prompt based on the given subject, with optional context and additional requirements.

    Parameters:
    - subject (str): The main subject for the prompt.
    - context (str): Optional additional context to tailor the prompt.
    - additional_requirements (Dict[str, Any]): Optional additional parameters or requirements for the prompt.

    Returns:
    - str: A crafted prompt.
    """
    prompt = f"Based on the subject '{subject}', with the context '{context}', generate a clear and precise instruction."
    if additional_requirements:
        prompt += f" Consider the following requirements: {additional_requirements}."
    return prompt
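The function is a plain string template, so its output is deterministic:

>>> generate_prompt("unit testing", context="a Python CLI tool", additional_requirements={"tone": "concise"})
"Based on the subject 'unit testing', with the context 'a Python CLI tool', generate a clear and precise instruction. Consider the following requirements: {'tone': 'concise'}."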

toolboxv2.flows_dict(s='.py', remote=False, dir_path=None, flows_dict_=None)

Source code in toolboxv2/flows/__init__.py
def flows_dict(s='.py', remote=False, dir_path=None, flows_dict_=None):
    if flows_dict_ is None:
        flows_dict_ = {}
    with Spinner("Loading flows"):
        # Resolve the directory to scan; walk any external paths configured
        # via the EXTERNAL_PATH_RUNNABELS env var first.
        if dir_path is None:
            for ex_path in os.getenv("EXTERNAL_PATH_RUNNABELS", '').split(','):
                if not ex_path:
                    continue
                flows_dict(s, remote, ex_path, flows_dict_)
            dir_path = os.path.dirname(os.path.realpath(__file__))
        to = time.perf_counter()
        # Iterate over all files in the directory
        files = os.listdir(dir_path)
        l_files = len(files)
        for i, file_name in enumerate(files):
            with Spinner(f"{file_name} {i}/{l_files}"):
                # Skip the package initializer
                if file_name == "__init__.py":
                    pass

                elif remote and s in file_name and file_name.endswith('.gist'):
                    # Load the flow from a GitHub Gist; the file name encodes
                    # "<name>.<publisher>.<gist-id>.gist".
                    name_f = os.path.splitext(file_name)[0]
                    name = name_f.split('.')[0]
                    url = name_f.split('.')[-1]
                    print(f"Gist Name: {name}, URL: {url}")
                    try:
                        module = GistLoader(f"{name}/{url}").load_module(name)
                    except Exception as e:
                        print(f"Error loading module {name} from github {url}")
                        print(e)
                        continue

                    # Register the module's run function under its NAME
                    if hasattr(module, 'run') and callable(module.run) and hasattr(module, 'NAME'):
                        flows_dict_[module.NAME] = module.run
                elif file_name.endswith('.py') and s in file_name:
                    name = os.path.splitext(file_name)[0]
                    # Load the module from its file path
                    spec = importlib.util.spec_from_file_location(name, os.path.join(dir_path, file_name))
                    module = importlib.util.module_from_spec(spec)
                    try:
                        spec.loader.exec_module(module)
                    except Exception as e:
                        print("Error loading module ", name)
                        print(e)
                        continue

                    # Register the module's run function under its NAME
                    if hasattr(module, 'run') and callable(module.run) and hasattr(module, 'NAME'):
                        flows_dict_[module.NAME] = module.run

        print(f"Getting all flows took {time.perf_counter() - to:.2f}s for {len(flows_dict_.keys())} elements")
        return flows_dict_
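
Typical usage, scanning the built-in flows directory; each returned value is the flow module's run entry point, whose exact signature is defined by the individual flow module:

from toolboxv2 import flows_dict

flows = flows_dict()
print(sorted(flows))  # available flow names (each module's NAME constant)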

toolboxv2.TBEF

Automatically generated by ToolBox v0.1.21

Other Exposed Items

toolboxv2.ToolBox_over = 'root' module-attribute