diff --git a/env_playground.env b/env_playground.env
new file mode 100644
index 00000000..105f87fd
--- /dev/null
+++ b/env_playground.env
@@ -0,0 +1,25 @@
+# Playground Environment Configuration
+
+# System Configuration
+APP_ENV_TYPE=play
+APP_ENV_LABEL=Playground Instance
+APP_CALL=uvicorn app:app --host 0.0.0.0 --port 8000
+
+# Database Configuration System
+DB_SYSTEM_HOST=/home/_powerondb
+DB_SYSTEM_DATABASE=system
+DB_SYSTEM_USER=dev_user
+DB_SYSTEM_PASSWORD_SECRET=play_password
+
+# Database Configuration LucyDOM
+DB_LUCYDOM_HOST=/home/_powerondb
+DB_LUCYDOM_DATABASE=lucydom
+DB_LUCYDOM_USER=dev_user
+DB_LUCYDOM_PASSWORD_SECRET=play_password
+
+# Security Configuration
+APP_JWT_SECRET_SECRET=dev_jwt_secret_token
+APP_TOKEN_EXPIRY=300
+
+# CORS Configuration
+APP_ALLOWED_ORIGINS=http://localhost:8080,http://localhost:3000
diff --git a/modules/lucydomInterface.py b/modules/lucydomInterface.py
index 7bc09110..50ae9fc1 100644
--- a/modules/lucydomInterface.py
+++ b/modules/lucydomInterface.py
@@ -128,7 +128,13 @@ class LucyDOMInterface:
"userId": effectiveUserId,
"content": "Develop a UI/UX design concept for [APPLICATION/WEBSITE]. Consider the target audience, main functions, and brand identity. Describe the visual design, navigation, interaction patterns, and information architecture. Explain how the design optimizes user-friendliness and user experience.",
"name": "Design: UI/UX Design"
- }
+ },
+ {
+ "mandateId": effectiveMandateId,
+ "userId": effectiveUserId,
+ "content": "Gib mir die ersten 1000 Primzahlen",
+ "name": "Code: Primzahlen"
+ }
]
# Create prompts
diff --git a/static/100_first_77_primes.txt b/static/100_first_77_primes.txt
new file mode 100644
index 00000000..87725e78
--- /dev/null
+++ b/static/100_first_77_primes.txt
@@ -0,0 +1,77 @@
+2
+3
+5
+7
+11
+13
+17
+19
+23
+29
+31
+37
+41
+43
+47
+53
+59
+61
+67
+71
+73
+79
+83
+89
+97
+101
+103
+107
+109
+113
+127
+131
+137
+139
+149
+151
+157
+163
+167
+173
+179
+181
+191
+193
+197
+199
+211
+223
+227
+229
+233
+239
+241
+251
+257
+263
+269
+271
+277
+281
+283
+293
+307
+311
+313
+317
+331
+337
+347
+349
+353
+359
+367
+373
+379
+383
+389
\ No newline at end of file
diff --git a/static/101_generated_code.py b/static/101_generated_code.py
new file mode 100644
index 00000000..c4ae0f1b
--- /dev/null
+++ b/static/101_generated_code.py
@@ -0,0 +1,38 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(count):
+ primes = []
+ num = 2
+ while len(primes) < count:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(123)
+prime_numbers_content = "\n".join(map(str, primes))
+
+result = {
+ "prime_numbers.txt": {
+ "content": prime_numbers_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+import json
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/102_execution_history.json b/static/102_execution_history.json
new file mode 100644
index 00000000..2336e057
--- /dev/null
+++ b/static/102_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(123)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "prime_numbers.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/103_generated_code.py b/static/103_generated_code.py
new file mode 100644
index 00000000..b17aa80e
--- /dev/null
+++ b/static/103_generated_code.py
@@ -0,0 +1,40 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+# REQUIREMENTS:
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(count):
+ primes = []
+ num = 2
+ while len(primes) < count:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(202)
+prime_numbers_content = "\n".join(map(str, primes))
+
+result = {
+ "prime_numbers.txt": {
+ "content": prime_numbers_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+import json
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/104_execution_history.json b/static/104_execution_history.json
new file mode 100644
index 00000000..d8d0cc2c
--- /dev/null
+++ b/static/104_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\n# REQUIREMENTS: \n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(202)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "prime_numbers.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/105_prime_numbers.txt b/static/105_prime_numbers.txt
new file mode 100644
index 00000000..75099161
--- /dev/null
+++ b/static/105_prime_numbers.txt
@@ -0,0 +1,202 @@
+2
+3
+5
+7
+11
+13
+17
+19
+23
+29
+31
+37
+41
+43
+47
+53
+59
+61
+67
+71
+73
+79
+83
+89
+97
+101
+103
+107
+109
+113
+127
+131
+137
+139
+149
+151
+157
+163
+167
+173
+179
+181
+191
+193
+197
+199
+211
+223
+227
+229
+233
+239
+241
+251
+257
+263
+269
+271
+277
+281
+283
+293
+307
+311
+313
+317
+331
+337
+347
+349
+353
+359
+367
+373
+379
+383
+389
+397
+401
+409
+419
+421
+431
+433
+439
+443
+449
+457
+461
+463
+467
+479
+487
+491
+499
+503
+509
+521
+523
+541
+547
+557
+563
+569
+571
+577
+587
+593
+599
+601
+607
+613
+617
+619
+631
+641
+643
+647
+653
+659
+661
+673
+677
+683
+691
+701
+709
+719
+727
+733
+739
+743
+751
+757
+761
+769
+773
+787
+797
+809
+811
+821
+823
+827
+829
+839
+853
+857
+859
+863
+877
+881
+883
+887
+907
+911
+919
+929
+937
+941
+947
+953
+967
+971
+977
+983
+991
+997
+1009
+1013
+1019
+1021
+1031
+1033
+1039
+1049
+1051
+1061
+1063
+1069
+1087
+1091
+1093
+1097
+1103
+1109
+1117
+1123
+1129
+1151
+1153
+1163
+1171
+1181
+1187
+1193
+1201
+1213
+1217
+1223
+1229
+1231
\ No newline at end of file
diff --git a/static/106_generated_code.py b/static/106_generated_code.py
new file mode 100644
index 00000000..44c125b7
--- /dev/null
+++ b/static/106_generated_code.py
@@ -0,0 +1,38 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(limit):
+ primes = []
+ num = 2
+ while len(primes) < limit:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(1000)
+primes_content = "\n".join(map(str, primes))
+
+result = {
+ "prime_numbers.txt": {
+ "content": primes_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+import json
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/107_execution_history.json b/static/107_execution_history.json
new file mode 100644
index 00000000..6ff5f3f6
--- /dev/null
+++ b/static/107_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6827\
\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\\n7919\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "prime_numbers.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907\n7919",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/108_generated_code.py b/static/108_generated_code.py
new file mode 100644
index 00000000..a0f1a176
--- /dev/null
+++ b/static/108_generated_code.py
@@ -0,0 +1,38 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(count):
+ primes = []
+ num = 2
+ while len(primes) < count:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(999)
+prime_numbers_content = "\n".join(map(str, primes))
+
+result = {
+ "prime_numbers.txt": {
+ "content": prime_numbers_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+import json
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/109_execution_history.json b/static/109_execution_history.json
new file mode 100644
index 00000000..e6816758
--- /dev/null
+++ b/static/109_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(999)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6827\
\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "prime_numbers.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/110_prime_numbers.txt b/static/110_prime_numbers.txt
new file mode 100644
index 00000000..345ba9b1
--- /dev/null
+++ b/static/110_prime_numbers.txt
@@ -0,0 +1,999 @@
+2
+3
+5
+7
+11
+13
+17
+19
+23
+29
+31
+37
+41
+43
+47
+53
+59
+61
+67
+71
+73
+79
+83
+89
+97
+101
+103
+107
+109
+113
+127
+131
+137
+139
+149
+151
+157
+163
+167
+173
+179
+181
+191
+193
+197
+199
+211
+223
+227
+229
+233
+239
+241
+251
+257
+263
+269
+271
+277
+281
+283
+293
+307
+311
+313
+317
+331
+337
+347
+349
+353
+359
+367
+373
+379
+383
+389
+397
+401
+409
+419
+421
+431
+433
+439
+443
+449
+457
+461
+463
+467
+479
+487
+491
+499
+503
+509
+521
+523
+541
+547
+557
+563
+569
+571
+577
+587
+593
+599
+601
+607
+613
+617
+619
+631
+641
+643
+647
+653
+659
+661
+673
+677
+683
+691
+701
+709
+719
+727
+733
+739
+743
+751
+757
+761
+769
+773
+787
+797
+809
+811
+821
+823
+827
+829
+839
+853
+857
+859
+863
+877
+881
+883
+887
+907
+911
+919
+929
+937
+941
+947
+953
+967
+971
+977
+983
+991
+997
+1009
+1013
+1019
+1021
+1031
+1033
+1039
+1049
+1051
+1061
+1063
+1069
+1087
+1091
+1093
+1097
+1103
+1109
+1117
+1123
+1129
+1151
+1153
+1163
+1171
+1181
+1187
+1193
+1201
+1213
+1217
+1223
+1229
+1231
+1237
+1249
+1259
+1277
+1279
+1283
+1289
+1291
+1297
+1301
+1303
+1307
+1319
+1321
+1327
+1361
+1367
+1373
+1381
+1399
+1409
+1423
+1427
+1429
+1433
+1439
+1447
+1451
+1453
+1459
+1471
+1481
+1483
+1487
+1489
+1493
+1499
+1511
+1523
+1531
+1543
+1549
+1553
+1559
+1567
+1571
+1579
+1583
+1597
+1601
+1607
+1609
+1613
+1619
+1621
+1627
+1637
+1657
+1663
+1667
+1669
+1693
+1697
+1699
+1709
+1721
+1723
+1733
+1741
+1747
+1753
+1759
+1777
+1783
+1787
+1789
+1801
+1811
+1823
+1831
+1847
+1861
+1867
+1871
+1873
+1877
+1879
+1889
+1901
+1907
+1913
+1931
+1933
+1949
+1951
+1973
+1979
+1987
+1993
+1997
+1999
+2003
+2011
+2017
+2027
+2029
+2039
+2053
+2063
+2069
+2081
+2083
+2087
+2089
+2099
+2111
+2113
+2129
+2131
+2137
+2141
+2143
+2153
+2161
+2179
+2203
+2207
+2213
+2221
+2237
+2239
+2243
+2251
+2267
+2269
+2273
+2281
+2287
+2293
+2297
+2309
+2311
+2333
+2339
+2341
+2347
+2351
+2357
+2371
+2377
+2381
+2383
+2389
+2393
+2399
+2411
+2417
+2423
+2437
+2441
+2447
+2459
+2467
+2473
+2477
+2503
+2521
+2531
+2539
+2543
+2549
+2551
+2557
+2579
+2591
+2593
+2609
+2617
+2621
+2633
+2647
+2657
+2659
+2663
+2671
+2677
+2683
+2687
+2689
+2693
+2699
+2707
+2711
+2713
+2719
+2729
+2731
+2741
+2749
+2753
+2767
+2777
+2789
+2791
+2797
+2801
+2803
+2819
+2833
+2837
+2843
+2851
+2857
+2861
+2879
+2887
+2897
+2903
+2909
+2917
+2927
+2939
+2953
+2957
+2963
+2969
+2971
+2999
+3001
+3011
+3019
+3023
+3037
+3041
+3049
+3061
+3067
+3079
+3083
+3089
+3109
+3119
+3121
+3137
+3163
+3167
+3169
+3181
+3187
+3191
+3203
+3209
+3217
+3221
+3229
+3251
+3253
+3257
+3259
+3271
+3299
+3301
+3307
+3313
+3319
+3323
+3329
+3331
+3343
+3347
+3359
+3361
+3371
+3373
+3389
+3391
+3407
+3413
+3433
+3449
+3457
+3461
+3463
+3467
+3469
+3491
+3499
+3511
+3517
+3527
+3529
+3533
+3539
+3541
+3547
+3557
+3559
+3571
+3581
+3583
+3593
+3607
+3613
+3617
+3623
+3631
+3637
+3643
+3659
+3671
+3673
+3677
+3691
+3697
+3701
+3709
+3719
+3727
+3733
+3739
+3761
+3767
+3769
+3779
+3793
+3797
+3803
+3821
+3823
+3833
+3847
+3851
+3853
+3863
+3877
+3881
+3889
+3907
+3911
+3917
+3919
+3923
+3929
+3931
+3943
+3947
+3967
+3989
+4001
+4003
+4007
+4013
+4019
+4021
+4027
+4049
+4051
+4057
+4073
+4079
+4091
+4093
+4099
+4111
+4127
+4129
+4133
+4139
+4153
+4157
+4159
+4177
+4201
+4211
+4217
+4219
+4229
+4231
+4241
+4243
+4253
+4259
+4261
+4271
+4273
+4283
+4289
+4297
+4327
+4337
+4339
+4349
+4357
+4363
+4373
+4391
+4397
+4409
+4421
+4423
+4441
+4447
+4451
+4457
+4463
+4481
+4483
+4493
+4507
+4513
+4517
+4519
+4523
+4547
+4549
+4561
+4567
+4583
+4591
+4597
+4603
+4621
+4637
+4639
+4643
+4649
+4651
+4657
+4663
+4673
+4679
+4691
+4703
+4721
+4723
+4729
+4733
+4751
+4759
+4783
+4787
+4789
+4793
+4799
+4801
+4813
+4817
+4831
+4861
+4871
+4877
+4889
+4903
+4909
+4919
+4931
+4933
+4937
+4943
+4951
+4957
+4967
+4969
+4973
+4987
+4993
+4999
+5003
+5009
+5011
+5021
+5023
+5039
+5051
+5059
+5077
+5081
+5087
+5099
+5101
+5107
+5113
+5119
+5147
+5153
+5167
+5171
+5179
+5189
+5197
+5209
+5227
+5231
+5233
+5237
+5261
+5273
+5279
+5281
+5297
+5303
+5309
+5323
+5333
+5347
+5351
+5381
+5387
+5393
+5399
+5407
+5413
+5417
+5419
+5431
+5437
+5441
+5443
+5449
+5471
+5477
+5479
+5483
+5501
+5503
+5507
+5519
+5521
+5527
+5531
+5557
+5563
+5569
+5573
+5581
+5591
+5623
+5639
+5641
+5647
+5651
+5653
+5657
+5659
+5669
+5683
+5689
+5693
+5701
+5711
+5717
+5737
+5741
+5743
+5749
+5779
+5783
+5791
+5801
+5807
+5813
+5821
+5827
+5839
+5843
+5849
+5851
+5857
+5861
+5867
+5869
+5879
+5881
+5897
+5903
+5923
+5927
+5939
+5953
+5981
+5987
+6007
+6011
+6029
+6037
+6043
+6047
+6053
+6067
+6073
+6079
+6089
+6091
+6101
+6113
+6121
+6131
+6133
+6143
+6151
+6163
+6173
+6197
+6199
+6203
+6211
+6217
+6221
+6229
+6247
+6257
+6263
+6269
+6271
+6277
+6287
+6299
+6301
+6311
+6317
+6323
+6329
+6337
+6343
+6353
+6359
+6361
+6367
+6373
+6379
+6389
+6397
+6421
+6427
+6449
+6451
+6469
+6473
+6481
+6491
+6521
+6529
+6547
+6551
+6553
+6563
+6569
+6571
+6577
+6581
+6599
+6607
+6619
+6637
+6653
+6659
+6661
+6673
+6679
+6689
+6691
+6701
+6703
+6709
+6719
+6733
+6737
+6761
+6763
+6779
+6781
+6791
+6793
+6803
+6823
+6827
+6829
+6833
+6841
+6857
+6863
+6869
+6871
+6883
+6899
+6907
+6911
+6917
+6947
+6949
+6959
+6961
+6967
+6971
+6977
+6983
+6991
+6997
+7001
+7013
+7019
+7027
+7039
+7043
+7057
+7069
+7079
+7103
+7109
+7121
+7127
+7129
+7151
+7159
+7177
+7187
+7193
+7207
+7211
+7213
+7219
+7229
+7237
+7243
+7247
+7253
+7283
+7297
+7307
+7309
+7321
+7331
+7333
+7349
+7351
+7369
+7393
+7411
+7417
+7433
+7451
+7457
+7459
+7477
+7481
+7487
+7489
+7499
+7507
+7517
+7523
+7529
+7537
+7541
+7547
+7549
+7559
+7561
+7573
+7577
+7583
+7589
+7591
+7603
+7607
+7621
+7639
+7643
+7649
+7669
+7673
+7681
+7687
+7691
+7699
+7703
+7717
+7723
+7727
+7741
+7753
+7757
+7759
+7789
+7793
+7817
+7823
+7829
+7841
+7853
+7867
+7873
+7877
+7879
+7883
+7901
+7907
\ No newline at end of file
diff --git a/static/111_generated_code.py b/static/111_generated_code.py
new file mode 100644
index 00000000..d1bf4973
--- /dev/null
+++ b/static/111_generated_code.py
@@ -0,0 +1,39 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+import json
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(count):
+ primes = []
+ num = 2
+ while len(primes) < count:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(779)
+prime_numbers_content = "\n".join(map(str, primes))
+
+result = {
+ "prime_numbers.txt": {
+ "content": prime_numbers_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/112_execution_history.json b/static/112_execution_history.json
new file mode 100644
index 00000000..c419a1c2
--- /dev/null
+++ b/static/112_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\nimport json\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(count):\n primes = []\n num = 2\n while len(primes) < count:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(779)\nprime_numbers_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"prime_numbers.txt\": {\n \"content\": prime_numbers_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"prime_numbers.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\\n20
27\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n4337\\n
4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "prime_numbers.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/113_prime_numbers.txt b/static/113_prime_numbers.txt
new file mode 100644
index 00000000..938f366b
--- /dev/null
+++ b/static/113_prime_numbers.txt
@@ -0,0 +1,779 @@
+2
+3
+5
+7
+11
+13
+17
+19
+23
+29
+31
+37
+41
+43
+47
+53
+59
+61
+67
+71
+73
+79
+83
+89
+97
+101
+103
+107
+109
+113
+127
+131
+137
+139
+149
+151
+157
+163
+167
+173
+179
+181
+191
+193
+197
+199
+211
+223
+227
+229
+233
+239
+241
+251
+257
+263
+269
+271
+277
+281
+283
+293
+307
+311
+313
+317
+331
+337
+347
+349
+353
+359
+367
+373
+379
+383
+389
+397
+401
+409
+419
+421
+431
+433
+439
+443
+449
+457
+461
+463
+467
+479
+487
+491
+499
+503
+509
+521
+523
+541
+547
+557
+563
+569
+571
+577
+587
+593
+599
+601
+607
+613
+617
+619
+631
+641
+643
+647
+653
+659
+661
+673
+677
+683
+691
+701
+709
+719
+727
+733
+739
+743
+751
+757
+761
+769
+773
+787
+797
+809
+811
+821
+823
+827
+829
+839
+853
+857
+859
+863
+877
+881
+883
+887
+907
+911
+919
+929
+937
+941
+947
+953
+967
+971
+977
+983
+991
+997
+1009
+1013
+1019
+1021
+1031
+1033
+1039
+1049
+1051
+1061
+1063
+1069
+1087
+1091
+1093
+1097
+1103
+1109
+1117
+1123
+1129
+1151
+1153
+1163
+1171
+1181
+1187
+1193
+1201
+1213
+1217
+1223
+1229
+1231
+1237
+1249
+1259
+1277
+1279
+1283
+1289
+1291
+1297
+1301
+1303
+1307
+1319
+1321
+1327
+1361
+1367
+1373
+1381
+1399
+1409
+1423
+1427
+1429
+1433
+1439
+1447
+1451
+1453
+1459
+1471
+1481
+1483
+1487
+1489
+1493
+1499
+1511
+1523
+1531
+1543
+1549
+1553
+1559
+1567
+1571
+1579
+1583
+1597
+1601
+1607
+1609
+1613
+1619
+1621
+1627
+1637
+1657
+1663
+1667
+1669
+1693
+1697
+1699
+1709
+1721
+1723
+1733
+1741
+1747
+1753
+1759
+1777
+1783
+1787
+1789
+1801
+1811
+1823
+1831
+1847
+1861
+1867
+1871
+1873
+1877
+1879
+1889
+1901
+1907
+1913
+1931
+1933
+1949
+1951
+1973
+1979
+1987
+1993
+1997
+1999
+2003
+2011
+2017
+2027
+2029
+2039
+2053
+2063
+2069
+2081
+2083
+2087
+2089
+2099
+2111
+2113
+2129
+2131
+2137
+2141
+2143
+2153
+2161
+2179
+2203
+2207
+2213
+2221
+2237
+2239
+2243
+2251
+2267
+2269
+2273
+2281
+2287
+2293
+2297
+2309
+2311
+2333
+2339
+2341
+2347
+2351
+2357
+2371
+2377
+2381
+2383
+2389
+2393
+2399
+2411
+2417
+2423
+2437
+2441
+2447
+2459
+2467
+2473
+2477
+2503
+2521
+2531
+2539
+2543
+2549
+2551
+2557
+2579
+2591
+2593
+2609
+2617
+2621
+2633
+2647
+2657
+2659
+2663
+2671
+2677
+2683
+2687
+2689
+2693
+2699
+2707
+2711
+2713
+2719
+2729
+2731
+2741
+2749
+2753
+2767
+2777
+2789
+2791
+2797
+2801
+2803
+2819
+2833
+2837
+2843
+2851
+2857
+2861
+2879
+2887
+2897
+2903
+2909
+2917
+2927
+2939
+2953
+2957
+2963
+2969
+2971
+2999
+3001
+3011
+3019
+3023
+3037
+3041
+3049
+3061
+3067
+3079
+3083
+3089
+3109
+3119
+3121
+3137
+3163
+3167
+3169
+3181
+3187
+3191
+3203
+3209
+3217
+3221
+3229
+3251
+3253
+3257
+3259
+3271
+3299
+3301
+3307
+3313
+3319
+3323
+3329
+3331
+3343
+3347
+3359
+3361
+3371
+3373
+3389
+3391
+3407
+3413
+3433
+3449
+3457
+3461
+3463
+3467
+3469
+3491
+3499
+3511
+3517
+3527
+3529
+3533
+3539
+3541
+3547
+3557
+3559
+3571
+3581
+3583
+3593
+3607
+3613
+3617
+3623
+3631
+3637
+3643
+3659
+3671
+3673
+3677
+3691
+3697
+3701
+3709
+3719
+3727
+3733
+3739
+3761
+3767
+3769
+3779
+3793
+3797
+3803
+3821
+3823
+3833
+3847
+3851
+3853
+3863
+3877
+3881
+3889
+3907
+3911
+3917
+3919
+3923
+3929
+3931
+3943
+3947
+3967
+3989
+4001
+4003
+4007
+4013
+4019
+4021
+4027
+4049
+4051
+4057
+4073
+4079
+4091
+4093
+4099
+4111
+4127
+4129
+4133
+4139
+4153
+4157
+4159
+4177
+4201
+4211
+4217
+4219
+4229
+4231
+4241
+4243
+4253
+4259
+4261
+4271
+4273
+4283
+4289
+4297
+4327
+4337
+4339
+4349
+4357
+4363
+4373
+4391
+4397
+4409
+4421
+4423
+4441
+4447
+4451
+4457
+4463
+4481
+4483
+4493
+4507
+4513
+4517
+4519
+4523
+4547
+4549
+4561
+4567
+4583
+4591
+4597
+4603
+4621
+4637
+4639
+4643
+4649
+4651
+4657
+4663
+4673
+4679
+4691
+4703
+4721
+4723
+4729
+4733
+4751
+4759
+4783
+4787
+4789
+4793
+4799
+4801
+4813
+4817
+4831
+4861
+4871
+4877
+4889
+4903
+4909
+4919
+4931
+4933
+4937
+4943
+4951
+4957
+4967
+4969
+4973
+4987
+4993
+4999
+5003
+5009
+5011
+5021
+5023
+5039
+5051
+5059
+5077
+5081
+5087
+5099
+5101
+5107
+5113
+5119
+5147
+5153
+5167
+5171
+5179
+5189
+5197
+5209
+5227
+5231
+5233
+5237
+5261
+5273
+5279
+5281
+5297
+5303
+5309
+5323
+5333
+5347
+5351
+5381
+5387
+5393
+5399
+5407
+5413
+5417
+5419
+5431
+5437
+5441
+5443
+5449
+5471
+5477
+5479
+5483
+5501
+5503
+5507
+5519
+5521
+5527
+5531
+5557
+5563
+5569
+5573
+5581
+5591
+5623
+5639
+5641
+5647
+5651
+5653
+5657
+5659
+5669
+5683
+5689
+5693
+5701
+5711
+5717
+5737
+5741
+5743
+5749
+5779
+5783
+5791
+5801
+5807
+5813
+5821
+5827
+5839
+5843
+5849
+5851
+5857
+5861
+5867
+5869
+5879
+5881
+5897
+5903
+5923
+5927
\ No newline at end of file
diff --git a/static/114_generated_code.py b/static/114_generated_code.py
new file mode 100644
index 00000000..173154fc
--- /dev/null
+++ b/static/114_generated_code.py
@@ -0,0 +1,38 @@
+inputFiles = [] # DO NOT CHANGE THIS LINE
+
+def is_prime(n):
+ if n <= 1:
+ return False
+ if n <= 3:
+ return True
+ if n % 2 == 0 or n % 3 == 0:
+ return False
+ i = 5
+ while i * i <= n:
+ if n % i == 0 or n % (i + 2) == 0:
+ return False
+ i += 6
+ return True
+
+def generate_primes(limit):
+ primes = []
+ num = 2
+ while len(primes) < limit:
+ if is_prime(num):
+ primes.append(num)
+ num += 1
+ return primes
+
+primes = generate_primes(1000)
+primes_content = "\n".join(map(str, primes))
+
+result = {
+ "first_1000_primes.txt": {
+ "content": primes_content,
+ "base64Encoded": False,
+ "contentType": "text/plain"
+ }
+}
+
+import json
+print(json.dumps(result))
\ No newline at end of file
diff --git a/static/115_execution_history.json b/static/115_execution_history.json
new file mode 100644
index 00000000..9795dcc0
--- /dev/null
+++ b/static/115_execution_history.json
@@ -0,0 +1,19 @@
+[
+ {
+ "attempt": 1,
+ "code": "inputFiles = [] # DO NOT CHANGE THIS LINE\n\ndef is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef generate_primes(limit):\n primes = []\n num = 2\n while len(primes) < limit:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes\n\nprimes = generate_primes(1000)\nprimes_content = \"\\n\".join(map(str, primes))\n\nresult = {\n \"first_1000_primes.txt\": {\n \"content\": primes_content,\n \"base64Encoded\": False,\n \"contentType\": \"text/plain\"\n }\n}\n\nimport json\nprint(json.dumps(result))",
+ "result": {
+ "success": true,
+ "output": "{\"first_1000_primes.txt\": {\"content\": \"2\\n3\\n5\\n7\\n11\\n13\\n17\\n19\\n23\\n29\\n31\\n37\\n41\\n43\\n47\\n53\\n59\\n61\\n67\\n71\\n73\\n79\\n83\\n89\\n97\\n101\\n103\\n107\\n109\\n113\\n127\\n131\\n137\\n139\\n149\\n151\\n157\\n163\\n167\\n173\\n179\\n181\\n191\\n193\\n197\\n199\\n211\\n223\\n227\\n229\\n233\\n239\\n241\\n251\\n257\\n263\\n269\\n271\\n277\\n281\\n283\\n293\\n307\\n311\\n313\\n317\\n331\\n337\\n347\\n349\\n353\\n359\\n367\\n373\\n379\\n383\\n389\\n397\\n401\\n409\\n419\\n421\\n431\\n433\\n439\\n443\\n449\\n457\\n461\\n463\\n467\\n479\\n487\\n491\\n499\\n503\\n509\\n521\\n523\\n541\\n547\\n557\\n563\\n569\\n571\\n577\\n587\\n593\\n599\\n601\\n607\\n613\\n617\\n619\\n631\\n641\\n643\\n647\\n653\\n659\\n661\\n673\\n677\\n683\\n691\\n701\\n709\\n719\\n727\\n733\\n739\\n743\\n751\\n757\\n761\\n769\\n773\\n787\\n797\\n809\\n811\\n821\\n823\\n827\\n829\\n839\\n853\\n857\\n859\\n863\\n877\\n881\\n883\\n887\\n907\\n911\\n919\\n929\\n937\\n941\\n947\\n953\\n967\\n971\\n977\\n983\\n991\\n997\\n1009\\n1013\\n1019\\n1021\\n1031\\n1033\\n1039\\n1049\\n1051\\n1061\\n1063\\n1069\\n1087\\n1091\\n1093\\n1097\\n1103\\n1109\\n1117\\n1123\\n1129\\n1151\\n1153\\n1163\\n1171\\n1181\\n1187\\n1193\\n1201\\n1213\\n1217\\n1223\\n1229\\n1231\\n1237\\n1249\\n1259\\n1277\\n1279\\n1283\\n1289\\n1291\\n1297\\n1301\\n1303\\n1307\\n1319\\n1321\\n1327\\n1361\\n1367\\n1373\\n1381\\n1399\\n1409\\n1423\\n1427\\n1429\\n1433\\n1439\\n1447\\n1451\\n1453\\n1459\\n1471\\n1481\\n1483\\n1487\\n1489\\n1493\\n1499\\n1511\\n1523\\n1531\\n1543\\n1549\\n1553\\n1559\\n1567\\n1571\\n1579\\n1583\\n1597\\n1601\\n1607\\n1609\\n1613\\n1619\\n1621\\n1627\\n1637\\n1657\\n1663\\n1667\\n1669\\n1693\\n1697\\n1699\\n1709\\n1721\\n1723\\n1733\\n1741\\n1747\\n1753\\n1759\\n1777\\n1783\\n1787\\n1789\\n1801\\n1811\\n1823\\n1831\\n1847\\n1861\\n1867\\n1871\\n1873\\n1877\\n1879\\n1889\\n1901\\n1907\\n1913\\n1931\\n1933\\n1949\\n1951\\n1973\\n1979\\n1987\\n1993\\n1997\\n1999\\n2003\\n2011\\n2017\
\n2027\\n2029\\n2039\\n2053\\n2063\\n2069\\n2081\\n2083\\n2087\\n2089\\n2099\\n2111\\n2113\\n2129\\n2131\\n2137\\n2141\\n2143\\n2153\\n2161\\n2179\\n2203\\n2207\\n2213\\n2221\\n2237\\n2239\\n2243\\n2251\\n2267\\n2269\\n2273\\n2281\\n2287\\n2293\\n2297\\n2309\\n2311\\n2333\\n2339\\n2341\\n2347\\n2351\\n2357\\n2371\\n2377\\n2381\\n2383\\n2389\\n2393\\n2399\\n2411\\n2417\\n2423\\n2437\\n2441\\n2447\\n2459\\n2467\\n2473\\n2477\\n2503\\n2521\\n2531\\n2539\\n2543\\n2549\\n2551\\n2557\\n2579\\n2591\\n2593\\n2609\\n2617\\n2621\\n2633\\n2647\\n2657\\n2659\\n2663\\n2671\\n2677\\n2683\\n2687\\n2689\\n2693\\n2699\\n2707\\n2711\\n2713\\n2719\\n2729\\n2731\\n2741\\n2749\\n2753\\n2767\\n2777\\n2789\\n2791\\n2797\\n2801\\n2803\\n2819\\n2833\\n2837\\n2843\\n2851\\n2857\\n2861\\n2879\\n2887\\n2897\\n2903\\n2909\\n2917\\n2927\\n2939\\n2953\\n2957\\n2963\\n2969\\n2971\\n2999\\n3001\\n3011\\n3019\\n3023\\n3037\\n3041\\n3049\\n3061\\n3067\\n3079\\n3083\\n3089\\n3109\\n3119\\n3121\\n3137\\n3163\\n3167\\n3169\\n3181\\n3187\\n3191\\n3203\\n3209\\n3217\\n3221\\n3229\\n3251\\n3253\\n3257\\n3259\\n3271\\n3299\\n3301\\n3307\\n3313\\n3319\\n3323\\n3329\\n3331\\n3343\\n3347\\n3359\\n3361\\n3371\\n3373\\n3389\\n3391\\n3407\\n3413\\n3433\\n3449\\n3457\\n3461\\n3463\\n3467\\n3469\\n3491\\n3499\\n3511\\n3517\\n3527\\n3529\\n3533\\n3539\\n3541\\n3547\\n3557\\n3559\\n3571\\n3581\\n3583\\n3593\\n3607\\n3613\\n3617\\n3623\\n3631\\n3637\\n3643\\n3659\\n3671\\n3673\\n3677\\n3691\\n3697\\n3701\\n3709\\n3719\\n3727\\n3733\\n3739\\n3761\\n3767\\n3769\\n3779\\n3793\\n3797\\n3803\\n3821\\n3823\\n3833\\n3847\\n3851\\n3853\\n3863\\n3877\\n3881\\n3889\\n3907\\n3911\\n3917\\n3919\\n3923\\n3929\\n3931\\n3943\\n3947\\n3967\\n3989\\n4001\\n4003\\n4007\\n4013\\n4019\\n4021\\n4027\\n4049\\n4051\\n4057\\n4073\\n4079\\n4091\\n4093\\n4099\\n4111\\n4127\\n4129\\n4133\\n4139\\n4153\\n4157\\n4159\\n4177\\n4201\\n4211\\n4217\\n4219\\n4229\\n4231\\n4241\\n4243\\n4253\\n4259\\n4261\\n4271\\n4273\\n4283\\n4289\\n4297\\n4327\\n433
7\\n4339\\n4349\\n4357\\n4363\\n4373\\n4391\\n4397\\n4409\\n4421\\n4423\\n4441\\n4447\\n4451\\n4457\\n4463\\n4481\\n4483\\n4493\\n4507\\n4513\\n4517\\n4519\\n4523\\n4547\\n4549\\n4561\\n4567\\n4583\\n4591\\n4597\\n4603\\n4621\\n4637\\n4639\\n4643\\n4649\\n4651\\n4657\\n4663\\n4673\\n4679\\n4691\\n4703\\n4721\\n4723\\n4729\\n4733\\n4751\\n4759\\n4783\\n4787\\n4789\\n4793\\n4799\\n4801\\n4813\\n4817\\n4831\\n4861\\n4871\\n4877\\n4889\\n4903\\n4909\\n4919\\n4931\\n4933\\n4937\\n4943\\n4951\\n4957\\n4967\\n4969\\n4973\\n4987\\n4993\\n4999\\n5003\\n5009\\n5011\\n5021\\n5023\\n5039\\n5051\\n5059\\n5077\\n5081\\n5087\\n5099\\n5101\\n5107\\n5113\\n5119\\n5147\\n5153\\n5167\\n5171\\n5179\\n5189\\n5197\\n5209\\n5227\\n5231\\n5233\\n5237\\n5261\\n5273\\n5279\\n5281\\n5297\\n5303\\n5309\\n5323\\n5333\\n5347\\n5351\\n5381\\n5387\\n5393\\n5399\\n5407\\n5413\\n5417\\n5419\\n5431\\n5437\\n5441\\n5443\\n5449\\n5471\\n5477\\n5479\\n5483\\n5501\\n5503\\n5507\\n5519\\n5521\\n5527\\n5531\\n5557\\n5563\\n5569\\n5573\\n5581\\n5591\\n5623\\n5639\\n5641\\n5647\\n5651\\n5653\\n5657\\n5659\\n5669\\n5683\\n5689\\n5693\\n5701\\n5711\\n5717\\n5737\\n5741\\n5743\\n5749\\n5779\\n5783\\n5791\\n5801\\n5807\\n5813\\n5821\\n5827\\n5839\\n5843\\n5849\\n5851\\n5857\\n5861\\n5867\\n5869\\n5879\\n5881\\n5897\\n5903\\n5923\\n5927\\n5939\\n5953\\n5981\\n5987\\n6007\\n6011\\n6029\\n6037\\n6043\\n6047\\n6053\\n6067\\n6073\\n6079\\n6089\\n6091\\n6101\\n6113\\n6121\\n6131\\n6133\\n6143\\n6151\\n6163\\n6173\\n6197\\n6199\\n6203\\n6211\\n6217\\n6221\\n6229\\n6247\\n6257\\n6263\\n6269\\n6271\\n6277\\n6287\\n6299\\n6301\\n6311\\n6317\\n6323\\n6329\\n6337\\n6343\\n6353\\n6359\\n6361\\n6367\\n6373\\n6379\\n6389\\n6397\\n6421\\n6427\\n6449\\n6451\\n6469\\n6473\\n6481\\n6491\\n6521\\n6529\\n6547\\n6551\\n6553\\n6563\\n6569\\n6571\\n6577\\n6581\\n6599\\n6607\\n6619\\n6637\\n6653\\n6659\\n6661\\n6673\\n6679\\n6689\\n6691\\n6701\\n6703\\n6709\\n6719\\n6733\\n6737\\n6761\\n6763\\n6779\\n6781\\n6791\\n6793\\n6803\\n6823\\n6
827\\n6829\\n6833\\n6841\\n6857\\n6863\\n6869\\n6871\\n6883\\n6899\\n6907\\n6911\\n6917\\n6947\\n6949\\n6959\\n6961\\n6967\\n6971\\n6977\\n6983\\n6991\\n6997\\n7001\\n7013\\n7019\\n7027\\n7039\\n7043\\n7057\\n7069\\n7079\\n7103\\n7109\\n7121\\n7127\\n7129\\n7151\\n7159\\n7177\\n7187\\n7193\\n7207\\n7211\\n7213\\n7219\\n7229\\n7237\\n7243\\n7247\\n7253\\n7283\\n7297\\n7307\\n7309\\n7321\\n7331\\n7333\\n7349\\n7351\\n7369\\n7393\\n7411\\n7417\\n7433\\n7451\\n7457\\n7459\\n7477\\n7481\\n7487\\n7489\\n7499\\n7507\\n7517\\n7523\\n7529\\n7537\\n7541\\n7547\\n7549\\n7559\\n7561\\n7573\\n7577\\n7583\\n7589\\n7591\\n7603\\n7607\\n7621\\n7639\\n7643\\n7649\\n7669\\n7673\\n7681\\n7687\\n7691\\n7699\\n7703\\n7717\\n7723\\n7727\\n7741\\n7753\\n7757\\n7759\\n7789\\n7793\\n7817\\n7823\\n7829\\n7841\\n7853\\n7867\\n7873\\n7877\\n7879\\n7883\\n7901\\n7907\\n7919\", \"base64Encoded\": false, \"contentType\": \"text/plain\"}}\n",
+ "error": "",
+ "result": {
+ "first_1000_primes.txt": {
+ "content": "2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n211\n223\n227\n229\n233\n239\n241\n251\n257\n263\n269\n271\n277\n281\n283\n293\n307\n311\n313\n317\n331\n337\n347\n349\n353\n359\n367\n373\n379\n383\n389\n397\n401\n409\n419\n421\n431\n433\n439\n443\n449\n457\n461\n463\n467\n479\n487\n491\n499\n503\n509\n521\n523\n541\n547\n557\n563\n569\n571\n577\n587\n593\n599\n601\n607\n613\n617\n619\n631\n641\n643\n647\n653\n659\n661\n673\n677\n683\n691\n701\n709\n719\n727\n733\n739\n743\n751\n757\n761\n769\n773\n787\n797\n809\n811\n821\n823\n827\n829\n839\n853\n857\n859\n863\n877\n881\n883\n887\n907\n911\n919\n929\n937\n941\n947\n953\n967\n971\n977\n983\n991\n997\n1009\n1013\n1019\n1021\n1031\n1033\n1039\n1049\n1051\n1061\n1063\n1069\n1087\n1091\n1093\n1097\n1103\n1109\n1117\n1123\n1129\n1151\n1153\n1163\n1171\n1181\n1187\n1193\n1201\n1213\n1217\n1223\n1229\n1231\n1237\n1249\n1259\n1277\n1279\n1283\n1289\n1291\n1297\n1301\n1303\n1307\n1319\n1321\n1327\n1361\n1367\n1373\n1381\n1399\n1409\n1423\n1427\n1429\n1433\n1439\n1447\n1451\n1453\n1459\n1471\n1481\n1483\n1487\n1489\n1493\n1499\n1511\n1523\n1531\n1543\n1549\n1553\n1559\n1567\n1571\n1579\n1583\n1597\n1601\n1607\n1609\n1613\n1619\n1621\n1627\n1637\n1657\n1663\n1667\n1669\n1693\n1697\n1699\n1709\n1721\n1723\n1733\n1741\n1747\n1753\n1759\n1777\n1783\n1787\n1789\n1801\n1811\n1823\n1831\n1847\n1861\n1867\n1871\n1873\n1877\n1879\n1889\n1901\n1907\n1913\n1931\n1933\n1949\n1951\n1973\n1979\n1987\n1993\n1997\n1999\n2003\n2011\n2017\n2027\n2029\n2039\n2053\n2063\n2069\n2081\n2083\n2087\n2089\n2099\n2111\n2113\n2129\n2131\n2137\n2141\n2143\n2153\n2161\n2179\n2203\n2207\n2213\n2221\n2237\n2239\n2243\n2251\n2267\n2269\n2273\n2281\n2287\n2293\n2297\n2309\n2311\n2333\n2339\n2341\n2347\n2351\n2357\n2371\n2377\n2381\n2383\n2389\n2393\n2399\n2411\n2417\n2423\n2437\n2441\n2447\n2459\
n2467\n2473\n2477\n2503\n2521\n2531\n2539\n2543\n2549\n2551\n2557\n2579\n2591\n2593\n2609\n2617\n2621\n2633\n2647\n2657\n2659\n2663\n2671\n2677\n2683\n2687\n2689\n2693\n2699\n2707\n2711\n2713\n2719\n2729\n2731\n2741\n2749\n2753\n2767\n2777\n2789\n2791\n2797\n2801\n2803\n2819\n2833\n2837\n2843\n2851\n2857\n2861\n2879\n2887\n2897\n2903\n2909\n2917\n2927\n2939\n2953\n2957\n2963\n2969\n2971\n2999\n3001\n3011\n3019\n3023\n3037\n3041\n3049\n3061\n3067\n3079\n3083\n3089\n3109\n3119\n3121\n3137\n3163\n3167\n3169\n3181\n3187\n3191\n3203\n3209\n3217\n3221\n3229\n3251\n3253\n3257\n3259\n3271\n3299\n3301\n3307\n3313\n3319\n3323\n3329\n3331\n3343\n3347\n3359\n3361\n3371\n3373\n3389\n3391\n3407\n3413\n3433\n3449\n3457\n3461\n3463\n3467\n3469\n3491\n3499\n3511\n3517\n3527\n3529\n3533\n3539\n3541\n3547\n3557\n3559\n3571\n3581\n3583\n3593\n3607\n3613\n3617\n3623\n3631\n3637\n3643\n3659\n3671\n3673\n3677\n3691\n3697\n3701\n3709\n3719\n3727\n3733\n3739\n3761\n3767\n3769\n3779\n3793\n3797\n3803\n3821\n3823\n3833\n3847\n3851\n3853\n3863\n3877\n3881\n3889\n3907\n3911\n3917\n3919\n3923\n3929\n3931\n3943\n3947\n3967\n3989\n4001\n4003\n4007\n4013\n4019\n4021\n4027\n4049\n4051\n4057\n4073\n4079\n4091\n4093\n4099\n4111\n4127\n4129\n4133\n4139\n4153\n4157\n4159\n4177\n4201\n4211\n4217\n4219\n4229\n4231\n4241\n4243\n4253\n4259\n4261\n4271\n4273\n4283\n4289\n4297\n4327\n4337\n4339\n4349\n4357\n4363\n4373\n4391\n4397\n4409\n4421\n4423\n4441\n4447\n4451\n4457\n4463\n4481\n4483\n4493\n4507\n4513\n4517\n4519\n4523\n4547\n4549\n4561\n4567\n4583\n4591\n4597\n4603\n4621\n4637\n4639\n4643\n4649\n4651\n4657\n4663\n4673\n4679\n4691\n4703\n4721\n4723\n4729\n4733\n4751\n4759\n4783\n4787\n4789\n4793\n4799\n4801\n4813\n4817\n4831\n4861\n4871\n4877\n4889\n4903\n4909\n4919\n4931\n4933\n4937\n4943\n4951\n4957\n4967\n4969\n4973\n4987\n4993\n4999\n5003\n5009\n5011\n5021\n5023\n5039\n5051\n5059\n5077\n5081\n5087\n5099\n5101\n5107\n5113\n5119\n5147\n5153\n5167\n5171\n5179\n5189\n5197\n5209\n5227\n5231\n5233\n5237\n5
261\n5273\n5279\n5281\n5297\n5303\n5309\n5323\n5333\n5347\n5351\n5381\n5387\n5393\n5399\n5407\n5413\n5417\n5419\n5431\n5437\n5441\n5443\n5449\n5471\n5477\n5479\n5483\n5501\n5503\n5507\n5519\n5521\n5527\n5531\n5557\n5563\n5569\n5573\n5581\n5591\n5623\n5639\n5641\n5647\n5651\n5653\n5657\n5659\n5669\n5683\n5689\n5693\n5701\n5711\n5717\n5737\n5741\n5743\n5749\n5779\n5783\n5791\n5801\n5807\n5813\n5821\n5827\n5839\n5843\n5849\n5851\n5857\n5861\n5867\n5869\n5879\n5881\n5897\n5903\n5923\n5927\n5939\n5953\n5981\n5987\n6007\n6011\n6029\n6037\n6043\n6047\n6053\n6067\n6073\n6079\n6089\n6091\n6101\n6113\n6121\n6131\n6133\n6143\n6151\n6163\n6173\n6197\n6199\n6203\n6211\n6217\n6221\n6229\n6247\n6257\n6263\n6269\n6271\n6277\n6287\n6299\n6301\n6311\n6317\n6323\n6329\n6337\n6343\n6353\n6359\n6361\n6367\n6373\n6379\n6389\n6397\n6421\n6427\n6449\n6451\n6469\n6473\n6481\n6491\n6521\n6529\n6547\n6551\n6553\n6563\n6569\n6571\n6577\n6581\n6599\n6607\n6619\n6637\n6653\n6659\n6661\n6673\n6679\n6689\n6691\n6701\n6703\n6709\n6719\n6733\n6737\n6761\n6763\n6779\n6781\n6791\n6793\n6803\n6823\n6827\n6829\n6833\n6841\n6857\n6863\n6869\n6871\n6883\n6899\n6907\n6911\n6917\n6947\n6949\n6959\n6961\n6967\n6971\n6977\n6983\n6991\n6997\n7001\n7013\n7019\n7027\n7039\n7043\n7057\n7069\n7079\n7103\n7109\n7121\n7127\n7129\n7151\n7159\n7177\n7187\n7193\n7207\n7211\n7213\n7219\n7229\n7237\n7243\n7247\n7253\n7283\n7297\n7307\n7309\n7321\n7331\n7333\n7349\n7351\n7369\n7393\n7411\n7417\n7433\n7451\n7457\n7459\n7477\n7481\n7487\n7489\n7499\n7507\n7517\n7523\n7529\n7537\n7541\n7547\n7549\n7559\n7561\n7573\n7577\n7583\n7589\n7591\n7603\n7607\n7621\n7639\n7643\n7649\n7669\n7673\n7681\n7687\n7691\n7699\n7703\n7717\n7723\n7727\n7741\n7753\n7757\n7759\n7789\n7793\n7817\n7823\n7829\n7841\n7853\n7867\n7873\n7877\n7879\n7883\n7901\n7907\n7919",
+ "base64Encoded": false,
+ "contentType": "text/plain"
+ }
+ },
+ "exitCode": 0
+ }
+ }
+]
\ No newline at end of file
diff --git a/static/116_workflowManager.py b/static/116_workflowManager.py
new file mode 100644
index 00000000..db4c4f65
--- /dev/null
+++ b/static/116_workflowManager.py
@@ -0,0 +1,1306 @@
+"""
+Workflow Manager Module for state machine-based backend chat workflow.
+Implements the state machine as defined in the documentation.
+"""
+
+import asyncio
+import os
+import logging
+import json
+import re
+import uuid
+import base64
+from datetime import datetime
+from typing import Dict, Any, List, Optional, Union, Tuple
+
+from modules.mimeUtils import isTextMimeType, determineContentEncoding
+
+# Required imports
+from modules.workflowAgentsRegistry import getAgentRegistry
+from modules.lucydomInterface import getLucydomInterface as domInterface
+from modules.documentProcessor import getDocumentContents
+
+# Configure logger
+logger = logging.getLogger(__name__)
+
+# Global settings for the workflow management
+GLOBAL_WORKFLOW_LABELS = {
+ "systemName": "AI Assistant", # Default system name for logs
+ "workflowStatusMessages": {
+ "init": "Workflow initialized",
+ "running": "Running workflow",
+ "waiting": "Waiting for input",
+ "completed": "Workflow completed successfully",
+ "stopped": "Workflow stopped by user",
+ "failed": "Error in workflow"
+ }
+}
+class WorkflowStoppedException(Exception):
+ """Exception raised when a workflow is forcibly stopped with function checkExitCriteria() """
+ pass
+
+class WorkflowManager:
+ """
+ Manages the processing of chat requests, agent execution, and
+ the integration of results into the workflow, following a state machine approach.
+ """
+
+ def __init__(self, mandateId: int, userId: int):
+ """
+ Initializes the WorkflowManager with mandate and user context.
+
+ Args:
+ mandateId: ID of the current mandate
+ userId: ID of the current user
+ """
+ self.mandateId = mandateId
+ self.userId = userId
+ self.mydom = domInterface(mandateId, userId)
+ self.agentRegistry = getAgentRegistry()
+ self.agentRegistry.setMydom(self.mydom)
+
+
+ ### Workflow State Machine Implementation
+
+ async def workflowStart(self, userInput: Dict[str, Any], workflowId: Optional[str] = None) -> Dict[str, Any]:
+ """
+ Main entry point for starting or continuing a workflow (State 1: Workflow Initialization).
+ Initializes a new workflow or loads an existing one based on workflowId.
+
+ Args:
+ userInput: User input with prompt and optional file list
+ workflowId: Optional workflow ID to continue an existing workflow
+
+ Returns:
+ Initialized workflow object with status "running"
+ """
+ # 1. Initialize workflow or load existing one
+ workflow = self.workflowInit(workflowId)
+ self.logAdd(workflow, "Starting workflow processing", level="info", progress=0)
+
+ # Start asynchronous processing
+ asyncio.create_task(self.workflowProcess(userInput, workflow))
+
+ return workflow
+
+ ### Forces exit
+
+ def checkExitCriteria(self, workflow: Dict[str, Any]):
+ current_workflow = self.mydom.loadWorkflowState(workflow["id"])
+ if current_workflow["status"] in ["stopped", "failed"]:
+ self.logAdd(workflow, f"Workflow processing terminated due to status: {current_workflow['status']}", level="info")
+ # Raise an exception to stop execution
+ raise WorkflowStoppedException(f"Workflow execution stopped due to status: {current_workflow['status']}")
+
+ async def workflowStop(self, workflowId: str) -> Dict[str, Any]:
+ """
+ Stops a running workflow (State 8: Workflow Stopped).
+ Sets status to "stopped" and adds a log entry.
+
+ Args:
+ workflowId: ID of the workflow to stop
+
+ Returns:
+ Updated workflow with status="stopped"
+ """
+ workflow = self.mydom.loadWorkflowState(workflowId)
+ if not workflow:
+ return {"error": "Workflow not found", "status": "failed"}
+
+ # Update status to stopped
+ workflow["status"] = "stopped"
+ workflow["lastActivity"] = datetime.now().isoformat()
+
+ # Update in database
+ self.mydom.updateWorkflow(workflowId, {
+ "status": workflow["status"],
+ "lastActivity": workflow["lastActivity"]
+ })
+
+ self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["stopped"], level="info", progress=100)
+ return workflow
+
+ async def workflowProcess(self, userInput: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Main processing function that implements the workflow state machine.
+ Handles the complete workflow process from user input to final response.
+
+ Args:
+ userInput: User input with prompt and optional file list
+ workflow: Current workflow object
+
+ Returns:
+ Updated workflow with processing results
+ """
+ try:
+ # State 3: User Message Processing
+ self.checkExitCriteria(workflow)
+ messageUser = await self.chatMessageToWorkflow("user", "", userInput, workflow)
+ messageUser["status"] = "first" # For first message
+
+ # State 4: Project Manager Analysis
+ self.checkExitCriteria(workflow)
+ self.logAdd(workflow, "Analyzing request and planning work", level="info", progress=10)
+ projectManagerResponse = await self.projectManagerAnalysis(messageUser, workflow)
+ objFinalDocuments = projectManagerResponse.get("objFinalDocuments", [])
+ objWorkplan = projectManagerResponse.get("objWorkplan", [])
+ objUserResponse = projectManagerResponse.get("objUserResponse", "")
+
+ # Get detected language and set it in the mydom interface
+ self.checkExitCriteria(workflow)
+ userLanguage = projectManagerResponse.get("userLanguage", "en")
+ self.mydom.setUserLanguage(userLanguage)
+
+ # Save the response as a message in the workflow and add log entries
+ self.checkExitCriteria(workflow)
+ responseMessage = {
+ "role": "assistant",
+ "agentName": "project_manager",
+ "content": objUserResponse,
+ "status": "step" # As per state machine specification
+ }
+ self.messageAdd(workflow, responseMessage)
+
+ self.logAdd(workflow, f"Planned outputs: {len(objFinalDocuments)} documents", level="info", progress=20)
+ self.logAdd(workflow, f"Work plan created with {len(objWorkplan)} steps", level="info", progress=25)
+
+ # State 5: Agent Execution
+ objResults = []
+ if objWorkplan:
+ totalTasks = len(objWorkplan)
+ for taskIndex, task in enumerate(objWorkplan):
+ self.checkExitCriteria(workflow)
+
+ agentName = task.get("agent", "unknown")
+ progressValue = 30 + int((taskIndex / totalTasks) * 60) # Progress from 30% to 90%
+
+ progressMsg = f"Running task {taskIndex+1}/{totalTasks}: {agentName}"
+ self.logAdd(workflow, progressMsg, level="info", progress=progressValue)
+
+ taskResults = await self.agentProcessing(task, workflow)
+ objResults.extend(taskResults)
+
+ # Log completion of this task
+ self.logAdd(
+ workflow,
+ f"Completed task {taskIndex+1}/{totalTasks}: {agentName}",
+ level="info",
+ progress=progressValue + (60/totalTasks)/2
+ )
+
+ # State 6: Final Response Generation
+ self.checkExitCriteria(workflow)
+ self.logAdd(workflow, "Creating final response", level="info", progress=90)
+ finalMessage = await self.generateFinalMessage(objUserResponse, objFinalDocuments, objResults)
+ finalMessage["status"] = "last" # As per state machine specification
+ self.messageAdd(workflow, finalMessage)
+
+ # State 7: Workflow Completion
+ self.checkExitCriteria(workflow)
+ self.workflowFinish(workflow)
+
+ return workflow
+
+ except Exception as e:
+ # State 2: Workflow Exception
+ logger.error(f"Workflow processing error: {str(e)}", exc_info=True)
+ workflow["status"] = "failed"
+ workflow["lastActivity"] = datetime.now().isoformat()
+
+ # Update in database
+ self.mydom.updateWorkflow(workflow["id"], {
+ "status": "failed",
+ "lastActivity": workflow["lastActivity"]
+ })
+
+ self.logAdd(workflow, f"Workflow failed: {str(e)}", level="error", progress=100)
+ return workflow
+
+ def workflowInit(self, workflowId: Optional[str] = None) -> Dict[str, Any]:
+ """
+ Initializes a workflow or loads an existing one with round counting (State 1: Workflow Initialization).
+
+ Args:
+ workflowId: Optional - ID of the workflow to load
+
+ Returns:
+ Initialized workflow object
+ """
+ currentTime = datetime.now().isoformat()
+
+ logger.debug(f"CHECK DATA0 id'{workflowId}'")
+ workflowExist=self.mydom.getWorkflow(workflowId)
+ if workflowId is None or not workflowExist:
+ logger.debug(f"CHECK DATA1 id'{workflowId}'")
+ # Create new workflow
+ newWorkflowId = str(uuid.uuid4()) if workflowId is None else workflowId
+ workflow = {
+ "id": newWorkflowId,
+ "mandateId": self.mandateId,
+ "userId": self.userId,
+ "name": f"Workflow {newWorkflowId[:8]}",
+ "startedAt": currentTime,
+ "messages": [], # Empty list - will be filled with references
+ "messageIds": [], # Initialize empty messageIds list
+ "logs": [],
+ "dataStats": {},
+ "currentRound": 1,
+ "status": "running",
+ "lastActivity": currentTime,
+ }
+
+ # Save to database - only the workflow metadata
+ workflowDb = {
+ "id": workflow["id"],
+ "mandateId": workflow["mandateId"],
+ "userId": workflow["userId"],
+ "name": workflow["name"],
+ "startedAt": workflow["startedAt"],
+ "status": workflow["status"],
+ "dataStats": workflow["dataStats"],
+ "currentRound": workflow["currentRound"],
+ "lastActivity": workflow["lastActivity"],
+ "messageIds": workflow["messageIds"] # Include messageIds
+ }
+ self.mydom.createWorkflow(workflowDb)
+
+ self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["init"], level="info", progress=0)
+ logger.debug(f"CHECK DATA {workflow}")
+ return workflow
+ else:
+ # State 10: Workflow Resumption - Load existing workflow
+ workflow = self.mydom.loadWorkflowState(workflowId)
+
+ # Ensure messageIds exists
+ if "messageIds" not in workflow:
+ # Initialize from existing messages
+ workflow["messageIds"] = [msg["id"] for msg in workflow.get("messages", [])]
+
+ # Update in database
+ self.mydom.updateWorkflow(workflowId, {"messageIds": workflow["messageIds"]})
+
+ # Update status and increment round counter
+ workflow["status"] = "running"
+ workflow["lastActivity"] = currentTime
+
+ # Increment currentRound if it exists, otherwise set it to 1
+ if "currentRound" in workflow:
+ workflow["currentRound"] += 1
+ else:
+ workflow["currentRound"] = 1
+
+ # Update in database - only the relevant workflow fields
+ workflowUpdate = {
+ "status": workflow["status"],
+ "lastActivity": workflow["lastActivity"],
+ "currentRound": workflow["currentRound"]
+ }
+ self.mydom.updateWorkflow(workflowId, workflowUpdate)
+
+ self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["running"], level="info", progress=0)
+ return workflow
+
+ def workflowFinish(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Finalizes a workflow and sets the status to 'completed' (State 7: Workflow Completion).
+
+ Args:
+ workflow: Workflow object
+
+ Returns:
+ Updated workflow object
+ """
+ # Prepare workflow update data
+ workflowUpdate = {
+ "status": "completed",
+ "lastActivity": datetime.now().isoformat(),
+ }
+
+ # Update the workflow object in memory
+ workflow["status"] = workflowUpdate["status"]
+ workflow["lastActivity"] = workflowUpdate["lastActivity"]
+
+ # Save workflow state to database - only relevant fields
+ self.mydom.updateWorkflow(workflow["id"], workflowUpdate)
+
+ self.logAdd(workflow, GLOBAL_WORKFLOW_LABELS["workflowStatusMessages"]["completed"], level="info", progress=100)
+ return workflow
+
+ async def projectManagerAnalysis(self, messageUser: Dict[str, Any], workflow: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Creates the prompt for the project manager and processes the response (State 4: Project Manager Analysis).
+
+ Args:
+ messageUser: Message object with user request
+ workflow: Current workflow object
+
+ Returns:
+ Project manager's response with objFinalDocuments, objWorkplan and objUserResponse
+ """
+ # Get available agents with their capabilities
+ availableAgents = self.agentProfiles()
+
+ # Create a workflow summary
+ workflowSummary = await self.workflowSummarize(workflow, messageUser)
+
+ # Create a list of currently available documents from user input or previously generated documents
+ availableDocuments = self.getAvailableDocuments(workflow, messageUser)
+ availableDocsStr = json.dumps(availableDocuments, indent=2)
+
+ # Create the prompt for the project manager with language detection requirement
+ prompt = f"""
+Based on the user request and the provided documents, please analyze the requirements and create a processing plan.
+Also, identify the language of the user's request and include it in your response.
+
+
There was an error generating the analysis: {str(e)}
" + else: + content = f"Error in Analysis\n\nThere was an error generating the analysis: {str(e)}" + + return { + "label": outputLabel, + "content": content, + "metadata": { + "contentType": contentType + } + } + + def _getImageBase64(self, formatType: str = 'png') -> str: + """ + Convert current matplotlib figure to base64 string. + + Args: + formatType: Image format + + Returns: + Base64 encoded string of the image + """ + buffer = io.BytesIO() + plt.savefig(buffer, format=formatType, dpi=100) + buffer.seek(0) + imageData = buffer.getvalue() + buffer.close() + + # Convert to base64 + return base64.b64encode(imageData).decode('utf-8') + + +# Factory function for the Analyst agent +def getAgentAnalyst(): + """Returns an instance of the Analyst agent.""" + return AgentAnalyst() \ No newline at end of file diff --git a/static/119_agentCoder.py b/static/119_agentCoder.py new file mode 100644 index 00000000..a263c68e --- /dev/null +++ b/static/119_agentCoder.py @@ -0,0 +1,764 @@ +""" +Simple Coder Agent for execution of Python code. +Modified to pass expected output document names to the generated code. 
+""" + +import logging +import json +import os +import subprocess +import tempfile +import shutil +import sys +from typing import Dict, Any, List, Tuple + +from modules.workflowAgentsRegistry import AgentBase +from modules.configuration import APP_CONFIG + +logger = logging.getLogger(__name__) + +class AgentCoder(AgentBase): + """Simplified Agent for developing and executing Python code with integrated executor""" + + def __init__(self): + """Initialize the coder agent""" + super().__init__() + self.name = "coder" + self.description = "Develops and executes Python code for data processing and automation" + self.capabilities = [ + "code_development", + "data_processing", + "file_processing", + "automation", + "code_execution" + ] + + # Executor settings + self.executorTimeout = int(APP_CONFIG.get("Agent_Coder_EXECUTION_TIMEOUT")) # seconds + self.executionRetryLimit = int(APP_CONFIG.get("Agent_Coder_EXECUTION_RETRY")) # max retries + self.tempDir = None + + def setDependencies(self, mydom=None): + """Set external dependencies for the agent.""" + self.mydom = mydom + + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: + """ + Process a task and perform code development/execution. + First checks if the task can be completed without code execution, + then falls back to code generation if needed. + Enhanced to ensure all generated documents are included in output. + + Args: + task: Task dictionary with prompt, inputDocuments, outputSpecifications + + Returns: + Dictionary with feedback and documents + """ + # 1. Extract task information + prompt = task.get("prompt", "") + inputDocuments = task.get("inputDocuments", []) + outputSpecs = task.get("outputSpecifications", []) + + # Check if AI service is available + if not self.mydom: + logger.error("No AI service configured for the Coder agent") + return { + "feedback": "The Coder agent is not properly configured.", + "documents": [] + } + + # 2. 
Extract data from documents in separate categories + documentData = [] # For raw file data (for code execution) + contentData = [] # For content data (later use) + contentExtraction = [] # For AI-extracted data (for quick completion) + + for doc in inputDocuments: + # Create proper filename from name and ext + filename = f"{doc.get('name')}.{doc.get('ext')}" if doc.get('ext') else doc.get('name') + + # Add main document data to documentData if it exists + docData = doc.get('data', '') + if docData: + isBase64 = True # Assume base64 encoded for document data + documentData.append([filename, docData, isBase64]) + + # Process contents for different uses + if doc.get('contents'): + for content in doc.get('contents', []): + contentName = content.get('name', 'unnamed') + + # For AI-extracted data (quick completion) + if content.get('dataExtracted'): + contentExtraction.append({ + "filename": filename, + "contentName": contentName, + "contentData": content.get('dataExtracted', ''), + "contentType": content.get('contentType', ''), + "summary": content.get('summary', '') + }) + + # For raw content data + if content.get('data'): + rawData = content.get('data', '') + isBase64 = content.get('metadata', {}).get('base64Encoded', False) + contentData.append({ + "filename": filename, + "contentName": contentName, + "data": rawData, + "isBase64": isBase64, + "contentType": content.get('contentType', '') + }) + + # Also add to documentData for code execution if not already added + if not docData or docData != rawData: + documentData.append([filename, rawData, isBase64]) + + # 3. 
Check if task can be completed without code execution + quickCompletion = await self._checkQuickCompletion(prompt, contentExtraction, outputSpecs) + + if quickCompletion and quickCompletion.get("complete") == 1: + logger.info("Task completed without code execution") + return { + "feedback": quickCompletion.get("prompt", "Task completed successfully."), + "documents": quickCompletion.get("documents", []) + } + else: + logger.debug(f"Code to generate, no quick check") + + # If quick completion not possible, continue with code generation and execution + logger.info("Generating code to solve the task") + + # 4. Generate code using AI + code, requirements = await self._generateCode(prompt, outputSpecs) + + if not code: + return { + "feedback": "Failed to generate code for the task.", + "documents": [] + } + + # 5. Replace the placeholder with actual inputFiles data + documentDataJson = repr(documentData) + codeWithData = code.replace("inputFiles = \"=== JSONLOAD ===\"", f"inputFiles = {documentDataJson}") + + # 6. Execute code with retry logic + retryCount = 0 + maxRetries = self.executionRetryLimit + executionHistory = [] + + while retryCount <= maxRetries: + executionResult = self._executeCode(codeWithData, requirements) + executionHistory.append({ + "attempt": retryCount + 1, + "code": codeWithData, + "result": executionResult + }) + + # Check if execution was successful + if executionResult.get("success", False): + logger.info(f"Code execution succeeded on attempt {retryCount + 1}") + break + + # If we've reached max retries, exit the loop + if retryCount >= maxRetries: + logger.info(f"Reached maximum retry limit ({maxRetries}). Giving up.") + break + + # Log the error and attempt to improve the code + error = executionResult.get("error", "Unknown error") + logger.info(f"Execution attempt {retryCount + 1} failed: {error}. 
Attempting to improve code.") + + # Generate improved code based on error + improvedCode, improvedRequirements = await self._improveCode( + originalCode=codeWithData, + error=error, + executionResult=executionResult, + attempt=retryCount + 1, + outputSpecs=outputSpecs + ) + + if improvedCode: + codeWithData = improvedCode + requirements = improvedRequirements + logger.info(f"Code improved for retry {retryCount + 2}") + else: + logger.warning("Failed to improve code, using original code for retry") + + retryCount += 1 + + # 7. Process results and create output documents + documents = [] + + # Always add the final code document + documents.append(self.formatAgentDocumentOutput("generated_code.py", codeWithData, "text/plain")) + + # Add execution history document + executionHistoryStr = json.dumps(executionHistory, indent=2) + documents.append(self.formatAgentDocumentOutput("execution_history.json", executionHistoryStr, "application/json")) + + # Enhanced result handling: Create documents based on execution results - fixed for proper content extraction + if executionResult.get("success", False): + resultData = executionResult.get("result") + + # Process results from the result dictionary if available + if isinstance(resultData, dict): + # First, create a mapping of expected output labels to their specs + expectedOutputs = {spec.get("label"): spec for spec in outputSpecs} + createdOutputs = set() + + for label, result_item in resultData.items(): + # Check if result follows the expected structure with nested content + if isinstance(result_item, dict) and "content" in result_item: + # Extract values from the properly structured result + content = result_item.get("content", "") # Extract the inner content + base64Encoded = result_item.get("base64Encoded", False) + contentType = result_item.get("contentType", "text/plain") + + # Check if this label matches one of our expected output documents + # If not, but we haven't created all expected outputs yet, try to map it + 
finalLabel = label + if label not in expectedOutputs and len(expectedOutputs) > 0: + # Find an unused expected output label + for expectedLabel in expectedOutputs: + if expectedLabel not in createdOutputs: + logger.warning(f"Remapping output '{label}' to expected '{expectedLabel}'") + finalLabel = expectedLabel + break + + # Create document by passing only the content to formatAgentDocumentOutput + doc = self.formatAgentDocumentOutput(finalLabel, content, contentType) + + # Override the base64Encoded flag with the value from the result + # This is needed since formatAgentDocumentOutput might determine a different value + if isinstance(base64Encoded, bool): + doc["base64Encoded"] = base64Encoded + + documents.append(doc) + createdOutputs.add(finalLabel) + logger.info(f"Created document from result: {finalLabel} ({contentType}, base64={base64Encoded})") + else: + # Not properly structured - log warning + logger.warning(f"Skipping improperly formatted result for '{label}'. Results must include 'content' field.") + else: + # No result dictionary found + logger.warning("No valid result dictionary found or it's not properly formatted") + + # If no valid documents were created from the result dictionary but we have output specifications + if len(documents) <= 2 and outputSpecs: # Only code.py and history.json exist + logger.warning("No valid documents created from result dictionary, using execution output for specifications") + # Default to execution output + output = executionResult.get("output", "") + for spec in outputSpecs: + label = spec.get("label", "output.txt") + # Create basic document from output + doc = self.formatAgentDocumentOutput(label, output, "text/plain") + documents.append(doc) + logger.info(f"Created document from output specification: {label}") + + if retryCount > 0: + feedback = f"Code executed successfully after {retryCount + 1} attempts. Generated {len(documents) - 2} output files." + else: + feedback = f"Code executed successfully. 
Generated {len(documents) - 2} output files." + else: + # Execution failed + error = executionResult.get("error", "Unknown error") + documents.append(self.formatAgentDocumentOutput("execution_error.txt", f"Error executing code:\n\n{error}", "text/plain")) + if retryCount > 0: + feedback = f"Error during code execution after {retryCount + 1} attempts: {error}" + else: + feedback = f"Error during code execution: {error}" + + return { + "feedback": feedback, + "documents": documents + } + + async def _improveCode(self, originalCode: str, error: str, executionResult: Dict[str, Any], attempt: int, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]: + """ + Improve code based on execution error. + Enhanced to maintain proper output handling with correct document structure. + + Args: + originalCode: The code that failed to execute + error: The error message + executionResult: Complete execution result dictionary + attempt: Current attempt number + outputSpecs: List of expected output specifications + + Returns: + Tuple of (improvedCode, requirements) + """ + # Create a string with output specifications to be included in the prompt + outputSpecsStr = "" + if outputSpecs: + outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n" + for i, spec in enumerate(outputSpecs, 1): + label = spec.get("label", f"output{i}.txt") + description = spec.get("description", "") + outputSpecsStr += f"{i}. {label} - {description}\n" + + # Create prompt for code improvement + improvementPrompt = f""" +Fix the following Python code that failed during execution. This is attempt {attempt} to fix the code. + +ORIGINAL CODE: +{originalCode} + +ERROR MESSAGE: +{error} + +STDOUT: +{executionResult.get('output', '')} +{outputSpecsStr} +INSTRUCTIONS: +1. Fix all errors identified in the error message +2. Diagnose and fix any logical issues +3. Pay special attention to: +- Type conversions and data handling +- Error handling and edge cases +- Resource management (file handles, etc.) 
+- Syntax errors and typos +4. Keep the inputFiles handling logic intact +5. Maintain the same overall structure and purpose + +OUTPUT REQUIREMENTS (VERY IMPORTANT): +- Your code MUST define a 'result' variable as a dictionary to store ALL outputs +- The key for each entry MUST be the full filename with extension (e.g., "output.txt") +- The value for each entry MUST be a dictionary with the following structure: +{{ + "content": string, # The actual content (text or base64-encoded string) + "base64Encoded": boolean, # Set to true for binary data, false for text data + "contentType": string # MIME type of the content (e.g., "text/plain", "application/json") +}} +- Example result dictionary: +result = {{ + "output.txt": {{ + "content": "This is text content", + "base64Encoded": False, + "contentType": "text/plain" + }}, + "chart.png": {{ + "content": "base64encodedstring...", + "base64Encoded": True, + "contentType": "image/png" + }} +}} +- NEVER write files to disk using open() or similar methods - use the result dictionary instead + +JSON OUTPUT (CRITICAL): +- After creating the result dictionary, you MUST print it as JSON to stdout +- Make sure your code includes: print(json.dumps(result)) as the final line +- This printed JSON is how the system captures your result + +REQUIREMENTS: +Required packages should be specified as: +# REQUIREMENTS: library==version,library2>=version +- You may add/remove requirements as needed to fix the code + +Return ONLY Python code without explanations or markdown. +""" + + # Call AI service + messages = [ + {"role": "system", "content": "You are an expert Python code debugger. Provide only fixed Python code without explanations or formatting. 
Ensure all generated files are included in the 'result' dictionary and that result is printed as JSON with print(json.dumps(result))."}, + {"role": "user", "content": improvementPrompt} + ] + + try: + improvedContent = await self.mydom.callAi(messages, temperature=0.2) + + # Extract code and requirements + improvedCode = self._cleanCode(improvedContent) + + # Extract requirements + requirements = [] + for line in improvedCode.split('\n'): + if line.strip().startswith("# REQUIREMENTS:"): + reqStr = line.replace("# REQUIREMENTS:", "").strip() + requirements = [r.strip() for r in reqStr.split(',') if r.strip()] + break + + return improvedCode, requirements + except Exception as e: + logger.error(f"Error improving code: {str(e)}") + return None, [] + + + async def _checkQuickCompletion(self, prompt: str, contentExtraction: List[Dict], outputSpecs: List[Dict]) -> Dict: + """ + Check if the task can be completed without writing and executing code. + + Args: + prompt: The task prompt + contentExtraction: List of extracted content data with contentName and dataExtracted + outputSpecs: List of output specifications + + Returns: + Dictionary with completion status and results, or None if no quick completion + """ + # If no data or no output specs, can't do a quick completion + if not contentExtraction or not outputSpecs: + return None + + # Create a prompt for the AI to check if this can be completed directly + specsJson = json.dumps(outputSpecs) + dataJson = json.dumps(contentExtraction) + + checkPrompt = f""" +Analyze this task and determine if it can be completed directly without writing code. + +TASK: +{prompt} + +EXTRACTED DATA AVAILABLE: +{dataJson} + +Each entry in the extracted data contains: +- filename: The source file name +- contentName: The specific content section name +- contentData: The AI-extracted text from the content +- contentType: The type of content (text, csv, etc.) 
+- summary: A brief summary of the content + +REQUIRED OUTPUT: +{specsJson} + +If the task can be completed directly with the available extracted data, respond with: +{{"complete": 1, "prompt": "Brief explanation of the solution", "documents": [ + {{"label": "filename.ext", "content": "content here"}} +]}} + +If code would be needed to properly complete this task, respond with: +{{"complete": 0, "prompt": "Explanation why code is needed"}} + +Only return valid JSON. Your entire response must be parseable as JSON. +""" + + # Call AI service + logger.debug(f"Checking if task can be completed without code execution: {checkPrompt}") + messages = [ + {"role": "system", "content": "You are an AI assistant that determines if tasks require code execution. Reply with JSON only."}, + {"role": "user", "content": checkPrompt} + ] + + try: + # Use a lower temperature for more deterministic response + response = await self.mydom.callAi(messages, produceUserAnswer = True, temperature=0.1) + + # Parse response as JSON + if response: + try: + # Find JSON in response if there's any text around it + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + jsonStr = response[jsonStart:jsonEnd] + result = json.loads(jsonStr) + + # Check if this is a proper response + if "complete" in result: + return result + + except json.JSONDecodeError: + logger.debug("Failed to parse quick completion response as JSON") + pass + except Exception as e: + logger.debug(f"Error during quick completion check: {str(e)}") + + # Default to requiring code execution + return None + + async def _generateCode(self, prompt: str, outputSpecs: List[Dict[str, Any]] = None) -> Tuple[str, List[str]]: + """ + Generate Python code from a prompt with the inputFiles placeholder. + Enhanced to emphasize proper result output handling with correct document structure. 
+ + Args: + prompt: The task prompt + outputSpecs: List of expected output specifications + + Returns: + Tuple of (code, requirements) + """ + # Create a string with output specifications to be included in the prompt + outputSpecsStr = "" + if outputSpecs: + outputSpecsStr = "\nEXPECTED OUTPUT DOCUMENTS:\n" + for i, spec in enumerate(outputSpecs, 1): + label = spec.get("label", f"output{i}.txt") + description = spec.get("description", "") + outputSpecsStr += f"{i}. {label} - {description}\n" + + # Create improved prompt for code generation + aiPrompt = f""" +Generate Python code to solve the following task: + +TASK: +{prompt} +{outputSpecsStr} +INPUT FILES: +- 'inputFiles' variable is provided as [[filename, data, isBase64], ...] +- For text files (isBase64=False): use data directly as string +- For binary files (isBase64=True): use base64.b64decode(data) + +OUTPUT REQUIREMENTS (VERY IMPORTANT): +- Your code MUST define a 'result' variable as a dictionary to store ALL outputs +- The key for each entry MUST be the full filename with extension (e.g., "output.txt") +- The value for each entry MUST be a dictionary with the following structure: +{{ + "content": string, # The actual content (text or base64-encoded string) + "base64Encoded": boolean, # Set to true for binary data, false for text data + "contentType": string # MIME type of the content (e.g., "text/plain", "application/json") +}} +- Example result dictionary: +result = {{ + "output.txt": {{ + "content": "This is text content", + "base64Encoded": False, + "contentType": "text/plain" + }}, + "chart.png": {{ + "content": "base64encodedstring...", + "base64Encoded": True, + "contentType": "image/png" + }} +}} +- NEVER write files to disk using open() or similar methods - use the result dictionary instead +- If you generate any charts, reports, or visualizations, ensure they are properly encoded and included + +IMPORTANT - USE EXACT OUTPUT FILENAMES: +- You MUST use the EXACT filenames specified in EXPECTED 
OUTPUT DOCUMENTS section +- The key in the result dictionary must match these filenames precisely +- If no output documents are specified, use appropriate descriptive filenames + +JSON OUTPUT (CRITICAL): +- After creating the result dictionary, you MUST print it as JSON to stdout using json.dumps() +- Add these lines at the end of your code: + import json # if not already imported + print(json.dumps(result)) +- This printed JSON is how the system captures your result +- Make sure this is the last thing your code prints + +BINARY DATA HANDLING: +- For binary content (images, PDFs, etc.), convert to base64 string and set base64Encoded=True +- For text content (text, JSON, HTML, etc.), use plain string and set base64Encoded=False +- Use appropriate MIME types for different content types + +CODE QUALITY: +- Use explicit type conversions where needed (int/float/str) +- Implement feature detection, not version checks +- Handle errors gracefully with appropriate fallbacks +- Follow latest API conventions for libraries +- Validate inputs before processing + +Your code must start with: +inputFiles = "=== JSONLOAD ===" # DO NOT CHANGE THIS LINE + +REQUIREMENTS: +Required packages should be specified as: +# REQUIREMENTS: library==version,library2>=version +- Specify exact versions for critical libraries +- Use constraint operators (==,>=,<=) as needed + +Return ONLY Python code without explanations or markdown. +""" + + # Call AI service + messages = [ + {"role": "system", "content": "You are a Python code generator. Provide only valid Python code without explanations or formatting. 
Always output the result dictionary as JSON using print(json.dumps(result)) at the end of your code."}, + {"role": "user", "content": aiPrompt} + ] + + generatedContent = await self.mydom.callAi(messages, temperature=0.1) + + # Extract code and requirements + code = self._cleanCode(generatedContent) + + # Extract requirements + requirements = [] + for line in code.split('\n'): + if line.strip().startswith("# REQUIREMENTS:"): + reqStr = line.replace("# REQUIREMENTS:", "").strip() + requirements = [r.strip() for r in reqStr.split(',') if r.strip()] + break + + return code, requirements + + def _executeCode(self, code: str, requirements: List[str] = None) -> Dict[str, Any]: + """ + Execute Python code in a virtual environment. + Integrated executor functionality with enhanced result extraction. + + Args: + code: Python code to execute + requirements: List of required packages + + Returns: + Execution result dictionary + """ + try: + # 1. Create temp directory and virtual environment + self.tempDir = tempfile.mkdtemp(prefix="code_exec_") + venvPath = os.path.join(self.tempDir, "venv") + + # Create venv + logger.debug(f"Creating virtual environment at {venvPath}") + subprocess.run([sys.executable, "-m", "venv", venvPath], + check=True, capture_output=True) + + # Get Python executable path + pythonExe = os.path.join(venvPath, "Scripts", "python.exe") if os.name == 'nt' else os.path.join(venvPath, "bin", "python") + + # 2. 
Install requirements if provided + if requirements: + logger.info(f"Installing requirements: {requirements}") + + # Create requirements.txt + reqFile = os.path.join(self.tempDir, "requirements.txt") + with open(reqFile, "w") as f: + f.write("\n".join(requirements)) + + x="\n".join(requirements) + logger.info(f"Requirements file: {x}.") + + # Install requirements + try: + pipResult = subprocess.run( + [pythonExe, "-m", "pip", "install", "-r", reqFile], + capture_output=True, + text=True, + timeout=int(APP_CONFIG.get("Agent_Coder_INSTALL_TIMEOUT")) + ) + if pipResult.returncode != 0: + logger.debug(f"Error installing requirements: {pipResult.stderr}") + else: + logger.debug(f"Requirements installed successfully") + # Log installed packages if in debug mode + if logger.isEnabledFor(logging.DEBUG): + pipList = subprocess.run( + [pythonExe, "-m", "pip", "list"], + capture_output=True, + text=True + ) + logger.debug(f"Installed packages:\n{pipList.stdout}") + + except Exception as e: + logger.debug(f"Exception during requirements installation: {str(e)}") + + # 3. Write code to file + codeFile = os.path.join(self.tempDir, "code.py") + with open(codeFile, "w", encoding="utf-8") as f: + f.write(code) + + # 4. Execute code + logger.debug(f"Executing code with timeout of {self.executorTimeout} seconds. Code: {code}") + process = subprocess.run( + [pythonExe, codeFile], + timeout=self.executorTimeout, + capture_output=True, + text=True + ) + + # 5. 
Process results + stdout = process.stdout + stderr = process.stderr + + # Try to extract result from stdout + resultData = None + if process.returncode == 0: + try: + # Find the last line that might be JSON + jsonLines = [] + for line in stdout.strip().split('\n'): + line = line.strip() + if line and line[0] in '{[' and line[-1] in '}]': + try: + parsed = json.loads(line) + jsonLines.append((line, parsed)) + except json.JSONDecodeError: + continue + + # Use the last valid JSON that appears to be a dictionary + if jsonLines: + for line, parsed in reversed(jsonLines): + if isinstance(parsed, dict): + resultData = parsed + logger.debug(f"Extracted result data from stdout: {type(resultData)}") + break + except Exception as e: + logger.debug(f"Error extracting result from stdout: {str(e)}") + + # Enhanced logging of what was found + if resultData: + logger.info(f"Found result dictionary with {len(resultData)} entries: {list(resultData.keys())}") + else: + logger.warning("No result dictionary found in output") + + # Create result dictionary + return { + "success": process.returncode == 0, + "output": stdout, + "error": stderr if process.returncode != 0 else "", + "result": resultData, + "exitCode": process.returncode + } + + except subprocess.TimeoutExpired: + logger.error(f"Execution timed out after {self.executorTimeout} seconds") + return { + "success": False, + "output": "", + "error": f"Execution timed out after {self.executorTimeout} seconds", + "result": None, + "exitCode": -1 + } + except Exception as e: + logger.error(f"Execution error: {str(e)}") + return { + "success": False, + "output": "", + "error": f"Execution error: {str(e)}", + "result": None, + "exitCode": -1 + } + finally: + # Clean up resources + self._cleanupExecution() + + def _cleanupExecution(self): + """Clean up temporary resources from code execution.""" + if self.tempDir and os.path.exists(self.tempDir): + try: + logger.debug(f"Cleaning up temporary directory: {self.tempDir}") + 
shutil.rmtree(self.tempDir) + self.tempDir = None + except Exception as e: + logger.warning(f"Error cleaning up temp directory: {str(e)}") + + def _cleanCode(self, code: str) -> str: + """Remove any markdown formatting or explanations.""" + # Remove code block markers + code = code.replace("```python", "").replace("```", "") + + # Remove explanations before or after code + lines = code.strip().split('\n') + startIndex = 0 + endIndex = len(lines) + + # Find start of actual code + for i, line in enumerate(lines): + if line.strip().startswith("inputFiles =") or line.strip().startswith("# REQUIREMENTS:"): + startIndex = i + break + + # Clean code + cleanedCode = '\n'.join(lines[startIndex:endIndex]) + return cleanedCode.strip() + + +# Factory function for the Coder agent +def getAgentCoder(): + """Returns an instance of the Coder agent.""" + return AgentCoder() \ No newline at end of file diff --git a/static/120_agentDocumentation.py b/static/120_agentDocumentation.py new file mode 100644 index 00000000..daae8a97 --- /dev/null +++ b/static/120_agentDocumentation.py @@ -0,0 +1,559 @@ +""" +Documentation agent for creating documentation, reports, and structured content. +Reimagined with an output-first, AI-driven approach with multi-step document generation. 
+""" + +import logging +import json +from typing import Dict, Any, List + +from modules.workflowAgentsRegistry import AgentBase + +logger = logging.getLogger(__name__) + +class AgentDocumentation(AgentBase): + """AI-driven agent for creating documentation and structured content using multi-step generation""" + + def __init__(self): + """Initialize the documentation agent""" + super().__init__() + self.name = "documentation" + self.description = "Creates structured documentation, reports, and content using AI with multi-step generation" + self.capabilities = [ + "report_generation", + "documentation", + "content_structuring", + "technical_writing", + "knowledge_organization" + ] + + def setDependencies(self, mydom=None): + """Set external dependencies for the agent.""" + self.mydom = mydom + + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: + """ + Process a task by focusing on required outputs and using AI to generate them. + + Args: + task: Task dictionary with prompt, inputDocuments, outputSpecifications + + Returns: + Dictionary with feedback and documents + """ + try: + # Extract task information + prompt = task.get("prompt", "") + inputDocuments = task.get("inputDocuments", []) + outputSpecs = task.get("outputSpecifications", []) + + # Check AI service + if not self.mydom: + return { + "feedback": "The Documentation agent requires an AI service to function.", + "documents": [] + } + + # Extract context from input documents - focusing only on dataExtracted + documentContext = self._extractDocumentContext(inputDocuments) + + # Create task analysis to understand the requirements + documentationPlan = await self._analyzeTask(prompt, documentContext, outputSpecs) + + # Generate all required output documents + documents = [] + + # If no output specs provided, create default document + if not outputSpecs: + defaultFormat = documentationPlan.get("recommendedFormat", "markdown") + defaultTitle = documentationPlan.get("title", "Documentation") + 
safeTitle = self._sanitizeFilename(defaultTitle) + + outputSpecs = [ + {"label": f"{safeTitle}.{defaultFormat}", "description": "Comprehensive documentation"} + ] + + # Process each output specification + for spec in outputSpecs: + outputLabel = spec.get("label", "") + outputDescription = spec.get("description", "") + + # Generate the document using multi-step approach + document = await self._createDocumentMultiStep( + prompt, + documentContext, + outputLabel, + outputDescription, + documentationPlan + ) + + documents.append(document) + + # Generate feedback + feedback = documentationPlan.get("feedback", f"Created {len(documents)} documents based on your requirements.") + + return { + "feedback": feedback, + "documents": documents + } + + except Exception as e: + logger.error(f"Error in documentation generation: {str(e)}", exc_info=True) + return { + "feedback": f"Error during documentation generation: {str(e)}", + "documents": [] + } + + def _extractDocumentContext(self, documents: List[Dict[str, Any]]) -> str: + """ + Extract context from input documents, focusing on dataExtracted. + + Args: + documents: List of document objects + + Returns: + Extracted context as text + """ + contextParts = [] + + for doc in documents: + docName = doc.get("name", "unnamed") + if doc.get("ext"): + docName = f"{docName}.{doc.get('ext')}" + + contextParts.append(f"\n\n--- {docName} ---\n") + + # Process contents for dataExtracted + for content in doc.get("contents", []): + if content.get("dataExtracted"): + contextParts.append(content.get("dataExtracted", "")) + + return "\n".join(contextParts) + + def _sanitizeFilename(self, filename: str) -> str: + """ + Sanitize a filename by removing invalid characters. 
+ + Args: + filename: Filename to sanitize + + Returns: + Sanitized filename + """ + # Replace invalid characters with underscores + invalidChars = r'<>:"/\|?*' + for char in invalidChars: + filename = filename.replace(char, '_') + + # Trim filename if too long + if len(filename) > 100: + filename = filename[:97] + "..." + + return filename + + async def _analyzeTask(self, prompt: str, context: str, outputSpecs: List) -> Dict: + """ + Use AI to analyze the task and create a documentation plan. + + Args: + prompt: The task prompt + context: Document context + outputSpecs: Output specifications + + Returns: + Documentation plan dictionary + """ + analysisPrompt = f""" + Analyze this documentation task and create a detailed plan. + + TASK: {prompt} + + DOCUMENT CONTEXT SAMPLE: + {context[:1000]}... (truncated) + + OUTPUT REQUIREMENTS: + {json.dumps(outputSpecs, indent=2)} + + Create a detailed documentation plan in JSON format with the following structure: + {{ + "title": "Document Title", + "documentType": "report|manual|guide|whitepaper|etc", + "audience": "technical|general|executive|etc", + "detailedStructure": [ + {{ + "title": "Chapter/Section Title", + "keyPoints": ["point1", "point2", ...], + "subsections": ["subsection1", "subsection2", ...], + "importance": "high|medium|low", + "estimatedLength": "short|medium|long" + }}, + ... more sections ... + ], + "keyTopics": ["topic1", "topic2", ...], + "tone": "formal|conversational|instructional|etc", + "recommendedFormat": "markdown|html|text|etc", + "formattingRequirements": ["requirement1", "requirement2", ...], + "executiveSummary": "Brief description of what the document will cover", + "feedback": "Brief message explaining the documentation approach" + }} + + Only return valid JSON. No preamble or explanations. + """ + + try: + response = await self.mydom.callAi([ + {"role": "system", "content": "You are a documentation expert. 
Respond with valid JSON only."}, + {"role": "user", "content": analysisPrompt} + ]) + + # Extract JSON from response + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + plan = json.loads(response[jsonStart:jsonEnd]) + return plan + else: + # Fallback if JSON not found + return { + "title": "Documentation", + "documentType": "report", + "audience": "general", + "detailedStructure": [ + { + "title": "Introduction", + "keyPoints": ["Purpose", "Scope"], + "subsections": [], + "importance": "high", + "estimatedLength": "short" + }, + { + "title": "Main Content", + "keyPoints": ["Core Information"], + "subsections": ["Key Findings", "Analysis"], + "importance": "high", + "estimatedLength": "long" + }, + { + "title": "Conclusion", + "keyPoints": ["Summary", "Next Steps"], + "subsections": [], + "importance": "medium", + "estimatedLength": "short" + } + ], + "keyTopics": ["General Information"], + "tone": "formal", + "recommendedFormat": "markdown", + "formattingRequirements": ["Clear headings", "Professional formatting"], + "executiveSummary": "A comprehensive documentation covering the requested topics.", + "feedback": "Created documentation based on your requirements." 
+ } + + except Exception as e: + logger.warning(f"Error creating documentation plan: {str(e)}") + return { + "title": "Documentation", + "documentType": "report", + "audience": "general", + "detailedStructure": [ + { + "title": "Introduction", + "keyPoints": ["Purpose", "Scope"], + "subsections": [], + "importance": "high", + "estimatedLength": "short" + }, + { + "title": "Main Content", + "keyPoints": ["Core Information"], + "subsections": ["Key Findings", "Analysis"], + "importance": "high", + "estimatedLength": "long" + }, + { + "title": "Conclusion", + "keyPoints": ["Summary", "Next Steps"], + "subsections": [], + "importance": "medium", + "estimatedLength": "short" + } + ], + "keyTopics": ["General Information"], + "tone": "formal", + "recommendedFormat": "markdown", + "formattingRequirements": ["Clear headings", "Professional formatting"], + "executiveSummary": "A comprehensive documentation covering the requested topics.", + "feedback": "Created documentation based on your requirements." + } + + async def _createDocumentMultiStep(self, prompt: str, context: str, outputLabel: str, + outputDescription: str, documentationPlan: Dict) -> Dict: + """ + Create a document using a multi-step approach with separate AI calls for each section. + + Args: + prompt: Original task prompt + context: Document context + outputLabel: Output filename + outputDescription: Description of desired output + documentationPlan: Documentation plan from AI + + Returns: + Document object + """ + # Determine format from filename + formatType = outputLabel.split('.')[-1].lower() if '.' 
in outputLabel else "md" + + # Map format to contentType + contentTypeMap = { + "md": "text/markdown", + "markdown": "text/markdown", + "html": "text/html", + "txt": "text/plain", + "text": "text/plain", + "json": "application/json", + "csv": "text/csv" + } + + contentType = contentTypeMap.get(formatType, "text/plain") + + # Get document information + title = documentationPlan.get("title", "Documentation") + documentType = documentationPlan.get("documentType", "document") + audience = documentationPlan.get("audience", "general") + tone = documentationPlan.get("tone", "formal") + keyTopics = documentationPlan.get("keyTopics", []) + formattingRequirements = documentationPlan.get("formattingRequirements", []) + + # Get the detailed structure + detailedStructure = documentationPlan.get("detailedStructure", []) + if not detailedStructure: + # Fallback structure if none provided + detailedStructure = [ + { + "title": "Introduction", + "keyPoints": ["Purpose", "Scope"], + "importance": "high" + }, + { + "title": "Main Content", + "keyPoints": ["Core Information"], + "importance": "high" + }, + { + "title": "Conclusion", + "keyPoints": ["Summary", "Next Steps"], + "importance": "medium" + } + ] + + try: + # Step 1: Generate document introduction + introPrompt = f""" + Create the introduction for a {documentType} titled "{title}". + + DOCUMENT OVERVIEW: + - Type: {documentType} + - Audience: {audience} + - Tone: {tone} + - Key Topics: {', '.join(keyTopics)} + - Format: {formatType} + + TASK CONTEXT: {prompt} + + This introduction should: + 1. Clearly state the purpose and scope of the document + 2. Provide context and background information + 3. Outline what the reader will find in the document + 4. Set the appropriate tone for the {audience} audience + + The introduction should be professional and engaging, formatted according to {formatType} standards. 
+ """ + + introduction = await self.mydom.callAi([ + {"role": "system", "content": f"You are a documentation expert creating an introduction in {formatType} format."}, + {"role": "user", "content": introPrompt} + ], produceUserAnswer = True) + + # Step 2: Generate executive summary (if applicable) + if documentType in ["report", "whitepaper", "case study"]: + summaryPrompt = f""" + Create an executive summary for a {documentType} titled "{title}". + + DOCUMENT OVERVIEW: + - Type: {documentType} + - Audience: {audience} + - Key Topics: {', '.join(keyTopics)} + + TASK CONTEXT: {prompt} + + This executive summary should: + 1. Provide a concise overview of the entire document + 2. Highlight key findings, recommendations, or conclusions + 3. Be suitable for executives or busy readers who may only read this section + 4. Be professionally formatted according to {formatType} standards + + Keep the summary focused and impactful, approximately 200-300 words. + """ + + executiveSummary = await self.mydom.callAi([ + {"role": "system", "content": f"You are a documentation expert creating an executive summary in {formatType} format."}, + {"role": "user", "content": summaryPrompt} + ], produceUserAnswer = True) + else: + executiveSummary = "" + + # Step 3: Generate each section + sections = [] + + for section in detailedStructure: + sectionTitle = section.get("title", "Section") + keyPoints = section.get("keyPoints", []) + subsections = section.get("subsections", []) + importance = section.get("importance", "medium") + + # Adjust depth based on importance + detailLevel = "high" if importance == "high" else "medium" + + sectionPrompt = f""" + Create the "{sectionTitle}" section for a {documentType} titled "{title}". 
+ + SECTION DETAILS: + - Title: {sectionTitle} + - Key Points to Cover: {', '.join(keyPoints)} + - Subsections: {', '.join(subsections)} + - Detail Level: {detailLevel} + + DOCUMENT CONTEXT: + - Type: {documentType} + - Audience: {audience} + - Tone: {tone} + - Format: {formatType} + + TASK CONTEXT: {prompt} + + AVAILABLE INFORMATION: + {context[:500]}... (truncated) + + This section should: + 1. Be comprehensive and well-structured + 2. Cover all the key points listed + 3. Include the specified subsections with appropriate headings + 4. Maintain a {tone} tone suitable for the {audience} audience + 5. Be properly formatted according to {formatType} standards + 6. Include specific examples, data, or evidence where appropriate + + Be thorough in your coverage of this section, providing substantive content. + """ + + sectionContent = await self.mydom.callAi([ + {"role": "system", "content": f"You are a documentation expert creating detailed content for the {sectionTitle} section."}, + {"role": "user", "content": sectionPrompt} + ], produceUserAnswer = True) + + sections.append(sectionContent) + + # Step 4: Generate conclusion + conclusionPrompt = f""" + Create the conclusion for a {documentType} titled "{title}". + + DOCUMENT OVERVIEW: + - Type: {documentType} + - Audience: {audience} + - Key Topics: {', '.join(keyTopics)} + + TASK CONTEXT: {prompt} + + This conclusion should: + 1. Summarize the key points covered in the document + 2. Provide closure to the topics discussed + 3. Include any relevant recommendations or next steps + 4. Leave the reader with a clear understanding of the document's significance + + The conclusion should be professional and impactful, formatted according to {formatType} standards. 
+ """ + + conclusion = await self.mydom.callAi([ + {"role": "system", "content": f"You are a documentation expert creating a conclusion in {formatType} format."}, + {"role": "user", "content": conclusionPrompt} + ], produceUserAnswer = True) + + # Step 5: Assemble the complete document + if formatType in ["md", "markdown"]: + # Markdown format + documentContent = f"# {title}\n\n" + + if executiveSummary: + documentContent += f"## Executive Summary\n\n{executiveSummary}\n\n" + + documentContent += f"{introduction}\n\n" + + for i, sectionContent in enumerate(sections): + # Ensure section starts with heading if not already + sectionTitle = detailedStructure[i].get("title", f"Section {i+1}") + if not sectionContent.strip().startswith("#"): + documentContent += f"## {sectionTitle}\n\n" + documentContent += f"{sectionContent}\n\n" + + documentContent += f"## Conclusion\n\n{conclusion}\n" + + elif formatType == "html": + # HTML format + documentContent = f"\n\nThere was an error generating the documentation: {str(e)}
" + else: + content = f"Error in Documentation\n\nThere was an error generating the documentation: {str(e)}" + + return { + "label": outputLabel, + "content": content, + "metadata": { + "contentType": contentType + } + } + + +# Factory function for the Documentation agent +def getAgentDocumentation(): + """Returns an instance of the Documentation agent.""" + return AgentDocumentation() \ No newline at end of file diff --git a/static/121_auth.py b/static/121_auth.py new file mode 100644 index 00000000..6fdf7e2f --- /dev/null +++ b/static/121_auth.py @@ -0,0 +1,158 @@ +""" +Authentication module for backend API. +Handles JWT-based authentication, token generation, and user context. +""" + +from datetime import datetime, timedelta, timezone +from typing import Optional, Dict, Any, Tuple +from fastapi import Depends, HTTPException, status +from fastapi.security import OAuth2PasswordBearer +from jose import JWTError, jwt +import logging + +from modules.gatewayInterface import getGatewayInterface +from modules.configuration import APP_CONFIG + +# Get Config Data +SECRET_KEY = APP_CONFIG.get("APP_JWT_SECRET_SECRET") +ALGORITHM = APP_CONFIG.get("Auth_ALGORITHM") +ACCESS_TOKEN_EXPIRE_MINUTES = int(APP_CONFIG.get("APP_TOKEN_EXPIRY")) + +# OAuth2 Setup +oauth2Scheme = OAuth2PasswordBearer(tokenUrl="token") + +# Logger +logger = logging.getLogger(__name__) + +def createAccessToken(data: dict, expiresDelta: Optional[timedelta] = None) -> str: + """ + Creates a JWT Access Token. 
async def getCurrentUser(token: str = Depends(oauth2Scheme)) -> Dict[str, Any]:
    """
    Extracts and validates the current user from the JWT token.

    Args:
        token: JWT Token from the Authorization header

    Returns:
        User data

    Raises:
        HTTPException: For invalid token or user
    """
    credentialsException = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid authentication credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )

    try:
        # Decode and verify the token signature.
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])

        # "sub" is the JWT subject claim; it may legitimately be absent,
        # hence Optional (the previous `username: str` annotation was wrong).
        username: Optional[str] = payload.get("sub")
        if username is None:
            # HTTPException is not a JWTError, so this propagates past the
            # except clause below as intended.
            raise credentialsException

        # NOTE(review): a "mandateId" claim used to be read here but was never
        # used or returned; the dead extraction has been removed. Mandate
        # resolution happens via getUserContext on the stored user record.

    except JWTError:
        logger.warning("Invalid JWT Token")
        raise credentialsException

    # Initialize Gateway Interface without context
    gateway = getGatewayInterface()

    # Retrieve user from database
    user = gateway.getUserByUsername(username)

    if user is None:
        logger.warning(f"User {username} not found")
        raise credentialsException

    if user.get("disabled", False):
        logger.warning(f"User {username} is disabled")
        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled")

    return user
+ + Args: + currentUser: Current user data + + Returns: + User data + + Raises: + HTTPException: If the user is disabled + """ + if currentUser.get("disabled", False): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User is disabled") + + return currentUser + +async def getUserContext(currentUser: Dict[str, Any]) -> Tuple[int, int]: + """ + Extracts the mandate ID and user ID from the current user. + Enhanced with better logging. + + Args: + currentUser: The current user + + Returns: + Tuple of (mandateId, userId) + """ + # Default values + defaultMandateId = 0 + defaultUserId = 0 + + # Extract mandateId + mandateId = currentUser.get("mandateId", None) + if mandateId is None: + logger.warning(f"No mandateId found in currentUser, using default: {defaultMandateId}") + mandateId = defaultMandateId + else: + try: + mandateId = int(mandateId) + except (ValueError, TypeError): + logger.error(f"Invalid mandateId value: {mandateId}, using default: {defaultMandateId}") + mandateId = defaultMandateId + + # Extract userId + userId = currentUser.get("id", None) + if userId is None: + logger.warning(f"No userId found in currentUser, using default: {defaultUserId}") + userId = defaultUserId + else: + try: + userId = int(userId) + except (ValueError, TypeError): + logger.error(f"Invalid userId value: {userId}, using default: {defaultUserId}") + userId = defaultUserId + + return mandateId, userId \ No newline at end of file diff --git a/static/122_configuration.py b/static/122_configuration.py new file mode 100644 index 00000000..f7e9b81f --- /dev/null +++ b/static/122_configuration.py @@ -0,0 +1,183 @@ +""" +Utility module for configuration management. + +This module provides a global APP_CONFIG object for accessing configuration from both +config.ini files and environment variables stored in .env files, using a flat structure. 
+""" + +import os +import logging +from typing import Any, Dict, Optional +from pathlib import Path + +# Set up logging +logger = logging.getLogger(__name__) + +class Configuration: + """ + Configuration class with attribute-style access to flattened configuration. + """ + def __init__(self): + """Initialize the configuration object""" + self._data = {} + self._configFilePath = None + self._envFilePath = None + self._configMtime = 0 + self._envMtime = 0 + self.refresh() + + def refresh(self): + """Reload configuration from files""" + self._loadConfig() + self._loadEnv() + logger.info("Configuration refreshed") + + def _loadConfig(self): + """Load configuration from config.ini file in flattened format""" + # Find config.ini file (look in current directory and parent directory) + configPath = Path('config.ini') + if not configPath.exists(): + # Try in parent directory + configPath = Path('../config.ini') + if not configPath.exists(): + logger.warning(f"Configuration file not found at {configPath.absolute()}") + return + + self._configFilePath = configPath + currentMtime = os.path.getmtime(configPath) + + # Skip if file hasn't changed + if currentMtime <= self._configMtime: + return + + self._configMtime = currentMtime + + try: + with open(configPath, 'r') as f: + for line in f: + line = line.strip() + # Skip empty lines and comments + if not line or line.startswith('#'): + continue + + # Parse key-value pairs + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Add directly to data dictionary + self._data[key] = value + + + except Exception as e: + logger.error(f"Error loading configuration: {e}") + + def _loadEnv(self): + """Load environment variables from .env file""" + # Find .env file (look in current directory and parent directory) + envPath = Path('.env') + if not envPath.exists(): + # Try in parent directory + envPath = Path('../.env') + if not envPath.exists(): + logger.warning(f"Environment file not found at 
{envPath.absolute()}") + return + + self._envFilePath = envPath + currentMtime = os.path.getmtime(envPath) + + # Skip if file hasn't changed + if currentMtime <= self._envMtime: + return + + self._envMtime = currentMtime + + try: + with open(envPath, 'r') as f: + for line in f: + line = line.strip() + # Skip empty lines and comments + if not line or line.startswith('#'): + continue + + # Parse key-value pairs + if '=' in line: + key, value = line.split('=', 1) + key = key.strip() + value = value.strip() + + # Add directly to data dictionary + self._data[key] = value + + logger.info(f"Loaded environment variables from {envPath.absolute()}") + + # Also load system environment variables (don't override existing) + for key, value in os.environ.items(): + if key not in self._data: + self._data[key] = value + + except Exception as e: + logger.error(f"Error loading environment variables: {e}") + + def checkForUpdates(self): + """Check if configuration files have changed and reload if necessary""" + if self._configFilePath and os.path.exists(self._configFilePath): + currentMtime = os.path.getmtime(self._configFilePath) + if currentMtime > self._configMtime: + logger.info("Config file has changed, reloading...") + self._loadConfig() + + if self._envFilePath and os.path.exists(self._envFilePath): + currentMtime = os.path.getmtime(self._envFilePath) + if currentMtime > self._envMtime: + logger.info("Environment file has changed, reloading...") + self._loadEnv() + + def get(self, key: str, default: Any = None) -> Any: + """Get configuration value with optional default""" + self.checkForUpdates() # Check for file changes + + if key in self._data: + value = self._data[key] + # Handle secrets (keys ending with _SECRET) + if key.endswith("_SECRET"): + return handleSecret(value) + return value + return default + + def __getattr__(self, name: str) -> Any: + """Enable attribute-style access to configuration""" + self.checkForUpdates() # Check for file changes + + value = 
self.get(name) + if value is None: + raise AttributeError(f"Configuration key '{name}' not found") + return value + + def __dir__(self) -> list: + """Support auto-completion of attributes""" + self.checkForUpdates() # Check for file changes + return list(self._data.keys()) + super().__dir__() + + def set(self, key: str, value: Any) -> None: + """Set a configuration value (for testing/overrides)""" + self._data[key] = value + +def handleSecret(value: str) -> str: + """ + Handle secret values. Currently just returns the plain text value, + but can be enhanced to provide actual decryption in the future. + + Args: + value: The secret value to handle + + Returns: + str: Processed secret value + """ + # For now, just return the value as-is + # In the future, this could be enhanced to decrypt values + return value + +# Create the global APP_CONFIG instance +APP_CONFIG = Configuration() \ No newline at end of file diff --git a/static/123_agentWebcrawler.py b/static/123_agentWebcrawler.py new file mode 100644 index 00000000..5cce8176 --- /dev/null +++ b/static/123_agentWebcrawler.py @@ -0,0 +1,796 @@ +""" +Webcrawler agent for research and retrieval of information from the web. +Reimagined with an output-first, AI-driven approach. 
+""" + +import logging +import json +import re +import time +from typing import Dict, Any, List +from urllib.parse import quote_plus, unquote + +from bs4 import BeautifulSoup +import requests +import markdown + +from modules.workflowAgentsRegistry import AgentBase +from modules.configuration import APP_CONFIG + +logger = logging.getLogger(__name__) + +class AgentWebcrawler(AgentBase): + """AI-driven agent for web research and information retrieval""" + + def __init__(self): + """Initialize the webcrawler agent""" + super().__init__() + self.name = "webcrawler" + self.description = "Conducts web research and collects information from online sources" + self.capabilities = [ + "webSearch", + "informationRetrieval", + "dataCollection", + "searchResultsAnalysis", + "webpageContentExtraction" + ] + + # Web crawling configuration + self.maxUrl = int(APP_CONFIG.get("Agent_Webcrawler_MAX_URLS", "5")) + self.maxSearchTerms = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_KEYWORDS", "3")) + self.maxResults = int(APP_CONFIG.get("Agent_Webcrawler_MAX_SEARCH_RESULTS", "5")) + self.timeout = int(APP_CONFIG.get("Agent_Webcrawler_TIMEOUT", "30")) + self.searchEngine = APP_CONFIG.get("Agent_Webcrawler_SEARCH_ENGINE", "https://html.duckduckgo.com/html/?q=") + self.userAgent = APP_CONFIG.get("Agent_Webcrawler_USER_AGENT", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36") + + def setDependencies(self, mydom=None): + """Set external dependencies for the agent.""" + self.mydom = mydom + + async def processTask(self, task: Dict[str, Any]) -> Dict[str, Any]: + """ + Process a task by focusing on required outputs and using AI to guide the research process. 
+ + Args: + task: Task dictionary with prompt, inputDocuments, outputSpecifications + + Returns: + Dictionary with feedback and documents + """ + try: + # Extract task information + prompt = task.get("prompt", "") + outputSpecs = task.get("outputSpecifications", []) + + # Check AI service + if not self.mydom: + return { + "feedback": "The Webcrawler agent requires an AI service to function effectively.", + "documents": [] + } + + # Create research plan + researchPlan = await self._createResearchPlan(prompt) + + # Check if this is truly a web research task + if not researchPlan.get("requiresWebResearch", True): + return { + "feedback": "This task doesn't appear to require web research. Please try a different agent.", + "documents": [] + } + + # Gather raw material through web research + rawResults = await self._gatherResearchMaterial(researchPlan) + + # Format results into requested output documents + documents = await self._createOutputDocuments( + prompt, + rawResults, + outputSpecs, + researchPlan + ) + + # Generate feedback + feedback = researchPlan.get("feedback", f"I conducted web research on '{prompt[:50]}...' and gathered information from {len(rawResults)} relevant sources.") + + return { + "feedback": feedback, + "documents": documents + } + + except Exception as e: + logger.error(f"Error during web research: {str(e)}", exc_info=True) + return { + "feedback": f"Error during web research: {str(e)}", + "documents": [] + } + + async def _createResearchPlan(self, prompt: str) -> Dict[str, Any]: + """ + Use AI to create a detailed research plan. 
+ + Args: + prompt: The research query + + Returns: + Research plan dictionary + """ + researchPrompt = f""" + Create a detailed web research plan for this task: "{prompt}" + + Analyze the request carefully and create a structured plan in JSON format with the following elements: + {{ + "requiresWebResearch": true/false, # Whether this genuinely requires web research + "researchQuestions": ["question1", "question2", ...], # 2-4 specific questions to answer + "searchTerms": ["term1", "term2", ...], # Up to {self.maxSearchTerms} effective search terms + "directUrls": ["url1", "url2", ...], # Any URLs directly mentioned in the request (up to {self.maxUrl}) + "expectedSources": ["type1", "type2", ...], # Types of sources that would be most valuable + "contentFocus": "what specific content to extract or focus on", + "feedback": "explanation of how the research will be conducted" + }} + + Respond with ONLY the JSON object, no additional text or explanations. + """ + + try: + # Get research plan from AI + response = await self.mydom.callAi([ + {"role": "system", "content": "You are a web research planning expert. 
Create precise research plans in JSON format only."}, + {"role": "user", "content": researchPrompt} + ]) + + # Extract JSON + jsonStart = response.find('{') + jsonEnd = response.rfind('}') + 1 + + if jsonStart >= 0 and jsonEnd > jsonStart: + plan = json.loads(response[jsonStart:jsonEnd]) + + # Ensure we have the expected fields with defaults if missing + if "searchTerms" not in plan: + plan["searchTerms"] = [prompt] + if "directUrls" not in plan: + plan["directUrls"] = [] + if "researchQuestions" not in plan: + plan["researchQuestions"] = ["What information can be found about this topic?"] + + return plan + else: + # Fallback plan + logger.warning(f"Not able creating research plan, generating fallback plan") + return { + "requiresWebResearch": True, + "researchQuestions": ["What information can be found about this topic?"], + "searchTerms": [prompt], + "directUrls": [], + "expectedSources": ["Web pages", "Articles"], + "contentFocus": "Relevant information about the topic", + "feedback": f"I'll conduct web research on '{prompt}' and gather relevant information." + } + + except Exception as e: + logger.warning(f"Error creating research plan: {str(e)}") + # Simple fallback plan + return { + "requiresWebResearch": True, + "researchQuestions": ["What information can be found about this topic?"], + "searchTerms": [prompt], + "directUrls": [], + "expectedSources": ["Web pages", "Articles"], + "contentFocus": "Relevant information about the topic", + "feedback": f"I'll conduct web research on '{prompt}' and gather relevant information." + } + + async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Gather research material based on the research plan. 
    async def _gatherResearchMaterial(self, researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Gather research material based on the research plan.

        Direct URLs from the plan are fetched first, then search terms are
        queried; duplicate URLs are skipped, and collection stops once
        self.maxResults entries have been gathered. Every entry is then
        passed through _summarizeAllResults to fill its "summary" field.

        Args:
            researchPlan: Research plan dictionary

        Returns:
            List of research result dicts (title/url/sourceType/content/summary,
            plus "snippet" for search-engine results)
        """
        allResults = []

        # Step 1: URLs explicitly listed in the plan, capped at maxUrl.
        directUrls = researchPlan.get("directUrls", [])[:self.maxUrl]
        for url in directUrls:
            logger.info(f"Processing direct URL: {url}")
            try:
                # Fetch and parse the page (assumes _readUrl returns a parsed
                # document or a falsy value on failure — confirm in helper).
                soup = self._readUrl(url)

                if soup:
                    # Extract title and main content from the parsed page.
                    title = self._extractTitle(soup, url)
                    content = self._extractMainContent(soup)

                    allResults.append({
                        "title": title,
                        "url": url,
                        "sourceType": "directUrl",
                        "content": content,
                        "summary": ""  # filled by _summarizeAllResults below
                    })
            except Exception as e:
                # A single failing URL must not abort the whole crawl.
                logger.warning(f"Error processing URL {url}: {str(e)}")

        # Step 2: search-engine queries, capped at maxSearchTerms.
        searchTerms = researchPlan.get("searchTerms", [])[:self.maxSearchTerms]
        for term in searchTerms:
            logger.info(f"Searching for: {term}")
            try:
                # Perform search (assumes _searchWeb returns dicts with
                # title/url/data/snippet keys — confirm in helper).
                searchResults = self._searchWeb(term)

                for result in searchResults:
                    # De-duplicate against everything collected so far.
                    if not any(r["url"] == result["url"] for r in allResults):
                        allResults.append({
                            "title": result["title"],
                            "url": result["url"],
                            "sourceType": "searchResult",
                            "content": result["data"],
                            "snippet": result["snippet"],
                            "summary": ""  # filled by _summarizeAllResults below
                        })

                    # Stop scanning this term's results once the cap is hit.
                    if len(allResults) >= self.maxResults:
                        break
            except Exception as e:
                logger.warning(f"Error searching for {term}: {str(e)}")

            # Stop processing further terms once the cap is hit.
            if len(allResults) >= self.maxResults:
                break

        # Summarize all results (sequentially — one AI call per result,
        # despite the original "in parallel" wording).
        allResults = await self._summarizeAllResults(allResults, researchPlan)

        return allResults
    async def _summarizeAllResults(self, results: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Create summaries for all research results.

        Each result gets its own AI call, executed sequentially; the
        "summary" key is mutated in place on every entry (the same list
        object is also returned for convenience).

        Args:
            results: List of research results
            researchPlan: Research plan with questions and focus

        Returns:
            Results with added summaries
        """
        for i, result in enumerate(results):
            logger.info(f"Summarizing result {i+1}/{len(results)}: {result['title'][:30]}...")

            try:
                # Cap page content to keep the prompt within token limits.
                content = self._limitText(result.get("content", ""), maxChars=8000)
                researchQuestions = researchPlan.get("researchQuestions", ["What relevant information does this page contain?"])
                contentFocus = researchPlan.get("contentFocus", "Relevant information")

                # Build the per-page summarization prompt.
                summaryPrompt = f"""
                Summarize this web page content based on these research questions:
                {', '.join(researchQuestions)}

                Focus on: {contentFocus}

                Web page: {result['url']}
                Title: {result['title']}

                Content:
                {content}

                Create a concise summary that:
                1. Directly answers the research questions if possible
                2. Extracts the most relevant information from the page
                3. Includes specific facts, figures, or quotes if available
                4. Is around 2000 characters long

                Only include information actually found in the content. No fabrications or assumptions.
                """

                if self.mydom:
                    summary = await self.mydom.callAi([
                        {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."},
                        {"role": "user", "content": summaryPrompt}
                    ])

                    # Store the summary on the result entry.
                    result["summary"] = summary
                else:
                    # Fallback when no AI service is wired in.
                    logger.warning(f"Not able to summarize result, using fallback plan.")
                    result["summary"] = f"Content from {result['url']} ({len(content)} characters)"

            except Exception as e:
                # Summarization failures are recorded per-result, not raised.
                logger.warning(f"Error summarizing result {i+1}: {str(e)}")
                result["summary"] = f"Error creating summary: {str(e)}"

        return results
+ """ + + if self.mydom: + summary = await self.mydom.callAi([ + {"role": "system", "content": "You summarize web content accurately and concisely, focusing only on what is actually in the content."}, + {"role": "user", "content": summaryPrompt} + ]) + + # Store the summary + result["summary"] = summary + else: + # Fallback if no AI service + logger.warning(f"Not able to summarize result, using fallback plan.") + result["summary"] = f"Content from {result['url']} ({len(content)} characters)" + + except Exception as e: + logger.warning(f"Error summarizing result {i+1}: {str(e)}") + result["summary"] = f"Error creating summary: {str(e)}" + + return results + + async def _createOutputDocuments(self, prompt: str, results: List[Dict[str, Any]], + outputSpecs: List[Dict[str, Any]], researchPlan: Dict[str, Any]) -> List[Dict[str, Any]]: + """ + Create output documents based on research results and specifications. + + Args: + prompt: Original research prompt + results: List of research results + outputSpecs: Output specifications + researchPlan: Research plan + + Returns: + List of output documents + """ + # If no output specs provided, create default output + if not outputSpecs: + outputSpecs = [{ + "label": "webResearchResults.md", + "description": "Comprehensive web research results" + }] + + # Generate documents + documents = [] + + # Process each output specification + for spec in outputSpecs: + outputLabel = spec.get("label", "") + outputDescription = spec.get("description", "") + + # Determine format based on file extension + formatType = self._determineFormatType(outputLabel) + + # Create appropriate document based on format + if formatType == "json": + # JSON output - structured data + document = await self._createJsonDocument(prompt, results, researchPlan, outputLabel) + elif formatType == "csv": + # CSV output - tabular data + document = await self._createCsvDocument(results, outputLabel) + else: + # Text-based output (markdown, html, text) - narrative report + 
document = await self._createNarrativeDocument( + prompt, results, researchPlan, formatType, outputLabel, outputDescription + ) + + documents.append(document) + + return documents + + async def _createNarrativeDocument(self, prompt: str, results: List[Dict[str, Any]], + researchPlan: Dict[str, Any], formatType: str, + outputLabel: str, outputDescription: str) -> Dict[str, Any]: + """ + Create a narrative document (markdown, html, text) from research results. + + Args: + prompt: Original research prompt + results: Research results + researchPlan: Research plan + formatType: Output format (markdown, html, text) + outputLabel: Output filename + outputDescription: Output description + + Returns: + Document object + """ + # Create content based on format + if formatType == "markdown": + contentType = "text/markdown" + templateFormat = "markdown" + elif formatType == "html": + contentType = "text/html" + templateFormat = "html" + else: + contentType = "text/plain" + templateFormat = "text" + + # Prepare research context + researchQuestions = researchPlan.get("researchQuestions", []) + searchTerms = researchPlan.get("searchTerms", []) + + # Create document structure based on results + sourcesSummary = [] + for result in results: + sourcesSummary.append({ + "title": result.get("title", "Untitled"), + "url": result.get("url", ""), + "summary": result.get("summary", ""), + "snippet": result.get("snippet", "") + }) + + # Truncate content for prompt + sourcesJson = json.dumps(sourcesSummary, indent=2) + if len(sourcesJson) > 10000: + # Logic to truncate each summary while preserving structure + for i in range(len(sourcesSummary)): + if len(sourcesJson) <= 10000: + break + # Gradually truncate summaries + sourcesSummary[i]["summary"] = sourcesSummary[i]["summary"][:500] + "..." 
+ sourcesJson = json.dumps(sourcesSummary, indent=2) + + # Create report prompt + reportPrompt = f""" + Create a comprehensive {formatType} research report based on the following web research: + + TASK: {prompt} + + RESEARCH QUESTIONS: + {', '.join(researchQuestions)} + + SEARCH TERMS USED: + {', '.join(searchTerms)} + + SOURCES AND FINDINGS: + {sourcesJson} + + REPORT DETAILS: + - Format: {templateFormat} + - Filename: {outputLabel} + - Description: {outputDescription} + + Create a well-structured report that: + 1. Includes an executive summary of key findings + 2. Addresses each research question directly + 3. Integrates information from all relevant sources + 4. Cites sources appropriately for each piece of information + 5. Provides a comprehensive synthesis of the research + 6. Is formatted professionally and appropriately for {templateFormat} + + The report should be scholarly, accurate, and focused on the original research task. + """ + + try: + # Generate report with AI + reportContent = await self.mydom.callAi([ + {"role": "system", "content": f"You create professional research reports in {templateFormat} format."}, + {"role": "user", "content": reportPrompt} + ]) + + # Convert to HTML if needed + if formatType == "html" and not reportContent.lower().startswith("An error occurred: {str(e)}
" + else: + content = f"WEB RESEARCH ERROR\n\nAn error occurred: {str(e)}" + + return self.formatAgentDocumentOutput(outputLabel, content, contentType) + + async def _createJsonDocument(self, prompt: str, results: List[Dict[str, Any]], + researchPlan: Dict[str, Any], outputLabel: str) -> Dict[str, Any]: + """ + Create a JSON document from research results. + + Args: + prompt: Original research prompt + results: Research results + researchPlan: Research plan + outputLabel: Output filename + + Returns: + Document object + """ + try: + # Create structured data + sourcesData = [] + for result in results: + sourcesData.append({ + "title": result.get("title", "Untitled"), + "url": result.get("url", ""), + "summary": result.get("summary", ""), + "snippet": result.get("snippet", ""), + "sourceType": result.get("sourceType", "") + }) + + # Create metadata + metadata = { + "query": prompt, + "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"), + "researchQuestions": researchPlan.get("researchQuestions", []), + "searchTerms": researchPlan.get("searchTerms", []) + } + + # Compile complete report object + jsonContent = { + "metadata": metadata, + "summary": researchPlan.get("feedback", "Web research results"), + "sources": sourcesData + } + + # Convert to JSON string + content = json.dumps(jsonContent, indent=2) + + return self.formatAgentDocumentOutput(outputLabel, content, "application/json") + + except Exception as e: + logger.error(f"Error creating JSON document: {str(e)}") + return self.formatAgentDocumentOutput(outputLabel, json.dumps({"error": str(e)}), "application/json") + + async def _createCsvDocument(self, results: List[Dict[str, Any]], outputLabel: str) -> Dict[str, Any]: + """ + Create a CSV document from research results. 

        Args:
            results: Research results
            outputLabel: Output filename

        Returns:
            Document object
        """
        try:
            # Create CSV header
            csvLines = ["Title,URL,Source Type,Snippet"]

            # Add results
            for result in results:
                # Escape CSV fields (RFC 4180 style: double any embedded quotes)
                title = result.get("title", "").replace('"', '""')
                url = result.get("url", "").replace('"', '""')
                sourceType = result.get("sourceType", "").replace('"', '""')
                snippet = result.get("snippet", "").replace('"', '""')

                csvLines.append(f'"{title}","{url}","{sourceType}","{snippet}"')

            # Combine into CSV content
            content = "\n".join(csvLines)

            return self.formatAgentDocumentOutput(outputLabel, content, "text/csv")

        except Exception as e:
            logger.error(f"Error creating CSV document: {str(e)}")
            # Emit a minimal two-column error CSV instead of raising
            return self.formatAgentDocumentOutput(outputLabel, "Error,Error\nFailed to create CSV,{0}".format(str(e)), "text/csv")

    def _determineFormatType(self, outputLabel: str) -> str:
        """
        Determine the format type based on the filename.

        Args:
            outputLabel: Output filename

        Returns:
            Format type (markdown, html, text, json, csv)
        """
        outputLabelLower = outputLabel.lower()

        if outputLabelLower.endswith(".md"):
            return "markdown"
        elif outputLabelLower.endswith(".html"):
            return "html"
        elif outputLabelLower.endswith(".txt"):
            return "text"
        elif outputLabelLower.endswith(".json"):
            return "json"
        elif outputLabelLower.endswith(".csv"):
            return "csv"
        else:
            # Default to markdown
            return "markdown"

    def _searchWeb(self, query: str) -> List[Dict[str, str]]:
        """
        Conduct a web search and return the results.

        Args:
            query: The search query

        Returns:
            List of search results
        """
        formattedQuery = quote_plus(query)
        url = f"{self.searchEngine}{formattedQuery}"

        # NOTE(review): the selectors below (.result, .result__a,
        # .result__snippet) match DuckDuckGo's HTML results page — presumably
        # self.searchEngine points there; verify against the configured value.
        searchResultsSoup = self._readUrl(url)
        if not searchResultsSoup or not searchResultsSoup.select('.result'):
            logger.warning(f"No search results found for: {query}")
            return []

        # Extract search results
        results = []

        # Find all result containers
        resultElements = searchResultsSoup.select('.result')

        for result in resultElements:
            # Extract title
            titleElement = result.select_one('.result__a')
            title = titleElement.text.strip() if titleElement else 'No title'

            # Extract URL (DuckDuckGo uses redirects)
            urlElement = titleElement.get('href') if titleElement else ''
            extractedUrl = 'No URL'

            if urlElement:
                # Extract actual URL from DuckDuckGo's redirect
                if urlElement.startswith('/d.js?q='):
                    start = urlElement.find('?q=') + 3
                    end = urlElement.find('&', start) if '&' in urlElement[start:] else None
                    extractedUrl = unquote(urlElement[start:end])

                    # Ensure URL has correct protocol prefix
                    if not extractedUrl.startswith(('http://', 'https://')):
                        if not extractedUrl.startswith('//'):
                            extractedUrl = 'https://' + extractedUrl
                        else:
                            # Protocol-relative URL: just add the scheme
                            extractedUrl = 'https:' + extractedUrl
                else:
                    extractedUrl = urlElement

            # Extract snippet directly from search results page
            snippetElement = result.select_one('.result__snippet')
            snippet = snippetElement.text.strip() if snippetElement else 'No description'

            # Get actual page content (one extra HTTP fetch per result)
            try:
                targetPageSoup = self._readUrl(extractedUrl)
                content = self._extractMainContent(targetPageSoup)
            except Exception as e:
                logger.warning(f"Error extracting content from {extractedUrl}: {str(e)}")
                content = f"Error extracting content: {str(e)}"

            results.append({
                'title': title,
                'url': extractedUrl,
                'snippet': snippet,
                'data': content
            })

            # Limit number of results
            if len(results) >= self.maxResults:
                break

        return results

    def _readUrl(self, url: str) -> BeautifulSoup:
        """
        Read a URL and return a BeautifulSoup parser for the content.

        Args:
            url: The URL to read

        Returns:
            BeautifulSoup object with the content or None on errors
        """
        if not url or not url.startswith(('http://', 'https://')):
            return None

        headers = {
            'User-Agent': self.userAgent,
            'Accept': 'text/html,application/xhtml+xml,application/xml',
            'Accept-Language': 'en-US,en;q=0.9',
        }

        try:
            # Initial request
            response = requests.get(url, headers=headers, timeout=self.timeout)

            # Handling for status 202: server accepted but has not produced the
            # page yet, so retry with increasing waits before giving up
            if response.status_code == 202:
                # Retry with backoff
                backoffTimes = [0.5, 1.0, 2.0, 5.0]

                for waitTime in backoffTimes:
                    time.sleep(waitTime)
                    response = requests.get(url, headers=headers, timeout=self.timeout)

                    if response.status_code != 202:
                        break

            # Raise for error status codes
            response.raise_for_status()

            # Parse HTML
            return BeautifulSoup(response.text, 'html.parser')

        except Exception as e:
            # Any network/parse failure is reported as None to the caller
            logger.error(f"Error reading URL {url}: {str(e)}")
            return None

    def _extractTitle(self, soup: BeautifulSoup, url: str) -> str:
        """
        Extract the title from a webpage.

        Args:
            soup: BeautifulSoup object of the webpage
            url: URL of the webpage

        Returns:
            Extracted title
        """
        if not soup:
            return f"Error with {url}"

        # Extract title from title tag
        titleTag = soup.find('title')
        title = titleTag.text.strip() if titleTag else "No title"

        # Alternative: Also look for h1 tags if title tag is missing
        if title == "No title":
            h1Tag = soup.find('h1')
            if h1Tag:
                title = h1Tag.text.strip()

        return title

    def _extractMainContent(self, soup: BeautifulSoup, maxChars: int = 10000) -> str:
        """
        Extract the main content from an HTML page.

        Args:
            soup: BeautifulSoup object of the webpage
            maxChars: Maximum number of characters

        Returns:
            Extracted main content as a string
        """
        if not soup:
            return ""

        # Try to find main content elements in priority order
        mainContent = None
        for selector in ['main', 'article', '#content', '.content', '#main', '.main']:
            content = soup.select_one(selector)
            if content:
                mainContent = content
                break

        # If no main content found, use the body
        if not mainContent:
            mainContent = soup.find('body') or soup

        # Remove script, style, nav, footer elements that don't contribute to main content
        for element in mainContent.select('script, style, nav, footer, header, aside, .sidebar, #sidebar, .comments, #comments, .advertisement, .ads, iframe'):
            element.extract()

        # Extract text content
        textContent = mainContent.get_text(separator=' ', strip=True)

        # Limit to maxChars
        return textContent[:maxChars]

    def _limitText(self, text: str, maxChars: int = 10000) -> str:
        """
        Limit text to a maximum number of characters.

        Args:
            text: Input text
            maxChars: Maximum number of characters

        Returns:
            Limited text
        """
        if not text:
            return ""

        # If text is already under the limit, return unchanged
        if len(text) <= maxChars:
            return text

        # Otherwise limit text to maxChars
        return text[:maxChars] + "... [Content truncated due to length]"


# Factory function for the Webcrawler agent
def getAgentWebcrawler():
    """Returns an instance of the Webcrawler agent."""
    return AgentWebcrawler()
\ No newline at end of file
diff --git a/static/124_defAttributes.py b/static/124_defAttributes.py
new file mode 100644
index 00000000..731ecfd9
--- /dev/null
+++ b/static/124_defAttributes.py
@@ -0,0 +1,123 @@
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional

# Define the model for attribute definitions
class AttributeDefinition(BaseModel):
    name: str
    label: str
    type: str
    required: bool = False
    placeholder: Optional[str] = None
    defaultValue: Optional[Any] = None
    options: Optional[List[Dict[str, Any]]] = None
    editable: bool = True
    visible: bool = True
    order: int = 0
    validation: Optional[Dict[str, Any]] = None
    helpText: Optional[str] = None

# Helper classes for type mapping
typeMappings = {
    "int": "number",
    "str": "string",
    "float": "number",
    "bool": "boolean",
    "List[int]": "array",
    "List[str]": "array",
    "Dict[str, Any]": "object",
    "Optional[str]": "string",
    "Optional[int]": "number",
    "Optional[Dict[str, Any]]": "object"
}

# Special field types based on naming conventions
specialFieldTypes = {
    "content": "textarea",
    "description": "textarea",
    "instructions": "textarea",
    "password": "password",
    "email": "email",
    "workspaceId": "select",
    "agentId": "select",
    "type": "select"
}

# Function to convert a Pydantic model into attribute definitions
def getModelAttributes(modelClass, userLanguage="de"):
    """
    Converts a Pydantic model into a list of AttributeDefinition objects
    """
    attributes = []

    # Go through all fields in the model
    # NOTE(review): __fields__ / field.type_ / field.required are pydantic v1
    # ModelField internals — this breaks under pydantic v2; confirm the pinned
    # pydantic version.
    for i, (fieldName, field) in enumerate(modelClass.__fields__.items()):
        # Skip internal fields
        if fieldName.startswith('_') or fieldName in ["label", "fieldLabels"]:
            continue

        # Determine the field type
        fieldType = typeMappings.get(str(field.type_), "string")

        # Check for special field types
        if fieldName in specialFieldTypes:
            fieldType = specialFieldTypes[fieldName]

        # Get the label (if available)
        fieldLabel = fieldName.replace('_', ' ').capitalize()
        if hasattr(modelClass, 'fieldLabels') and fieldName in modelClass.fieldLabels:
            labelObj = modelClass.fieldLabels[fieldName]
            fieldLabel = labelObj.getLabel(userLanguage)

        # Determine default values and required status
        required = field.required
        defaultValue = field.default if not field.required else None

        # Check for validation rules
        validation = None
        if field.validators:
            validation = {"hasValidators": True}

        # Placeholder text
        placeholder = f"Please enter {fieldLabel}"

        # Special options for Select fields
        options = None
        if fieldType == "select":
            if fieldName == "type" and modelClass.__name__ == "Agent":
                options = [
                    {"value": "Analysis", "label": "Analysis"},
                    {"value": "Transformation", "label": "Transformation"},
                    {"value": "Generation", "label": "Generation"},
                    {"value": "Classification", "label": "Classification"},
                    {"value": "Custom", "label": "Custom"}
                ]

        # Extract description from Field object
        description = None
        # Try to get description from various possible sources
        if hasattr(field, 'field_info') and hasattr(field.field_info, 'description'):
            description = field.field_info.description
        elif hasattr(field, 'description'):
            description = field.description
        elif hasattr(field, 'schema') and hasattr(field.schema, 'description'):
            description = field.schema.description

        # Create attribute definition
        attrDef = AttributeDefinition(
            name=fieldName,
            label=fieldLabel,
            type=fieldType,
            required=required,
            placeholder=placeholder,
            defaultValue=defaultValue,
            options=options,
            editable=fieldName not in ["id", "mandateId", "userId", "createdAt", "uploadDate"],
            visible=fieldName not in ["hashedPassword", "mandateId", "userId"],
            order=i,
            validation=validation,
            helpText=description or ""  # Set empty string as default value if no description found
        )

        attributes.append(attrDef)

    return attributes
\ No newline at end of file
diff --git a/static/125_gatewayInterface.py b/static/125_gatewayInterface.py
new file mode 100644
index 00000000..3e1120c7
--- /dev/null
+++ b/static/125_gatewayInterface.py
@@ -0,0 +1,471 @@
"""
Interface to the Gateway system.
Manages users and mandates for authentication.
"""

import os
import logging
from typing import Dict, Any, List, Optional, Union
import importlib
from passlib.context import CryptContext

from connectors.connectorDbJson import DatabaseConnector
from modules.configuration import APP_CONFIG

logger = logging.getLogger(__name__)

# Password-Hashing
pwdContext = CryptContext(schemes=["argon2"], deprecated="auto")


class GatewayInterface:
    """
    Interface to the Gateway system.
    Manages users and mandates.
    """

    def __init__(self, mandateId: int = None, userId: int = None):
        """
        Initializes the Gateway Interface with optional mandate and user context.

        Args:
            mandateId: ID of the current mandate (optional)
            userId: ID of the current user (optional)
        """
        # Context can be empty during initialization
        self.mandateId = mandateId
        self.userId = userId

        # Import data model module
        try:
            self.modelModule = importlib.import_module("modules.gatewayModel")
            logger.info("gatewayModel successfully imported")
        except ImportError as e:
            logger.error(f"Error importing gatewayModel: {e}")
            raise

        # Initialize database
        self._initializeDatabase()

    def _initializeDatabase(self):
        """
        Initializes the database with minimal objects
        """

        # Connector starts with context 0/0 when no mandate/user is known yet
        self.db = DatabaseConnector(
            dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
            dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
            dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
            dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
            mandateId=self.mandateId if self.mandateId else 0,
            userId=self.userId if self.userId else 0
        )

        # Create Root mandate if needed
        existingMandateId = self.getInitialId("mandates")
        mandates = self.db.getRecordset("mandates")
        if existingMandateId is None or not mandates:
            logger.info("Creating Root mandate")
            rootMandate = {
                "name": "Root",
                "language": "de"
            }
            createdMandate = self.db.recordCreate("mandates", rootMandate)
            logger.info(f"Root mandate created with ID {createdMandate['id']}")

            # Update mandate context
            self.mandateId = createdMandate['id']
            self.userId = createdMandate['userId']

            # Recreate connector with correct context
            self.db = DatabaseConnector(
                dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
                dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
                dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
                dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
                mandateId=self.mandateId,
                userId=self.userId
            )

        # Create Admin user if needed
        existingUserId = self.getInitialId("users")
        users = self.db.getRecordset("users")
        if existingUserId is None or not users:
            logger.info("Creating Admin user")
            adminUser = {
                "mandateId": self.mandateId,
                "username": "admin",
                "email": "admin@example.com",
                "fullName": "Administrator",
                "disabled": False,
                "language": "de",
                "privilege": "sysadmin",  # SysAdmin privilege
                "hashedPassword": self._getPasswordHash("admin")  # Use a secure password in production!
            }
            createdUser = self.db.recordCreate("users", adminUser)
            logger.info(f"Admin user created with ID {createdUser['id']}")

            # Update user context
            self.userId = createdUser['id']

            # Recreate connector with correct context
            self.db = DatabaseConnector(
                dbHost=APP_CONFIG.get("DB_SYSTEM_HOST"),
                dbDatabase=APP_CONFIG.get("DB_SYSTEM_DATABASE"),
                dbUser=APP_CONFIG.get("DB_SYSTEM_USER"),
                dbPassword=APP_CONFIG.get("DB_SYSTEM_PASSWORD_SECRET"),
                mandateId=self.mandateId,
                userId=self.userId
            )

    def getInitialId(self, table: str) -> Optional[int]:
        """Returns the initial ID for a table"""
        return self.db.getInitialId(table)

    def _getPasswordHash(self, password: str) -> str:
        """Creates a hash for a password"""
        return pwdContext.hash(password)

    def _verifyPassword(self, plainPassword: str, hashedPassword: str) -> bool:
        """Checks if the password matches the hash"""
        return pwdContext.verify(plainPassword, hashedPassword)

    def _getCurrentTimestamp(self) -> str:
        """Returns the current timestamp in ISO format"""
        from datetime import datetime
        return datetime.now().isoformat()

    # Mandate methods

    def getAllMandates(self) -> List[Dict[str, Any]]:
        """Returns all mandates"""
        return self.db.getRecordset("mandates")

    def getMandate(self, mandateId: int) -> Optional[Dict[str, Any]]:
        """Returns a mandate by its ID"""
        mandates = self.db.getRecordset("mandates", recordFilter={"id": mandateId})
        if mandates:
            return mandates[0]
        return None

    def createMandate(self, name: str, language: str = "de") -> Dict[str, Any]:
        """Creates a new mandate"""
        mandateData = {
            "name": name,
            "language": language
        }

        return self.db.recordCreate("mandates", mandateData)

    def updateMandate(self, mandateId: int, mandateData: Dict[str, Any]) -> Dict[str, Any]:
        """
        Updates an existing mandate

        Args:
            mandateId: The ID of the mandate to update
            mandateData: The mandate data to update

        Returns:
            Dict[str, Any]: The updated mandate data

        Raises:
            ValueError: If the mandate is not found
        """
        # Check if the mandate exists
        mandate = self.getMandate(mandateId)
        if not mandate:
            raise ValueError(f"Mandate with ID {mandateId} not found")

        # Update the mandate
        updatedMandate = self.db.recordModify("mandates", mandateId, mandateData)

        return updatedMandate

    def deleteMandate(self, mandateId: int) -> bool:
        """
        Deletes a mandate and all associated users and data

        Args:
            mandateId: The ID of the mandate to delete

        Returns:
            bool: True if the mandate was successfully deleted, otherwise False
        """
        # Check if the mandate exists
        mandate = self.getMandate(mandateId)
        if not mandate:
            return False

        # Check if it's the initial mandate (the Root mandate must never be removed)
        initialMandateId = self.getInitialId("mandates")
        if initialMandateId is not None and mandateId == initialMandateId:
            logger.warning(f"Attempt to delete the Root mandate was prevented")
            return False

        # Find all users of the mandate
        users = self.getUsersByMandate(mandateId)

        # Delete all users of the mandate and their associated data
        for user in users:
            self.deleteUser(user["id"])

        # Delete the mandate
        success = self.db.recordDelete("mandates", mandateId)

        if success:
            logger.info(f"Mandate with ID {mandateId} was successfully deleted")
        else:
            logger.error(f"Error deleting mandate with ID {mandateId}")

        return success

    # User methods

    def getAllUsers(self) -> List[Dict[str, Any]]:
        """Returns all users"""
        users = self.db.getRecordset("users")
        # Remove password hashes from the response
        for user in users:
            if "hashedPassword" in user:
                del user["hashedPassword"]
        return users

    def getUsersByMandate(self, mandateId: int) -> List[Dict[str, Any]]:
        """
        Returns all users of a specific mandate

        Args:
            mandateId: The ID of the mandate

        Returns:
            List[Dict[str, Any]]: List of users in the mandate
        """
        users = self.db.getRecordset("users", recordFilter={"mandateId": mandateId})
        # Remove password hashes from the response
        for user in users:
            if "hashedPassword" in user:
                del user["hashedPassword"]
        return users

    def getUserByUsername(self, username: str) -> Optional[Dict[str, Any]]:
        """Returns a user by username"""
        # NOTE(review): unlike the sibling lookups this fetches all users and
        # scans linearly instead of using recordFilter={"username": ...} —
        # consider aligning if recordFilter supports it. The returned record
        # still contains hashedPassword (needed by authenticateUser).
        users = self.db.getRecordset("users")
        for user in users:
            if user.get("username") == username:
                return user
        return None

    def getUser(self, userId: int) -> Optional[Dict[str, Any]]:
        """Returns a user by ID"""
        users = self.db.getRecordset("users", recordFilter={"id": userId})
        if users:
            user = users[0]
            # Remove password hash from the API response
            if "hashedPassword" in user:
                userCopy = user.copy()
                del userCopy["hashedPassword"]
                return userCopy
            return user
        return None

    def createUser(self, username: str, password: str, email: str = None,
                   fullName: str = None, language: str = "de", mandateId: int = None,
                   disabled: bool = False, privilege: str = "user") -> Dict[str, Any]:
        """
        Creates a new user

        Args:
            username: The username
            password: The password
            email: The email address (optional)
            fullName: The full name (optional)
            language: The preferred language (default: "de")
            mandateId: The ID of the mandate (optional)
            disabled: Whether the user is disabled (default: False)
            privilege: The privilege level (default: "user")

        Returns:
            Dict[str, Any]: The created user data

        Raises:
            ValueError: If the username already exists
        """
        # Check if the username already exists
        existingUser = self.getUserByUsername(username)
        if existingUser:
            raise ValueError(f"User '{username}' already exists")

        # Use the provided mandateId or the current context
        userMandateId = mandateId if mandateId is not None else self.mandateId

        userData = {
            "mandateId": userMandateId,
            "username": username,
            "email": email,
            "fullName": fullName,
            "disabled": disabled,
            "language": language,
            "privilege": privilege,
            "hashedPassword": self._getPasswordHash(password)
        }

        createdUser = self.db.recordCreate("users", userData)

        # Remove password hash from the response
        if "hashedPassword" in createdUser:
            del createdUser["hashedPassword"]

        return createdUser

    def authenticateUser(self, username: str, password: str) -> Optional[Dict[str, Any]]:
        """
        Authenticates a user by username and password

        Args:
            username: The username
            password: The password

        Returns:
            Optional[Dict[str, Any]]: The user data or None if authentication fails
        """
        user = self.getUserByUsername(username)

        if not user:
            return None

        if not self._verifyPassword(password, user.get("hashedPassword", "")):
            return None

        # Check if the user is disabled
        if user.get("disabled", False):
            return None

        # Create a copy without password hash
        authenticatedUser = {**user}
        if "hashedPassword" in authenticatedUser:
            del authenticatedUser["hashedPassword"]

        return authenticatedUser

    def updateUser(self, userId: int, userData: Dict[str, Any]) -> Dict[str, Any]:
        """
        Updates a user

        Args:
            userId: The ID of the user to update
            userData: The user data to update

        Returns:
            Dict[str, Any]: The updated user data

        Raises:
            ValueError: If the user is not found
        """
        # Get the current user with password hash (directly from DB)
        users = self.db.getRecordset("users", recordFilter={"id": userId})
        if not users:
            raise ValueError(f"User with ID {userId} not found")

        user = users[0]

        # If the password is being changed, hash it
        if "password" in userData:
            userData["hashedPassword"] = self._getPasswordHash(userData["password"])
            del userData["password"]

        # Update the user
        updatedUser = self.db.recordModify("users", userId, userData)

        # Remove password hash from the response
        if "hashedPassword" in updatedUser:
            del updatedUser["hashedPassword"]

        return updatedUser

    def disableUser(self, userId: int) -> Dict[str, Any]:
        """Disables a user"""
        return self.updateUser(userId, {"disabled": True})

    def enableUser(self, userId: int) -> Dict[str, Any]:
        """Enables a user"""
        return self.updateUser(userId, {"disabled": False})

    def _deleteUserReferencedData(self, userId: int) -> None:
        """
        Deletes all data associated with a user

        Args:
            userId: The ID of the user
        """
        # Here all tables are searched and all entries referencing this user are deleted

        # Delete user attributes
        try:
            attributes = self.db.getRecordset("attributes", recordFilter={"userId": userId})
            for attribute in attributes:
                self.db.recordDelete("attributes", attribute["id"])
        except Exception as e:
            logger.error(f"Error deleting attributes for user {userId}: {e}")

        # Other tables that might reference the user
        # (Depending on the application's database structure)

        logger.info(f"All referenced data for user {userId} has been deleted")

    def deleteUser(self, userId: int) -> bool:
        """
        Deletes a user and all associated data

        Args:
            userId: The ID of the user to delete

        Returns:
            bool: True if the user was successfully deleted, otherwise False
        """
        # Check if the user exists
        users = self.db.getRecordset("users", recordFilter={"id": userId})
        if not users:
            return False

        # Check if it's the initial user (the Root Admin must never be removed)
        initialUserId = self.getInitialId("users")
        if initialUserId is not None and userId == initialUserId:
            logger.warning("Attempt to delete the Root Admin was prevented")
            return False

        # Delete all data associated with the user
        self._deleteUserReferencedData(userId)

        # Delete the user
        success = self.db.recordDelete("users", userId)

        if success:
            logger.info(f"User with ID {userId} was successfully deleted")
        else:
            logger.error(f"Error deleting user with ID {userId}")

        return success


# Singleton factory for GatewayInterface instances per context
_gatewayInterfaces = {}

def getGatewayInterface(mandateId: int = None, userId: int = None) -> GatewayInterface:
    """
    Returns a GatewayInterface instance for the specified context.
    Reuses existing instances.

    Args:
        mandateId: ID of the mandate
        userId: ID of the user

    Returns:
        GatewayInterface instance
    """
    contextKey = f"{mandateId}_{userId}"
    if contextKey not in _gatewayInterfaces:
        _gatewayInterfaces[contextKey] = GatewayInterface(mandateId, userId)
    return _gatewayInterfaces[contextKey]

# Initialize the interface
getGatewayInterface()
\ No newline at end of file
diff --git a/static/126_gatewayModel.py b/static/126_gatewayModel.py
new file mode 100644
index 00000000..83d759c7
--- /dev/null
+++ b/static/126_gatewayModel.py
@@ -0,0 +1,103 @@
"""
Data models for the gateway system.
"""
from pydantic import BaseModel, Field
from typing import List, Dict, Any, Optional
from datetime import datetime


class Label(BaseModel):
    """Label for an attribute or a class with support for multiple languages"""
    default: str
    translations: Dict[str, str] = {}

    def getLabel(self, language: str = None):
        """Returns the label in the specified language, or the default value if not available"""
        if language and language in self.translations:
            return self.translations[language]
        return self.default


class Mandate(BaseModel):
    """Data model for a mandate"""
    id: int = Field(description="Unique ID of the mandate")
    name: str = Field(description="Name of the mandate")
    language: str = Field(description="Default language of the mandate")

    label: Label = Field(
        default=Label(default="Mandate", translations={"en": "Mandate", "fr": "Mandat"}),
        description="Label for the class"
    )

    # Labels for attributes
    fieldLabels: Dict[str, Label] = {
        "id": Label(default="ID", translations={}),
        "name": Label(default="Name of the mandate", translations={"en": "Mandate name", "fr": "Nom du mandat"}),
        "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"})
    }

class User(BaseModel):
    """Data model for a user"""
    id: int = Field(description="Unique ID of the user")
    mandateId: int = Field(description="ID of the associated mandate")
    username: str = Field(description="Username for login")
    email: Optional[str] = Field(None, description="Email address of the user")
    fullName: Optional[str] = Field(None, description="Full name of the user")
    language: str = Field(description="Preferred language of the user")
    disabled: Optional[bool] = Field(False, description="Indicates whether the user is disabled")
    privilege: str = Field(description="Permission level")  # sysadmin,admin,user

    label: Label = Field(
        default=Label(default="User", translations={"en": "User", "fr": "Utilisateur"}),
        description="Label for the class"
    )

    # Labels for attributes
    fieldLabels: Dict[str, Label] = {
        "id": Label(default="ID", translations={}),
        "mandateId": Label(default="Mandate ID", translations={"en": "Mandate ID", "fr": "ID de mandat"}),
        "username": Label(default="Username", translations={"en": "Username", "fr": "Nom d'utilisateur"}),
        "email": Label(default="Email", translations={"en": "Email", "fr": "E-mail"}),
        "fullName": Label(default="Full name", translations={"en": "Full name", "fr": "Nom complet"}),
        "language": Label(default="Language", translations={"en": "Language", "fr": "Langue"}),
        "disabled": Label(default="Disabled", translations={"en": "Disabled", "fr": "Désactivé"}),
        "privilege": Label(default="Permission level", translations={"en": "Access level", "fr": "Niveau d'accès"}),
    }


class UserInDB(User):
    """Extended user class with password hash"""
    hashedPassword: str = Field(description="Hash of the user password")

    label: Label = Field(
        default=Label(default="User Access", translations={"en": "User Access", "fr": "Accès de l'utilisateur"}),
        description="Label for the class"
    )

    # Additional label for the password field
    # NOTE(review): this assignment REPLACES the inherited User.fieldLabels
    # mapping rather than extending it — confirm that losing the parent's
    # field labels here is intentional.
    fieldLabels: Dict[str, Label] = {
        "hashedPassword": Label(default="Password hash", translations={"en": "Password hash", "fr": "Hachage de mot de passe"})
    }


class Token(BaseModel):
    """Data model for an authentication token"""
    accessToken: str = Field(description="The issued access token")
    tokenType: str = Field(description="Type of token (usually 'bearer')")
    label: Label = Field(
        default=Label(default="Token", translations={"en": "Token", "fr": "Jeton"}),
        description="Label for the class"
    )

    # Labels for attributes
    fieldLabels: Dict[str, Label] = {
        "accessToken": Label(default="Access token", translations={"en": "Access token", "fr": "Jeton d'accès"}),
        "tokenType": Label(default="Token type", translations={"en": "Token type", "fr": "Type de jeton"})
    }


class TokenData(BaseModel):
    """Data for token decoding and validation"""
    username: Optional[str] = None
    mandateId: Optional[int] = None
    exp: Optional[datetime] = None
\ No newline at end of file
diff --git a/static/127_documentProcessor.py b/static/127_documentProcessor.py
new file mode 100644
index 00000000..528109be
--- /dev/null
+++ b/static/127_documentProcessor.py
@@ -0,0 +1,933 @@
"""
Module for extracting content from various file formats.
Provides specialized functions for processing text, PDF, Office documents, images, etc.
"""
Module for extracting content from various file formats.
Provides specialized functions for processing text, PDF, Office documents, images, etc.
"""

import logging
import os
import io
from typing import Dict, Any, List, Optional, Union, Tuple
import base64

# Configure logger
logger = logging.getLogger(__name__)

# Optional imports - only loaded when needed
pdfExtractorLoaded = False
officeExtractorLoaded = False
imageProcessorLoaded = False


def _fileExt(fileName: str, default: str = "bin") -> str:
    """Return the extension of fileName without the leading dot, or *default* if there is none."""
    ext = os.path.splitext(fileName)[1]
    return ext[1:] if ext else default


def getDocumentContents(fileMetadata: Dict[str, Any], fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Main function for extracting content from a file based on its MIME type.
    Delegates to specialized extraction functions.

    Args:
        fileMetadata: File metadata (name, MIME type, etc.)
        fileContent: Binary data of the file

    Returns:
        List of document-content objects with metadata and a base64Encoded flag.
        This function never raises: on any extraction error the original bytes
        are returned base64-encoded as a single fallback item.
    """
    try:
        mimeType = fileMetadata.get("mimeType", "application/octet-stream")
        fileName = fileMetadata.get("name", "unknown")

        logger.info(f"Extracting content from file '{fileName}' (MIME type: {mimeType})")

        contents = []

        # CSV first: it is text/* but has its own handler
        if mimeType == "text/csv":
            contents.extend(extractCsvContent(fileName, fileContent))

        # Other text-based formats
        elif mimeType.startswith("text/") or mimeType in [
            "application/json",
            "application/xml",
            "application/javascript",
            "application/x-python"
        ]:
            contents.extend(extractTextContent(fileName, fileContent, mimeType))

        # SVG files (XML text and image at the same time)
        elif mimeType == "image/svg+xml":
            contents.extend(extractSvgContent(fileName, fileContent))

        # Images
        elif mimeType.startswith("image/"):
            contents.extend(extractImageContent(fileName, fileContent, mimeType))

        # PDF documents
        elif mimeType == "application/pdf":
            contents.extend(extractPdfContent(fileName, fileContent))

        # Word documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "application/msword"
        ]:
            contents.extend(extractWordContent(fileName, fileContent, mimeType))

        # Excel documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
            "application/vnd.ms-excel"
        ]:
            contents.extend(extractExcelContent(fileName, fileContent, mimeType))

        # PowerPoint documents
        elif mimeType in [
            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
            "application/vnd.ms-powerpoint"
        ]:
            contents.extend(extractPowerpointContent(fileName, fileContent, mimeType))

        # Binary data as fallback for unknown formats
        else:
            contents.extend(extractBinaryContent(fileName, fileContent, mimeType))

        # Fallback when no content could be extracted
        if not contents:
            logger.warning(f"No content extracted from file '{fileName}', using binary fallback")
            contents.append({
                "sequenceNr": 1,
                "name": '1_undefined',
                "ext": _fileExt(fileName),
                "contentType": mimeType,
                "data": base64.b64encode(fileContent).decode('utf-8'),
                "base64Encoded": True,
                "metadata": {
                    "isText": False
                }
            })

        # Normalize every item: guarantee the top-level base64Encoded flag and
        # mirror it into metadata for backward compatibility with consumers
        # that still read the old metadata flag.
        for content in contents:
            if "base64Encoded" not in content:
                if isinstance(content.get("data"), bytes):
                    # Raw bytes are not JSON-safe - encode them now
                    content["data"] = base64.b64encode(content["data"]).decode('utf-8')
                    content["base64Encoded"] = True
                else:
                    # Assume text content if not explicitly marked
                    content["base64Encoded"] = False

            if "metadata" not in content:
                content["metadata"] = {}
            content["metadata"]["base64Encoded"] = content["base64Encoded"]

        logger.info(f"Successfully extracted {len(contents)} content items from file '{fileName}'")
        return contents

    except Exception as e:
        logger.error(f"Error during content extraction: {str(e)}")
        # Fallback on error - return original data, base64-encoded
        return [{
            "sequenceNr": 1,
            "name": fileMetadata.get("name", "unknown"),
            "ext": _fileExt(fileMetadata.get("name", "")),
            "contentType": fileMetadata.get("mimeType", "application/octet-stream"),
            "data": base64.b64encode(fileContent).decode('utf-8'),
            "base64Encoded": True,
            "metadata": {
                "isText": False,
                "base64Encoded": True  # For backward compatibility
            }
        }]


def _loadPdfExtractor():
    """Loads PDF extraction libraries (PyPDF2, PyMuPDF) on first use; degrades gracefully if missing."""
    global pdfExtractorLoaded
    if not pdfExtractorLoaded:
        try:
            global PyPDF2, fitz
            import PyPDF2
            import fitz  # PyMuPDF for more extensive PDF processing
            pdfExtractorLoaded = True
            logger.info("PDF extraction libraries successfully loaded")
        except ImportError as e:
            logger.warning(f"PDF extraction libraries could not be loaded: {e}")


def _loadOfficeExtractor():
    """Loads Office extraction libraries (python-docx, openpyxl) on first use; degrades gracefully if missing."""
    global officeExtractorLoaded
    if not officeExtractorLoaded:
        try:
            global docx, openpyxl
            import docx  # python-docx for Word documents
            import openpyxl  # for Excel files
            officeExtractorLoaded = True
            logger.info("Office extraction libraries successfully loaded")
        except ImportError as e:
            logger.warning(f"Office extraction libraries could not be loaded: {e}")


def _loadImageProcessor():
    """Loads image processing libraries (Pillow) on first use; degrades gracefully if missing."""
    global imageProcessorLoaded
    if not imageProcessorLoaded:
        try:
            global PIL, Image
            from PIL import Image
            imageProcessorLoaded = True
            logger.info("Image processing libraries successfully loaded")
        except ImportError as e:
            logger.warning(f"Image processing libraries could not be loaded: {e}")


def extractTextContent(fileName: str, fileContent: bytes, mimeType: str) -> List[Dict[str, Any]]:
    """
    Extracts text from text files.

    Tries UTF-8 first, then a list of common single-byte fallbacks; only if
    every decode fails is the raw content returned base64-encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file
        mimeType: MIME type of the file

    Returns:
        List with one text-content object (base64Encoded = False), or one
        binary-content object (base64Encoded = True) if decoding failed.
    """
    fileExtension = _fileExt(fileName, "txt")

    # utf-8 is the expected encoding; the rest are best-effort fallbacks.
    for encoding in ('utf-8', 'latin-1', 'cp1252', 'iso-8859-1'):
        try:
            textContent = fileContent.decode(encoding)
        except UnicodeDecodeError:
            logger.warning(
                f"Could not decode text from file '{fileName}' as {encoding}, trying alternative encodings"
            )
            continue

        metadata = {"isText": True}
        if encoding != 'utf-8':
            # Record which fallback encoding succeeded (matches prior behavior:
            # the key is absent for plain utf-8 content).
            logger.info(f"Text successfully decoded with encoding {encoding}")
            metadata["encoding"] = encoding

        return [{
            "sequenceNr": 1,
            "name": "1_text",
            "ext": fileExtension,
            "contentType": "text",
            "data": textContent,
            "base64Encoded": False,
            "metadata": metadata
        }]

    # No encoding worked - return binary data as fallback.
    # (latin-1 accepts any byte sequence, so this is a safety net only.)
    logger.warning(f"Could not decode text, using binary data")
    return [{
        "sequenceNr": 1,
        "name": "1_binary",
        "ext": fileExtension,
        "contentType": mimeType,
        "data": base64.b64encode(fileContent).decode('utf-8'),
        "base64Encoded": True,
        "metadata": {
            "isText": False
        }
    }]
def extractCsvContent(fileName: str, fileContent: bytes) -> List[Dict[str, Any]]:
    """
    Extracts content from CSV files.

    Tries UTF-8 first, then a list of common single-byte fallbacks; only if
    every decode fails is the raw content returned base64-encoded.

    Args:
        fileName: Name of the file
        fileContent: Binary data of the file

    Returns:
        List with one CSV-content object (base64Encoded = False), or one
        binary-content object (base64Encoded = True) if decoding failed.
    """
    # utf-8 is the expected encoding; the rest are best-effort fallbacks.
    for encoding in ('utf-8', 'latin-1', 'cp1252', 'iso-8859-1'):
        try:
            csvContent = fileContent.decode(encoding)
        except UnicodeDecodeError:
            logging.getLogger(__name__).warning(
                f"Could not decode CSV from file '{fileName}' as {encoding}, trying alternative encodings"
            )
            continue

        metadata = {"isText": True, "format": "csv"}
        if encoding != 'utf-8':
            # Record which fallback encoding succeeded (matches prior behavior:
            # the key is absent for plain utf-8 content).
            logging.getLogger(__name__).info(f"CSV successfully decoded with encoding {encoding}")
            metadata["encoding"] = encoding

        return [{
            "sequenceNr": 1,
            "name": "1_csv",
            "ext": "csv",
            "contentType": "csv",
            "data": csvContent,
            "base64Encoded": False,
            "metadata": metadata
        }]

    # No encoding worked - return binary data as fallback.
    # (latin-1 accepts any byte sequence, so this is a safety net only.)
    return [{
        "sequenceNr": 1,
        "name": "1_binary",
        "ext": "csv",
        "contentType": "text/csv",
        "data": base64.b64encode(fileContent).decode('utf-8'),
        "base64Encoded": True,
        "metadata": {
            "isText": False
        }
    }]
+ + Args: + fileName: Name of the file + fileContent: Binary data of the file + + Returns: + List of SVG-Content objects with dual text/image metadata + """ + contents = [] + + try: + # Extract SVG as text content (XML) + svgText = fileContent.decode('utf-8') + + # Check if it's actually SVG by looking for the SVG tag + if "