\n",
+ "\n",
+ "\n",
+ "\n",
+ "{body}\n",
+ "\n",
+ "\n",
+ "\"\"\"\n",
+ "\n",
+ "def spec_to_html(spec, filename=\"layout.html\"):\n",
+ " body = \"\"\n",
+ " for e in spec[\"elements\"]:\n",
+ " if e[\"type\"] == \"text\":\n",
+ " body += f'
{e[\"content\"]}
\\n'\n",
+ " elif e[\"type\"] == \"button\":\n",
+ " body += f'\\n'\n",
+ " \n",
+ " with open(filename, \"w\") as f:\n",
+ " f.write(HTML_TEMPLATE.format(body=body))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "776d29e6-c4a6-4a62-8251-66f44545c7ec",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from selenium import webdriver\n",
+ "from selenium.webdriver.chrome.options import Options\n",
+ "\n",
+ "def take_screenshot(html_path, output_img):\n",
+ " opts = Options()\n",
+ " opts.headless = True # headless mode for automation\n",
+ "\n",
+ " options = Options()\n",
+ " options.add_argument(\"--headless=new\")\n",
+ " options.add_argument(\"--no-sandbox\")\n",
+ " options.add_argument(\"--disable-dev-shm-usage\")\n",
+ " options.add_argument(\"--disable-gpu\")\n",
+ " options.add_argument(\"--allow-file-access-from-files\")\n",
+ " options.add_argument(\"--enable-local-file-accesses\")\n",
+ "\n",
+ " driver = webdriver.Chrome(options=options)\n",
+ " driver.get(\"file:///home/ubuntu/Trace/tests/\" + html_path)\n",
+ " driver.save_screenshot(output_img)\n",
+ " driver.quit()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "5528becd-9e68-413f-8398-a8207f4eed73",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "spec_to_html(spec = json.loads(spec))\n",
+ "take_screenshot(\"layout.html\", \"layout.png\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "026caff0-f1d8-4353-887f-5a18db921058",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG5AwwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiuQ+JnheLxT4H1G1ECPexRGa1cqCyuvzYB7Zxj8axIPFTT/AAdsLzQYIo9T1CJbO1ggUIFuWyrEAdMEM34Ur6N9v1HbVef6HpVFeX2Pij/hIvg0J7uGO51Ugac8M6B/9L3CMEg98kNWppmqHRLlvB/hTQ4Lx9ItozeSSXAtowzLwAQjFnbGTwB71T0bX9f1/mStk/6/r/I6/UNX0zSUR9S1G0slc4Q3M6xhj6DcRmroORkdK8l8c+JrfxZ8F7vU4IJLdhdxQzQSEFopFmUMuR1+tdnrXittL1XTdD06wOoaveoZEhMvlJHGvV3fBwM8cAk0W0+f6XH/AF+h09FcnP4wudG0zV77xLpK6fHpyI6tb3BuFuN2QAhKJzkAYI7iq1x421LR49OvNf0COx029lSETxXvnPAz/d81PLUAZ4JDNiktXYPM7WiuIvPGutReML7w5Y+GY724gtluo5E1EIrIxwN25BtPsN1UrH4i67qmk39zY+C5JLrTZ3gvbeTUo0COgyQrYO/j2A9M0r6X/rsO2tj0SiuLf4i2knhnQ9UsrGa4utbdYrOyLhCX53bm6BVwcnB+lXNH8WXF14puPDeraYthqUdsLqPybjz4pYicEhiqkEHjBWqs72Jvpc6iiiikMKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK8v8GeCtR0jx9q4uAw0KxuHudLQj5TJOBuI/wB0Ar7bjXqFFC0dweqseYab4K1Cw+Ld9MisPDkrDVVXb8v2sgpj68s3/fNU73QrbSPibrd/r3hJ9b0rVUikt7qPTftpgdV2spUKzLn1x2H4et0ULS3l/X+X3A9b+f8AX+f3nmHxAsbWT4XS2Ph7w/dQfaZ4pI7O00x0biRSxZEX5Tgd8ZxUutR32l/EDSfGVtpWoX+myaebG5jt7djPAd25W8ogMR64FelUU7/18rB5f1vc4HxjBN8QPAmpWGlWd9BdIY5YlvrV7bzGVg20eYB1xjPTkVn+M5bzxt4fsfDdjo2qQ3U9xA11Jc2bxRWqIwZiZGAVjxgbCc16dRSWj+afzQM89slmh+NOo3bWV+LNtKjt0uTZy+W0iuSQH246e9VPBpntIvHDXOn6nCLnUZ54A+nzgyoygAqNmTz2HNem0Umrq3k197uNOzv5p/crHhCeG9Rl+Hfgq7l8O3N82h3Lm/0q4tCJHjYnOI3A34GDgZr03wtb+GmuZLnRvCp0mdU2tLJo5s2IOPlBKKT+HHFdVRVX1b76k20QUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAB
RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFZra9YAnabiQZI3RWksinnHDKpBrSrA0Qk6BpxJyTaxZJ/3RQBc/t+x/uXv/AIAT/wDxFH9v2P8Acvf/AAAn/wDiKfRQAz+37H+5e/8AgBP/APEUf2/Y/wBy9/8AACf/AOIp9FADP7fsf7l7/wCAE/8A8RR/b9j/AHL3/wAAJ/8A4in0UAM/t+x/uXv/AIAT/wDxFH9v2P8Acvf/AAAn/wDiKfRQAz+37H+5e/8AgBP/APEUf2/Y/wBy9/8AACf/AOIp9FADP7fsf7l7/wCAE/8A8RVu0vre9D+Q7EoQGV0ZGXIyMqwBFV6gsCf7fvhnj7Lb8f8AApqANeiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACsDQ/8AkX9N/wCvWL/0AVv1gaH/AMi/pv8A16xf+gCgC/RRRQBXvb2206ylvLyZYbeFd0kjdFFUdP8AEml6pd/ZLeaVbnYZBFcW8kDMoOCyiRVLDkcjNO8RQxXGgXcM9hNfROoD28DYdhkcryOR1HOeK46VNYvIL6w0641fULCbT7iOT+1bIwPFJtwgRmRGfJ68N65pX3Ha9jvL+/ttLspLy8l8q3jxvfaWxk4HABPUirNeUeIIb3WLL/RNJ1RlXQxAQ1pJGxk82IlAGAOcAn+vFPvdDSd9QTRtGvLbS5WsVkgNrJF5konzIwQgE4TG5sYPqaq2tv63sTfS/wDXQ9UqrHqNtJNHDvZJZC4SOSNkZthwxAYAkcjnocjFefXfheO1l1uax0cxyRapaSWRityNi/uvMMWBwPv7tvHXNLpOktb+JdIu73SZjtutRUStZs5jLTBoySFO0EZIY4HPXmktbf10Q3ov68z0qiiigAqCw/5GC+/69bf/ANDmqeoLD/kYL7/r1t//AEOagDXooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArA0P/AJF/Tf8Ar1i/9AFb9c9aJfafY29mdLuZvIjWLzIpItrbRjI3ODzjPSgDRoqp9pvv+gLe/wDfyD/45R9pvv8AoC3v/fyD/wCOUAW6Kqfab7/oC3v/AH8g/wDjlH2m+/6At7/38g/+OUAW6Kqfab7/AKAt7/38g/8AjlH2m+/6At7/AN/IP/jlAFuiqn2m+/6At7/38g/+OUfab7/oC3v/AH8g/wDjlAFuiqn2m+/6At7/AN/IP/jlH2m+/wCgLe/9/IP/AI5QBbqCw/5GC+/69bf/ANDmqP7Tff8AQFvf+/kH/wAcqfTYJ/t11eTwNB5sccSxuylvlLnJ2kgffx1PSgDUooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACii
igAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD/9k=",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAG5CAIAAACshSTDAAAilklEQVR4Ae3dT2xd5ZkHYGdUKbdiYbPBnkUTs4B4pIF4URKPgMFVg3JHUMVtkTBUCFMWBDak2STuZmBTwqa4iwF3AXHFFIxUWo9ERFCpcIeMCKUS7mQRA4sYNknYJB4J5WaV+a7tN34TPiAhTcDmuYu8r797/t3nfBI/nXPuZc2ZM2c6vAgQIECAAAECBM4V+Idz//QXAQIECBAgQIBAW0BIMg8IECBAgAABAhUBIamCYogAAQIECBAgICSZAwQIECBAgACBioCQVEExRIAAAQIECBAQkswBAgQIECBAgEBFQEiqoBgiQIAAAQIECAhJ5gABAgQIECBAoCIgJFVQDBEgQIAAAQIEhCRzgAABAgQIECBQERCSKiiGCBAgQIAAAQJCkjlAgAABAgQIEKgICEkVFEMECBAgQIAAASHJHCBAgAABAgQIVASEpAqKIQIECBAgQICAkGQOECBAgAABAgQqAkJSBcUQAQIECBAgQEBIMgcIECBAgAABAhUBIamCYogAAQIECBAgICSZAwQIECBAgACBioCQVEExRIAAAQIECBAQkswBAgQIECBAgEBFQEiqoBgiQIAAAQIECAhJ5gABAgQIECBAoCIgJFVQDBEgQIAAAQIEhCRzgAABAgQIECBQERCSKiiGCBAgQIAAAQJCkjlAgAABAgQIEKgICEkVFEMECBAgQIAAASHJHCBAgAABAgQIVASEpAqKIQIECBAgQICAkGQOECBAgAABAgQqAkJSBcUQAQIECBAgQEBIMgcIECBAgAABAhUBIamCYogAAQIECBAgICSZAwQIECBAgACBioCQVEExRIAAAQIECBAQkswBAgQIECBAgEBFQEiqoBgiQIAAAQIECAhJ5gABAgQIECBAoCIgJFVQDBEgQIAAAQIEhCRzgAABAgQIECBQERCSKiiGCBAgQIAAAQJCkjlAgAABAgQIEKgICEkVFEMECBAgQIAAASHJHCBAgAABAgQIVASEpAqKIQIECBAgQICAkGQOECBAgAABAgQqAkJSBcUQAQIECBAgQEBIMgcIECBAgAABAhUBIamCYogAAQIECBAgICSZAwQIECBAgACBioCQVEExRIAAAQIECBAQkswBAgQIECBAgEBFQEiqoHw1QydnJnYMNwd615TXwJ6Z1ldzFPZKgAABAgQILAoISRc8E1qzEzuG+nvaGWZNo3egOTyye2Lm5Oev3pqd2rN9eGhwYbWegaHdk7OfGX66+kfGJqfGtm/8/E16lwABAgQIELgiAt+6IntZFTtp9I2MTQ0ODPXf81/zXQM7xieHe8/7XCendw+P9Y5Nbu9rLL3T6BvaPT7Umh0fGtzTNT41OdRz3ir+JECAAAECBL6mAq4kXdyJ6R0caXZ3dBzfPzk9d/6axw5OTL62f+JTl5dac9OTM43B4QEJ6XwyfxMgQIAAga+vgJB0keemZ2C4ub6jY366pKRz75zN7R+f+rDj9NtTk+fehGtnpNmupox0kdIWJ0CAAAECX62AkHSx/pGSDk7uzympPH00sX++vbH3piamjy1vdSEj9SxfRzo2PT4y2NfT1X60qau3b3B4zznbWV4xdWXju4cGllYqT0R19fT2NXdPn4xFTs5O7jj7fqO91ZGx6WNLGW5ucqSv0d7bmt6R8Yk9I82yofbfXb0Dw2WpYzNTe0YG+3u7ylijp//8w1nY9eK75e3e/ub2iYNn9xv7VwkQIECAwCoUEJIu+qSWlDS04fxrSeWraRMzPVu3bVzb0fHh/onlq0ytuf2TsyUj9Xe1dzQ3NTL4vYenWoN79h85cfTw5I6+2ZdG/625fSqlqvMPqDwwPtz84ZP7F1c6ceLIu1NjI32tubmlFFSeeBoZvGd8tm/H5MGjJ04cnZl6bLA19bNmc/f+ha32Dk/Mzu7d1lkObGpsYrrV0z84NHTbhrXzH7790s+aA0M7Jua6BppDw83N3aeP/+2l0ZHdU3NLx3ByZk9z4IdPzvRun5w5euLowbFmx/SvH2gOjfnu3flnyd8ECBAgsAoFznhdtMCJt3a1v4K29ranDp9aXPnoqw9t6L7tibcOv3h3eWKpvPPM0jun3n3qtvUbd711or3c4nprNz/x7tJqZ04dfmZrSS+d2/YeaS9w5sypt55ob3p5kVOHn7qtBK/OrbHFhcVOvPHo1of+cLS9wpG97S1037/w18KbsaOODY++cWJx5MhCSFp//6vtVRZepw4vrNe5de+ROJil41te7cjCp+m+e3nTZb8lH5YlFj9QbEwlQIAAAQKrUMCVpPLf/It9dfUPDZcoc/rg5NTCN/rLHbXx/ScHRob7+5oj7atMpw9OTM2eLJttzU5PzfUML15HKotNz3Z09PYP9DaWdlnub/X1lKtSszPH2ot/+lXudk0ePN2xfmj70NmvzJWlugYeG989WNYsF6ompuc7ugeb+bHwcoDNcoDvlXVPfnqbCyONnr6+3rJ+62Tr7BJdvf19JeOdPLY4Vm7ETR/vWNs/2NcVi5QlekskOza78OliVCVAgAABAqtRQEj6Mme10d8c3ry2/ZD2/nLjqQSV8YON5kizZJ+ugZGhdn56e+FLbgsZqXeouZhvWsdm506X6PLr711dHglafF39L796rxxBySr14zg2e3D2dMfanr7ernMWKA8eLYycnGu/39HV0xO5a2GxEoF62mFmZjaeTDpn7cU/ltZYDknnLtM6OTd3snyU1x7+p2/H4a659oHXyoNXrfbr3KX9RYAAAQIEVpuAkPSlzmj5/aPhgXIX7O3JqZm58jjSbE9zpH1hp8SV/uGR9v2x9uPbczPlOlLvUFwDWgoWG5fvtp29NDk33uyqHUgJT+2rOuVJ7Ubt7TK2eNGnq6fr3AUaXe2B062li0KfsfLnD5ccVPJX990vnr1HF8fbml7+KajP34Z3CRAgQIDAihUQkr7cqWv0llRUrtX8bWpsbGxqrm9kZKBrcUuN/qGRZnnnw6nxsfHJub7h9gWmhVejvEpzMcGl0ehqr7MYlRa3ct6/i+Eo7pCdfbPcRWuVZ6Ma54enswtcQFMOt6S9sqXFIHYBa1iEAAECBAisIgEh6UuezEbv4HA7Jb330q9eKo8jxdWi9tZKfhpaXx40eu1Xv5lNGamjPIDUU1LH3MGZuRJgLuzVfgioXBGaO9i+81V5le/x9y0mmXM2WVJV+z5b+y5do7LWBQ217+j1tJ+Xmv6MfV/QVixEgAABAgRWqoCQ9GXPXDsltX98u6Nj/eLjSMsb6hlceHy7fCetBKnlG2Xlf/g22Nd+Xmlsz9SFxqSuvubghvIL3+XL+9VnsJey2ocHp2aOLR9A+1momfK496X9gmVPf7O/fUls8rH6vpf3pyNAgAABAqtQwP+77cuf1Pa1pPUvvdQ1tH3xcaTlLZXHt0c2j4/ONZfvtbXf7BrYvvv+yXt+8+FLI82OucdGBpYCVKu8M7j4Q0rL21jqugZ37Hlo/w9//efRwYGD23c
MN8svQZa3WuUGW9fA0GBv+/8oNzY9tH1ix47+ibFy0+/k7MHJx7b/7M+nu+9+bOELcJ/a5AUP9DZ3775tevTPb48ODR3bs3to6b5hq9XoG1z+ht4Fb86CBAgQIEBgZQnEs7jqlxAoP4+0eetTZ3/1KG2h/ADSts3n/HZRvHn0jWfu37y+3CJbfHVv3Hr/E384fKL8uNG7ex+9e+vmcqOuvNZv3nr3o3vfLcPt14nDL+7atrFc1Vl6dW7YeNu2Xa+mXzg6/OpTD23bvGFxkbXrN9//1BtHl37/6MRbTz20dfGdzrKzXWVnR99oL71wGawsu+2hZ9462t7F3bedXawMnVja9R92bVvabtl75/rNdz/6zNltLyziHwIECBAgsCoF1pRPFf/pVQkQIECAAAECBJYEPJNkKhAgQIAAAQIEKgJCUgXFEAECBAgQIEBASDIHCBAgQIAAAQIVASGpgmKIAAECBAgQICAkmQMECBAgQIAAgYqAkFRBMUSAAAECBAgQEJLMAQIECBAgQIBARUBIqqAYIkCAAAECBAgISeYAAQIECBAgQKAiICRVUAwRIECAAAECBIQkc4AAAQIECBAgUBEQkioohggQIECAAAECQpI5QIAAAQIECBCoCAhJFRRDBAgQIECAAAEhyRwgQIAAAQIECFQEhKQKiiECBAgQIECAgJBkDhAgQIAAAQIEKgJCUgXFEAECBAgQIEBASDIHCBAgQIAAAQIVASGpgmKIAAECBAgQICAkmQMECBAgQIAAgYqAkFRBMUSAAAECBAgQEJLMAQIECBAgQIBARUBIqqAYIkCAAAECBAgISeYAAQIECBAgQKAiICRVUAwRIECAAAECBIQkc4AAAQIECBAgUBEQkioohggQIECAAAECQpI5QIAAAQIECBCoCAhJFRRDBAgQIECAAAEhyRwgQIAAAQIECFQEhKQKiiECBAgQIECAgJBkDhAgQIAAAQIEKgJCUgXFEAECBAgQIEBASDIHCBAgQIAAAQIVASGpgmKIAAECBAgQICAkmQMECBAgQIAAgYqAkFRBMUSAAAECBAgQEJLMAQIECBAgQIBARUBIqqAYIkCAAAECBAgISeYAAQIECBAgQKAiICRVUAwRIECAAAECBIQkc4AAAQIECBAgUBEQkioohggQIECAAAECQpI5QIAAAQIECBCoCAhJFRRDBAgQIECAAAEhyRwgQIAAAQIECFQEhKQKiiECBAgQIECAgJBkDhAgQIAAAQIEKgJCUgXFEAECBAgQIEBASDIHCBAgQIAAAQIVASGpgmKIAAECBAgQICAkmQMECBAgQIAAgYqAkFRBMUSAAAECBAgQEJLMAQIECBAgQIBARUBIqqAYIkCAAAECBAgISeYAAQIECBAgQKAiICRVUAwRIECAAAECBIQkc4AAAQIECBAgUBEQkioohggQIECAAAECQpI5QIAAAQIECBCoCAhJFRRDBAgQIECAAAEhyRwgQIAAAQIECFQEhKQKiiECBAgQIECAgJBkDhAgQIAAAQIEKgJCUgXFEAECBAgQIEBASDIHCBAgQIAAAQIVASGpgmKIAAECBAgQICAkmQMECBAgQIAAgYqAkFRBMUSAAAECBAgQEJLMAQIECBAgQIBARUBIqqAYIkCAAAECBAgISeYAAQIECBAgQKAiICRVUAwRIECAAAECBIQkc4AAAQIECBAgUBEQkioohggQIECAAAECQpI5QIAAAQIECBCoCAhJFRRDBAgQIECAAIFvIbhiAs8///wnn3xy5MiRK7bHlbWja6+99qqrrrrvvvtW1mE7WgIECBBYrQJrzpw5s1o/29fqc5WE9O1vf3vLli1fq6P6uh3M66+/furUKTnp63ZeHA8BAgS+mQJut12h816uIUlIX2hdiArUFy5mAQIECBAgcAUEhKQrgNzehbtsFwgN6gKhLEaAAAECl1tASLrcwrZPgAABAgQIrEgBIWlFnjYHTYAAAQIECFxuASHpcgvbPgECBAgQILAiBYSkFXnaHDQBAgQIECBwuQWEpMstbPsECBAgQIDAihQQklbkaXPQBAgQIECAwOUWEJIut7DtEyBAgAABAitSQEhakafNQRMgQIAAAQKXW0BIutzCtk+AAAECBAisSAEhaUWeNgdNgAABAgQIXG4BIelyC9s+AQIECBAgsCIFhKQVedocNAECBAgQIHC5BYSkyy182bffOn7guZ0/ufPWG9dffXXPjbfe+eDocwc+an3mbo/ve2TTrTsPzH/mAvU3WoeeufPGu557v2y49f7rLx84/tl7qG/AKAECBAgQWGECQtIKO2GfOtz5A0/ufPydznt/+cpf5o6+98rTD19z6PGfPLL30N85xDRuePiV//3dT69vdLQ+2Dc2/qcPTn/qSAwQIECAAIFVJSAkrfTTOf/x8Vb3TT+646Z13Z2NznU33PHEb99889kHbmh0zB/YeeumB/cdX/iE6UpQR8fp+T/tuWtTufK0/sY7d75wqFxVar3/zJ2b7tw5+uCdt2/asP7G2x957uWnHin9jRs23TW6r1yYWlr/0Acv73zkyf/5a7l29cgL7ctKXgQIECBAYLUKCEkr/cxec8uPblm77+ePPP7C6wfeOdS+z9a5bl134/M+VuuDA+903PMfb86+/ewDjX2Pj77cTjtrO44fev+a7b/941/++9kt87//+fj/3Vv6N5+9Y37fM/s+iDi09js//vcdd1z3z/f+8rdP31suK3kRIECAAIFVKyAkrfRT21j346df+c/t3Yf2Pr7zrn/d+I/rb/3J6MLFoc/5YN/57gMP/7hcerp+y70P3LL2o79+tPCEUuf137/lhs6Oju7rb77umnXf/X6777zuu9c15j+ad3Ptczi9RYAAAQKrU0BIWg3ntfOmn/7yd3988y8fnph7++ktp/eN/mR06S5b9dM1Gt2dJQC1X43O0s0vZaC1jcVLQ2vb4421pZTX4r8LrX8IECBAgMA3SUBIWuFnu/XRgZf3vXP2u2ad19+x6xc7bjh96K/tG2854LQ+nj8dN81a8/NLbenmlyPTCrdw+AQIECBA4O8pICT9PTW/gm2dnn9n784Hd74QOamEphdf/KDzppvXNdZec113a+le2vyhA++c/V2A1scHXvz94uPaf3rhnY7rbr4+p6kv/BDlctNpt9++kMkCBAgQILDSBYSkFX4GO294+OlnH+7+089/sLHn6vJ1tU0P7m3d8+zvfrGlu6Nx/R0PbGk994Nbb739rtEDnTfd1N2xmG0a133/5o/33Lnpxk13jc1v2b37jnUl91zwq/O6m9fN7/3Bjbc/ftE/tnTB+7AgAQIECBD46gXWnDlz5qs/im/AEezatWt0dPQb8EEv9SM+8cQTTz755KVuxfoECBAgQOCSBVxJumRCGyBAgAABAgRWo4CQtBrPqs9EgAABAgQIXLKAkHTJhDZAgAABAgQIrEYBIWk1nlWfiQABAgQIELhkASHpkgltgAABAgQIEFiNAkLSajyrPhMBAgQIECBwyQJC0iUT2gABAgQIECCwGgWEpNV4Vn0mAg
QIECBA4JIFhKRLJrQBAgQIECBAYDUKCEmr8az6TAQIECBAgMAlCwhJl0xoAwQIECBAgMBqFBCSrtBZvfbaa6/Qnlb4bkCt8BPo8AkQILB6BISkK3Qur7rqqtdff/0K7WzF7qYQFagVe/gOnAABAgRWlcCaM2fOrKoP9DX+MM8///wnn3xy5MiRr/ExfpWHVq4hlYR03333fZUHYd8ECBAgQCAEhKSQUAkQIECAAAECScDttoShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJKAkJQwtAQIECBAgACBEBCSQkIlQIAAAQIECCQBISlhaAkQIECAAAECISAkhYRKgAABAgQIEEgCQlLC0BIgQIAAAQIEQkBICgmVAAECBAgQIJAEhKSEoSVAgAABAgQIhICQFBIqAQIECBAgQCAJCEkJQ0uAAAECBAgQCAEhKSRUAgQIECBAgEASEJIShpYAAQIECBAgEAJCUkioBAgQIECAAIEkICQlDC0BAgQIECBAIASEpJBQCRAgQIAAAQJJQEhKGFoCBAgQIECAQAgISSGhEiBAgAABAgSSgJCUMLQECBAgQIAAgRAQkkJCJUCAAAECBAgkASEpYWgJECBAgAABAiEgJIWESoAAAQIECBBIAkJSwtASIECAAAECBEJASAoJlQABAgQIECCQBISkhKElQIAAAQIECISAkBQSKgECBAgQIEAgCQhJCUNLgAABAgQIEAgBISkkVAIECBAgQIBAEhCSEoaWAAECBAgQIBACQlJIqAQIECBAgACBJCAkJQwtAQIECBAgQCAEhKSQUAkQIECAAAECSUBIShhaAgQIECBAgEAICEkhoRIgQIAAAQIEkoCQlDC0BAgQIECAAIEQEJJCQiVAgAABAgQIJAEhKWFoCRAgQIAAAQIhICSFhEqAAAECBAgQSAJCUsLQEiBAgAABAgRCQEgKCZUAAQIECBAgkASEpIShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJKAkJQwtAQIECBAgACBEBCSQkIlQIAAAQIECCQBISlhaAkQIECAAAECISAkhYRKgAABAgQIEEgCQlLC0BIgQIAAAQIEQkBICgmVAAECBAgQIJAEhKSEoSVAgAABAgQIhICQFBIqAQIECBAgQCAJCEkJQ0uAAAECBAgQCAEhKSRUAgQIECBAgEASEJIShpYAAQIECBAgEAJCUkioBAgQIECAAIEkICQlDC0BAgQIECBAIASEpJBQCRAgQIAAAQJJQEhKGFoCBAgQIECAQAgISSGhEiBAgAABAgSSgJCUMLQECBAgQIAAgRAQkkJCJUCAAAECBAgkASEpYWgJECBAgAABAiEgJIWESoAAAQIECBBIAkJSwtASIECAAAECBEJASAoJlQABAgQIECCQBISkhKElQIAAAQIECISAkBQSKgECBAgQIEAgCQhJCUNLgAABAgQIEAgBISkkVAIECBAgQIBAEhCSEoaWAAECBAgQIBACQlJIqAQIECBAgACBJCAkJQwtAQIECBAgQCAEhKSQUAkQIECAAAECSUBIShhaAgQIECBAgEAICEkhoRIgQIAAAQIEkoCQlDC0BAgQIECAAIEQEJJCQiVAgAABAgQIJAEhKWFoCRAgQIAAAQIhICSFhEqAAAECBAgQSAJCUsLQEiBAgAABAgRCQEgKCZUAAQIECBAgkASEpIShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJKAkJQwtAQIECBAgACBEBCSQkIlQIAAAQIECCQBISlhaAkQIECAAAECISAkhYRKgAABAgQIEEgCQlLC0BIgQIAAAQIEQkBICgmVAAECBAgQIJAEhKSEoSVAgAABAgQIhICQFBIqAQIECBAgQCAJCEkJQ0uAAAECBAgQCAEhKSRUAgQIECBAgEASEJIShpYAAQIECBAgEAJCUkioBAgQIECAAIEkICQlDC0BAgQIECBAIASEpJBQCRAgQIAAAQJJQEhKGFoCBAgQIECAQAgISSGhEiBAgAABAgSSgJCUMLQECBAgQIAAgRAQkkJCJUCAAAECBAgkASEpYWgJECBAgAABAiEgJIWESoAAAQIECBBIAkJSwtASIECAAAECBEJASAoJlQABAgQIECCQBISkhKElQIAAAQIECISAkBQSKgECBAgQIEAgCQhJCUNLgAABAgQIEAgBISkkVAIECBAgQIBAEhCSEoaWAAECBAgQIBACQlJIqAQIECBAgACBJCAkJQwtAQIECBAgQCAEhKSQUAkQIECAAAECSUBIShhaAgQIECBAgEAICEkhoRIgQIAAAQIEkoCQlDC0BAgQIECAAIEQEJJCQiVAgAABAgQIJAEhKWFoCRAgQIAAAQIhICSFhEqAAAECBAgQSAJCUsLQEiBAgAABAgRCQEgKCZUAAQIECBAgkASEpIShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJKAkJQwtAQIECBAgACBEBCSQkIlQIAAAQIECCQBISlhaAkQIECAAAECISAkhYRKgAABAgQIEEgCQlLC0BIgQIAAAQIEQkBICgmVAAECBAgQIJAEhKSEoSVAgAABAgQIhICQFBIqAQIECBAgQCAJCEkJQ0uAAAECBAgQCAEhKSRUAgQIECBAgEASEJIShpYAAQIECBAgEAJCUkioBAgQIECAAIEkICQlDC0BAgQIECBAIASEpJBQCRAgQIAAAQJJQEhKGFoCBAgQIECAQAgISSGhEiBAgAABAgSSgJCUMLQECBAgQIAAgRAQkkJCJUCAAAECBAgkASEpYWgJECBAgAABAiEgJIWESoAAAQIECBBIAkJSwtASIECAAAECBEJASAoJlQABAgQIECCQBISkhKElQIAAAQIECISAkBQSKgECBAgQIEAgCQhJCUNLgAABAgQIEAgBISkkVAIECBAgQIBAEhCSEoaWAAECBAgQIBACQlJIqAQIECBAgACBJCAkJQwtAQIECBAgQCAEhKSQUAkQI
ECAAAECSUBIShhaAgQIECBAgEAICEkhoRIgQIAAAQIEkoCQlDC0BAgQIECAAIEQEJJCQiVAgAABAgQIJAEhKWFoCRAgQIAAAQIhICSFhEqAAAECBAgQSAJCUsLQEiBAgAABAgRCQEgKCZUAAQIECBAgkASEpIShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJKAkJQwtAQIECBAgACBEBCSQkIlQIAAAQIECCQBISlhaAkQIECAAAECISAkhYRKgAABAgQIEEgCQlLC0BIgQIAAAQIEQkBICgmVAAECBAgQIJAEhKSEoSVAgAABAgQIhICQFBIqAQIECBAgQCAJCEkJQ0uAAAECBAgQCAEhKSRUAgQIECBAgEASEJIShpYAAQIECBAgEAJCUkioBAgQIECAAIEkICQlDC0BAgQIECBAIASEpJBQCRAgQIAAAQJJQEhKGFoCBAgQIECAQAgISSGhEiBAgAABAgSSgJCUMLQECBAgQIAAgRAQkkJCJUCAAAECBAgkASEpYWgJECBAgAABAiEgJIWESoAAAQIECBBIAkJSwtASIECAAAECBEJASAoJlQABAgQIECCQBISkhKElQIAAAQIECISAkBQSKgECBAgQIEAgCQhJCUNLgAABAgQIEAgBISkkVAIECBAgQIBAEhCSEoaWAAECBAgQIBACQlJIqAQIECBAgACBJCAkJQwtAQIECBAgQCAEhKSQUAkQIECAAAECSUBIShhaAgQIECBAgEAICEkhoRIgQIAAAQIEkoCQlDC0BAgQIECAAIEQEJJCQiVAgAABAgQIJAEhKWFoCRAgQIAAAQIhICSFhEqAAAECBAgQSAJCUsLQEiBAgAABAgRCQEgKCZUAAQIECBAgkASEpIShJUCAAAECBAiEgJAUEioBAgQIECBAIAkISQlDS4AAAQIECBAIASEpJFQCBAgQIECAQBIQkhKGlgABAgQIECAQAkJSSKgECBAgQIAAgSQgJCUMLQECBAgQIEAgBISkkFAJECBAgAABAklASEoYWgIECBAgQIBACAhJIaESIECAAAECBJLA/wPBKjSNxi3sfwAAAABJRU5ErkJggg==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ImageContent(\"layout.png\").show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "45d2a69f-a28f-40a6-a619-427f133359f6",
+ "metadata": {},
+ "source": [
+ "Now we have successfully took a screenshot of an HTML, we can start our optimization."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "67cb3612-b7ce-467d-b8b3-b7259a313378",
+ "metadata": {},
+ "source": [
+ "### Step 1: Using the multi-modal context block to provide a reference image \n",
+ "\n",
+ "The task is for the model to implement an HTML page according to the image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "7ab8f0d7-9f08-4bc1-aad8-065334b14e88",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "html_param = node(\"\", trainable=True)\n",
+ "optimizer = OptoPrimeV3([html_param], use_json_object_format=True,\n",
+ " memory_size=5,\n",
+ " ignore_extraction_error=False,\n",
+ " include_example=False,\n",
+ " optimizer_prompt_symbol_set=OptimizerPromptSymbolSetJSON())\n",
+ "\n",
+ "optimizer.add_context(\"The reference image looks like this: \", ImageContent(\"./layout.png\"))\n",
+ "optimizer.zero_feedback()\n",
+ "\n",
+ "feedback_text = \"\"\"Please fill in the empty HTML page such that it looks like the target image.\"\"\"\n",
+ "\n",
+ "optimizer.backward(html_param, feedback=feedback_text)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "d5be0db1-9561-4b9d-9268-7a77706cf0f2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "summary = optimizer.summarize()\n",
+ "system_prompt, user_content_blocks = optimizer.construct_prompt(summary)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "36c5c516-702d-41f0-8bb4-781c991df55a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "TextContent(type='text', text='\\nNow you see problem instance:\\n\\n================================\\n \\n# Instruction\\nYou need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\\n\\n# Code\\n\\n\\n# Documentation\\n\\n\\n# Variables\\n \\n\\n\\n\\n\\n\\n \\n\\n# Inputs\\n \\n\\n# Others\\n \\n\\n# Outputs\\n \\n\\n# Context\\n The reference image looks like this: ')\n",
+ "ImageContent(image_url=None, image_data=iVBORw0KGg..., image_bytes=None, media_type=image/png)\n",
+ "TextContent(type='text', text='\\n\\n# Feedback\\n Please fill in the empty HTML page such that it looks like the target image. \\n================================\\n\\n \\nWhat are your suggestions on variables str0?\\n\\nYour response:\\n')\n"
+ ]
+ }
+ ],
+ "source": [
+ "for block in user_content_blocks:\n",
+ " print(block)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3b3ea5fe-9a3b-4273-8280-5fd626356e4c",
+ "metadata": {},
+ "source": [
+ "The above information might be hard to read, but we can also directly print this.\n",
+ "\n",
+ "Note that the image becomes `[IMAGE]`, a placeholder in the text rendering (we are **not** sending this placeholder to the LLM)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "0db82fa4-91b6-4d94-b77b-1ead0489c034",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Now you see problem instance:\n",
+ "\n",
+ "================================\n",
+ " \n",
+ "# Instruction\n",
+ "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n",
+ "\n",
+ "# Code\n",
+ "\n",
+ "\n",
+ "# Documentation\n",
+ "\n",
+ "\n",
+ "# Variables\n",
+ " \n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "# Inputs\n",
+ " \n",
+ "\n",
+ "# Others\n",
+ " \n",
+ "\n",
+ "# Outputs\n",
+ " \n",
+ "\n",
+ "# Context\n",
+ " The reference image looks like this: \n",
+ "[IMAGE]\n",
+ " \n",
+ "\n",
+ "# Feedback\n",
+ " Please fill in the empty HTML page such that it looks like the target image. \n",
+ "================================\n",
+ "\n",
+ " \n",
+ "What are your suggestions on variables str0?\n",
+ "\n",
+ "Your response:\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(user_content_blocks)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "161f1f09-e385-4bf5-ae28-d3c477873f12",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Prompt\n",
+ " You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n",
+ "\n",
+ "Specifically, a problem will be composed of the following parts:\n",
+ "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n",
+ "- #Code: the code defined in the problem.\n",
+ "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. You can use the values in #Others to help infer how those functions work.\n",
+ "- #Variables: the input variables that you can change/tweak (trainable).\n",
+ "- #Inputs: the values of fixed inputs to the code, which CANNOT be changed (fixed).\n",
+ "- #Others: the intermediate values created through the code execution.\n",
+ "- #Outputs: the result of the code output.\n",
+ "- #Feedback: the feedback about the code's execution result.\n",
+ "- #Context: the context information that might be useful to solve the problem.\n",
+ "\n",
+ "In `#Variables`, `#Inputs`, `#Outputs`, and `#Others`, the format is:\n",
+ "\n",
+ "For variables we express as this:\n",
+ "\n",
+ "\n",
+ "\n",
+ "value\n",
+ "\n",
+ "\n",
+ "constraint_expression\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "If `data_type` is `code`, it means `value` is the source code of a python code, which may include docstring and definitions.\n",
+ "Output_format: Your output should be in the following XML or JSON format:\n",
+ "\n",
+ "{\n",
+ "\"reasoning\": ,\n",
+ "\"suggestion\": {\n",
+ ": ,\n",
+ ": ,\n",
+ "}\n",
+ "}\n",
+ "\n",
+ "In , explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Outputs means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n",
+ "\n",
+ "If you need to suggest a change in the values of #Variables, write down the suggested values in . Remember you can change only the values in #Variables, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n",
+ "\n",
+ "If no changes are needed, just output TERMINATE.\n",
+ "\n",
+ "Now you see problem instance:\n",
+ "\n",
+ "================================\n",
+ " \n",
+ "# Instruction\n",
+ "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n",
+ "\n",
+ "# Code\n",
+ "\n",
+ "\n",
+ "# Documentation\n",
+ "\n",
+ "\n",
+ "# Variables\n",
+ " \n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "# Inputs\n",
+ " \n",
+ "\n",
+ "# Others\n",
+ " \n",
+ "\n",
+ "# Outputs\n",
+ " \n",
+ "\n",
+ "# Context\n",
+ " The reference image looks like this: \n",
+ "\n",
+ "# Feedback\n",
+ " Please fill in the empty HTML page such that it looks like the target image. \n",
+ "================================\n",
+ "\n",
+ " \n",
+ "What are your suggestions on variables str0?\n",
+ "\n",
+ "Your response:\n",
+ " [+ \n",
+ "[IMAGE]\n",
+ "]\n",
+ "LLM response:\n",
+ " AssistantTurn(role='assistant', content={\n",
+ "\"reasoning\": \"The instruction requires modifying the HTML code represented by the variable `str0` so that the output matches the reference image. The feedback indicates that the current HTML is empty. The reference image displays a webpage with the text 'Welcome' and a button labeled 'Submit'. To recreate this, the HTML structure should include these elements.\",\n",
+ "\"suggestion\": {\n",
+ "\"str0\": \"
\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(html_param.data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "640c2a4e-c371-4891-bb77-ccd91ddcba33",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def save_html_to_file(html, filename=\"new_layout.html\"):\n",
+ " with open(filename, \"w\") as f:\n",
+ " f.write(html)\n",
+ "\n",
+ "save_html_to_file(html_param.data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4dc5e13a-ab04-4ffe-bbae-3e0b49828ff2",
+ "metadata": {},
+ "source": [
+ "## Step 2: Using visual feedback to guide optimization / HTML generation\n",
+ "\n",
+ "In order to test if the LLM optimizer has true visual understanding, we want to test if it understands **visual feedback**. We use the following function to annotate the generated HTML"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "3f37431a-7976-4596-be78-b1995c7736ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from selenium import webdriver\n",
+ "from selenium.webdriver.chrome.options import Options\n",
+ "from selenium.webdriver.common.by import By\n",
+ "from PIL import Image, ImageDraw, ImageFont\n",
+ "\n",
+ "def annotate_screenshot(html_path, output_img):\n",
+ " options = Options()\n",
+ " options.add_argument(\"--headless=new\")\n",
+ " # ... your other options ...\n",
+ "\n",
+ " driver = webdriver.Chrome(options=options)\n",
+ " # Ensure you use the absolute path for the file URL\n",
+ " driver.get(\"file:///home/ubuntu/Trace/tests/\" + html_path)\n",
+ "\n",
+ " # 1. Find the button and get its location/size\n",
+ " button = driver.find_element(By.TAG_NAME, \"button\")\n",
+ " location = button.location # {'x': 100, 'y': 200}\n",
+ " size = button.size # {'height': 30, 'width': 100}\n",
+ "\n",
+ " # 2. Save the initial screenshot\n",
+ " driver.save_screenshot(output_img)\n",
+ " driver.quit()\n",
+ "\n",
+ " # 3. Use PIL to draw the annotation\n",
+ " img = Image.open(output_img)\n",
+ " draw = ImageDraw.Draw(img)\n",
+ " \n",
+ " # Calculate positions\n",
+ " # We'll point to the right side of the button\n",
+ " arrow_start = (location['x'] + size['width'] + 50, location['y'] + (size['height'] // 2))\n",
+ " arrow_end = (location['x'] + size['width'] + 5, location['y'] + (size['height'] // 2))\n",
+ " text_pos = (arrow_start[0] + 5, arrow_start[1] - 10)\n",
+ "\n",
+ " # Draw a red arrow (line)\n",
+ " draw.line([arrow_start, arrow_end], fill=\"red\", width=3)\n",
+ " # Draw a simple arrowhead\n",
+ " draw.polygon([arrow_end, (arrow_end[0]+10, arrow_end[1]-5), (arrow_end[0]+10, arrow_end[1]+5)], fill=\"red\")\n",
+ "\n",
+ " # Draw text\n",
+ " # Note: To use a specific font size, you'll need to point to a .ttf file on your system\n",
+ " try:\n",
+ " font = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf\", 20)\n",
+ " except:\n",
+ " font = ImageFont.load_default()\n",
+ "\n",
+ " draw.text(text_pos, \"Make this bigger\", fill=\"red\", font=font)\n",
+ "\n",
+ " # Save the final annotated image\n",
+ " img.save(output_img)\n",
+ "\n",
+ "annotate_screenshot(\"new_layout.html\", \"new_layout_feedback.png\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "c37eb39b-830b-4c47-a335-bc48dfd7004c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG5AwwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiqmo6pp+kW32nU7+1soCwXzbmZY1yegyxAzTdN1jS9ZhebS9Ss76JG2u9rOsoU9cEqTg0AXaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK+Yfjz4StfD3iWx1jTrOKG0vlPmRomE85Tk8f7QI/I19PVwPxk8Of8JF8Ob8RpuubLF3Fgc/L94fipaonpaXYuGvu9zS0jXtF034ZQa/ZW8FppkVj9pEMKhVU4yVAHfdkfWuI+CvxJ1Hxbe6rput3Ilu1P2m2O0LiMnDIMAcA4/OvOvAGsXnivQtN+HAWQ28mo/aLiTsLVfnZPxYfrT/ABPC3wm+NcepWkJXT5HFwkaDAML5DoPod2B9K1bSqXez2/P+vRmSvyWW6/4b+vVHoll4W0LX/jlqsyaRZ/YtHt081ViAWW6c7tzDoSBn8RXb6t8SPB+h30llf65AlzHxJHGjymP/AHtgO38cVzvw6iubb4dat4llUjUNXe41I56gYOwfkM/jXkPwlfXLy18VQaXoEesS39uIZ5Zb1ITFv38/MDuyTnt0qNV7nZX+b6ffoXo/e7u3y/rU918b6jp2u/CfXb3T7mC8tHsZSksbBlyB+hB/EVxX7Nv/ACKWr/8AX8P/AEBao+HPCXiLwd8HvGtlr0CwLLbvLAizLIPuEMflJx0FY/wx1G60r4IeNL2zZkuI3bY69VJRRkfTOabai5tdl+YrOSgn3f5HsmpfEvwdpOovp93rcX2pM744IpJimOu7YpAx3zWvp/iXRdU0Rtas9SgfTV3Frlm2Iu3rktjGPevKv2cLG3Xwtquo7FN1LeeU0h+9tVQQM/Viai+OFjH4a+HEen6Z5kdtfaq006k9S25yPpuwce1E/cWvl+Ngj7z08/wO9T4r+B5LhYR4ggG5tokeORYyfTzCu39a1/EPi7Q/CtlDe6zem3tpjtSVYJJVJ69UU4/GvG/E1jbL+y9pDCNAYxDKpx/GznJ/8eNQXF9cX/7KytcuztFKsKs3XYs4C/kOPwolpzeTSCOvL5np7fF/wCllHdnxJb+W5ICiKQuMHHKBdw/EVcm+JfguDS49Rk8R2Itpc7MPlzjr8gG79K8p8J6Tp9z+zXq081lbvOEuHEpjG/cpypz14pfgdpWn6j8OfEwvLK3n3SOhaSMMceUOMnn3on7vN5K4R15fNtHtHh7xToniuye80PUI7yFG2OVVlKn0KsAR+IrN1b4k+D9DvpLK/wBcgS5j4kjjR5TH/vbAdv44rxb4A3U1pZ+MZYSd0VkkiD/aAkxWR8JX1y8tfFUGl6BHrEt/biGeWW9SExb9/PzA7sk57dKJLWy7XBba97H0/pmqWGs2Ed9pt3Dd2sn3ZYWDKfb6+1W68v8Agr4S8ReDtH1Oy16BYFlnWWBFmWQfdwx+UnHQV6hTkknoKLbWoUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZI1ljaN1DI4KsD3Bp1FD1A8z+GnwsPgXXNa1Gea3m+0uY7PyySY4dxOGyBhj8vTPTrVn4r/DiXx/p+niymt4L60m/wBZOSAYm+8OATngEV6HRQ9bX6AnZt9yrZafBY6Vb6dEg+zwwrCq9toGMflXkemfCvxb4H8U3eo+CdW0prG5BDWuqCQDGchTsBzjscg17NRR9rm6gl7vL0OJ1nR/GWteBNR0q8m0KTU75DCTF5sMEKEYJBO9nP4LWF8NfhvrHhbQtX0HXxpd1p2o5LNbTyF8lQpUqyAYx3z+Fep0Ud/PQO3keN+Hfhv47+H9/ep4U1jRLrTLlg3k6osoII6HCDqBxkHn0rr9f8GX/jXwPJo/iW7s11B3Esc9hCyxwuOmA7EsOueRnPau1ooeqswWjujxK4+GHjzUfBVh4Nu9S0GPSbWYMbmJpmmZASQNpULxnpkdBzXS+KPh3ez/AAwtvBfhxrNYk2CSe9lZD8rbiwCo2SWz6Yr0iih6pp9dQWjTXQ8p0TwD4q0n4T6j4PY6NJc3G9Ipxcy7Ar/eLfus5HYDr7Unw88A+K/BHhrWtKlOi3LXgLwOt1KAHKhcN+66Y5yMntjuPV6KHre/VWBaW8tTyL4U/DPxF4D1PUDqcmk3NlfRBJDBPIXXbnGFaMAg555FVtM+Ffi3wP4pu9R8E6tpTWNyCGtdUEgGM5CnYDnHY5Br2ainfW4dGjK0FNfSxJ8RT6dJeM2QunxOkaD0y7Et9ePpWrRRSYBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAF
FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc9aPfahY294dUuYfPjWXy4o4tq7hnA3ITxnHWuhrA0P/kX9N/69Yv/AEAUASfZr7/oNXv/AH7g/wDjdH2a+/6DV7/37g/+N153qWm2tmur65JZabrdmlzJLLeC6aG9tsHmNGwfukYADJ/jqX/ia/g0jxLcR3Zje0vII7Xei5RHWI4wRznc3XJ5oWoPRnYfZr7/AKDV7/37g/8AjdH2a+/6DV7/AN+4P/jded6fe6jpqXl9a6t+7/4SJ7VtP8pCrh5QDk437sHIwQMDoatWepeILt9IkbXpkW/1C6tZEW3hwiIZCpUlM7vkAycjnpQtUn/XT/MHpf8Arv8A5HdfZr7/AKDV7/37g/8AjdH2e+GSNZuyccbo4SM++Ix/OvMNR1nU7vSrsy3mLmLS79ftUcMayt5dwEB3beMgcgYHfAOMep2SPHYwI8zzMI1zJJjc3HU4AH5Chaq/9df8gejt/XT/ADL2m3RvdLtLtgA08KSEDoNyg/1qzXOpNqlv4CspdFtornUFtIPKilOFbhc55H8OT17V5B441nxjPcfZPECS2UD/AHbaIbYHwFJwQSHwdp5LYJ7dK56+IVJapv8AruevlWUSzCVozjH1evyju/y8z36KWOeFJoZFkikUMjocqwPIII6in15douq/EePQtPSx0DTpbNbaMQSPINzR7RtJ/ejkjHYVe/tf4o/9C5pf/fxf/j1CxCavyv7mOpk8ozcVVhp/fieh0V55/a/xR/6FzS/+/i//AB6j+1/ij/0Lml/9/F/+PU/rC/lf3Mj+yZ/8/af/AIHH/M9DrCuPGWgWutf2PPqAS/8AMWPyjE/3mxgbtu3uO9ReFrzxRd/a/wDhJdNtbLbs8jyGB353bs4duny+nWvLfFGkSaz8SNfhgLC4ig8+Lb1LJGhx+IyPrUVq8owUoLd9TfLsro1sRUo4iekY3vFprp11utdT2LWde0zw/apc6pdC3id9inYzEnBPRQT2qbTNUs9Z0+K/sJvOtpc7H2lc4JB4IB6g14trWrXHju2NywZbfSNN8ybsGnbAP5nB+imux8K3BtfhHZyrrEeksGkxdvEsgX983G09SelKGJc5tJe7a5tickjQwsJSb9q5KLW6V02tEm72s9L77HolZk3iHSoNdh0WS626jMu5IfLY5GCfvYx2PevMJviFe6TqFo0HieHXrZnxPE2n/Zyi8cg4Ge/f8Kl8YatBofxcstSuQzRW9qGKoMkna4AH4kUSxUbXj3V/n6CpZBV9pyVdbxk42uruPS0kn17ejPXaK4bw/q2v3GmXninWboRaYIXnt9PhRDlACclsbu3HPX06Vx4+I2q3lpc37eIrWwnQkw6aLAyCQDoDJtOCenX8qqWKhFJvqYUsjxFWcowafLZNq7V300T26vZdz2mq97qFlp0ImvruC1iZtoeeQIpPXGSevB/Ksnwf4gbxN4cg1GSJY5iSkqr03A9vY8H8a5D4yQXX9i2dx9s/0P7QqG18ocybXO/f16ZGOnOaupW5aXtI6nPhMvdXHLB1nyu9n8v69Dv9S1nT9I006jfXIjtBt/ehS456fdBJqezu4L+yhu7Z/MgmQSRvgjcpGQcHmvM/FNjqtp8M531HWf7QjkFsYY/sqxeSMjjK/e6jr6VVudY8U+HfBGh6xBf2y2ZSKEWXkA5XaSGZzychegxjNZPEuMveWlr/ANanbDJo1aKdKonJzcVvZ6J6e7e/roela14g0vw9bxz6pdfZ4pH2I3ls+TjOPlBrSVgyhgcgjIrzj4m6vKnhXSb2CK3P2iRX2z28cwAKE9HUgH3qHxJ4w1Oz8WLpJ1aPRLFLdHW6Nn55kJUHpjpnI49DVSxCjJp7afiZ0cnnXowlT+J8173fwtLRJN9el7+R6dVLVdWsdEsHvtRn8m2QgM+xmwScDhQTWd4Uv7jUNNkkn1ay1QCTCT2qbOMDh17N/TFZPxU/5EO6/wCusX/oQrSdS1NzXY48Pg1LGww1R7ySdvP1X5o62zu4L+yhu7Z/MgmQSRvgjcpGQcHmqeteINL8PW8c+qXX2eKR9iN5bPk4zj5Qa86fUvFeheA9J1u3v7VbKKKJDZeQDlMABi55JPHAxjNWviF4ge48FaLqltFb/wClurlLi3jmC5QkjDqRkHuKyeJtBu2qVzvp5K5YmEOZOEpOOj1TXR6b+iaPTVYMoYHIIyKWvPNU8Sa3qHja38MaJdRWIjiV57hoVkP3A5wDxjBAx6nrTNJ8Q+Iv+FnvoGo3kT28UZykUSqr/uwwbONwznOM8HIq/rEb2s97fM5/7HrcjnzRXu89ru/L91tfX1sejUV51H4h8Q+JfG+oaVo99Dp9lpxZXZ4BKZCrbTnPqc9COBUXhrxjrD674lTWp1eDTIZZPIiRQFKNyFOMkcYGSaX1mN7WG8mrqDlzK6SbV3fXbpa/zPSqK8WHxG1W8tLm/bxFa2E6EmHTRYGQSAdAZNpwT06/lXS3/ja/u/hf/b9ky2l+sqxOVUMA24A4DA8EflmlHF05Xt0VzSrkGLpOKlb3pKPXRv1Wq81dHolFcl4TPiTU7TTtX1HVohbSwBjZx2y5kyvDM/Yk/NgDHautreEuZXseXiKHsKjpuSbW9r79tUvw08woooqjAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuasLkWGnWtnPb3olgiWJttnK4yoxwVUgjj1rpaKAORltdAnvxfy6CZL0HIuH0eQyA/wC95eadcW+hXd011c6I01yyhWmk0iRnIHQEmPOK6yigDk1t9CS9F6mhst2rFhONIkEgJ5J3eXnJyfzqZJNLj8rZpk6+S7SR7dLlGxmzuYfJwTk5I65NdNRQByuzRtrL/Y8m10dGH9ky/MrnLg/u+jHkjuetWLa6s7S2S2s7C7ihjXEcMWnyoqgdgNgA/SuiooAp6TbvaaPY20oxJDbxxsPcKAamurW3vbd7e7t4p4HxujlQOrYORkHjqAamooGm07ohtbWGys4LS3TZBBGsca5J2qowBk89BU1FFANtu7CiiigQVxtn4Sv7f4lXniN5rY2c8
exUDN5gO1RyMY/hPeuyoqJQUrX6anRQxNSgpqH2lyv0ZzF94NsY/DeraboltDay36/MXdipb36kDrwKxZvh9eXfw5svD013BHeWszTK6FmjYlnIB4B6P6da9BoqHQg+nSx0U80xVNJKV2pKV3q7pW6+R5jrfgTxRrul6dbXN9pSmy+VIoldE24AznaSWOPQAfjWtrXgebWvHltq9x9kk0xYPKmhdm3t8rDgYx1Yd+1dxRS+rw6+X4Gv9s4rTlsrKSVlb4tzhtB8Hatok19pTXdvd+G7pXURSSMJowwwcfLjvg889eOlRaP4X8W+F4pbDR9R0uewaQun2yN96Z9AvH6131FNUIK1uhMs2xEnLnSfNa90rNrr6+fUp6ZDfQafHHqV0lzdjJeWOPYpySQAPYYH4Vj+OPDMvivQBYwTpDNHMsyNIDtJAIwcezGukoq5QUo8r2OSlialKsq8NJJ320+7Y4TUPDfijWfBs+i6hNpAmBhFu8LSAFVPO8kdeB0HrT9f8G6jqngHS9BgmtVurXyt7uzBDtQqcEKT1PpXcUVDoRd79VY6o5rXi4uNlyy5lp1tY4fxf4N1HX/C+laZazWqT2mzzGlZgpwm3jCk9far2taR4hupsWjaJdWm1QtvqVszeWQADgjrk8811VFN0Y3b7/oRHMqyjGOjUb2uv5tX+RyfgbwjN4Vtr03NxFLcXcgdlhUiNAM4C5+p/SrnjTQrrxH4Zn02zkhSaR0YNMSF4YE9AT+ldBRTVKKh7NbESx9aWK+tyfv3T+7Y83u/BHii88PWPh6TVdPGmxBPNYI/mgj+EdmUHp909M1oeL/A9xrHhnTNH0mWCNbJlwbhiMqFK9gea7iio+rws13Oj+2MSpxmrLlbeiVrvdvuebeJdMTS/GlrrNhrmm2GpmAebDqDFY5FA2ZB78DGPbNZfhHz9V+LF3qSXK30cURM11Gm2MsUC4X2zwPULmvVLzTrHUFC3tnb3Kr0E0SuB+Yp9taW1lCIbS3igiHOyJAq/kKh4e8730vc6Y5wo4Z03G8nHku7aL7r6dF+Z5tfWa+HfHWoahpPiHSLOS4Gbq21FipUthiVA+9zyMEdcVT+HOn/ANra34nuJWkubC6V4GuGXb529iSfYkc47ZFen3mk6bqDK17p9rcsvQzQq5H5irEMEVvEsUESRRrwqIoUD6AULD+/foEs5vhnTSfO0k3p09Fd+V9vM4XR/C/i3wvFLYaPqOlz2DSF0+2RvvTPoF4/WtLxD4d1fXPBLaTNe202ou6u0zKY4zh84wATwOPfFdZRWioRUXHocc8zrSqxrNLmTTvZXbXcz9BsJdL0DT7CdkaW3t0icoSVJAAOM44rQoorVKysjhqTc5uct3qFFFFMgKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigA
ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigD/9k=",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAG5CAIAAACshSTDAAAu+0lEQVR4Ae3dC5Cd9Vk/8Dda3UW0u3hht16yQYVER0gcSxKHULYCEkuVFHBIgZq0OCZQlYDjAI5K8BbqhVC1JV5o4tCSVKGktjT5SzqkLUq4KKFUCaAmxEs2eMmuWrPxlv/z7jl7Lpuzmz3JJjzpft7JZM/lvTzv58nMfvN7f+c9Mw4fPlxYCBAgQIAAAQIEmgW+rPmpZwQIECBAgAABAqWAkOTfAQECBAgQIECghYCQ1ALFSwQIECBAgAABIcm/AQIECBAgQIBACwEhqQWKlwgQIECAAAECQpJ/AwQIECBAgACBFgJCUgsULxEgQIAAAQIEhCT/BggQIECAAAECLQSEpBYoXiJAgAABAgQICEn+DRAgQIAAAQIEWggISS1QvESAAAECBAgQEJL8GyBAgAABAgQItBAQklqgeIkAAQIECBAgICT5N0CAAAECBAgQaCGQOyQN77x74YzmZdbK7YMtzsNLBAgQIECAAIEpFRgvJA1uXzWnOZ00PJuzavJBZWDz8t6GTSsP592+Y3gyp9E5b9WmJ9evmNsxmZWtQ4AAAQIECBCYOoHxQlJ3/93bn3tk7bLmfNJz0c3rtzy5e8fd/d1HlDC4Z8f27Tt2DoxJP71L1u188cktG9dc0Vdu0jF32X1bntu6emHnETto+ULnrIWLF8/rbvmeFwkQIECAAAECJ0xgvJBUFJ2985asXL2qv6vh2IPFrHn9C2d1H5lwBrevXvy9b33r9/Yv37RrTEzq7J2zcHH/nJFtuhavWr1y8bzeI3fQcBQPCRAgQIAAAQKvu8D4IaksrbO3f3l/T73IQzs2bR2bgUbeHdixYfNL5aOh7es2t1hjYOfWHa8WRdfCJf299d15RIAAAQIECBDIKjBxSCo6Z/UvbUpJT23atHPwiJPZs3Xd5shA5XLoqQ0bdgyOPKz/Nbhr6/bRjGQQqe7iEQECBAgQIJBW4CghqSh6m1NS8fzmI1LS8K7NG7YO1U7xpc0btg/UnpUPBndt3h7jTB3zYhxJRmqi8YQAAQIECBBIKnDUkFT0Lly6eGTOdfUMXtq8qXmkaHDnhg2fOdRwfq9GZtozXH+hzEjPR0ZauLR/VnNGGt6zfcPtK5cuXjint3PGjO6Y8LRk5d2bdjZHrPqOxns0PLhz893LF8+bVe5lxozO3llzFvYvXrJ05e1jAt3wwI5Nd69auqR/3qzuWLE71lu8dOXdG7Y3llsMbFo65gN58Wm8Pbu2b4hNq6XO6J3Tv2TVuup2wwM7N6+7ffmSymmUc7CW3t0kMFr4FJ3v6O78JECAAAECBE6gwOGjL/u2rGhMSUXfsi376lsd2LJi9tj6Oi5a+9zB0VUOPrdmQXyGP157sfZavHfgufuuqe22Z8EVV1xUe9a14LbH60fY98iyhmlRfSsePzC65/LngefWN3wEr6PvomuWLbvmotldIyV1XbFx9+jKB1/cuGx21FF5Y/ZlI6uNPu9oPOLBfY/fd/NltWqqm7T80XfFzTdfs6BysOYVmggqhU7yfEcL9pMAAQIECBB4PQWKyRx8bA7qu+aRWobZvfGaMsJ0za7mkmpUmHvbkwcquz744tqLIot0LGjITYcP79ty8+zRVNGxYE0ZqQ6+uP6yWtyob394gpB08Ln76psUs1c8sns0hh18rjxqPSQdePK2MqlVlr5llfoP7t54RVn8yNITgWp068hej9fri7e75l627Oa16zduXLviotoWI5t1zL7ompvX3Ldx4/o1y5rT0uybG+JcG+c7mY5YhwABAgQIEDjRApMKSWMjQ9FzTXWE5uCLIyklBmK2bLltbiVtVP7uW1EZbqqtMRKEqqfTlIeqGalMSWsbg8zocNW4IamMX7VUFeNb9eQWhynHr7rK4DNyyGqUq5ZWX3NfJeJVXp+9YsuBkbXjr+aQFOdbS4VRTl/Dic6upcHDB3c3pLymobO2zne0BD8JECBAgACB11Pg6HOSykDQPW/p4sYItH/rppHpOOWU7e1DMdto+fL+/qXLyxGj0SUmJm3fE08GdmzeEZO65y1ZXLlT0sj71Rcr63Z2V2+81NnZXb8D06s7mycKVdZt/HtkLlRtvnjf4qULexve7py1ePmS/oWVieIxZ2j7/tE3O3pjRlL1SfecebNqRb+0ddOOo0+H6p3XP68ezYrhweHRHcedpRbPqe2tODQ4PPrWlJzv6GH8JECAAAECBE6GwORCUtE9pzklDW3fFDOTB3ds2PDUoaJn8cpIQJ1zlixf3JAe9m9dt3VX3IZ7884IMnP7q3eTHDmnwT07d9XSTdHZ3dlZPdXO2qP4SNxgPX60kohJ0OV08OrSNSfucTn6ZORn97yVGzbdPnJbpqhiVy0jxfG662s25rJiYNcRNwxv2mXlSdMmze/HnkdPZeSNakiakvNtPpJnBAgQIECAwAkWeMMk9x9jSUsX3Pt8RKLKUqak7d07yztI9i1e2T8rXh0Zu+n7+B+8OrrKjnUbtnfu2RHxZO7iJQ3jSDH60ph/9n/0HW/6aHWbhh8xRDM6ENPwav3h4MCugfqzMvk05ZP6W/EoDtjwPJLY6KoxeDX6MFY4NLCnLKzhlYatjuPhlJzvcRzfpgQIECBAgED7ApMNSUXnnMVL5q1+qpaSDn3m7lV7BiMQzV26fPQ6V9yfe8nsP3h/BKeR5dDz61atHi4zUv+SOd3VF0d+DDfln66+uXELgMb343Fcu2rML2PeLZ82hagJhnciIzUdL4ar6gerPBkd1mpescUxj/GlpuMXx3i+x3hsmxEgQIAAAQLHJDDpkFSmpKULVz9VvyPS/pciInUsWLp0YffoobtjctKCdXfUktTQq+Ww0uyxGamoX2CLt/sW37t105Le0X1M9mfDVbpyk+Yc0ryTpljUHJmax6uaV2zeyfE8m4rzPZ7j25YAAQIECBBoW2CSc5LK/Y6kpPq85JFDdfWvbLqQVk5MavpK3FhrdgxBdY+sPfpXd3dvwwtNI0Kjqxz9Z9yzsSFYDQ4MNI1ONW0fU8N7GwqPFYer75dDR6OPI/H1zjnK6FXTbif9ZErOd9JHsyIBAgQIECAwFQJthKTKF7k1TM0uqlO2G+uIJLV8cU/jKzGONCYjxY4WzquvM7Rn5556UGncdMLH5U76amsM7dze4jvlqm/HpbuGQDU0MDBY225wz8Ch2pNYbVZn7dnUPZiS8526cuyJAAECBAgQmIRAOyGpnJq9dGFDSpodw0a9Yw8yq5yYVH+x78iMFF8It3BJw9fmPr/p3s3HEJO65y1fWb+v0v7N6zbvGi9r9ca3xtUD1cDOXYPVAgd27dpTq7VlqbV3j+fBlJzv8RRgWwIECBAgQKBdgbZCUsym7l9eTzdzlyyvT0eqH7h34fIlc0ef9h1xrW3knd7+lctr6xSvfnT5kpUb6l/ZNjw4sGfXnsHRfYz3M2ZArV5ey2ND/+/G/iW3b67ln/IzbXv2VPcSdS+tBar9O7ZW7oc0HDcoKG/
(… base64 screenshot data omitted …)",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ImageContent(\"new_layout_feedback.png\").show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "dc95795b-914f-426f-9779-6b28bb092a92",
+ "metadata": {},
+ "source": [
+ "We essentially use an arrow and red text to indicate the feedback! Now let's see how to pass this in!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "5ddddcbb-e6bf-41e0-91db-45486c3cdaa5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# 2nd optimization step!\n",
+ "\n",
+ "optimizer.zero_feedback()\n",
+ "\n",
+ "feedback_text = \"\"\"Here’s the annotated visual feedback, please update the spec according to the annotation on the image.\"\"\"\n",
+ "\n",
+ "optimizer.backward(html_param, feedback=Content(feedback_text, ImageContent(\"./new_layout_feedback.png\")))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "9652055f-97e8-4c50-a602-b5f19e8e2f17",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "summary = optimizer.summarize()\n",
+ "system_prompt, user_content_blocks = optimizer.construct_prompt(summary)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "d1491b35-e9e5-4205-afaa-b1857c74d2ab",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Now you see problem instance:\n",
+ "\n",
+ "================================\n",
+ " \n",
+ "# Instruction\n",
+ "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n",
+ "\n",
+ "# Code\n",
+ "\n",
+ "\n",
+ "# Documentation\n",
+ "\n",
+ "\n",
+ "# Variables\n",
+ " \n",
+ "\n",
+ "
Welcome
\n",
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "# Inputs\n",
+ " \n",
+ "\n",
+ "# Others\n",
+ " \n",
+ "\n",
+ "# Outputs\n",
+ " \n",
+ "\n",
+ "# Context\n",
+ " The reference image looks like this: \n",
+ "[IMAGE]\n",
+ " \n",
+ "\n",
+ "# Feedback\n",
+ " Here’s the annotated visual feedback, please update the spec according to the annotation on the image. \n",
+ "[IMAGE]\n",
+ " \n",
+ "================================\n",
+ "\n",
+ " \n",
+ "What are your suggestions on variables str0?\n",
+ "\n",
+ "Your response:\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(user_content_blocks)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ba6c3673-872d-4b50-bb2e-e6b7ac2f254d",
+ "metadata": {},
+ "source": [
+ "Now you can see we have two `[IMAGE]` placeholders because we have two images. Note that with `memory_size=5`, it means we are sending in ALL past history to the LLM. So since this is the 2nd optimization step, we are sending in **3** images in total."
+ ]
+ },
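+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "image-count-sketch",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch (illustrative, not part of the original walkthrough): count the\n",
+ "# [IMAGE] placeholders in the prompt printed above.\n",
+ "print(str(user_content_blocks).count(\"[IMAGE]\"))"
+ ]
+ },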
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "d3bc8e3b-6465-4ad9-81f6-bc5c7e286528",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Prompt\n",
+ " You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.\n",
+ "\n",
+ "Specifically, a problem will be composed of the following parts:\n",
+ "- #Instruction: the instruction which describes the things you need to do or the question you should answer.\n",
+ "- #Code: the code defined in the problem.\n",
+ "- #Documentation: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. You can use the values in #Others to help infer how those functions work.\n",
+ "- #Variables: the input variables that you can change/tweak (trainable).\n",
+ "- #Inputs: the values of fixed inputs to the code, which CANNOT be changed (fixed).\n",
+ "- #Others: the intermediate values created through the code execution.\n",
+ "- #Outputs: the result of the code output.\n",
+ "- #Feedback: the feedback about the code's execution result.\n",
+ "- #Context: the context information that might be useful to solve the problem.\n",
+ "\n",
+ "In `#Variables`, `#Inputs`, `#Outputs`, and `#Others`, the format is:\n",
+ "\n",
+ "For variables we express as this:\n",
+ "\n",
+ "\n",
+ "\n",
+ "value\n",
+ "\n",
+ "\n",
+ "constraint_expression\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "If `data_type` is `code`, it means `value` is the source code of a python code, which may include docstring and definitions.\n",
+ "Output_format: Your output should be in the following XML or JSON format:\n",
+ "\n",
+ "{\n",
+ "\"reasoning\": ,\n",
+ "\"suggestion\": {\n",
+ ": ,\n",
+ ": ,\n",
+ "}\n",
+ "}\n",
+ "\n",
+ "In , explain the problem: 1. what the #Instruction means 2. what the #Feedback on #Outputs means to #Variables considering how #Variables are used in #Code and other values in #Documentation, #Inputs, #Others. 3. Reasoning about the suggested changes in #Variables (if needed) and the expected result.\n",
+ "\n",
+ "If you need to suggest a change in the values of #Variables, write down the suggested values in . Remember you can change only the values in #Variables, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.\n",
+ "\n",
+ "If no changes are needed, just output TERMINATE.\n",
+ "\n",
+ "Now you see problem instance:\n",
+ "\n",
+ "================================\n",
+ " \n",
+ "# Instruction\n",
+ "You need to change the `value` of the variables in # Variables to improve the output in accordance to # Feedback.\n",
+ "\n",
+ "# Code\n",
+ "\n",
+ "\n",
+ "# Documentation\n",
+ "\n",
+ "\n",
+ "# Variables\n",
+ " \n",
+ "\n",
+ "
Welcome
\n",
+ "\n",
+ "\n",
+ "\n",
+ " \n",
+ "\n",
+ "# Inputs\n",
+ " \n",
+ "\n",
+ "# Others\n",
+ " \n",
+ "\n",
+ "# Outputs\n",
+ " \n",
+ "\n",
+ "# Context\n",
+ " The reference image looks like this: \n",
+ "\n",
+ "# Feedback\n",
+ " Here’s the annotated visual feedback, please update the spec according to the annotation on the image.\n",
+ "================================\n",
+ "\n",
+ " \n",
+ "What are your suggestions on variables str0?\n",
+ "\n",
+ "Your response:\n",
+ " [+ \n",
+ "[IMAGE]\n",
+ "]\n",
+ "LLM response:\n",
+ " AssistantTurn(role='assistant', content={\n",
+ "\"reasoning\": \"The feedback suggests that the 'Submit' button should be made larger. To achieve this, we can apply CSS styling to increase the button's size. This can be done using inline CSS styles to specify a larger width and height for the button.\",\n",
+ "\"suggestion\": {\n",
+ "\"str0\": \"
\"}"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "optimizer.step(verbose=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "22e3d487-8b69-4588-b64e-59715de9dc18",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "save_html_to_file(html_param.data, \"new_layout_rev2.html\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "12b71a5b-3c50-4a4b-9b9b-401e4ac9b3fe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "take_screenshot(\"new_layout_rev2.html\", \"new_layout_rev2.png\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "a75f561e-4b2f-4d2d-acbf-a68aed3df410",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAG5AwwDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiqmo6pp+kW32nU7+1soCwXzbmZY1yegyxAzTdN1jS9ZhebS9Ss76JG2u9rOsoU9cEqTg0AXaKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAK+Yfjz4StfD3iWx1jTrOKG0vlPmRomE85Tk8f7QI/I19PVwPxk8Of8JF8Ob8RpuubLF3Fgc/L94fipaonpaXYuGvu9zS0jXtF034ZQa/ZW8FppkVj9pEMKhVU4yVAHfdkfWuI+CvxJ1Hxbe6rput3Ilu1P2m2O0LiMnDIMAcA4/OvOvAGsXnivQtN+HAWQ28mo/aLiTsLVfnZPxYfrT/ABPC3wm+NcepWkJXT5HFwkaDAML5DoPod2B9K1bSqXez2/P+vRmSvyWW6/4b+vVHoll4W0LX/jlqsyaRZ/YtHt081ViAWW6c7tzDoSBn8RXb6t8SPB+h30llf65AlzHxJHGjymP/AHtgO38cVzvw6iubb4dat4llUjUNXe41I56gYOwfkM/jXkPwlfXLy18VQaXoEesS39uIZ5Zb1ITFv38/MDuyTnt0qNV7nZX+b6ffoXo/e7u3y/rU918b6jp2u/CfXb3T7mC8tHsZSksbBlyB+hB/EVxX7Nv/ACKWr/8AX8P/AEBao+HPCXiLwd8HvGtlr0CwLLbvLAizLIPuEMflJx0FY/wx1G60r4IeNL2zZkuI3bY69VJRRkfTOabai5tdl+YrOSgn3f5HsmpfEvwdpOovp93rcX2pM744IpJimOu7YpAx3zWvp/iXRdU0Rtas9SgfTV3Frlm2Iu3rktjGPevKv2cLG3Xwtquo7FN1LeeU0h+9tVQQM/Viai+OFjH4a+HEen6Z5kdtfaq006k9S25yPpuwce1E/cWvl+Ngj7z08/wO9T4r+B5LhYR4ggG5tokeORYyfTzCu39a1/EPi7Q/CtlDe6zem3tpjtSVYJJVJ69UU4/GvG/E1jbL+y9pDCNAYxDKpx/GznJ/8eNQXF9cX/7KytcuztFKsKs3XYs4C/kOPwolpzeTSCOvL5np7fF/wCllHdnxJb+W5ICiKQuMHHKBdw/EVcm+JfguDS49Rk8R2Itpc7MPlzjr8gG79K8p8J6Tp9z+zXq081lbvOEuHEpjG/cpypz14pfgdpWn6j8OfEwvLK3n3SOhaSMMceUOMnn3on7vN5K4R15fNtHtHh7xToniuye80PUI7yFG2OVVlKn0KsAR+IrN1b4k+D9DvpLK/wBcgS5j4kjjR5TH/vbAdv44rxb4A3U1pZ+MZYSd0VkkiD/aAkxWR8JX1y8tfFUGl6BHrEt/biGeWW9SExb9/PzA7sk57dKJLWy7XBba97H0/pmqWGs2Ed9pt3Dd2sn3ZYWDKfb6+1W68v8Agr4S8ReDtH1Oy16BYFlnWWBFmWQfdwx+UnHQV6hTkknoKLbWoUUUUhhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABTZI1ljaN1DI4KsD3Bp1FD1A8z+GnwsPgXXNa1Gea3m+0uY7PyySY4dxOGyBhj8vTPTrVn4r/DiXx/p+niymt4L60m/wBZOSAYm+8OATngEV6HRQ9bX6AnZt9yrZafBY6Vb6dEg+zwwrCq9toGMflXkemfCvxb4H8U3eo+CdW0prG5BDWuqCQDGchTsBzjscg17NRR9rm6gl7vL0OJ1nR/GWteBNR0q8m0KTU75DCTF5sMEKEYJBO9nP4LWF8NfhvrHhbQtX0HXxpd1p2o5LNbTyF8lQpUqyAYx3z+Fep0Ud/PQO3keN+Hfhv47+H9/ep4U1jRLrTLlg3k6osoII6HCDqBxkHn0rr9f8GX/jXwPJo/iW7s11B3Esc9hCyxwuOmA7EsOueRnPau1ooeqswWjujxK4+GHjzUfBVh4Nu9S0GPSbWYMbmJpmmZASQNpULxnpkdBzXS+KPh3ez/AAwtvBfhxrNYk2CSe9lZD8rbiwCo2SWz6Yr0iih6pp9dQWjTXQ8p0TwD4q0n4T6j4PY6NJc3G9Ipxcy7Ar/eLfus5HYDr7Unw88A+K/BHhrWtKlOi3LXgLwOt1KAHKhcN+66Y5yMntjuPV6KHre/VWBaW8tTyL4U/DPxF4D1PUDqcmk3NlfRBJDBPIXXbnGFaMAg555FVtM+Ffi3wP4pu9R8E6tpTWNyCGtdUEgGM5CnYDnHY5Br2ainfW4dGjK0FNfSxJ8RT6dJeM2QunxOkaD0y7Et9ePpWrRRSYBRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAF
FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFc9aPfahY294dUuYfPjWXy4o4tq7hnA3ITxnHWuhrA0P/kX9N/69Yv/AEAUASfZr7/oNXv/AH7g/wDjdH2a+/6DV7/37g/+N1booAqfZr7/AKDV7/37g/8AjdH2a+/6DV7/AN+4P/jdW6KAKn2a+/6DV7/37g/+N0fZr7/oNXv/AH7g/wDjdW6KAKn2a+/6DV7/AN+4P/jdH2a+/wCg1e/9+4P/AI3VuigCp9mvv+g1e/8AfuD/AON0fZr7/oNXv/fuD/43VuigCp9mvv8AoNXv/fuD/wCN0fZr7/oNXv8A37g/+N1booAqfZr7/oNXv/fuD/43UN299p9jcXg1S5m8iNpfLlji2ttGcHagPOMda0aoa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArA0P/kX9N/69Yv/AEAVv1gaH/yL+m/9esX/AKAKAL9FFFABRRQehoAz017R5NQOnpq1i16Dg2y3CGQH/dzmtCvLY7uy02CC00y903VYFvA66JeWuL2NjJkkYOcqSTlk6D73ercmvyRxNYtqcv8AaC+IxG0PnHzFhMvAIzkIVI9ucULW39dv8wel/wCu/wDkeh29xBdwrNbzRzRNna8bBlODg8j3qSvI/Dd0kEGhrp2r3cmoSXU6Xdl9pYokH705MWcKAQpDYBJPU1d0S2uLweGvP1fV3GoaXNNc/wDEwlG912bSMN8pG4/dxnvmjz/ra/6B1t/XY9JN3bCd4DcRCZFVnjLjcoJwCR2BIIH0qavGrnULnUfDzPqOoXLRJYabNIxuHQAm4YO5IIxwOT7A9q9hgaNreNoXEkRUFHD7twxwc9/rTsHUkooopAFUNc/5F/Uv+vWX/wBANX6oa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArA0P/kX9N/69Yv/AEAVv1gaH/yL+m/9esX/AKAKAL9FFFABRRRQAUUUUAFFFFABRRRQAUUUUAFUNc/5F/Uv+vWX/wBANX6oa5/yL+pf9esv/oBoA36KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigArGj0m/toUgtb+2WCNdkYltCzBR0BIcA4GB0rZooAyPsGr/9BCy/8An/APjtH2DV/wDoIWX/AIBP/wDHa16KAMj7Bq//AEELL/wCf/47R9g1f/oIWX/gE/8A8drXooAyPsGr/wDQQsv/AACf/wCO0fYNX/6CFl/4BP8A/Ha16KAMj7Bq/wD0ELL/AMAn/wDjtH2DV/8AoIWX/gE//wAdrXooAyPsGr/9BCy/8An/APjtH2DV/wDoIWX/AIBP/wDHa16KAMj7Bq//AEELL/wCf/47R9g1f/oIWX/gE/8A8drXooAyPsGr/wDQQsv/AACf/wCO02TSb+5heC6v7ZoJF2SCK0KsVPUAlyBkZHStmigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA
KKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKK
KACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//Z",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAG5CAIAAACshSTDAAAlrklEQVR4Ae3dT2ycdZon8MpqJFeLg90X7DlMHA7gHBbiQ5N4RDN41GlhCVp4p5Hw0EIxwyGGS6JcQvqycJmES4c9LOQCyYhtkpaaISt1lKyGFp4lK0LTUsLkQGgOMVzi9CX2SijOKfOWXX8TO3HF9TR+yp86xPXnfZ/3+X0eS/nqrddVm27evFlyI0CAAAECBAgQaBX4L60PPSJAgAABAgQIEKgICEl+DwgQIECAAAECywgIScugeIoAAQIECBAgICT5HSBAgAABAgQILCMgJC2D4ikCBAgQIECAgJDkd4AAAQIECBAgsIyAkLQMiqcIECBAgAABAkKS3wECBAgQIECAwDICQtIyKJ4iQIAAAQIECAhJfgcIECBAgAABAssICEnLoHiKAAECBAgQICAk+R0gQIAAAQIECCwjICQtg+IpAgQIECBAgICQ5HeAAAECBAgQILCMwPoOSQsXDo1sar1tmZqeW2YdniJAgAABAgQIdFRgpZA0N713a2s6aXq0de/qg8rsycmBpl2X7g6/em5hNcsoD+898enR3dt6VrOxbQgQIECAAAECnRNYKST1jR6aPv/h4V2t+aT/iT1HT396+dyh0b7bWpibOTc9fe7C7C3pZ2D8yIUvPz19/OAzg5Vderbtevv0+TOvjZRvK7DsE+UtI2Njw33LvuZJAgQIECBAgECYwEohqVQqDwyPT722d7S36dhzpS3DoyNb+m5POHPTr4397d///d+OTp64dEtMKg9sHRkb3bq4T+/Y3temxoYHbi/QdBR3CRAgQIAAAQLfu8DKIanSWnlgdHK0v9HkjXMnztyagRZfnT137ORXlXvz00dOLrPF7IUz574plXpHxkcHGuXcI0CAAAECBAisV4E7h6RSecvoREtK+uzEiQtzty1m5syRk0UGqtxufHbs2Lm5xbuNf+YunZmuZSQnkRou7hEgQIAAAQLrVuAuIalUGmhNSaUvTt6WkhYunTx2Zr6+xK9OHpuerT+q3Jm7dHK6OM/UM1ycR5KRWmg8IECAAAECBNapwF1DUmlgZGJs8Zrr6gq+Onmi9UzR3IVjx/79RtP6viky08xC44lKRvqiyEgjE6NbWjPSwsz0sVenJsZGtg6UN23qKy54Gp86dOJCa8RqFFrp3sLchZOHJseGt1SqbNpUHtiydWR0bHxi6tVbAt3C7LkTh/ZOjI8Ob+krNuwrthubmDp0bLq53dLsiYlb/iCv+Gu8mUvTx4pdq61uGtg6Or73SHW/hdkLJ4+8Ojm+tIzKNVgTh1oEao13aL21cn4SIECAAAECgQI37367cnp3c0oqDe46faWx17XTu4du7a/nicPnr9c2uX7+4I7ib/iL576sP1e8du3828/Vy/bveOaZJ+qPenfs/7hxhCsf7mq6LGpw98fXapUrP6+dP9r0J3g9g088t2vXc08M9S621PvM8cu1ja9/eXzXUNHH0gtDTy5uVnvc03zE61c+fnvPk/Vuqrss+2PwmT17ntuxdLDWDVoIlhpd5XprDftJgAABAgQIfJ8CpdUc/NYcNPjch/UMc/n4c5UI0ztUzSXVqLBt/6fXlkpf//LwE0UW6dnRlJtu3rxyes9QLVX07DhYiVTXvzz6ZD1uNPa/eYeQdP38241dSkO7P7xci2HXz1eO2ghJ1z7dX0lqS7fBXUv9X798/JlK84u3/iJQ1fYustfHjf6Kl3u3Pblrz+Gjx48f3v1EfY/F3XqGnnhuz8G3jx8/enBXa1oa2tMU59pY72omYhsCBAgQIEAgWmBVIenWyFDqf656hub6l4sppTgRc/r0/m1LaWPp38HdS6eb6lssBqHqclryUDUjVVLS4eYgUztdtWJIqsSveqoqzm81kltxmMr5q95K8Fk8ZDXKVVtrbHllKeItPT+0+/S1xa2Lf1pDUrHeeios2hlsWuhQPQ3evH65KeW1nDpra721FvwkQIAAAQIEvk+Bu1+TVAkEfcMTY80R6OqZE4uX41Qu2Z6eL642mpwcHZ2YrJwxqt2KC5OmZ4oHs+dOnisu6h4eH1v6pKTF16tPLm1b7qt+8FK53Nf4BKZvLrReKLS0bfO/i9dC1a8XHxybGBloerm8ZWxyfHRk6ULx4pqh6au1F3sGiiuSqg/6tg5vqTf91ZkT5+5+OdTA8OhwI5qVFuYWaoWLT5Ya21qvVroxt1B7qSPrrR3GTwIECBAgQOAvIbC6kFTq29qakuanTxRXJs+dO3bssxul/rGpIgGVt45PjjWlh6tnjpy5VHwM98kLRZDZNlr9NMnFNc3NXLhUTzelcl+5XF1quX6v+JO4uUb8WE6iuAi6cjl49da7tfiMy9qDxZ99w1PHTry6+LFMRReX6hmpOF5fY8vmXFaavXTbB4a3lFx60LJL6+tF5dpSFl+ohqSOrLf1SB4RIECAAAECwQJ/tcr6xbmkiR1vflFEoqVbJSVN912ofILk4NjU6Jbi2cVzN4P/+1++qW1y7six6fLMuSKebBsbbzqPVJx9ac4/V3/z3/76N9V9mn4Up2hqJ2Kanm3cnZu9NNt4VEk+Lfmk8VJxrzhg0+MiidU2LU5e1e4WG9yYnak01vRM015ruNuR9a7h+HYlQIAAAQIE2hdYbUgqlbeOjQ+/9lk9Jd3490N7Z+aKQLRtYrL2Plfx+dzjQ//yP4rgtHi78cWRva8tVDLS6PjWvuqTiz8WWvJP7+C24iMAml8v7hfvXTXnl1terTxsCVF3OL1TZKSW4xWnqxoHW3pQO63VuuEyx7zHp1qOX7rH9d7jse1GgAABAgQI3JPAqkNSJSVNjLz2WeMTka5+VUSknh0TEyN9tUP3FRcn7ThyoJ6k5r+pnFYaujUjlRpvsBUvD469eebE+ECtxmp/Nr1LV9mlNYe0FmmJRa2RqfV8VeuGrUXW8qgT613L8e1LgAABAgQItC2wymuSKnUXU1LjuuTFQ/WOTrW8kVa5MKnlK3GLrYaKU1B9i1vX/unrG2h6ouWMUG2Tu/8sPrOxKVjNzc62nJ1q2b+4NHygqfFiw4Xq65VTR7X7ReIb2HqXs1ctZVf9oCPrXfXRbEiAAAECBAh0QqCNkLT0RW5Nl2aXqpdsN/dRJKnJsf7mZ4rzSLdkpKLQyHBjm/mZCzONoNK86x3vV4oM1reYvzC9zHfKVV8u3rprClTzs7Nz9f3mZmZv1B8Um20p1x917k5H1tu5dlQiQIAAAQIEViHQTkiqXJo9MdKUkoaK00YDtx5kS+XCpMaTg7dnpOIL4UbGm74294sTb568h5jUNzw51fhcpasnj5y8tFLWGii+Na4RqGYvXJqrNjh76dJMvddlW62/upY7HVnvWhqwLwECBAgQINCuQFshqbiaenSykW62jU82LkdqHHhgZHJ8W+3h4G3vtS2+MjA6NVnfpvTNbybHp441vrJtYW525tLMXK3GSj+LK6Bem6znsfn/8/Lo+Ksn6/mn8jdtMzPVKkXfE/VAdfXcmaX
PQ1ooPqCg8iFOi7eeHVNTtSvQVzriPT/fkfXe89HtSIAAAQIECNyDQLufZFn/7Opbv4utqVD1m0iKblq/5q1pk8rnax/fva1nxY6rn9h97fKnR1s269mx5/inje8PqXwD3NAdqjQ+XLv41Otd9UTVu/TdbYO1PXu27T7e9M1y1778cH/Th3kXf5+3+/j5K4tfWnLt/PE99bhVdF/pZ+ml61c+bTpC8VL/kwdPf3mttujVrbe2tZ8ECBAgQIDA9yywuq8laW7yyodLX9b2zNHLzU+33C++oGPxS9Fu+aqQlm0qD65fPn3wuR31qFJJTD39257cffjDpXDR+uUglZeXbk3f7LZY5ePDu2rfaVvdoijzzJ6jHy8Fm/pxKwnnydYvmatsuP/DpoBUfK9c83eV1A5ZKr7i5Mr1K6dbvpOk+mLvk0cvX2v9TpLabq0Cd1tvvVF3CBAgQIAAge9bYFPRQO0/dD8JECBAgAABAgSqAu1dk4SNAAECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaExCS2vOyNQECBAgQILBBBISkDTJoyyRAgAABAgTaE/irO2z+3nvvfffdd5cvX77DNl66N4EHHnjgvvvue+GFF+5td3sRIECAAAEC0QKbbt68uewxioT0gx/8YOfOncu+6sm1C3z00UfXr1+Xk9YuqQIBAgQIEIgQWPHttuIckoQUIV6vWfAWyPWH7hAgQIAAAQLrSmDFkORdtr/AnCD/BZAdggABAgQI3JvAiiHp3srZiwABAgQIECDQHQJCUnfM0SoIECBAgACBDgsISR0GVY4AAQIECBDoDgEhqTvmaBUECBAgQIBAhwWEpA6DKkeAAAECBAh0h4CQ1B1ztAoCBAgQIECgwwJCUodBlSNAgAABAgS6Q0BI6o45WgUBAgQIECDQYQEhqcOgyhEgQIAAAQLdISAkdcccrYIAAQIECBDosICQ1GFQ5QgQIECAAIHuEBCSumOOVkGAAAECBAh0WEBI6jCocgQIECBAgEB3CAhJ3TFHqyBAgAABAgQ6LCAkdRhUOQIECBAgQKA7BISk7pijVRAgQIAAAQIdFhCSOgyqHAECBAgQINAdAusuJC1cPfvuvl88/fgjgz/84cAjjz/90oF3z367sCL21VOvbH9839n5FTdY/oWFi28//ciz7/6pKLzwp48+OHt15SMsX8CzBAgQIECAQJcLrLeQNH/2jX2vf977/K9+94eZK1/97q2X77/4+i9eOXqxwyGm/PDLv/uP3/7TQ+XSwten3jzy+69vdPmcLY8AAQIECBBoU2DdhaQ/X13of/Qfnnp0c39vuXfzw08d/PUnn7zz4sPl0vzZfY9vf+nU1cUFNp0JKpVuzP/+0LPbizNPg488ve/9i8VZpYU/vf309qf3HXjp6Z9uHxp85KevvPvB4VeK+48MbX/2wKnixFR1/4tff7DvlTf+3x+Lc1evvF85reRGgAABAgQIEFgSWG8h6f4f/8OPe0798pXX3//o7OcXK++z9W7e3F++07gWvj77eekf/+cnlz5758XyqdcPfFBJOz2lqxf/dP/Ur//tD//3nZ3z//rLI///+eL+J+88NX/q7VNf1+JQz9/8/L/vferB//r8r3791vPFaSU3AgQIECBAgEBVYL2FpPLmn7/1u/811X/x6Ov7nv27bX89+PgvDiyeHLrDxP7mRy++/PPi1NNDO59/8cc93/7x28UrlHof+smPH+4tlfofeuzB+zf/6CeV+70P/ujB8vy3895cuwOnlwgQIECAAIGKwHoLSZWeeh/9p1/99t8++cM312Y+e2vnjVMHfnGg+i5b5dXbbuVyf28RgCq3cm9xb76agXrKS6eGeirPl3uKH8Vt6d/Fu/4hQIAAAQIECKwssM5C0sK3Zz849Xn9b816H3pq/z/vffjGxT9W3nhrDjgLf56/UXvTbGF+vnq3uDffiEwrr9orBAgQIECAAIG7CKyzkHRj/vOj+17a934tJxWh6fjxr3sffWxzuef+B/sXqu+lzV88+3n9cwEW/nz2+L8uXa79+/c/Lz342EPNaeou6y/OMhVXfnv77a5MNiBAgAABAhtNYJ2FpN6HX37rnZf7f//Ln20b+GHx52rbXzq68I/v/Pafd/aXyg899eLOhXd/9vjjP332wNneRx/tLy1lm/KDP3nsz4ee3v7I9mffnN/56qtPbS5yz6pvvQ8+tnn+6M8e+enrbX/Y0qqPYUMCBAgQIEAgn8CmmzdvLtv1/v37Dxw4sOxLnuyUwMGDB994441OVVOHAAECBAgQ6KDAOjuT1MGVKUWAAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SsgJHXvbK2MAAECBAgQWIOAkLQGPLsSIECAAAEC3SuwYkh64IEHunfV62VlkNfLJPRBgAABAgRuE1gxJN13330fffTRbdt7omMCBW+B3LFyChEgQIAAAQIdFdh08+bNlQq+995733333eXLl1fawPP3LFCcQyoS0gsvvHDPFexIgAABAgQIhArcKSSFHlhxAgQIECBAgMB6Fljx7bb13LTeCBAgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULa
w+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFh
KSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikFhKSUY9M0AQIECBAgEC0gJEULq0+AAAECBAikFBCSUo5N0wQIECBAgEC0gJAULaw+AQIECBAgkFJASEo5Nk0TIECAAAEC0QJCUrSw+gQIECBAgEBKASEp5dg0TYAAAQIECEQLCEnRwuoTIECAAAECKQWEpJRj0zQBAgQIECAQLSAkRQurT4AAAQIECKQUEJJSjk3TBAgQIECAQLSAkBQtrD4BAgQIECCQUkBISjk2TRMgQIAAAQLRAkJStLD6BAgQIECAQEoBISnl2DRNgAABAgQIRAsISdHC6hMgQIAAAQIpBYSklGPTNAECBAgQIBAtICRFC6tPgAABAgQIpBQQklKOTdMECBAgQIBAtICQFC2sPgECBAgQIJBSQEhKOTZNEyBAgAABAtECQlK0sPoECBAgQIBASgEhKeXYNE2AAAECBAhECwhJ0cLqEyBAgAABAikF/hPfMETLfKxqDQAAAABJRU5ErkJggg==",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "ImageContent(\"new_layout_rev2.png\").show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "316fd8d5-cef7-47ec-a129-24e5df6b55e7",
+ "metadata": {},
+ "source": [
+ "We can see now the button has become bigger!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4b92dfb7-6dd0-44ee-9961-2f50fc16490d",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.9"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/opto/features/flows/compose.py b/opto/features/flows/compose.py
index dff95bbc..3604723a 100644
--- a/opto/features/flows/compose.py
+++ b/opto/features/flows/compose.py
@@ -1,6 +1,7 @@
import opto.trace as trace
from typing import Union, get_type_hints, Any, Dict, List, Optional
from opto.utils.llm import AbstractModel, LLM
+from opto.features.flows.types import MultiModalPayload, QueryModel
import contextvars
"""
@@ -157,6 +158,7 @@ def __init__(self,
system_prompt: The system prompt to use for LLM calls. If None and the class has a docstring, the docstring will be used.
llm: The LLM model to use for inference
chat_history_on: if on, maintain chat history for multi-turn conversations
+ model_name: override the default name of the model
"""
if system_prompt is None:
system_prompt = "You are a helpful assistant."
@@ -178,7 +180,9 @@ def __init__(self,
self.model_name = model_name if model_name else f"{self.__class__.__name__}{len(current_llm_sessions)}"
current_llm_sessions.append(1) # just a marker
- def forward(self, user_query: str, chat_history_on: Optional[bool] = None) -> str:
+ def forward(self, user_query: str,
+ payload: Optional[MultiModalPayload] = None,
+ chat_history_on: Optional[bool] = None) -> str:
"""This function takes user_query as input, and returns the response from the LLM, with the system prompt prepended.
This method will always save chat history.
@@ -187,17 +191,19 @@ def forward(self, user_query: str, chat_history_on: Optional[bool] = None) -> st
If chat_history_on is True, the chat history will be included in the LLM input.
Args:
- user_query: The user query to send to the LLM
+ user_query: The user query to send to the LLM. This should be a string containing the user's input or question.
+ payload: Optional MultiModalPayload carrying an image to attach to the user message.
Returns:
str: For direct pattern
"""
chat_history_on = self.chat_history_on if chat_history_on is None else chat_history_on
+ user_message = QueryModel(query=user_query, multimodal_payload=payload).query
+
messages = [{"role": "system", "content": self.system_prompt.data}]
if chat_history_on:
messages.extend(self.chat_history.get_messages())
- messages.append({"role": "user", "content": user_query})
+ messages.append({"role": "user", "content": user_message})
response = self.llm(messages=messages)
@@ -225,5 +231,7 @@ def call_llm(*messages) -> str:
return response_node
- def chat(self, user_query: str) -> str:
- return self.forward(user_query)
+ def chat(self, user_query: str, payload: Optional[MultiModalPayload] = None, chat_history_on: Optional[bool] = None) -> str:
+ """Note that chat/forward always assumes it's a single turn of the conversation. History/context management will be accomplished
+ through other APIs"""
+ return self.forward(user_query, payload, chat_history_on)
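+
+# Illustrative usage of the new multimodal chat path (a sketch, not part of the modules above;
+# `agent` stands for an instance of the LLM component defined in this file, and the image path
+# is a placeholder):
+#
+#     payload = MultiModalPayload.from_path("new_layout_feedback.png")
+#     reply = agent.chat("Please update the layout according to the annotated image.", payload=payload)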
diff --git a/opto/features/flows/types.py b/opto/features/flows/types.py
index 4196b926..957df332 100644
--- a/opto/features/flows/types.py
+++ b/opto/features/flows/types.py
@@ -1,10 +1,147 @@
"""Types for opto flows."""
-from pydantic import BaseModel, Field, create_model, ConfigDict
+from typing import List, Dict, Union
+from pydantic import BaseModel, model_validator
from typing import Any, Optional, Callable, Dict, Union, Type, List
+from dataclasses import dataclass
import re
import json
+from opto.optimizers.utils import encode_image_to_base64, encode_numpy_to_base64
+from opto import trace
class TraceObject:
def __str__(self):
# Any subclass that inherits this will be friendly to the optimizer
- raise NotImplementedError("Subclasses must implement __str__")
\ No newline at end of file
+ raise NotImplementedError("Subclasses must implement __str__")
+
+
+class MultiModalPayload(BaseModel):
+ """
+ A payload for multimodal content, particularly images.
+
+ Supports three types of image inputs:
+ 1. URL (string starting with 'http://' or 'https://')
+ 2. Local file path (string path to image file)
+ 3. Numpy array (RGB image array)
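+
+ Illustrative example (a sketch; the file path is a placeholder):
+
+ payload = MultiModalPayload.from_path("screenshots/layout.png")
+ block = payload.get_content_block()  # litellm-style {"type": "image_url", ...} block, or None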
+ """
+ image_bytes: Optional[str] = None # Can be URL or base64-encoded data URL
+
+ @classmethod
+ def from_path(cls, path: str) -> "MultiModalPayload":
+ """Create a payload by loading an image from a local file path."""
+ data_url = encode_image_to_base64(path)
+ return cls(image_bytes=data_url)
+
+ @classmethod
+ def from_url(cls, url: str) -> "MultiModalPayload":
+ """Create a payload from an image URL."""
+ return cls(image_bytes=url)
+
+ @classmethod
+ def from_array(cls, array: Any, format: str = "PNG") -> "MultiModalPayload":
+ """Create a payload from a numpy array or array-like RGB image."""
+ data_url = encode_numpy_to_base64(array, format=format)
+ return cls(image_bytes=data_url)
+
+ def load_image(self, path: str) -> None:
+ """Mutate the current payload to include a new image from a file path."""
+ self.image_bytes = encode_image_to_base64(path)
+
+ def set_image(self, image: Union[str, Any], format: str = "PNG") -> None:
+ """
+ Set the image from various input formats.
+
+ Args:
+ image: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+ """
+ if isinstance(image, str):
+ # Check if it's a URL
+ if image.startswith('http://') or image.startswith('https://'):
+ # Direct URL - litellm supports this
+ self.image_bytes = image
+ else:
+ # Assume it's a local file path
+ self.image_bytes = encode_image_to_base64(image)
+ else:
+ # Assume it's a numpy array or array-like object
+ self.image_bytes = encode_numpy_to_base64(image, format=format)
+
+ def get_content_block(self) -> Optional[Dict[str, Any]]:
+ """
+ Get the content block for the image in litellm format.
+
+ Returns:
+ Dict with format: {"type": "image_url", "image_url": {"url": ...}}
+ or None if no image data is set
+ """
+ if self.image_bytes is None:
+ return None
+
+ return {
+ "type": "image_url",
+ "image_url": {
+ "url": self.image_bytes
+ }
+ }
+
+class QueryModel(BaseModel):
+ # Expose "query" as already-normalized: always a List[Dict[str, Any]]
+ query: List[Dict[str, Any]]
+ multimodal_payload: Optional[MultiModalPayload] = None
+
+ @model_validator(mode="before")
+ @classmethod
+ def normalize(cls, data: Any):
+ """
+ Accepts:
+ { "query": "hello" }
+ { "query": "hello", "multimodal_payload": {"image_bytes": "..."} }
+ And always produces:
+ { "query": [ {text block}, maybe {image_url block} ], "multimodal_payload": ...}
+ """
+ if not isinstance(data, dict):
+ raise TypeError("QueryModel input must be a dict")
+
+ raw_query: Any = data.get("query")
+ if isinstance(raw_query, trace.Node):
+ assert isinstance(raw_query.data, (str, list)), "If using trace.Node, its data must be a str or list"
+ raw_query = raw_query.data
+
+ # 1) Start with the text part
+ if isinstance(raw_query, str):
+ out: List[Dict[str, Any]] = [{"type": "text", "text": raw_query}]
+ elif isinstance(raw_query, list):
+ # Normalize each element in the list
+ out = []
+ for item in raw_query:
+ if isinstance(item, str):
+ out.append({"type": "text", "text": item})
+ elif isinstance(item, dict):
+ out.append(item)
+ else:
+ raise TypeError("Elements of `query` list must be str or dict")
+ else:
+ raise TypeError("`query` must be a string or list")
+
+ # 2) If we have an image, append an image block
+ payload = data.get("multimodal_payload")
+ image_bytes: Optional[str] = None
+ if payload is not None:
+ if isinstance(payload, dict):
+ image_bytes = payload.get("image_bytes")
+ else:
+ # Could be already-parsed MultiModalPayload
+ image_bytes = getattr(payload, "image_bytes", None)
+
+ if image_bytes:
+ out = out + [{
+ "type": "image_url",
+ "image_url": {"url": image_bytes}
+ }]
+
+ # 3) Write back normalized fields
+ data["query"] = out
+ return data
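+
+# Sketch of the normalization performed by QueryModel (values below are placeholders):
+#
+#     q = QueryModel(query="hello",
+#                    multimodal_payload=MultiModalPayload.from_url("https://example.com/img.png"))
+#     q.query
+#     # -> [{"type": "text", "text": "hello"},
+#     #     {"type": "image_url", "image_url": {"url": "https://example.com/img.png"}}]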
diff --git a/opto/optimizers/README.md b/opto/optimizers/README.md
new file mode 100644
index 00000000..b6c479c0
--- /dev/null
+++ b/opto/optimizers/README.md
@@ -0,0 +1,2 @@
+# Optimizers
+
diff --git a/opto/optimizers/opro.py b/opto/optimizers/opro.py
index 24f5a2cd..8c02ad2d 100644
--- a/opto/optimizers/opro.py
+++ b/opto/optimizers/opro.py
@@ -1,7 +1,9 @@
import json
from textwrap import dedent
+from typing import List
from opto.optimizers.optoprime import OptoPrime
+from opto.trace.nodes import ParameterNode
class OPRO(OptoPrime):
@@ -84,6 +86,24 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = []
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If any parameter contains image data.
+ """
+ # Ensure no parameters contain image data
+ for param in parameters:
+ assert not param.is_image, (
+ f"Parameter '{param.name}' contains image data. "
+ f"OPROv1 optimizer does not support image parameters."
+ )
+
def construct_prompt(self, summary, mask=None, *args, **kwargs):
"""Construct system and user prompts using historical examples.
diff --git a/opto/optimizers/opro_v2.py b/opto/optimizers/opro_v2.py
index ff5c801d..ad0fd677 100644
--- a/opto/optimizers/opro_v2.py
+++ b/opto/optimizers/opro_v2.py
@@ -1,7 +1,8 @@
import json
from textwrap import dedent
from dataclasses import dataclass, asdict
-from typing import Dict
+from typing import Dict, Optional, List
+from opto.trace.nodes import ParameterNode
from opto.optimizers.optoprime_v2 import OptoPrimeV2, OptimizerPromptSymbolSet
@@ -15,8 +16,8 @@ class OPROPromptSymbolSet(OptimizerPromptSymbolSet):
Attributes
----------
- problem_context_section_title : str
- Title for the problem context section in prompts.
+ instruction_section_title : str
+ Title for the instruction section in prompts.
variable_section_title : str
Title for the variable/solution section in prompts.
feedback_section_title : str
@@ -49,9 +50,10 @@ class OPROPromptSymbolSet(OptimizerPromptSymbolSet):
more focused set of symbols specifically for OPRO optimization.
"""
- problem_context_section_title = "# Problem Context"
+ instruction_section_title = "# Instruction"
variable_section_title = "# Solution"
feedback_section_title = "# Feedback"
+ context_section_title = "# Context"
node_tag = "node" # nodes that are constants in the graph
variable_tag = "solution" # nodes that can be changed
@@ -72,6 +74,7 @@ def default_prompt_symbols(self) -> Dict[str, str]:
"variables": self.variables_section_title,
"feedback": self.feedback_section_title,
"instruction": self.instruction_section_title,
+ "context": self.context_section_title
}
@dataclass
@@ -89,6 +92,9 @@ class ProblemInstance:
The current proposed solution that can be modified.
feedback : str
Feedback about the current solution.
+ context : Optional[str]
+ Optional context information that might be useful to solve the problem.
+
optimizer_prompt_symbol_set : OPROPromptSymbolSet
The symbol set used for formatting the problem.
problem_template : str
@@ -107,12 +113,13 @@ class ProblemInstance:
instruction: str
variables: str
feedback: str
+ context: Optional[str]
optimizer_prompt_symbol_set: OPROPromptSymbolSet
problem_template = dedent(
"""
- # Problem Context
+ # Instruction
{instruction}
# Solution
@@ -124,12 +131,24 @@ class ProblemInstance:
)
def __repr__(self) -> str:
- return self.problem_template.format(
+ optimization_query = self.problem_template.format(
instruction=self.instruction,
variables=self.variables,
feedback=self.feedback,
)
+ context_section = dedent("""
+
+ # Context
+ {context}
+ """)
+
+ if self.context is not None and self.context.strip() != "":
+ context_section = context_section.format(context=self.context)
+ optimization_query += context_section
+
+ return optimization_query
+
class OPROv2(OptoPrimeV2):
"""OPRO (Optimization by PROmpting) optimizer version 2.
@@ -197,6 +216,7 @@ class OPROv2(OptoPrimeV2):
- {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer.
- {variables_section_title}: the proposed solution that you can change/tweak (trainable).
- {feedback_section_title}: the feedback about the solution.
+ - {context_section_title}: the context information that might be useful to solve the problem.
If `data_type` is `code`, it means `{value_tag}` is the source code of a python code, which may include docstring and definitions.
"""
@@ -229,6 +249,14 @@ class OPROv2(OptoPrimeV2):
"""
)
+ context_prompt = dedent(
+ """
+ Here is some additional **context** to solving this problem:
+
+ {context}
+ """
+ )
+
final_prompt = dedent(
"""
What are your revised solutions on {names}?
@@ -244,6 +272,7 @@ def __init__(self, *args,
optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = None,
include_example=False, # default example in OptoPrimeV2 does not work in OPRO
memory_size=5,
+ problem_context: Optional[str] = None,
**kwargs):
"""Initialize the OPROv2 optimizer.
@@ -264,7 +293,33 @@ def __init__(self, *args,
optimizer_prompt_symbol_set = optimizer_prompt_symbol_set or OPROPromptSymbolSet()
super().__init__(*args, optimizer_prompt_symbol_set=optimizer_prompt_symbol_set,
include_example=include_example, memory_size=memory_size,
+ problem_context=problem_context,
**kwargs)
+
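+ # Illustrative construction (a sketch; `params` and the context string are placeholders):
+ #     opt = OPROv2(params, problem_context="The layout must match the reference screenshot.")
+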
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overridden by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If more than one parameter contains image data.
+
+ Notes:
+ OPROv2 supports image parameters, but only one parameter can be
+ an image at a time since LLMs can only generate one image per inference.
+ """
+ # Count image parameters
+ image_params = [param for param in parameters if param.is_image]
+
+ if len(image_params) > 1:
+ param_names = ', '.join([f"'{p.name}'" for p in image_params])
+ raise AssertionError(
+ f"OPROv2 supports at most one image parameter, but found {len(image_params)}: "
+ f"{param_names}. LLMs can only generate one image at a time."
+ )
def problem_instance(self, summary, mask=None):
"""Create a ProblemInstance from an optimization summary.
@@ -328,6 +383,7 @@ def initialize_prompt(self):
variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
)
self.output_format_prompt = self.output_format_prompt_template.format(
output_format=self.optimizer_prompt_symbol_set.output_format,
@@ -336,4 +392,5 @@ def initialize_prompt(self):
instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
)
diff --git a/opto/optimizers/opro_v3.py b/opto/optimizers/opro_v3.py
new file mode 100644
index 00000000..39aab1fe
--- /dev/null
+++ b/opto/optimizers/opro_v3.py
@@ -0,0 +1,543 @@
+"""
+Key differences from v2:
+1. Use the new backbone conversation history manager
+2. Support multimodal nodes (both trainable and non-trainable)
+3. Break from the OptoPrime-style template and support more customizable, user-supplied templates for brevity and streamlined usage.
+"""
+
+from textwrap import dedent
+from dataclasses import dataclass
+from typing import Dict, Optional, List, Union
+from opto.trace.nodes import ParameterNode
+
+from opto.optimizers.optoprime_v3 import OptoPrimeV3, OptimizerPromptSymbolSet
+from opto.utils.backbone import (
+ ContentBlock, ImageContent, ContentBlockList,
+ DEFAULT_IMAGE_PLACEHOLDER
+)
+
+# Not inheriting from the opro_v2 symbol set because this one should expose a smaller set of symbols
+class OPROPromptSymbolSet(OptimizerPromptSymbolSet):
+ """Prompt symbol set for OPRO optimizer.
+
+ This class defines the tags and symbols used in the OPRO optimizer's prompts
+ and output parsing. It provides a structured way to format problems and parse
+ responses from the language model.
+
+ Attributes
+ ----------
+ instruction_section_title : str
+ Title for the instruction section in prompts.
+ variables_section_title : str
+ Title for the variables/solution section in prompts.
+ feedback_section_title : str
+ Title for the feedback section in prompts.
+ context_section_title : str
+ Title for the optional context section in prompts.
+ node_tag : str
+ Tag used to identify constant nodes in the computation graph.
+ variable_tag : str
+ Tag used to identify variable nodes that can be optimized.
+ value_tag : str
+ Tag used to wrap the value of a node.
+ constraint_tag : str
+ Tag used to wrap constraint expressions for nodes.
+ reasoning_tag : str
+ Tag used to wrap reasoning in the output.
+ improved_variable_tag : str
+ Tag used to wrap improved variable values in the output.
+ name_tag : str
+ Tag used to wrap variable names.
+ expect_json : bool
+ Whether to expect JSON output format (default: False).
+
+ Methods
+ -------
+ default_prompt_symbols
+ Returns default prompt symbols dictionary.
+
+ Notes
+ -----
+ This class inherits from OptimizerPromptSymbolSet but defines a smaller,
+ more focused set of symbols specifically for OPRO optimization.
+ """
+
+ instruction_section_title = "# Instruction"
+ variables_section_title = "# Solution"
+ feedback_section_title = "# Feedback"
+ context_section_title = "# Context"
+
+ node_tag = "node" # nodes that are constants in the graph
+ variable_tag = "solution" # nodes that can be changed
+ value_tag = "value" # inside node, we have value tag
+ constraint_tag = "constraint" # inside node, we have constraint tag
+
+ # output format
+ # Note: we currently don't support extracting formats like "```code```" because we assume the supplied tag is a bare tag name
+ reasoning_tag = "reasoning"
+ improved_variable_tag = "variable"
+ name_tag = "name"
+
+ expect_json = False # this will stop `enforce_json` arguments passed to LLM calls
+
+ @property
+ def default_prompt_symbols(self) -> Dict[str, str]:
+ return {
+ "variables": self.variables_section_title,
+ "feedback": self.feedback_section_title,
+ "instruction": self.instruction_section_title,
+ "context": self.context_section_title
+ }
+
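+# Illustrative customization (sketch; `params` is a hypothetical list of ParameterNode): the symbol
+# set is configured purely through class attributes, so changing section titles or tags only
+# requires subclassing, e.g.
+#
+#     class MyOPROSymbols(OPROPromptSymbolSet):
+#         variables_section_title = "# Candidate"
+#         variable_tag = "candidate"
+#
+#     optimizer = OPROv3(params, optimizer_prompt_symbol_set=MyOPROSymbols())
+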
+@dataclass
+class ProblemInstance:
+ """Represents a problem instance for OPRO optimization.
+
+ This dataclass encapsulates a complete problem instance including the
+ instruction, current variables/solution, and feedback received.
+
+ Supports multimodal content - variables can contain images.
+
+ Attributes
+ ----------
+ instruction : str
+ The instruction describing what needs to be done or the question to answer.
+ variables : Union[str, List[ContentBlock]]
+ The current proposed solution that can be modified. Can contain images.
+ feedback : str
+ Feedback about the current solution.
+ context : Optional[ContentBlockList]
+ Optional context information that might be useful to solve the problem.
+
+ optimizer_prompt_symbol_set : OPROPromptSymbolSet
+ The symbol set used for formatting the problem.
+ problem_template : str
+ Template for formatting the problem instance as a string.
+
+ Methods
+ -------
+ __repr__()
+ Returns a formatted string representation of the problem instance.
+ to_content_blocks()
+ Returns a ContentBlockList for multimodal prompts.
+ has_images()
+ Returns True if the problem instance contains images.
+
+ Notes
+ -----
+ The problem instance is formatted using the problem_template which
+ organizes the instruction, variables, and feedback into a structured format.
+ """
+ instruction: str
+ variables: Union[str, List[ContentBlock]]
+ feedback: str
+ context: Optional[ContentBlockList]
+
+ optimizer_prompt_symbol_set: OPROPromptSymbolSet
+
+ problem_template = dedent(
+ """
+ # Instruction
+ {instruction}
+
+ # Solution
+ {variables}
+
+ # Feedback
+ {feedback}
+ """
+ )
+
+ @staticmethod
+ def _content_to_text(content: Union[str, List[ContentBlock]]) -> str:
+ """Convert content (str or List[ContentBlock]) to text representation.
+
+ Handles both string content and ContentBlockList/List[ContentBlock].
+ Uses ContentBlockList.blocks_to_text for list content.
+ """
+ if isinstance(content, str):
+ return content
+ # Use the shared utility from ContentBlockList
+ return ContentBlockList.blocks_to_text(content, DEFAULT_IMAGE_PLACEHOLDER)
+
+ def __repr__(self) -> str:
+ """Return text-only representation for backward compatibility."""
+ optimization_query = self.problem_template.format(
+ instruction=self.instruction,
+ variables=self._content_to_text(self.variables),
+ feedback=self.feedback,
+ )
+
+ context_section = dedent("""
+
+ # Context
+ {context}
+ """)
+
+ if self.context is not None and self.context.to_text().strip() != "":
+ context_section = context_section.format(context=self.context.to_text())
+ optimization_query += context_section
+
+ return optimization_query
+
+ def to_content_blocks(self) -> ContentBlockList:
+ """Convert the problem instance to a list of ContentBlocks.
+
+ Consecutive TextContent blocks are merged into a single block for efficiency.
+ Images and other non-text blocks are kept separate.
+
+ Returns:
+ ContentBlockList: A list containing TextContent and ImageContent blocks
+ that represent the complete problem instance.
+ """
+ blocks = ContentBlockList()
+
+ # Instruction section
+ blocks.append(f"# Instruction\n{self.instruction}\n\n# Solution\n")
+
+ # Variables/Solution section (may contain images)
+ blocks.extend(self.variables)
+
+ # Feedback section
+ blocks.append(f"\n\n# Feedback\n{self.feedback}")
+
+ # Context section (optional)
+ if self.context is not None and self.context.to_text().strip() != "":
+ blocks.append(f"\n\n# Context\n")
+ blocks.extend(self.context)
+
+ return blocks
+
+ def has_images(self) -> bool:
+ """Check if this problem instance contains any images.
+
+ Returns:
+ bool: True if variables field contains ImageContent blocks.
+ """
+ if isinstance(self.variables, list):
+ for block in self.variables:
+ if isinstance(block, ImageContent):
+ return True
+ return False
+
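+# Illustrative usage (sketch): a text-only instance renders the three core sections via repr(),
+# and the optional context section is appended only when it is non-empty.
+#
+#     pi = ProblemInstance(instruction="Return 10.", variables="a = 5", feedback="Output was 1.",
+#                          context=None, optimizer_prompt_symbol_set=OPROPromptSymbolSet())
+#     print(pi)           # "# Instruction ... # Solution ... # Feedback ..." (no "# Context")
+#     pi.has_images()     # False, because `variables` is a plain string
+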
+class OPROv3(OptoPrimeV3):
+ """OPRO (Optimization by PROmpting) optimizer version 2.
+
+ OPRO is an optimization algorithm that leverages large language models to
+ iteratively improve solutions based on feedback. It treats optimization as
+ a natural language problem where the LLM proposes improvements to variables
+ based on instruction and feedback.
+
+ Parameters
+ ----------
+ *args
+ Variable length argument list passed to parent class.
+ optimizer_prompt_symbol_set : OptimizerPromptSymbolSet, optional
+ The symbol set for formatting prompts and parsing outputs.
+ Defaults to OPROPromptSymbolSet().
+ include_example : bool, optional
+ Whether to include examples in the prompt. Default is False as
+ the default example in OptoPrimeV3 does not work well with OPRO.
+ memory_size : int, optional
+ Number of past optimization steps to remember. Default is 5.
+ **kwargs
+ Additional keyword arguments passed to parent class.
+
+ Attributes
+ ----------
+ representation_prompt : str
+ Template for explaining the problem representation to the LLM.
+ output_format_prompt_template : str
+ Template for specifying the expected output format.
+ user_prompt_template : str
+ Template for presenting the problem instance to the LLM.
+ final_prompt : str
+ Template for requesting the final revised solutions.
+ default_objective : str
+ Default objective when none is specified.
+
+ Methods
+ -------
+ problem_instance(summary, mask=None)
+ Creates a ProblemInstance from an optimization summary.
+ initialize_prompt()
+ Initializes and formats the prompt templates.
+
+ Notes
+ -----
+ OPRO differs from OptoPrime by focusing on simpler problem representations
+ and clearer feedback incorporation. It is particularly effective for
+ problems where the optimization can be expressed in natural language.
+
+ See Also
+ --------
+ OptoPrimeV3 : Parent class providing core optimization functionality.
+ OPROPromptSymbolSet : Symbol set used for formatting.
+
+ Examples
+ --------
+ >>> optimizer = OPROv3(memory_size=10)
+ >>> # Use optimizer to improve solutions based on feedback
+ """
+ representation_prompt = dedent(
+ """
+ You're tasked to change the proposed solution according to feedback.
+
+ Specifically, a problem will be composed of the following parts:
+ - {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer.
+ - {variables_section_title}: the proposed solution that you can change/tweak (trainable).
+ - {feedback_section_title}: the feedback about the solution.
+ - {context_section_title}: the context information that might be useful to solve the problem.
+
+ If `data_type` is `code`, it means `{value_tag}` is the source code of a python code, which may include docstring and definitions.
+ """
+ )
+
+ output_format_prompt_template = dedent(
+ """
+ Output_format: Your output should be in the following XML/HTML format:
+
+ ```
+ {output_format}
+ ```
+
+ In <{reasoning_tag}>, explain the problem: 1. what the {instruction_section_title} means 2. what the {feedback_section_title} means to {variables_section_title} considering how {variables_section_title} follow {instruction_section_title}. 3. Reasoning about the suggested changes in {variables_section_title} (if needed) and the expected result.
+
+ If you need to suggest a change in the values of {variables_section_title}, write down the suggested values in <{improved_variable_tag}>. Remember you can change only the values in {variables_section_title}, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.
+
+ If no changes are needed, just output TERMINATE.
+ """
+ )
+
+ user_prompt_template = dedent(
+ """
+ Now you see problem instance:
+
+ ================================
+ {problem_instance}
+ ================================
+
+ """
+ )
+
+ context_prompt = dedent(
+ """
+ Here is some additional **context** to solving this problem:
+
+ {context}
+ """
+ )
+
+ final_prompt = dedent(
+ """
+ What are your revised solutions on {names}?
+
+ Your response:
+ """
+ )
+
+ # Default Objective becomes instruction for the next block
+ default_objective = "Propose a new solution that will incorporate the feedback."
+
+ def __init__(self, *args,
+ optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = None,
+ include_example=False, # default example in OptoPrimeV3 does not work in OPRO
+ memory_size=5,
+ problem_context: Optional[ContentBlockList] = None,
+ **kwargs):
+ """Initialize the OPROv2 optimizer.
+
+ Parameters
+ ----------
+ *args
+ Variable length argument list passed to parent class.
+ optimizer_prompt_symbol_set : OptimizerPromptSymbolSet, optional
+ The symbol set for formatting prompts and parsing outputs.
+ If None, uses OPROPromptSymbolSet().
+ include_example : bool, optional
+ Whether to include examples in the prompt. Default is False.
+ memory_size : int, optional
+ Number of past optimization steps to remember. Default is 5.
+ problem_context : ContentBlockList, optional
+ Additional context appended to each problem instance. Default is None.
+ **kwargs
+ Additional keyword arguments passed to parent class.
+ """
+ optimizer_prompt_symbol_set = optimizer_prompt_symbol_set or OPROPromptSymbolSet()
+ super().__init__(*args, optimizer_prompt_symbol_set=optimizer_prompt_symbol_set,
+ include_example=include_example, memory_size=memory_size,
+ problem_context=problem_context,
+ **kwargs)
+
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If more than one parameter contains image data.
+
+ Notes:
+ OPROv3 supports image parameters, but only one parameter can be
+ an image at a time since LLMs can only generate one image per inference.
+ """
+ # Count image parameters
+ image_params = [param for param in parameters if param.is_image]
+
+ if len(image_params) > 1:
+ param_names = ', '.join([f"'{p.name}'" for p in image_params])
+ raise AssertionError(
+ f"OPROv2 supports at most one image parameter, but found {len(image_params)}: "
+ f"{param_names}. LLMs can only generate one image at a time."
+ )
+
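+ # Illustrative behavior (hypothetical nodes, constructor details omitted): a parameter list with
+ # a single image-valued ParameterNode passes this check, while a list containing two image
+ # parameters raises AssertionError, since only one image can be generated per LLM inference.
+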
+ def problem_instance(self, summary, mask=None, use_content_blocks=False):
+ """Create a ProblemInstance from an optimization summary.
+
+ Parameters
+ ----------
+ summary : object
+ The optimization summary containing variables and feedback.
+ mask : list, optional
+ List of sections to mask/hide in the problem instance.
+ Can include "#Instruction", variable section title, or feedback section title.
+ use_content_blocks : bool, optional
+ If True, use content blocks for multimodal support (images).
+ If False, use text-only representation.
+
+ Returns
+ -------
+ ProblemInstance
+ A formatted problem instance ready for presentation to the LLM.
+
+ Notes
+ -----
+ The mask parameter allows selective hiding of problem components,
+ useful for ablation studies or specific optimization strategies.
+ """
+ mask = mask or []
+
+ if use_content_blocks:
+ # Use content block representation for multimodal support
+ variables_content = (
+ self.repr_node_value_compact_as_content_blocks(
+ summary.variables,
+ node_tag=self.optimizer_prompt_symbol_set.variable_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.variables_section_title not in mask
+ else ContentBlockList()
+ )
+ else:
+ # Use text-only representation (backward compatible)
+ variables_content = (
+ self.repr_node_value_compact(
+ summary.variables,
+ node_tag=self.optimizer_prompt_symbol_set.variable_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.variables_section_title not in mask
+ else ""
+ )
+
+ return ProblemInstance(
+ instruction=self.objective if "#Instruction" not in mask else "",
+ variables=variables_content,
+ feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "",
+ context=self.problem_context if hasattr(self, 'problem_context') else None,
+ optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set
+ )
+
+ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node",
+ value_tag="value", constraint_tag="constraint") -> ContentBlockList:
+ """Returns a ContentBlockList with compact representation, including images.
+
+ Consecutive TextContent blocks are merged for efficiency.
+ Non-image values are truncated. Images break the text flow.
+ """
+ from opto.optimizers.optoprime_v3 import value_to_image_content
+
+ blocks = ContentBlockList()
+
+ for k, v in node_dict.items():
+ value_data = v[0]
+ constraint = v[1]
+
+ if "__code" not in k:
+ # Check if this is an image
+ image_content = value_to_image_content(value_data)
+
+ if image_content is not None:
+ # Image node: output XML structure, then image, then closing
+ type_name = "image"
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n{constraint_tag}>" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+ xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+ blocks.append(xml_text)
+ blocks.append(image_content) # Image breaks the text flow
+
+ closing_text = f"\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+ blocks.append(closing_text)
+ else:
+ # Non-image node: truncated text representation
+ node_value = self.truncate_expression(value_data, self.initial_var_char_limit)
+ if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n{constraint_tag}>"
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n{value_tag}>\n{constraint_expr}\n{node_tag}>\n\n"
+ )
+ else:
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n{value_tag}>\n{node_tag}>\n\n"
+ )
+ else:
+ # Code node (never an image)
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n{constraint_tag}>"
+ signature = constraint.replace("The code should start with:\n", "")
+ func_body = value_data.replace(signature, "")
+ node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n{value_tag}>\n{constraint_expr}\n{node_tag}>\n\n"
+ )
+
+ return blocks
+
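+ # Sketch of the emitted structure (illustrative, using the OPRO symbol set where variable_tag is
+ # "solution" and value_tag is "value"): a text variable named "a" becomes a single text block like
+ #   <solution name="a" type="int">\n<value>\n5\n</value>\n</solution>
+ # whereas an image variable produces three blocks: the opening XML text, the ImageContent,
+ # and the closing XML text, so the image interrupts the text flow.
+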
+ def initialize_prompt(self):
+ """Initialize and format the prompt templates.
+
+ This method formats the representation_prompt and output_format_prompt
+ templates with the appropriate symbols from the optimizer_prompt_symbol_set.
+ It prepares the prompts for use in optimization.
+
+ Notes
+ -----
+ This method should be called during initialization to ensure all
+ prompt templates are properly formatted with the correct tags and symbols.
+ """
+ self.representation_prompt = self.representation_prompt.format(
+ variable_expression_format=dedent(f"""
+ <{self.optimizer_prompt_symbol_set.variable_tag} name="variable_name" type="data_type">
+ <{self.optimizer_prompt_symbol_set.value_tag}>
+ value
+ </{self.optimizer_prompt_symbol_set.value_tag}>
+ <{self.optimizer_prompt_symbol_set.constraint_tag}>
+ constraint_expression
+ </{self.optimizer_prompt_symbol_set.constraint_tag}>
+ </{self.optimizer_prompt_symbol_set.variable_tag}>
+ """),
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+ feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+ instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+ )
+ self.output_format_prompt = self.output_format_prompt_template.format(
+ output_format=self.optimizer_prompt_symbol_set.output_format,
+ reasoning_tag=self.optimizer_prompt_symbol_set.reasoning_tag,
+ improved_variable_tag=self.optimizer_prompt_symbol_set.improved_variable_tag,
+ instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+ feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+ variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+ )
diff --git a/opto/optimizers/optimizer.py b/opto/optimizers/optimizer.py
index 79b37370..bc965e48 100644
--- a/opto/optimizers/optimizer.py
+++ b/opto/optimizers/optimizer.py
@@ -69,9 +69,13 @@ class AbstractOptimizer:
"""
def __init__(self, parameters: List[ParameterNode], *args, **kwargs):
- assert type(parameters) is list
- assert all([isinstance(p, ParameterNode) for p in parameters])
+ self.parameter_check(parameters)
+ # these basic checks always run and cannot be overridden by subclasses
+ assert type(parameters) is list, "Parameters must be a list."
+ assert all([isinstance(p, ParameterNode) for p in parameters]), "Parameters must be a list of ParameterNode instances."
assert len(parameters) > 0, 'Parameters list is empty.'
+ for p in parameters:
+ assert p.trainable, "Parameter {} must be trainable.".format(p.name)
self.parameters = parameters
def step(self):
@@ -87,6 +91,17 @@ def propagator(self):
"""Return a Propagator object that can be used to propagate feedback in backward."""
raise NotImplementedError
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+ """
+ pass
+
+
class Optimizer(AbstractOptimizer):
"""Base class for graph-based optimizers in the Trace framework.
@@ -127,7 +142,7 @@ class Optimizer(AbstractOptimizer):
update(update_dict)
Apply updates to trainable parameters.
backward(node, *args, **kwargs)
- Propagate feedback through the graph.
+ Propagate feedback through the graph. Feedback is passed in through *args and **kwargs.
zero_feedback()
Clear accumulated feedback from all parameters.
save(path)
@@ -176,6 +191,12 @@ class Optimizer(AbstractOptimizer):
ParameterNode : Parameters being optimized
Projection : Constraints applied during optimization
+ Usage
+ --------
+ result = traced_computation(x)
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'user feedback')
+
Examples
--------
>>> class MyOptimizer(Optimizer):
@@ -353,9 +374,12 @@ def backward(self, node: Node, *args, **kwargs):
node : Node
Starting node for backward propagation.
*args
- Additional arguments passed to node.backward().
+ Additional arguments passed to node.backward(*args, **kwargs).
+ This corresponds to the positional arguments of node.backward().
**kwargs
- Additional keyword arguments passed to node.backward().
+ Additional keyword arguments passed to node.backward(*args, **kwargs).
+ This corresponds to the keyword arguments of node.backward().
+ If 'propagator' is not provided, uses the optimizer's propagator.
Returns
-------
@@ -364,9 +388,15 @@ def backward(self, node: Node, *args, **kwargs):
Notes
-----
- Uses the optimizer's propagator for feedback processing.
+ Uses the optimizer's propagator for feedback processing by default.
+
+ Usage
+ ------
+ optimizer.backward(result, 'make this number bigger', propagator=custom_propagator)
+ optimizer.backward(result, feedback='make this number bigger')
"""
- return node.backward(*args, propagator=self.propagator, **kwargs)
+ kwargs.setdefault('propagator', self.propagator)
+ return node.backward(*args, **kwargs)
def save(self, path: str):
"""Save the optimizer state to a file."""
diff --git a/opto/optimizers/optoprime.py b/opto/optimizers/optoprime.py
index 8727f743..67c2018c 100644
--- a/opto/optimizers/optoprime.py
+++ b/opto/optimizers/optoprime.py
@@ -525,6 +525,24 @@ def __init__(
self.use_json_object_format = use_json_object_format
self.highlight_variables = highlight_variables
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If any parameter contains image data.
+ """
+ # Ensure no parameters contain image data
+ for param in parameters:
+ assert not param.is_image, (
+ f"Parameter '{param.name}' contains image data. "
+ f"OptoPrimeV1 optimizer does not support image parameters."
+ )
+
def default_propagator(self):
"""Return the default Propagator object of the optimizer."""
return GraphPropagator()
diff --git a/opto/optimizers/optoprime_v2.py b/opto/optimizers/optoprime_v2.py
index a512af8e..6731c49b 100644
--- a/opto/optimizers/optoprime_v2.py
+++ b/opto/optimizers/optoprime_v2.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass, asdict
from opto.optimizers.optoprime import OptoPrime, FunctionFeedback
from opto.trace.utils import dedent
-from opto.optimizers.utils import truncate_expression, extract_xml_like_data
+from opto.optimizers.utils import truncate_expression, extract_xml_like_data, MultiModalPayload
from opto.trace.nodes import ParameterNode, Node, MessageNode
from opto.trace.propagators import TraceGraph, GraphPropagator
@@ -16,7 +16,6 @@
import re
from typing import Dict, Any
-
class OptimizerPromptSymbolSet:
"""
By inheriting this class and pass into the optimizer. People can change the optimizer documentation
@@ -37,6 +36,7 @@ class OptimizerPromptSymbolSet:
instruction_section_title = "# Instruction"
code_section_title = "# Code"
documentation_section_title = "# Documentation"
+ context_section_title = "# Context"
node_tag = "node" # nodes that are constants in the graph
variable_tag = "variable" # nodes that can be changed
@@ -141,6 +141,7 @@ def default_prompt_symbols(self) -> Dict[str, str]:
"instruction": self.instruction_section_title,
"code": self.code_section_title,
"documentation": self.documentation_section_title,
+ "context": self.context_section_title
}
@@ -149,7 +150,7 @@ class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet):
expect_json = True
- custom_output_format_instruction = """
+ custom_output_format_instruction = dedent("""
{{
"reasoning": ,
"suggestion": {{
@@ -157,7 +158,7 @@ class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet):
: ,
}}
}}
- """
+ """)
def example_output(self, reasoning, variables):
"""
@@ -172,10 +173,13 @@ def example_output(self, reasoning, variables):
}
return json.dumps(output, indent=2)
- def output_response_extractor(self, response: str, suggestion_tag = "suggestion") -> Dict[str, Any]:
- # Use extract_llm_suggestion from OptoPrime => it could be implemented the other way around (OptoPrime would uses this helper but it should be moved out of OptoPrimev2)
- return OptoPrime.extract_llm_suggestion(self, response, suggestion_tag=suggestion_tag, reasoning_tag="reasoning", return_only_suggestion=False)
-
+ def output_response_extractor(self, response: str) -> Dict[str, Any]:
+ """
+ Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic.
+ """
+ # Reuse the centralized extraction logic from OptoPrime (called unbound, as before)
+ return OptoPrime.extract_llm_suggestion(self, response)
class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
variables_section_title = "# Variables"
@@ -186,6 +190,7 @@ class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
instruction_section_title = "# Instruction"
code_section_title = "# Code"
documentation_section_title = "# Documentation"
+ context_section_title = "# Context"
node_tag = "const" # nodes that are constants in the graph
variable_tag = "var" # nodes that can be changed
@@ -208,6 +213,7 @@ class ProblemInstance:
others: str
outputs: str
feedback: str
+ context: Optional[str]
optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
@@ -240,7 +246,7 @@ class ProblemInstance:
)
def __repr__(self) -> str:
- return self.problem_template.format(
+ optimization_query = self.problem_template.format(
instruction=self.instruction,
code=self.code,
documentation=self.documentation,
@@ -248,13 +254,25 @@ def __repr__(self) -> str:
inputs=self.inputs,
outputs=self.outputs,
others=self.others,
- feedback=self.feedback,
+ feedback=self.feedback
)
+ context_section = dedent("""
+
+ # Context
+ {context}
+ """)
+
+ if self.context is not None and self.context.strip() != "":
+ context_section = context_section.format(context=self.context)
+ optimization_query += context_section
+
+ return optimization_query
+
@dataclass
class MemoryInstance:
- variables: Dict[str, Tuple[Any, str]] # name -> (data, constraint)
+ variables: Dict[str, Tuple[Any, str]] # name -> (data, constraint)
feedback: str
optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
@@ -303,6 +321,7 @@ class OptoPrimeV2(OptoPrime):
- {others_section_title}: the intermediate values created through the code execution.
- {outputs_section_title}: the result of the code output.
- {feedback_section_title}: the feedback about the code's execution result.
+ - {context_section_title}: the context information that might be useful to solve the problem.
In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, the format is:
@@ -357,17 +376,22 @@ class OptoPrimeV2(OptoPrime):
example_prompt = dedent(
"""
-
Here are some feasible but not optimal solutions for the current problem instance. Consider this as a hint to help you understand the problem better.
================================
-
{examples}
-
================================
"""
)
+ context_prompt = dedent(
+ """
+ Here is some additional **context** to solving this problem:
+
+ {context}
+ """
+ )
+
final_prompt = dedent(
"""
What are your suggestions on variables {names}?
@@ -393,14 +417,14 @@ def __init__(
optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(),
use_json_object_format=True, # whether to use json object format for the response when calling LLM
truncate_expression=truncate_expression,
+ problem_context: Optional[str] = None,
**kwargs,
):
super().__init__(parameters, *args, propagator=propagator, **kwargs)
- if optimizer_prompt_symbol_set is None:
- optimizer_prompt_symbol_set = OptimizerPromptSymbolSet()
-
self.truncate_expression = truncate_expression
+ self.problem_context = problem_context
+ self.multimodal_payload = MultiModalPayload()
self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False
self.ignore_extraction_error = ignore_extraction_error
@@ -443,6 +467,62 @@ def __init__(
self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols)
self.initialize_prompt()
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If more than one parameter contains image data.
+
+ Notes:
+ OptoPrimeV2 supports image parameters, but only one parameter can be
+ an image at a time since LLMs can only generate one image per inference.
+ """
+ # Count image parameters
+ image_params = [param for param in parameters if param.is_image]
+
+ if len(image_params) > 1:
+ param_names = ', '.join([f"'{p.name}'" for p in image_params])
+ raise AssertionError(
+ f"OptoPrimeV2 supports at most one image parameter, but found {len(image_params)}: "
+ f"{param_names}. LLMs can only generate one image at a time."
+ )
+
+ def add_image_context(self, image: Union[str, Any], context: str = "", format: str = "PNG"):
+ """
+ Add an image to the optimizer context.
+
+ Args:
+ image: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ context: Optional context text to describe the image. If empty, uses default.
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+ """
+ if self.problem_context is None:
+ self.problem_context = ""
+
+ if context == "":
+ context = "The attached image is given to the workflow. You should use the image to help you understand the problem and provide better suggestions. You can refer to the image when providing your suggestions."
+
+ self.problem_context += f"{context}\n\n"
+
+ # Set the image using the multimodal payload
+ self.multimodal_payload.set_image(image, format=format)
+
+ self.initialize_prompt()
+
+ def add_context(self, context: str):
+ if self.problem_context is None:
+ self.problem_context = ""
+ self.problem_context += f"{context}\n\n"
+ self.initialize_prompt()
+
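+ # Illustrative usage (hypothetical file name and URL):
+ #
+ #   optimizer.add_context("The workflow renders an HTML layout from a JSON spec.")
+ #   optimizer.add_image_context("figure.png")                        # local file path
+ #   optimizer.add_image_context("https://example.com/figure.png")    # image URL
+ #
+ # Each call appends to `problem_context` and re-runs initialize_prompt().
+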
def initialize_prompt(self):
self.representation_prompt = self.representation_prompt.format(
variable_expression_format=dedent(f"""
@@ -463,7 +543,8 @@ def initialize_prompt(self):
instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
- others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", "")
+ others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
)
self.output_format_prompt = self.output_format_prompt_template.format(
output_format=self.optimizer_prompt_symbol_set.output_format,
@@ -476,7 +557,8 @@ def initialize_prompt(self):
documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
- others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", "")
+ others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
)
def repr_node_value(self, node_dict, node_tag="node",
@@ -523,7 +605,9 @@ def repr_node_value_compact(self, node_dict, node_tag="node",
return "\n".join(temp_list)
def construct_prompt(self, summary, mask=None, *args, **kwargs):
- """Construct the system and user prompt."""
+ """Construct the system and user prompt.
+ Expanded to construct a list of content blocks.
+ """
system_prompt = (
self.representation_prompt + self.output_format_prompt
) # generic representation + output rule
@@ -603,6 +687,7 @@ def problem_instance(self, summary, mask=None):
constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag) if self.optimizer_prompt_symbol_set.others_section_title not in mask else ""
),
feedback=summary.user_feedback if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else "",
+ context=self.problem_context if self.optimizer_prompt_symbol_set.context_section_title not in mask else "",
optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set
)
@@ -664,9 +749,17 @@ def call_llm(
if verbose not in (False, "output"):
print("Prompt\n", system_prompt + user_prompt)
+ user_message_content = []
+ # Add image content block if available
+ image_block = self.multimodal_payload.get_content_block()
+ if image_block is not None:
+ user_message_content.append(image_block)
+
+ user_message_content.append({"type": "text", "text": user_prompt})
+
messages = [
{"role": "system", "content": system_prompt},
- {"role": "user", "content": user_prompt},
+ {"role": "user", "content": user_message_content},
]
response_format = {"type": "json_object"} if self.use_json_object_format else None
@@ -678,3 +771,42 @@ def call_llm(
if verbose:
print("LLM response:\n", response)
return response
+
+ def save(self, path: str):
+ """Save the optimizer state to a file."""
+ with open(path, 'wb') as f:
+ pickle.dump({
+ "truncate_expression": self.truncate_expression,
+ "use_json_object_format": self.use_json_object_format,
+ "ignore_extraction_error": self.ignore_extraction_error,
+ "objective": self.objective,
+ "initial_var_char_limit": self.initial_var_char_limit,
+ "optimizer_prompt_symbol_set": self.optimizer_prompt_symbol_set,
+ "include_example": self.include_example,
+ "max_tokens": self.max_tokens,
+ "memory": self.memory,
+ "default_prompt_symbols": self.default_prompt_symbols,
+ "prompt_symbols": self.prompt_symbols,
+ "representation_prompt": self.representation_prompt,
+ "output_format_prompt": self.output_format_prompt,
+ "context_prompt": self.context_prompt
+ }, f)
+
+ def load(self, path: str):
+ """Load the optimizer state from a file."""
+ with open(path, 'rb') as f:
+ state = pickle.load(f)
+ self.truncate_expression = state["truncate_expression"]
+ self.use_json_object_format = state["use_json_object_format"]
+ self.ignore_extraction_error = state["ignore_extraction_error"]
+ self.objective = state["objective"]
+ self.initial_var_char_limit = state["initial_var_char_limit"]
+ self.optimizer_prompt_symbol_set = state["optimizer_prompt_symbol_set"]
+ self.include_example = state["include_example"]
+ self.max_tokens = state["max_tokens"]
+ self.memory = state["memory"]
+ self.default_prompt_symbols = state["default_prompt_symbols"]
+ self.prompt_symbols = state["prompt_symbols"]
+ self.representation_prompt = state["representation_prompt"]
+ self.output_format_prompt = state["output_format_prompt"]
+ self.context_prompt = state["context_prompt"]
diff --git a/opto/optimizers/optoprime_v3.py b/opto/optimizers/optoprime_v3.py
new file mode 100644
index 00000000..ae464497
--- /dev/null
+++ b/opto/optimizers/optoprime_v3.py
@@ -0,0 +1,1265 @@
+"""
+Key differences from v2:
+1. Use the new backbone conversation history manager
+2. Support multimodal nodes (both trainable and non-trainable)
+"""
+
+import re
+import json
+from typing import List, Union, Tuple, Optional
+from dataclasses import dataclass
+from opto.optimizers.optoprime import OptoPrime, node_to_function_feedback
+from opto.trace.utils import dedent
+from opto.optimizers.utils import truncate_expression, extract_xml_like_data
+from opto.trace.nodes import ParameterNode, is_image
+from opto.trace.propagators import GraphPropagator
+from opto.trace.propagators.propagators import Propagator
+
+from opto.utils.llm import AbstractModel, LLM
+from opto.optimizers.buffers import FIFOBuffer
+from opto.utils.backbone import (
+ ConversationHistory, UserTurn, AssistantTurn, PromptTemplate,
+ TextContent, ImageContent, ContentBlockList,
+ DEFAULT_IMAGE_PLACEHOLDER, Content
+)
+import copy
+import pickle
+from typing import Dict, Any
+
+
+def value_to_image_content(value: Any) -> Optional[ImageContent]:
+ """Convert a value to ImageContent if it's an image, otherwise return None.
+
+ Uses is_image() from opto.trace.nodes for validation (stricter than ImageContent.build,
+ e.g., only accepts URLs with image extensions), then delegates to ImageContent.build().
+
+ Supports (via is_image detection):
+ - Base64 data URL strings (data:image/...)
+ - HTTP/HTTPS URLs pointing to images (pattern-based, must have image extension)
+ - PIL Image objects
+ - Raw image bytes
+ """
+ if not is_image(value):
+ return None
+ return ImageContent.build(value)
+
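+# Illustrative check (assumes Pillow is installed; not an exhaustive list of supported inputs):
+#
+#   from PIL import Image
+#   value_to_image_content(Image.new("RGB", (4, 4)))   # -> ImageContent
+#   value_to_image_content(42)                         # -> None (not an image)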
+
+class OptimizerPromptSymbolSet:
+ """
+ By inheriting this class and passing an instance into the optimizer, users can change the optimizer's prompt documentation.
+
+ It divides into three parts:
+ - Section titles: the title of each section in the prompt
+ - Node tags: the tags that capture the graph structure (only tag names are allowed to be changed)
+ - Output format: the format of the output of the optimizer
+ """
+
+ # Titles should be written as markdown titles (space between # and title)
+ # In text, we automatically remove space in the title, so it will become `#Title`
+ variables_section_title = "# Variables"
+ inputs_section_title = "# Inputs"
+ outputs_section_title = "# Outputs"
+ others_section_title = "# Others"
+ feedback_section_title = "# Feedback"
+ instruction_section_title = "# Instruction"
+ code_section_title = "# Code"
+ documentation_section_title = "# Documentation"
+ context_section_title = "# Context"
+
+ node_tag = "node" # nodes that are constants in the graph
+ variable_tag = "variable" # nodes that can be changed
+ value_tag = "value" # inside node, we have value tag
+ constraint_tag = "constraint" # inside node, we have constraint tag
+
+ # output format
+ # Note: we currently don't support extracting formats like "```code```" because we assume the supplied tag is a bare tag name
+ reasoning_tag = "reasoning"
+ improved_variable_tag = "variable"
+ name_tag = "name"
+
+ # only used by JSON format
+ suggestion_tag = "suggestion"
+
+ expect_json = False # this will stop `enforce_json` arguments passed to LLM calls
+
+ # custom output format
+ # if this is not None, then the user needs to implement the following functions:
+ # - output_response_extractor
+ # - example_output
+ custom_output_format_instruction = None
+
+ @property
+ def output_format(self) -> str:
+ """
+ This function defines the input to:
+ ```
+ {output_format}
+ ```
+ In the self.output_format_prompt_template of OptoPrimeV3.
+ """
+ if self.custom_output_format_instruction is None:
+ # we use a default XML like format
+ return dedent(f"""
+ <{self.reasoning_tag}>
+ reasoning
+ </{self.reasoning_tag}>
+ <{self.improved_variable_tag}>
+ <{self.name_tag}>variable_name</{self.name_tag}>
+ <{self.value_tag}>
+ value
+ </{self.value_tag}>
+ </{self.improved_variable_tag}>
+ """)
+ else:
+ return self.custom_output_format_instruction.strip()
+
+ def example_output(self, reasoning, variables):
+ """
+ reasoning: str
+ variables: format {variable_name, value}
+ """
+ if self.custom_output_format_instruction is not None:
+ raise NotImplementedError
+ else:
+ # Build the output string in the same XML-like format as self.output_format
+ output = []
+ if reasoning != "":
+ output.append(f"<{self.reasoning_tag}>")
+ output.append(reasoning)
+ output.append(f"{self.reasoning_tag}>")
+ for var_name, value in variables.items():
+ output.append(f"<{self.improved_variable_tag}>")
+ output.append(f"<{self.name_tag}>{var_name}{self.name_tag}>")
+ output.append(f"<{self.value_tag}>")
+ output.append(str(value))
+ output.append(f"{self.value_tag}>")
+ output.append(f"{self.improved_variable_tag}>")
+ return "\n".join(output)
+
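+ # Illustrative output of example_output("change a to 10", {"a": 10}):
+ #
+ #   <reasoning>
+ #   change a to 10
+ #   </reasoning>
+ #   <variable>
+ #   <name>a</name>
+ #   <value>
+ #   10
+ #   </value>
+ #   </variable>
+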
+ def output_response_extractor(self, response: str) -> Dict[str, Any]:
+ # the response here should just be plain text
+
+ if self.custom_output_format_instruction is None:
+ extracted_data = extract_xml_like_data(response,
+ reasoning_tag=self.reasoning_tag,
+ improved_variable_tag=self.improved_variable_tag,
+ name_tag=self.name_tag,
+ value_tag=self.value_tag)
+
+ # if the suggested value is a code, and the entire code body is empty (i.e., not even function signature is present)
+ # then we remove such suggestion
+ keys_to_remove = []
+ for key, value in extracted_data['variables'].items():
+ if "__code" in key and value.strip() == "":
+ keys_to_remove.append(key)
+
+ for key in keys_to_remove:
+ del extracted_data['variables'][key]
+
+ return extracted_data
+ else:
+ raise NotImplementedError(
+ "If you supplied a custom output format prompt template, you need to implement your own response extractor")
+
+ @property
+ def default_prompt_symbols(self) -> Dict[str, str]:
+ return {
+ "variables": self.variables_section_title,
+ "inputs": self.inputs_section_title,
+ "outputs": self.outputs_section_title,
+ "others": self.others_section_title,
+ "feedback": self.feedback_section_title,
+ "instruction": self.instruction_section_title,
+ "code": self.code_section_title,
+ "documentation": self.documentation_section_title,
+ "context": self.context_section_title,
+ "reasoning": self.reasoning_tag,
+ "suggestion": self.suggestion_tag
+ }
+
+
+class OptimizerPromptSymbolSetJSON(OptimizerPromptSymbolSet):
+ """We enforce a JSON output format extraction"""
+
+ expect_json = True
+
+ custom_output_format_instruction = dedent("""
+ {
+ "reasoning": ,
+ "suggestion": {
+ : ,
+ : ,
+ }
+ }
+ """)
+
+ def example_output(self, reasoning, variables):
+ """
+ reasoning: str
+ variables: format {variable_name, value}
+ """
+
+ # Build the output string in the same JSON format as described in custom_output_format_instruction
+ output = {
+ "reasoning": reasoning,
+ "suggestion": {var_name: value for var_name, value in variables.items()}
+ }
+ return json.dumps(output, indent=2)
+
+ def output_response_extractor(self, response: str) -> Dict[str, Any]:
+ """
+ Extracts reasoning and suggestion variables from the LLM response using OptoPrime's extraction logic.
+ """
+ # Use the centralized extraction logic from OptoPrime
+ suggestion_tag = self.default_prompt_symbols.get("suggestion", "suggestion")
+ reasoning_tag = self.default_prompt_symbols.get("reasoning", "reasoning")
+
+ ignore_extraction_error = True
+
+ reasoning = "(Unable to extract, possibly due to parsing failure)"
+
+ if "```" in response:
+ # First try to extract from ```json ... ``` blocks
+ json_match = re.findall(r"```json\s*(.*?)```", response, re.DOTALL)
+ if len(json_match) > 0:
+ response = json_match[0].strip()
+ else:
+ # Fall back to regular ``` ... ``` blocks
+ match = re.findall(r"```(.*?)```", response, re.DOTALL)
+ if len(match) > 0:
+ # Remove language identifier if present (e.g., "json", "python")
+ content = match[0].strip()
+ # Check if first line is a language identifier
+ lines = content.split('\n', 1)
+ if len(lines) > 1 and lines[0].strip().isalpha() and len(lines[0].strip()) < 20:
+ response = lines[1].strip()
+ else:
+ response = content
+
+ json_extracted = {}
+ suggestion = {}
+ attempt_n = 0
+ while attempt_n < 2:
+ try:
+ json_extracted = json.loads(response)
+ if isinstance(json_extracted, dict): # trim all whitespace keys in the json_extracted
+ json_extracted = {k.strip(): v for k, v in json_extracted.items()}
+ suggestion = json_extracted.get(suggestion_tag, json_extracted)
+ reasoning = json_extracted.get(reasoning_tag, "")
+ break
+ except json.JSONDecodeError:
+ response = re.findall(r"{.*}", response, re.DOTALL)
+ if len(response) > 0:
+ response = response[0]
+ attempt_n += 1
+ except Exception:
+ attempt_n += 1
+
+ if not isinstance(suggestion, dict):
+ suggestion = json_extracted if isinstance(json_extracted, dict) else {}
+
+ if len(suggestion) == 0:
+ pattern = rf'"{suggestion_tag}"\s*:\s*\{{(.*?)\}}'
+ suggestion_match = re.search(pattern, str(response), re.DOTALL)
+ if suggestion_match:
+ suggestion = {}
+ suggestion_content = suggestion_match.group(1)
+ pair_pattern = r'"([a-zA-Z0-9_]+)"\s*:\s*"(.*)"'
+ pairs = re.findall(pair_pattern, suggestion_content, re.DOTALL)
+ for key, value in pairs:
+ suggestion[key] = value
+
+ if len(suggestion) == 0 and not ignore_extraction_error:
+ print(f"Cannot extract {suggestion_tag} from LLM's response:\n{response}")
+
+ keys_to_remove = []
+ for key, value in suggestion.items():
+ if "__code" in key and value.strip() == "":
+ keys_to_remove.append(key)
+ for key in keys_to_remove:
+ del suggestion[key]
+
+ return {"reasoning": reasoning, "variables": suggestion}
+
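+ # Illustrative behavior: a fenced response such as
+ #   ```json
+ #   {"reasoning": "increase a", "suggestion": {"a": "10"}}
+ #   ```
+ # is unwrapped from the code fence and returned as
+ #   {"reasoning": "increase a", "variables": {"a": "10"}}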
+
+class OptimizerPromptSymbolSet2(OptimizerPromptSymbolSet):
+ variables_section_title = "# Variables"
+ inputs_section_title = "# Inputs"
+ outputs_section_title = "# Outputs"
+ others_section_title = "# Others"
+ feedback_section_title = "# Feedback"
+ instruction_section_title = "# Instruction"
+ code_section_title = "# Code"
+ documentation_section_title = "# Documentation"
+ context_section_title = "# Context"
+
+ node_tag = "const" # nodes that are constants in the graph
+ variable_tag = "var" # nodes that can be changed
+ value_tag = "data" # inside node, we have value tag
+ constraint_tag = "constraint" # inside node, we have constraint tag
+
+ # output format
+ reasoning_tag = "reason"
+ improved_variable_tag = "var"
+ name_tag = "name"
+
+
+@dataclass
+class FunctionFeedback:
+ """Container for structured feedback from function execution traces.
+
+ Used by OptoPrime to organize execution traces into a format suitable
+ for LLM-based optimization.
+
+ Attributes
+ ----------
+ graph : list[tuple[int, str]]
+ Topologically sorted function calls with (depth, representation) pairs.
+ documentation : dict[str, str]
+ Mapping of function names to their documentation strings.
+ others : dict[str, Any]
+ Intermediate variables with (data, description) tuples.
+ roots : dict[str, Any]
+ Input/root variables with (data, description) tuples.
+ output : dict[str, Any]
+ Output/leaf variables with (data, description) tuples.
+ user_feedback : Union[str, ContentBlockList]
+ User-provided feedback about the execution. May include images.
+
+ Notes
+ -----
+ This structure separates the execution trace into logical components
+ that can be formatted into prompts for LLM-based optimization.
+ """
+
+ graph: List[
+ Tuple[int, str]
+ ] # Each item is a representation of a function call. The items are topologically sorted.
+ documentation: Dict[str, str] # Function name and its documentation string
+ others: Dict[str, Any] # Intermediate variable names and their data
+ roots: Dict[str, Any] # Root variable name and its data
+ output: Dict[str, Any] # Leaf variable name and its data
+ user_feedback: Union[str, ContentBlockList] # User feedback at the leaf of the graph (may include images)
+
+
+@dataclass
+class ProblemInstance:
+ """Problem instance with multimodal content support.
+
+ A composite of multiple ContentBlockLists representing different parts
+ of a problem. Uses ContentBlockList for variables, inputs, others, and
+ outputs to support both text and image content in a unified way.
+
+ The class provides:
+ - __repr__: Returns text-only representation for logging
+ - to_content_blocks(): Returns ContentBlockList for multimodal prompts
+ - has_images(): Check if any field contains images
+ """
+ instruction: str
+ code: str
+ documentation: str
+ variables: ContentBlockList
+ inputs: ContentBlockList
+ others: ContentBlockList
+ outputs: ContentBlockList
+ feedback: ContentBlockList # May contain images mixed with text
+ context: Optional[ContentBlockList]
+
+ optimizer_prompt_symbol_set: OptimizerPromptSymbolSet
+
+ problem_template = dedent(
+ """
+ # Instruction
+ {instruction}
+
+ # Code
+ {code}
+
+ # Documentation
+ {documentation}
+
+ # Variables
+ {variables}
+
+ # Inputs
+ {inputs}
+
+ # Others
+ {others}
+
+ # Outputs
+ {outputs}
+
+ # Feedback
+ {feedback}
+ """
+ )
+
+ def __repr__(self) -> str:
+ """Return text-only representation for backward compatibility.
+
+ Uses ContentBlockList.to_text() for fields that may contain images.
+ """
+ optimization_query = self.problem_template.format(
+ instruction=self.instruction,
+ code=self.code,
+ documentation=self.documentation,
+ variables=self.variables.to_text(),
+ inputs=self.inputs.to_text(),
+ outputs=self.outputs.to_text(),
+ others=self.others.to_text(),
+ feedback=self.feedback.to_text()
+ )
+
+ return optimization_query
+
+ def to_content_blocks(self) -> ContentBlockList:
+ """Convert the problem instance to a list of ContentBlocks.
+
+ Consecutive TextContent blocks are merged into a single block for efficiency.
+ Images and other non-text blocks are kept separate.
+
+ Returns:
+ ContentBlockList: A list containing TextContent and ImageContent blocks
+ that represent the complete problem instance including any images
+ from variables, inputs, others, or outputs.
+ """
+ blocks = ContentBlockList()
+
+ # Header sections (always text)
+ header = dedent(f"""
+ # Instruction
+ {self.instruction}
+
+ # Code
+ {self.code}
+
+ # Documentation
+ {self.documentation}
+
+ # Variables
+ """)
+ blocks.append(header)
+
+ # Variables section (may contain images)
+ blocks.extend(self.variables)
+
+ # Inputs section
+ blocks.append("\n\n# Inputs\n")
+ blocks.extend(self.inputs)
+
+ # Others section
+ blocks.append("\n\n# Others\n")
+ blocks.extend(self.others)
+
+ # Outputs section
+ blocks.append("\n\n# Outputs\n")
+ blocks.extend(self.outputs)
+
+ # Context section (optional)
+ if self.context is not None and self.context.to_text().strip() != "":
+ blocks.append(f"\n\n# Context\n") # section name
+ blocks.extend(self.context) # extend the blocks
+
+ # Feedback section (may contain images)
+ blocks.append("\n\n# Feedback\n")
+ blocks.extend(self.feedback)
+
+ return blocks
+
+ def has_images(self) -> bool:
+ """Check if this problem instance contains any images.
+
+ Efficiently checks each ContentBlockList field directly
+ without building full content blocks.
+
+ Returns:
+ bool: True if any field contains ImageContent blocks.
+ """
+ return any(
+ field.has_images()
+ for field in [self.variables, self.inputs, self.others, self.outputs, self.feedback]
+ )
+
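+ # Illustrative note: to_content_blocks() merges contiguous text into a single TextContent, so a
+ # purely textual instance collapses to one block, while an image in `variables` splits the list
+ # into text / ImageContent / text segments.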
+
+
+
+
+# we provide two aliases for the Content class for semantic convenience
+Context = Content
+Feedback = Content
+
+class OptoPrimeV3(OptoPrime):
+ # This is generic representation prompt, which just explains how to read the problem.
+ representation_prompt = dedent(
+ """You're tasked to solve a coding/algorithm problem. You will see the instruction, the code, the documentation of each function used in the code, and the feedback about the execution result.
+
+ Specifically, a problem will be composed of the following parts:
+ - {instruction_section_title}: the instruction which describes the things you need to do or the question you should answer.
+ - {code_section_title}: the code defined in the problem.
+ - {documentation_section_title}: the documentation of each function used in #Code. The explanation might be incomplete and just contain high-level description. You can use the values in #Others to help infer how those functions work.
+ - {variables_section_title}: the input variables that you can change/tweak (trainable).
+ - {inputs_section_title}: the values of fixed inputs to the code, which CANNOT be changed (fixed).
+ - {others_section_title}: the intermediate values created through the code execution.
+ - {outputs_section_title}: the result of the code output.
+ - {feedback_section_title}: the feedback about the code's execution result.
+ - {context_section_title}: the context information that might be useful to solve the problem.
+
+ In `{variables_section_title}`, `{inputs_section_title}`, `{outputs_section_title}`, and `{others_section_title}`, the format is:
+
+ For variables we express as this:
+ {variable_expression_format}
+
+ If `data_type` is `code`, it means `{value_tag}` is the source code of a python code, which may include docstring and definitions."""
+ )
+
+ # Optimization
+ default_objective = "You need to change the `{value_tag}` of the variables in {variables_section_title} to improve the output in accordance to {feedback_section_title}."
+
+ output_format_prompt_template = dedent(
+ """
+ Output_format: Your output should be in the following XML or JSON format:
+
+ {output_format}
+
+ In <{reasoning_tag}>, explain the problem: 1. what the {instruction_section_title} means 2. what the {feedback_section_title} on {outputs_section_title} means to {variables_section_title} considering how {variables_section_title} are used in {code_section_title} and other values in {documentation_section_title}, {inputs_section_title}, {others_section_title}. 3. Reasoning about the suggested changes in {variables_section_title} (if needed) and the expected result.
+
+ If you need to suggest a change in the values of {variables_section_title}, write down the suggested values in <{improved_variable_tag}>. Remember you can change only the values in {variables_section_title}, not others. When `type` of a variable is `code`, you should write the new definition in the format of python code without syntax errors, and you should not change the function name or the function signature.
+
+ If no changes are needed, just output TERMINATE.
+ """
+ )
+
+ example_problem_template = PromptTemplate(dedent(
+ """
+ Here is an example of problem instance and response:
+
+ ================================
+ {example_problem}
+ ================================
+
+ Your response:
+ {example_response}
+ """
+ ))
+
+ user_prompt_template = PromptTemplate(dedent(
+ """
+ Now you see problem instance:
+
+ ================================
+ {problem_instance}
+ ================================
+
+ """
+ ))
+
+ final_prompt = dedent(
+ """
+ What are your suggestions on variables {names}?
+
+ Your response:
+ """
+ )
+
+ def __init__(
+ self,
+ parameters: List[ParameterNode],
+ llm: AbstractModel = None,
+ *args,
+ image_llm: AbstractModel = None,
+ propagator: Propagator = None,
+ objective: Union[None, str] = None,
+ ignore_extraction_error: bool = True,
+ # ignore the type conversion error when extracting updated values from LLM's suggestion
+ include_example=False,
+ memory_size=0, # Memory size to store the past feedback
+ max_tokens=8192,
+ log=True,
+ initial_var_char_limit=2000,
+ optimizer_prompt_symbol_set: OptimizerPromptSymbolSet = OptimizerPromptSymbolSet(),
+ use_json_object_format=True, # whether to use json object format for the response when calling LLM
+ truncate_expression=truncate_expression,
+ problem_context: Optional[ContentBlockList] = None,
+ **kwargs,
+ ):
+ super().__init__(parameters, *args, propagator=propagator, **kwargs)
+
+ self.truncate_expression = truncate_expression
+ self.problem_context: Optional[ContentBlockList] = problem_context
+ self.output_contains_image = False
+
+ self.use_json_object_format = use_json_object_format if optimizer_prompt_symbol_set.expect_json and use_json_object_format else False
+ self.ignore_extraction_error = ignore_extraction_error
+ self.llm = llm or LLM(mm_beta=True)
+ self.image_llm = image_llm
+
+ assert self.llm.mm_beta, "OptoPrimeV3 requires a multi-modal LLM backbone. Please use LLM(model='...', mm_beta=True)."
+
+ self.objective = objective or self.default_objective.format(value_tag=optimizer_prompt_symbol_set.value_tag,
+ variables_section_title=optimizer_prompt_symbol_set.variables_section_title,
+ feedback_section_title=optimizer_prompt_symbol_set.feedback_section_title)
+ self.initial_var_char_limit = initial_var_char_limit
+ self.optimizer_prompt_symbol_set = optimizer_prompt_symbol_set
+
+ self.example_problem_summary = FunctionFeedback(graph=[(1, 'y = add(x=a,y=b)'), (2, "z = subtract(x=y, y=c)")],
+ documentation={'add': 'This is an add operator of x and y.',
+ 'subtract': "subtract y from x"},
+ others={'y': (6, None)},
+ roots={'a': (5, "a > 0"),
+ 'b': (1, None),
+ 'c': (5, None)},
+ output={'z': (1, None)},
+ user_feedback='The result of the code is not as expected. The result should be 10, but the code returns 1'
+ )
+ self.example_problem_summary.variables = {'a': (5, "a > 0")}
+ self.example_problem_summary.inputs = {'b': (1, None), 'c': (5, None)}
+
+ self.example_problem = self.problem_instance(self.example_problem_summary)
+ self.example_response = self.optimizer_prompt_symbol_set.example_output(
+ reasoning="In this case, the desired response would be to change the value of input a to 14, as that would make the code return 10.",
+ variables={
+ 'a': 14,
+ }
+ )
+
+ self.include_example = include_example
+ self.max_tokens = max_tokens
+ self.log = [] if log else None
+ self.summary_log = [] if log else None
+ self.memory = FIFOBuffer(memory_size)
+ self.conversation_history = ConversationHistory()
+ self.conversation_length = memory_size # Number of conversation turns to keep
+
+ self.default_prompt_symbols = self.optimizer_prompt_symbol_set.default_prompt_symbols
+
+ self.prompt_symbols = copy.deepcopy(self.default_prompt_symbols)
+ self.initialize_instruct_prompt()
+
+ def parameter_check(self, parameters: List[ParameterNode]):
+ """Check if the parameters are valid.
+ This can be overloaded by subclasses to add more checks.
+
+ Args:
+ parameters: List[ParameterNode]
+ The parameters to check.
+
+ Raises:
+ AssertionError: If more than one parameter contains image data.
+
+ Notes:
+ OptoPrimeV3 supports image parameters, but only one parameter can be
+ an image at a time since LLMs can only generate one image per inference.
+ """
+ # Count image parameters
+ image_params = [param for param in parameters if param.is_image]
+
+ if len(image_params) > 1:
+ param_names = ', '.join([f"'{p.name}'" for p in image_params])
+ raise AssertionError(
+ f"OptoPrimeV3 supports at most one image parameter, but found {len(image_params)}: "
+ f"{param_names}. LLMs can only generate one image at a time."
+ )
+ if len(image_params) == 1:
+ self.output_contains_image = True
+
+ def add_context(self, *args, images: Optional[List[Any]] = None, format: str = "PNG"):
+ """Add context to the optimizer, supporting both text and images.
+
+ Two usage patterns are supported:
+
+ **Usage 1: Variadic arguments (alternating text and images)**
+
+ optimizer.add_context("text part 1", image_link, "text part 2", image_file)
+
+ Each argument is either a string (text) or an image source.
+
+ **Usage 2: Template with placeholders**
+
+ optimizer.add_context(
+ "text part 1 [IMAGE] text part 2 [IMAGE]",
+ images=[image_link, image_file]
+ )
+
+ The text contains `[IMAGE]` placeholders that are replaced by images
+ from the `images` list in order. The number of placeholders must match
+ the number of images.
+
+ Args:
+ *args: Variable arguments. In Usage 1, alternating text and images.
+ In Usage 2, a single template string with placeholders.
+ images: Optional list of image sources for Usage 2. Each can be:
+ - URL string (http/https)
+ - Local file path
+ - PIL Image object
+ - Numpy array
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+
+ Raises:
+ ValueError: If using Usage 2 and the number of placeholders doesn't
+ match the number of images.
+
+ Examples:
+ # Usage 1: Alternating text and images
+ optimizer.add_context("Here's the diagram:", "diagram.png", "And here's another:", "other.png")
+
+ # Usage 2: Template with placeholders
+ optimizer.add_context("See [IMAGE] and compare with [IMAGE]", images=["a.png", "b.png"])
+
+ # Text-only context
+ optimizer.add_context("Important background information")
+ """
+ ctx = Content(*args, images=images, format=format)
+
+ # Store the context
+ if self.problem_context is None:
+ self.problem_context = ctx
+ else:
+ # Append to existing context with a newline separator
+ self.problem_context.append("\n\n")
+ self.problem_context.extend(ctx.to_content_blocks())
+
+ def initialize_instruct_prompt(self):
+ self.representation_prompt = self.representation_prompt.format(
+ variable_expression_format=dedent(f"""
+ <{self.optimizer_prompt_symbol_set.variable_tag} name="variable_name" type="data_type">
+ <{self.optimizer_prompt_symbol_set.value_tag}>
+ value
+ </{self.optimizer_prompt_symbol_set.value_tag}>
+ <{self.optimizer_prompt_symbol_set.constraint_tag}>
+ constraint_expression
+ </{self.optimizer_prompt_symbol_set.constraint_tag}>
+ </{self.optimizer_prompt_symbol_set.variable_tag}>
+ """),
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+ inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
+ outputs_section_title=self.optimizer_prompt_symbol_set.outputs_section_title.replace(" ", ""),
+ feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+ instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+ code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
+ documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
+ others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+ context_section_title=self.optimizer_prompt_symbol_set.context_section_title.replace(" ", "")
+ )
+ self.output_format_prompt = self.output_format_prompt_template.format(
+ output_format=self.optimizer_prompt_symbol_set.output_format,
+ reasoning_tag=self.optimizer_prompt_symbol_set.reasoning_tag,
+ improved_variable_tag=self.optimizer_prompt_symbol_set.improved_variable_tag,
+ instruction_section_title=self.optimizer_prompt_symbol_set.instruction_section_title.replace(" ", ""),
+ feedback_section_title=self.optimizer_prompt_symbol_set.feedback_section_title.replace(" ", ""),
+ outputs_section_title=self.optimizer_prompt_symbol_set.outputs_section_title.replace(" ", ""),
+ code_section_title=self.optimizer_prompt_symbol_set.code_section_title.replace(" ", ""),
+ documentation_section_title=self.optimizer_prompt_symbol_set.documentation_section_title.replace(" ", ""),
+ variables_section_title=self.optimizer_prompt_symbol_set.variables_section_title.replace(" ", ""),
+ inputs_section_title=self.optimizer_prompt_symbol_set.inputs_section_title.replace(" ", ""),
+ others_section_title=self.optimizer_prompt_symbol_set.others_section_title.replace(" ", ""),
+ )
+
+ def repr_node_value(self, node_dict, node_tag="node",
+ value_tag="value", constraint_tag="constraint") -> str:
+ """Returns text-only representation of node values (backward compatible)."""
+ temp_list = []
+ for k, v in node_dict.items():
+ if "__code" not in k:
+ # For images, use placeholder text
+ value_repr = "[IMAGE]" if is_image(v[0]) else str(v[0])
+ if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+ constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{value_repr}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+ else:
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{value_repr}\n</{value_tag}>\n</{node_tag}>\n")
+ else:
+ constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+ signature = v[1].replace("The code should start with:\n", "")
+ func_body = v[0].replace(signature, "")
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+ return "\n".join(temp_list)
+
+ def repr_node_value_compact(self, node_dict, node_tag="node",
+ value_tag="value", constraint_tag="constraint") -> str:
+ """Returns text-only compact representation of node values (backward compatible)."""
+ temp_list = []
+ for k, v in node_dict.items():
+ if "__code" not in k:
+ # For images, use placeholder text
+ if is_image(v[0]):
+ node_value = "[IMAGE]"
+ else:
+ node_value = self.truncate_expression(v[0], self.initial_var_char_limit)
+ if v[1] is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+ constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+ else:
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(v[0]).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n</{node_tag}>\n")
+ else:
+ constraint_expr = f"<{constraint_tag}>\n{v[1]}\n</{constraint_tag}>"
+ # we only truncate the function body
+ signature = v[1].replace("The code should start with:\n", "")
+ func_body = v[0].replace(signature, "")
+ node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+ temp_list.append(
+ f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n")
+ return "\n".join(temp_list)
+
+ def repr_node_value_as_content_blocks(self, node_dict, node_tag="node",
+ value_tag="value", constraint_tag="constraint") -> ContentBlockList:
+ """Returns a ContentBlockList representing node values, including images.
+
+ Consecutive TextContent blocks are merged for efficiency.
+ For image values, the text segments before and after the image are kept as separate blocks.
+ """
+ blocks = ContentBlockList()
+
+ for k, v in node_dict.items():
+ value_data = v[0]
+ constraint = v[1]
+
+ if "__code" not in k:
+ # Check if this is an image
+ image_content = value_to_image_content(value_data)
+
+ if image_content is not None:
+ # Image node: output XML structure, then image, then closing
+ type_name = "image"
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+ xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+ blocks.append(xml_text)
+ blocks.append(image_content) # Image breaks the text flow
+
+ closing_text = f"\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+ blocks.append(closing_text)
+ else:
+ # Non-image node: text representation
+ if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+ )
+ else:
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{value_data}\n</{value_tag}>\n</{node_tag}>\n\n"
+ )
+ else:
+ # Code node (never an image)
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+ signature = constraint.replace("The code should start with:\n", "")
+ func_body = value_data.replace(signature, "")
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{func_body}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+ )
+
+ return blocks
+
+ def repr_node_value_compact_as_content_blocks(self, node_dict, node_tag="node",
+ value_tag="value", constraint_tag="constraint") -> ContentBlockList:
+ """Returns a ContentBlockList with compact representation, including images.
+
+ Consecutive TextContent blocks are merged for efficiency.
+ Non-image values are truncated. Images break the text flow.
+ """
+ blocks = ContentBlockList()
+
+ for k, v in node_dict.items():
+ value_data = v[0]
+ constraint = v[1]
+
+ if "__code" not in k:
+ # Check if this is an image
+ image_content = value_to_image_content(value_data)
+
+ if image_content is not None:
+ # Image node: output XML structure, then image, then closing
+ type_name = "image"
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>" if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag else ""
+
+ xml_text = f"<{node_tag} name=\"{k}\" type=\"{type_name}\">\n<{value_tag}>\n"
+ blocks.append(xml_text)
+ blocks.append(image_content) # Image breaks the text flow
+
+ closing_text = f"\n</{value_tag}>\n{constraint_expr}</{node_tag}>\n\n" if constraint_expr else f"\n</{value_tag}>\n</{node_tag}>\n\n"
+ blocks.append(closing_text)
+ else:
+ # Non-image node: truncated text representation
+ node_value = self.truncate_expression(value_data, self.initial_var_char_limit)
+ if constraint is not None and node_tag == self.optimizer_prompt_symbol_set.variable_tag:
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+ )
+ else:
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"{type(value_data).__name__}\">\n<{value_tag}>\n{node_value}\n</{value_tag}>\n</{node_tag}>\n\n"
+ )
+ else:
+ # Code node (never an image)
+ constraint_expr = f"<{constraint_tag}>\n{constraint}\n</{constraint_tag}>"
+ signature = constraint.replace("The code should start with:\n", "")
+ func_body = value_data.replace(signature, "")
+ node_value = self.truncate_expression(func_body, self.initial_var_char_limit)
+ blocks.append(
+ f"<{node_tag} name=\"{k}\" type=\"code\">\n<{value_tag}>\n{signature}{node_value}\n</{value_tag}>\n{constraint_expr}\n</{node_tag}>\n\n"
+ )
+
+ return blocks
+
+ def summarize(self):
+ """Aggregate feedback from parameters into a structured summary.
+
+ Collects and organizes feedback from all trainable parameters into
+ a FunctionFeedback structure suitable for problem representation.
+
+ Returns
+ -------
+ FunctionFeedback
+ Structured feedback containing:
+ - variables: Trainable parameters with values and descriptions
+ - inputs: Non-trainable root nodes
+ - graph: Topologically sorted function calls
+ - others: Intermediate computation values
+ - output: Final output values
+ - documentation: Function documentation strings
+ - user_feedback: Aggregated user feedback
+
+ Notes
+ -----
+ The method performs several transformations:
+ 1. Aggregates feedback from all trainable parameters
+ 2. Converts the trace graph to FunctionFeedback structure
+ 3. Separates root nodes into variables (trainable) and inputs (non-trainable)
+ 4. Preserves the computation graph and intermediate values
+
+ Parameters without feedback (disconnected from output) are still
+ included in the summary but may not receive updates.
+ """
+ # Aggregate feedback from all the parameters
+ feedbacks = [
+ self.propagator.aggregate(node.feedback)
+ for node in self.parameters
+ if node.trainable
+ ]
+ summary = sum(feedbacks) # TraceGraph
+ # Construct variables and update others
+ # Some trainable nodes might not receive feedback, because they might not be connected to the output
+ summary = node_to_function_feedback(summary)
+ # Classify the root nodes into variables and others
+ # summary.variables = {p.py_name: p.data for p in self.parameters if p.trainable and p.py_name in summary.roots}
+
+ trainable_param_dict = {p.py_name: p for p in self.parameters if p.trainable}
+ summary.variables = {
+ py_name: data
+ for py_name, data in summary.roots.items()
+ if py_name in trainable_param_dict
+ }
+ summary.inputs = {
+ py_name: data
+ for py_name, data in summary.roots.items()
+ if py_name not in trainable_param_dict
+ } # non-variable roots
+
+ return summary
+
+ def construct_prompt(self, summary, mask=None, *args, **kwargs):
+ """Construct the system and user prompt.
+
+ The prompt for the optimizer agent is rather complex.
+ There are prompts that are automatically constructed through the Trace frontend (aka the bundle/node API).
+ However, we also allow the user to provide additional context to the optimizer agent.
+
+ We handle multimodal (MM) conversion implicitly for the automatic part (TraceGraph),
+ but we handle the user-provided context explicitly.
+
+ Args:
+ summary: The FunctionFeedback summary containing graph information.
+ mask: List of section titles to exclude from the problem instance.
+
+ Returns:
+ Tuple of (system_prompt: str, user_prompt: ContentBlockList)
+ - system_prompt is always a string
+ - user_prompt is a ContentBlockList for multimodal support
+ """
+ system_prompt = (
+ self.representation_prompt + self.output_format_prompt
+ ) # generic representation + output rule
+
+ problem_inst = self.problem_instance(summary, mask=mask)
+
+ # Build user prompt as ContentBlockList (auto-merges consecutive text)
+ user_content_blocks = ContentBlockList()
+
+ # Add example if included
+ if self.include_example:
+ example_text = self.example_problem_template.format(
+ example_problem=str(self.example_problem), # Example is always text
+ example_response=self.example_response,
+ )
+ user_content_blocks.append(example_text)
+
+ # Add problem instance template
+ # context is part of the problem instance
+ user_content_blocks.append(self.user_prompt_template.format(
+ problem_instance=problem_inst.to_content_blocks(),
+ ))
+
+ # Add final prompt
+ var_names = ", ".join(k for k in summary.variables.keys())
+ user_content_blocks.append(self.final_prompt.format(
+ names=var_names,
+ ))
+
+ return system_prompt, user_content_blocks
+
+ def problem_instance(self, summary: FunctionFeedback, mask=None):
+ """Create a ProblemInstance from the summary.
+
+ Args:
+ summary: The FunctionFeedback summary containing graph information.
+ mask: List of section titles to exclude from the problem instance.
+
+ Returns:
+ ProblemInstance with content block fields for multimodal support.
+ """
+ mask = mask or []
+
+ # Use content block representations for multimodal support
+ variables_content = (
+ self.repr_node_value_as_content_blocks(
+ summary.variables,
+ node_tag=self.optimizer_prompt_symbol_set.variable_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.variables_section_title not in mask
+ else ContentBlockList()
+ )
+
+ # we add a temporary check here to ensure no more than 1 parameter is an image
+ variable_stats = variables_content.count_blocks()
+ if 'ImageContent' in variable_stats:
+ assert variable_stats['ImageContent'] <= 1, "Currently we do not support more than one image parameter (LLMs can only generate one image per inference)"
+ self.output_contains_image = True
+
+ inputs_content = (
+ self.repr_node_value_compact_as_content_blocks(
+ summary.inputs,
+ node_tag=self.optimizer_prompt_symbol_set.node_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.inputs_section_title not in mask
+ else ContentBlockList()
+ )
+ outputs_content = (
+ self.repr_node_value_compact_as_content_blocks(
+ summary.output,
+ node_tag=self.optimizer_prompt_symbol_set.node_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.outputs_section_title not in mask
+ else ContentBlockList()
+ )
+ others_content = (
+ self.repr_node_value_compact_as_content_blocks(
+ summary.others,
+ node_tag=self.optimizer_prompt_symbol_set.node_tag,
+ value_tag=self.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=self.optimizer_prompt_symbol_set.constraint_tag
+ )
+ if self.optimizer_prompt_symbol_set.others_section_title not in mask
+ else ContentBlockList()
+ )
+
+ return ProblemInstance(
+ instruction=self.objective if self.optimizer_prompt_symbol_set.instruction_section_title not in mask else "",
+ code=(
+ "\n".join([v for k, v in sorted(summary.graph)])
+ if self.optimizer_prompt_symbol_set.code_section_title not in mask
+ else ""
+ ),
+ documentation=(
+ "\n".join([f"[{k}] {v}" for k, v in summary.documentation.items()])
+ if self.optimizer_prompt_symbol_set.documentation_section_title not in mask
+ else ""
+ ),
+ variables=variables_content,
+ inputs=inputs_content,
+ outputs=outputs_content,
+ others=others_content,
+ feedback=Content(summary.user_feedback) if self.optimizer_prompt_symbol_set.feedback_section_title not in mask else Content(""),
+ context=self.problem_context,
+ optimizer_prompt_symbol_set=self.optimizer_prompt_symbol_set
+ )
+
+ def _step(
+ self, verbose=False, mask=None, *args, **kwargs
+ ) -> Dict[ParameterNode, Any]:
+ """Execute one optimization step.
+
+ Args:
+ verbose: If True, print prompts and responses.
+ mask: List of section titles to exclude from the problem instance.
+
+ Returns:
+ Dictionary mapping parameters to their updated values.
+ """
+ assert isinstance(self.propagator, GraphPropagator)
+ summary = self.summarize()
+
+ system_prompt, user_content_blocks = self.construct_prompt(summary, mask=mask)
+
+ response = self.call_llm(
+ system_prompt=system_prompt,
+ user_prompt=user_content_blocks,
+ verbose=verbose,
+ max_tokens=self.max_tokens,
+ )
+
+ if "TERMINATE" in response.to_text():
+ return {}
+
+ suggestion = self.extract_llm_suggestion(response.to_text())
+ update_dict = self.construct_update_dict(suggestion['variables'])
+ # suggestion has two keys: reasoning, and variables
+
+ # for update_dict, we manually update the image according to the variable name
+ if response.get_images().has_images():
+ images = response.get_images()
+ assert len(images) == 1, "Currently we only allow at most one image parameter"
+ # find the variable name
+ image_param = [param for param in self.parameters if param.is_image][0]
+ update_dict[image_param] = images[0].as_image() # parameter as PIL Image
+
+ if self.log is not None:
+ # For logging, use text representation
+ log_user_prompt = str(self.problem_instance(summary))
+ self.log.append(
+ {
+ "system_prompt": system_prompt,
+ "user_prompt": log_user_prompt,
+ "response": response,
+ }
+ )
+ self.summary_log.append(
+ {"problem_instance": self.problem_instance(summary), "summary": summary}
+ )
+
+ return update_dict
+
+ def extract_llm_suggestion(self, response: str):
+ """Extract the suggestion from the response."""
+
+ suggestion = self.optimizer_prompt_symbol_set.output_response_extractor(response)
+
+ if len(suggestion) == 0:
+ if not self.ignore_extraction_error:
+ print("Cannot extract suggestion from LLM's response:")
+ print(response)
+
+ return suggestion
+
+ def call_llm(
+ self,
+ system_prompt: str,
+ user_prompt: ContentBlockList,
+ verbose: Union[bool, str] = False,
+ max_tokens: int = 4096,
+ ) -> AssistantTurn:
+ """Call the LLM with a prompt and return the response.
+
+ Args:
+ system_prompt: The system prompt (always a string).
+ user_prompt: The user prompt as ContentBlockList for multimodal content.
+ verbose: If True, print the prompt and response. If "output", only print response.
+ max_tokens: Maximum tokens in the response.
+
+ Returns:
+ assistant_turn: AssistantTurn object
+ """
+ if verbose not in (False, "output"):
+ # Print text portions, indicate if images present
+ text_parts = [block.text for block in user_prompt if isinstance(block, TextContent)]
+ has_images = any(isinstance(block, ImageContent) for block in user_prompt)
+ suffix = f" [+ {DEFAULT_IMAGE_PLACEHOLDER}]" if has_images else ""
+ print("Prompt\n", system_prompt + "".join(text_parts) + suffix)
+
+ # Update system prompt in conversation history
+ self.conversation_history.system_prompt = system_prompt
+
+ # Create user turn with content
+ user_turn = UserTurn(user_prompt)
+ self.conversation_history.add_user_turn(user_turn)
+
+ # Get messages with conversation length control (truncate from start)
+ # conversation_length = n historical rounds (user+assistant pairs) to keep
+ # The current user turn is automatically included by to_messages()
+ messages = self.conversation_history.to_messages(
+ n=self.conversation_length if self.conversation_length > 0 else -1,
+ truncate_strategy="from_start",
+ model_name=self.llm.model_name
+ )
+
+ response_format = {"type": "json_object"} if self.use_json_object_format else None
+
+ # Prepare common arguments
+ llm_kwargs = {"messages": messages, "max_tokens": max_tokens, "response_format": response_format}
+
+ # Add image generation tool only for non-Gemini models when output contains image
+ if self.output_contains_image and 'gemini' not in self.llm.model_name:
+ llm_kwargs["tools"] = [{"type": "image_generation"}]
+
+ assistant_turn = self.llm(**llm_kwargs)
+
+ if verbose:
+ print("LLM response:\n", assistant_turn)
+
+ self.conversation_history.add_assistant_turn(assistant_turn)
+
+ return assistant_turn
+
+ def save(self, path: str):
+ """Save the optimizer state to a file."""
+ with open(path, 'wb') as f:
+ pickle.dump({
+ "truncate_expression": self.truncate_expression,
+ "use_json_object_format": self.use_json_object_format,
+ "ignore_extraction_error": self.ignore_extraction_error,
+ "objective": self.objective,
+ "initial_var_char_limit": self.initial_var_char_limit,
+ "optimizer_prompt_symbol_set": self.optimizer_prompt_symbol_set,
+ "include_example": self.include_example,
+ "max_tokens": self.max_tokens,
+ "memory": self.memory,
+ "conversation_history": self.conversation_history,
+ "conversation_length": self.conversation_length,
+ "default_prompt_symbols": self.default_prompt_symbols,
+ "prompt_symbols": self.prompt_symbols,
+ "representation_prompt": self.representation_prompt,
+ "output_format_prompt": self.output_format_prompt,
+ }, f)
+
+ def load(self, path: str):
+ """Load the optimizer state from a file."""
+ with open(path, 'rb') as f:
+ state = pickle.load(f)
+ self.truncate_expression = state["truncate_expression"]
+ self.use_json_object_format = state["use_json_object_format"]
+ self.ignore_extraction_error = state["ignore_extraction_error"]
+ self.objective = state["objective"]
+ self.initial_var_char_limit = state["initial_var_char_limit"]
+ self.optimizer_prompt_symbol_set = state["optimizer_prompt_symbol_set"]
+ self.include_example = state["include_example"]
+ self.max_tokens = state["max_tokens"]
+ self.memory = state["memory"]
+ self.conversation_history = state.get("conversation_history", ConversationHistory())
+ self.conversation_length = state.get("conversation_length", 0)
+ self.default_prompt_symbols = state["default_prompt_symbols"]
+ self.prompt_symbols = state["prompt_symbols"]
+ self.representation_prompt = state["representation_prompt"]
+ self.output_format_prompt = state["output_format_prompt"]
diff --git a/opto/optimizers/utils.py b/opto/optimizers/utils.py
index 13a5ad01..401b996e 100644
--- a/opto/optimizers/utils.py
+++ b/opto/optimizers/utils.py
@@ -1,4 +1,14 @@
-from typing import Dict, Any
+import base64
+import mimetypes
+import io
+from typing import Dict, Any, Union, Optional, List
+try:
+ import numpy as np
+ NUMPY_AVAILABLE = True
+except ImportError:
+ NUMPY_AVAILABLE = False
+
+import opto.trace as trace
def print_color(message, color=None, logger=None):
colors = {
@@ -134,3 +144,224 @@ def extract_xml_like_data(text: str, reasoning_tag: str = "reasoning",
if var_name: # Only require name to be non-empty, value can be empty
result['variables'][var_name] = var_value
return result
+
+
+class MultiModalPayload:
+ """
+ A payload for multimodal content, particularly images.
+
+ Supports three types of image inputs:
+ 1. URL (string starting with 'http://' or 'https://')
+ 2. Local file path (string path to image file)
+ 3. Numpy array (RGB image array)
+ """
+ image_data: Optional[str] = None # Can be URL or base64 data URL
+
+ def set_image(self, image: Union[str, Any], format: str = "PNG") -> None:
+ """
+ Set the image from various input formats.
+
+ Args:
+ image: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+ """
+ if isinstance(image, str):
+ # Check if it's a URL
+ if image.startswith('http://') or image.startswith('https://'):
+ # Direct URL - litellm supports this
+ self.image_data = image
+ else:
+ # Assume it's a local file path
+ self.image_data = encode_image_to_base64(image)
+ else:
+ # Assume it's a numpy array or array-like object
+ self.image_data = encode_numpy_to_base64(image, format=format)
+
+ def get_content_block(self) -> Optional[Dict[str, Any]]:
+ """
+ Get the content block for the image in litellm format.
+
+ Returns:
+ Dict with format: {"type": "image_url", "image_url": {"url": ...}}
+ or None if no image data is set
+ """
+ if self.image_data is None:
+ return None
+
+ return {
+ "type": "image_url",
+ "image_url": {
+ "url": self.image_data
+ }
+ }
+
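+
+# Illustrative usage sketch (added for exposition, not part of the original patch):
+# MultiModalPayload normalizes an image source into either a raw URL or a base64
+# data URL, then emits a litellm-style "image_url" content block. The URL below is
+# hypothetical.
+def _multimodal_payload_example() -> Optional[Dict[str, Any]]:
+    payload = MultiModalPayload()
+    payload.set_image("https://example.com/photo.jpg")  # URLs are passed through unchanged
+    # Expected: {"type": "image_url", "image_url": {"url": "https://example.com/photo.jpg"}}
+    return payload.get_content_block()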
+def encode_image_to_base64(path: str) -> str:
+ """Encode a local image file to base64 data URL."""
+ # Read binary
+ with open(path, "rb") as f:
+ image_bytes = f.read()
+ # Guess MIME type from file extension
+ mime_type, _ = mimetypes.guess_type(path)
+ if mime_type is None:
+ # fallback
+ mime_type = "image/jpeg"
+ b64 = base64.b64encode(image_bytes).decode("utf-8")
+ data_url = f"data:{mime_type};base64,{b64}"
+ return data_url
+
+
+def encode_numpy_to_base64(array, format: str = "PNG") -> str:
+ """
+ Encode a numpy array to base64 data URL.
+
+ Args:
+ array: numpy array representing an image (H, W, C) with values in [0, 255] or [0, 1]
+ format: Image format (PNG, JPEG, etc.)
+
+ Returns:
+ Base64 encoded data URL string
+ """
+ if not NUMPY_AVAILABLE:
+ raise ImportError("numpy is required to encode numpy arrays. Install it with: pip install numpy")
+
+ try:
+ from PIL import Image
+ except ImportError:
+ raise ImportError("Pillow is required to encode numpy arrays. Install it with: pip install Pillow")
+
+ # Convert to numpy array if not already
+ if not isinstance(array, np.ndarray):
+ array = np.array(array)
+
+ # Normalize to [0, 255] if needed
+ if array.dtype == np.float32 or array.dtype == np.float64:
+ if array.max() <= 1.0:
+ array = (array * 255).astype(np.uint8)
+ else:
+ array = array.astype(np.uint8)
+ elif array.dtype != np.uint8:
+ array = array.astype(np.uint8)
+
+ # Convert to PIL Image
+ image = Image.fromarray(array)
+
+ # Save to bytes buffer
+ buffer = io.BytesIO()
+ image.save(buffer, format=format.upper())
+ buffer.seek(0)
+
+ # Encode to base64
+ image_bytes = buffer.getvalue()
+ b64 = base64.b64encode(image_bytes).decode("utf-8")
+
+ # Determine MIME type
+ mime_type = f"image/{format.lower()}"
+ data_url = f"data:{mime_type};base64,{b64}"
+
+ return data_url
+
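+
+# Illustrative usage sketch (added for exposition, not part of the original patch):
+# float arrays in [0, 1] are rescaled to uint8 before encoding, so both arrays
+# below yield a "data:image/png;base64,..." URL (requires numpy and Pillow).
+def _encode_numpy_example() -> str:
+    import numpy as np
+    float_img = np.random.rand(32, 32, 3)           # values in [0, 1]
+    uint8_img = (float_img * 255).astype(np.uint8)  # values in [0, 255]
+    url_a = encode_numpy_to_base64(float_img, format="PNG")
+    url_b = encode_numpy_to_base64(uint8_img, format="PNG")
+    assert url_a.startswith("data:image/png;base64,")
+    assert url_b.startswith("data:image/png;base64,")
+    return url_a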
+class ChatHistory:
+ def __init__(self, max_turn=50, auto_summary=False):
+ """Initialize chat history for multi-turn conversation.
+
+ Args:
+ max_turn: Maximum number of conversation turns to keep in history.
+
+ auto_summary: Whether to automatically summarize old messages
+ auto_summary: Whether to automatically summarize old messages.
+ """
+ self.messages: List[Dict[str, Any]] = []
+ self.max_len = max_turn * 2
+ self.auto_summary = auto_summary
+
+ def __len__(self):
+ return len(self.messages)
+
+ def add(self, content: Union[trace.Node, str], role):
+ """Add a message to history with role validation.
+
+ Args:
+ content: The content of the message
+ role: The role of the message ("user" or "assistant")
+ """
+ if role not in ["user", "assistant"]:
+ raise ValueError(f"Invalid role '{role}'. Must be 'user' or 'assistant'.")
+
+ # Check for alternating user/assistant pattern
+ if len(self.messages) > 0:
+ last_msg = self.messages[-1]
+ if last_msg["role"] == role:
+ print(f"Warning: Adding consecutive {role} messages. Consider alternating user/assistant messages.")
+
+ self.messages.append({"role": role, "content": content})
+ self._trim_history()
+
+ def append(self, message: Dict[str, Any]):
+ """Append a message directly to history."""
+ if "role" not in message or "content" not in message:
+ raise ValueError("Message must have 'role' and 'content' fields.")
+ self.add(message["content"], message["role"])
+
+ def __iter__(self):
+ return iter(self.messages)
+
+ def get_messages(self) -> List[Dict[str, str]]:
+ messages = []
+ for message in self.messages:
+ if isinstance(message['content'], trace.Node):
+ messages.append({"role": message["role"], "content": message["content"].data})
+ else:
+ messages.append(message)
+ return messages
+
+ def get_messages_as_node(self, llm_name="") -> List[trace.Node]:
+ node_list = []
+ for message in self.messages:
+ # If user query is a node and has other computation attached, we can't rename it
+ if isinstance(message['content'], trace.Node):
+ node_list.append(message['content'])
+ else:
+ role = message["role"]
+ content = message["content"]
+ name = f"{llm_name}_{role}" if llm_name else f"{role}"
+ if role == 'user':
+ name += "_query"
+ elif role == 'assistant':
+ name += "_response"
+ node_list.append(trace.node(content, name=name))
+
+ return node_list
+
+ def _trim_history(self):
+ """Trim history to max_len while preserving first user message."""
+ if len(self.messages) <= self.max_len:
+ return
+
+ # Find first user message index
+ first_user_idx = None
+ for i, msg in enumerate(self.messages):
+ if msg["role"] == "user":
+ first_user_idx = i
+ break
+
+ # Keep first user message
+ protected_messages = []
+ if first_user_idx is not None:
+ first_user_msg = self.messages[first_user_idx]
+ protected_messages.append(first_user_msg)
+
+ # Calculate how many recent messages we can keep
+ remaining_slots = self.max_len - len(protected_messages)
+ if remaining_slots > 0:
+ # Get recent messages
+ recent_messages = self.messages[-remaining_slots:]
+ # Avoid duplicating first user message
+ if first_user_idx is not None:
+ first_user_msg = self.messages[first_user_idx]
+ recent_messages = [msg for msg in recent_messages if msg != first_user_msg]
+
+ self.messages = protected_messages + recent_messages
+ else:
+ self.messages = protected_messages
\ No newline at end of file
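+
+# Illustrative usage sketch (added for exposition, not part of the original patch):
+# ChatHistory keeps at most max_turn user/assistant rounds; when trimming, the
+# first user message is preserved and older turns are dropped.
+def _chat_history_example():
+    history = ChatHistory(max_turn=2)
+    history.add("What is 2 + 2?", role="user")
+    history.add("4", role="assistant")
+    history.add("And 3 + 3?", role="user")
+    history.add("6", role="assistant")
+    history.add("One more: 5 + 5?", role="user")  # triggers trimming; the first user turn is kept
+    return history.get_messages()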
diff --git a/opto/trace/nodes.py b/opto/trace/nodes.py
index 0f4c85e5..0e721706 100644
--- a/opto/trace/nodes.py
+++ b/opto/trace/nodes.py
@@ -6,6 +6,10 @@
import re
import heapq
import contextvars
+import requests
+from PIL import Image
+from io import BytesIO
+from urllib.parse import urlparse
def node(data, name=None, trainable=False, description=None):
@@ -275,16 +279,129 @@ def __len__(self):
GRAPH = Graph() # This is a global registry of all the nodes.
-# USED_NODES = (
-# list()
-# ) # A stack of sets. This is a global registry to track which nodes are read.
-
USED_NODES = contextvars.ContextVar('USED_NODES', default=list())
# A stack of sets. This is a global registry to track which nodes are read.
T = TypeVar("T")
+def verify_data_is_image_url(url: str, timeout: float = 1.0) -> bool:
+ """Verify if the node's data is an image URL by checking Content-Type via HEAD request.
+
+ This method performs an actual network request to verify that a URL points to an image.
+ It should be used when you need definitive verification beyond pattern matching.
+
+ The method should be called before we convert image to base64 string (e.g., optimization step)
+
+ Args:
+ url: The URL string to verify.
+ timeout: Maximum time in seconds to wait for the request. Default is 1.0.
+
+ Returns:
+ bool: True if the URL returns an image Content-Type, False otherwise.
+
+ Notes:
+ - This method only applies to http/https URLs
+ - Returns False for non-URL data or if the request fails
+ - Uses HEAD request to avoid downloading the full image
+ - Requires network connectivity
+
+ Example:
+ >>> result = verify_data_is_image_url("https://example.com/photo.jpg")
+ >>> result # Network verification: True/False
+ """
+
+ if not isinstance(url, str):
+ return False
+
+ try:
+ parsed = urlparse(url)
+
+ # Only verify http/https URLs
+ if parsed.scheme not in ('http', 'https'):
+ return False
+
+ # Perform HEAD request to check Content-Type
+ try:
+ response = requests.head(url, timeout=timeout, allow_redirects=True)
+ content_type = response.headers.get('content-type', '').lower()
+ return content_type.startswith('image/')
+ except Exception:
+ # Network errors, timeouts, invalid URLs, etc.
+ # (requests is imported at module level, so an ImportError cannot occur here)
+ return False
+
+ except (ValueError, AttributeError):
+ return False
+
+
+def is_image(data) -> bool:
+ """Check whether the given data represents an image.
+ This is a shared type check used by Node.is_image and the optimizers.
+
+ Returns:
+ bool: True if the node is an image node, False otherwise.
+
+ Notes:
+ Supports four types of image data:
+ 1. Base64 encoded string (data URL format)
+ 2. PIL Image object
+ 3. Raw image bytes
+ 4. URL string pointing to an image (pattern-based check, no network request)
+
+ For URLs, this performs a fast pattern-based check only. For verification
+ with a network request, use verify_data_is_image_url().
+
+ If you have a numpy array, convert it to PIL Image first:
+ from PIL import Image
+ pil_image = Image.fromarray(numpy_array)
+ """
+ """
+ # PIL Image object (PIL.Image is imported at module level)
+ if isinstance(data, Image.Image):
+ return True
+
+ # Check if it's a base64 data URL string
+ if isinstance(data, str) and data.startswith('data:image/'):
+ return True
+
+ # Check if it's raw image bytes
+ if isinstance(data, bytes):
+ try:
+ Image.open(BytesIO(data))
+ return True
+ except Exception:
+ pass
+
+ # Check if it's an image URL (pattern-based, no network request)
+ if isinstance(data, str):
+ try:
+ parsed = urlparse(data)
+ if parsed.scheme in ('http', 'https'):
+ path = parsed.path.lower()
+ # Common image extensions
+ image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp',
+ '.svg', '.ico', '.tiff', '.tif', '.heic', '.heif')
+ if any(path.endswith(ext) for ext in image_extensions):
+ return True
+ except (ValueError, AttributeError):
+ pass
+
+ # Check if it's a specialized container class
+ # We don't use isinstance check because we can't import other files into nodes.py, this file should have no
+ # external dependencies on other files.
+ try:
+ if 'ImageContent' in data.__class__.__name__:
+ return True
+ except AttributeError:
+ pass
+
+ return False
+
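+
+# Illustrative sketch (added for exposition, not part of the original patch):
+# is_image() accepts PIL Images, base64 data URLs, raw image bytes, and URL
+# strings with an image extension; other values fall through to False.
+def _is_image_examples() -> None:
+    assert is_image(Image.new("RGB", (4, 4)))               # PIL Image object
+    assert is_image("data:image/png;base64,iVBORw0KGgo=")   # base64 data URL
+    assert is_image("https://example.com/photo.jpg")        # pattern-based URL check only
+    assert not is_image("just some text")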
class AbstractNode(Generic[T]):
"""AbstractNode represents an abstract data node in a directed graph.
@@ -361,6 +478,31 @@ def data(self):
if len(current_used_nodes) > 0 and GRAPH.TRACE: # We're within trace_nodes context.
current_used_nodes[-1].add(self)
return self.__getattribute__("_data")
+
+ @property
+ def is_image(self) -> bool:
+ """Check if the node is an image node.
+ This is a shared type check
+
+ Returns:
+ bool: True if the node is an image node, False otherwise.
+
+ Notes:
+ Supports five types of image data:
+ 1. Base64 encoded string (data URL format)
+ 2. PIL Image object
+ 3. Raw image bytes
+ 4. URL string pointing to an image (pattern-based check, no network request)
+ 5. An ImageContent (customized data container)
+
+ For URLs, this performs a fast pattern-based check only. For verification
+ with a network request, use verify_data_is_image_url().
+
+ If you have a numpy array, convert it to PIL Image first:
+ from PIL import Image
+ pil_image = Image.fromarray(numpy_array)
+ """
+ return is_image(self._data)
@property
def parents(self):
diff --git a/opto/utils/backbone.py b/opto/utils/backbone.py
new file mode 100644
index 00000000..95885773
--- /dev/null
+++ b/opto/utils/backbone.py
@@ -0,0 +1,2576 @@
+"""
+Flexible conversation manager for multi-turn LLM conversations.
+Uses LiteLLM unified format for all providers (OpenAI, Anthropic, Google, etc.).
+
+The class here follows this philosophy:
+1. Every class is a data class (pickable/jsonable)
+2. Most classes have `autocast` feature that takes in some data form and tries to automatically determine how to parse them into the right structured format.
+
+In order to support three types of data class construction methods:
+1. Direct construction: `text = TextContent("Hello, world!")`
+2. Build from a value: `text = TextContent.build("Hello, world!")`
+3. Data class construction: `text = TextContent(text="Hello, world!")`
+
+We use this approach:
+`autocast()` method is the main automatic conversion method that determines how to parse the data.
+It will return a sequence of values that map to the fields of the data class.
+
+In `__init__()` method, if `kwargs` are provided, we follow path 3 to construct the data class.
+If not, we do autocast to construct the data class (path 1)
+
+Alternatively, people can call `.build()` to construct the class.
+"""
+from typing import List, Dict, Any, Optional, Literal, Union, Iterable, Tuple, TypeVar, Generic
+from dataclasses import dataclass, field
+import json
+import base64
+from pathlib import Path
+import warnings
+
+from PIL import Image
+import io
+
+
+# Default placeholder for images that cannot be rendered as text
+DEFAULT_IMAGE_PLACEHOLDER = "\n[IMAGE]\n"
+
+@dataclass
+class ContentBlock:
+ """Abstract base class for all content blocks."""
+
+ def __init__(self, **kwargs):
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert the content block to a dictionary representation.
+
+ Returns:
+ Dict[str, Any]: Dictionary representation of the content block
+ """
+ raise NotImplementedError("Subclasses must implement this method")
+
+ @classmethod
+ def build(cls, value: Any, **kwargs) -> 'ContentBlock':
+ """Build a content block from a value with auto-detection.
+
+ Args:
+ value: The value to build from (type depends on subclass)
+ **kwargs: Additional keyword arguments for building
+
+ Returns:
+ ContentBlock: The built content block
+ """
+ raise NotImplementedError("Subclasses must implement this method")
+
+ def is_empty(self) -> bool:
+ """Check if the content block is empty (has no meaningful content).
+
+ Returns:
+ bool: True if the block is empty, False otherwise
+ """
+ raise NotImplementedError("Subclasses must implement this method")
+
+class ContentBlockList(list):
+ """List of content blocks with automatic type conversion.
+
+ Supports automatic conversion from:
+ - str -> [TextContent(text=str)]
+ - TextContent -> [TextContent]
+ - ImageContent -> [ImageContent]
+ - List[ContentBlock] -> ContentBlockList
+ - None/empty -> []
+
+ Note: This list can contain mixed types of ContentBlocks (text, images, PDFs, etc.).
+ Type annotations like ContentBlockList[TextContent] are used for documentation
+ purposes in specialized methods but don't restrict the actual content.
+ """
+
+ def __init__(self, content: Union[str, 'ContentBlock', List['ContentBlock'], None] = None):
+ """Initialize ContentBlockList with automatic type conversion.
+
+ Args:
+ content: Can be a string (converted to TextContent), a single ContentBlock,
+ a list of ContentBlocks, or None (empty list).
+ """
+ super().__init__()
+ if content is not None:
+ self.extend(self._normalize(content))
+
+ @staticmethod
+ def _normalize(content: Union[str, 'ContentBlock', List['ContentBlock'], None]) -> List['ContentBlock']:
+ """Normalize content to a list of ContentBlocks."""
+ if content is None:
+ return []
+ if isinstance(content, str):
+ return [TextContent(text=content)] if content else []
+ if isinstance(content, list):
+ return content
+ # Single ContentBlock
+ return [content]
+
+ @classmethod
+ def ensure(cls, content: Union[str, 'ContentBlock', List['ContentBlock'], None]) -> 'ContentBlockList':
+ """Ensure content is a ContentBlockList with automatic conversion.
+
+ Args:
+ content: String, ContentBlock, list of ContentBlocks, or None
+
+ Returns:
+ ContentBlockList with the content
+ """
+ if isinstance(content, cls):
+ return content
+ return cls(content)
+
+ def __getitem__(self, key: Union[int, slice]) -> Union['ContentBlock', 'ContentBlockList']:
+ """Support indexing and slicing.
+
+ Args:
+ key: Integer index or slice object
+
+ Returns:
+ ContentBlock for single index, ContentBlockList for slices
+ """
+ if isinstance(key, slice):
+ # Return a new ContentBlockList with the sliced items
+ return ContentBlockList(list.__getitem__(self, key))
+ else:
+ # Return the single item for integer index
+ return list.__getitem__(self, key)
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {"type": "list", "blocks": [b.to_dict() for b in self]}
+
+ def append(self, item: Union[str, 'ContentBlock', 'ContentBlockList']) -> 'ContentBlockList':
+ """Append a string or ContentBlock, merging consecutive text.
+
+ Args:
+ item: String (auto-converted to TextContent) or ContentBlock.
+ If the last item is TextContent and item is also text,
+ they are merged into a single TextContent.
+ """
+ if isinstance(item, str):
+ # String: merge with last TextContent or create new one (with a separation mark " ")
+ if self and isinstance(self[-1], TextContent):
+ self[-1] = TextContent(text=self[-1].text + " " + item)
+ else:
+ super().append(TextContent(text=item))
+ elif isinstance(item, TextContent):
+ # TextContent: merge with last TextContent or add (with a separation mark " ")
+ if self and isinstance(self[-1], TextContent):
+ self[-1] = TextContent(text=self[-1].text + " " + item.text)
+ else:
+ super().append(item)
+ elif isinstance(item, ContentBlockList):
+ # we silently call extend here
+ super().extend(item)
+ else:
+ # Other ContentBlock types (ImageContent, etc.): just add
+ super().append(item)
+ return self
+
+ def extend(self, blocks: Union[str, 'ContentBlock', List[
+ 'ContentBlock'], 'ContentBlockList', None]) -> 'ContentBlockList':
+ """Extend with blocks, merging consecutive TextContent.
+
+ Args:
+ blocks: String, ContentBlock, list of ContentBlocks, or None.
+ Strings are auto-converted. Consecutive text is merged.
+ """
+ normalized = self._normalize(blocks)
+ for block in normalized:
+ self.append(block)
+ return self
+
+ def __add__(self, other) -> 'ContentBlockList':
+ """Concatenate content block lists with other content block lists or strings.
+
+ Args:
+ other: ContentBlockList, List[ContentBlock], or string to concatenate
+ """
+ if isinstance(other, (ContentBlockList, list)):
+ result = ContentBlockList(list(self))
+ result.extend(other)
+ return result
+ elif isinstance(other, str):
+ result = ContentBlockList(list(self))
+ result.append(TextContent(text=other))
+ return result
+ else:
+ return NotImplemented
+
+ def __radd__(self, other) -> 'ContentBlockList':
+ """Right-side concatenation (when string is on the left).
+ """
+ if isinstance(other, str):
+ result = ContentBlockList([TextContent(text=other)])
+ result.extend(self)
+ return result
+ else:
+ return NotImplemented
+
+ def is_empty(self) -> bool:
+ """Check if the content block list is empty."""
+ if len(self) == 0:
+ return True
+ return all(block.is_empty() for block in self)
+
+ def has_images(self) -> bool:
+ """Check if the content block list contains any images."""
+ return any(isinstance(block, ImageContent) for block in self)
+
+ def has_text(self) -> bool:
+ """Check if the content block list contains any text."""
+ return any(isinstance(block, TextContent) for block in self)
+
+ # --- Multimodal utilities ---
+ @staticmethod
+ def blocks_to_text(blocks: Iterable['ContentBlock'],
+ image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str:
+ """Convert any iterable of ContentBlocks to text representation.
+
+ This is a utility that can be used by composite classes containing
+ multiple ContentBlockLists. Handles nested ContentBlockLists recursively.
+
+ Args:
+ blocks: Iterable of ContentBlock objects (may include nested ContentBlockLists)
+ image_placeholder: Placeholder string for images (default: "[IMAGE]")
+
+ Returns:
+ str: Text representation where images are replaced with placeholder.
+ """
+ text_parts = []
+ for block in blocks:
+ if isinstance(block, TextContent):
+ text_parts.append(block.text)
+ elif isinstance(block, ImageContent):
+ text_parts.append(image_placeholder)
+ elif isinstance(block, ContentBlockList):
+ # Recursively handle nested ContentBlockList
+ nested_text = ContentBlockList.blocks_to_text(block, image_placeholder)
+ if nested_text:
+ text_parts.append(nested_text)
+ return " ".join(text_parts)
+
+ def to_text(self, image_placeholder: str = DEFAULT_IMAGE_PLACEHOLDER) -> str:
+ """Convert this list to text representation.
+
+ Args:
+ image_placeholder: Placeholder string for images (default: "[IMAGE]")
+
+ Returns:
+ str: Text representation where images are replaced with placeholder.
+ """
+ return self.blocks_to_text(self, image_placeholder)
+
+ def __bool__(self) -> bool:
+ """Check if there's any actual content (not just empty text).
+
+ Returns:
+ bool: True if content is non-empty (has images or non-whitespace text).
+ """
+ for block in self:
+ if isinstance(block, ImageContent):
+ return True
+ if isinstance(block, TextContent) and block.text.strip():
+ return True
+ return False
+
+ def __repr__(self) -> str:
+ """Return text-only representation for logging.
+
+ Images are represented as "[IMAGE]" placeholder.
+
+ Returns:
+ str: Text representation of the content.
+ """
+ return self.to_text()
+
+ def to_content_blocks(self) -> 'ContentBlockList':
+ """Return self (for interface compatibility with composites).
+
+ This allows ContentBlockList and classes that inherit from it
+ to be used interchangeably with composite classes that have
+ a to_content_blocks() method.
+
+ Returns:
+ ContentBlockList: Self reference.
+ """
+ return self
+
+ def count_blocks(self) -> Dict[str, int]:
+ """Count blocks by type, including nested structures.
+
+ Recursively traverses the content block structure and counts
+ each block type by its class name.
+
+ Returns:
+ Dict[str, int]: Dictionary mapping block class names to counts.
+ Example: {"TextContent": 3, "ImageContent": 1}
+ """
+ counts: Dict[str, int] = {}
+
+ def _count_recursive(item: Any) -> None:
+ """Recursively count blocks in nested structures."""
+ if isinstance(item, ContentBlock):
+ # Count this block
+ class_name = item.__class__.__name__
+ counts[class_name] = counts.get(class_name, 0) + 1
+
+ # Check if this block has any attributes that might contain nested blocks
+ if hasattr(item, '__dict__'):
+ for attr_value in item.__dict__.values():
+ if isinstance(attr_value, (ContentBlockList, list)):
+ for nested_item in attr_value:
+ _count_recursive(nested_item)
+ elif isinstance(attr_value, ContentBlock):
+ _count_recursive(attr_value)
+ elif isinstance(item, (ContentBlockList, list)):
+ # Recursively count items in lists
+ for nested_item in item:
+ _count_recursive(nested_item)
+
+ # Count all blocks in this list
+ for block in self:
+ _count_recursive(block)
+
+ return counts
+
+ def to_litellm_format(self, role: Optional[str] = None) -> List[Dict[str, Any]]:
+ """Convert content blocks to LiteLLM Response API format.
+
+ Args:
+ role: Optional role context ("user" or "assistant") to determine the correct type.
+ If not provided, defaults to "user" for backward compatibility.
+
+ Returns:
+ List[Dict[str, Any]]: List of content block dictionaries in Response API format
+ """
+ if role is None:
+ role = "user"
+
+ content = []
+ for block in self:
+ # Skip empty content blocks
+ if block.is_empty():
+ continue
+
+ # Handle different content block types
+ if isinstance(block, TextContent):
+ # Pass role context to TextContent for proper type selection
+ content.append(block.to_litellm_format(role=role))
+ elif isinstance(block, ImageContent):
+ # ImageContent always uses input_image for user messages
+ content.append(block.to_litellm_format())
+ elif isinstance(block, PDFContent):
+ # LiteLLM supports PDFs for providers like Claude
+ # Use input_file type with PDF data URL for Response API
+ if block.pdf_url:
+ warnings.warn("PDF URLs may not be supported by all providers through LiteLLM")
+ content.append({"type": "input_text", "text": f"[PDF: {block.pdf_url}]"})
+ else:
+ # Encode as data URL for providers that support PDFs
+ data_url = f"data:application/pdf;base64,{block.pdf_data}"
+ content.append({"type": "input_file", "input_file": {"url": data_url}})
+ elif isinstance(block, FileContent):
+ # For file content, add as text or data URL based on type
+ if block.is_binary:
+ data_url = f"data:{block.mime_type};base64,{block.file_data}"
+ content.append({"type": "input_file", "input_file": {"url": data_url}})
+ else:
+ content.append({"type": "input_text", "text": f"[File: {block.filename}]\n{block.file_data}"})
+ elif hasattr(block, 'to_litellm_format'):
+ # Fallback: use block's own to_litellm_format method
+ content.append(block.to_litellm_format())
+ else:
+ # Last resort: use to_dict()
+ content.append(block.to_dict())
+
+ return content
+
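+
+# Illustrative sketch (added for exposition, not part of the original patch):
+# consecutive text appended to a ContentBlockList is merged into one TextContent,
+# while an image block interrupts the text flow and starts a new one. Assumes
+# ImageContent.build() accepts an image URL without fetching it.
+def _content_block_list_example() -> str:
+    blocks = ContentBlockList()
+    blocks.append("Hello,")
+    blocks.append("world")  # merged into the previous TextContent -> "Hello, world"
+    blocks.append(ImageContent.build("https://example.com/a.png"))
+    blocks.append("after the image")  # new TextContent after the image block
+    # count_blocks() -> {"TextContent": 2, "ImageContent": 1}
+    return blocks.to_text()  # images are rendered as the [IMAGE] placeholder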
+
+class Content(ContentBlockList):
+ """Semantic wrapper providing multi-modal content for the optimizer agent.
+
+ The goal is to provide a flexible interface for user to add mixed text and image content to the optimizer agent.
+
+ Inherits all ContentBlockList functionality (append, extend, has_images,
+ to_text, __bool__, __repr__, etc.) with a flexible constructor that
+ supports multiple input patterns.
+
+ Primary use cases:
+ - Building problem context for the optimizer agent
+ - Providing user feedback
+
+ Creation patterns:
+ - Variadic: Content("text", image, "more text")
+ - Template: Content("See [IMAGE] here", images=[img])
+ - Empty: Content()
+
+ Examples:
+ # Text-only content
+ ctx = Content("Important background information")
+
+ # Image content
+ ctx = Content(ImageContent.build("diagram.png"))
+
+ # Mixed content (variadic mode)
+ ctx = Content(
+ "Here's the diagram:",
+ "diagram.png", # auto-detected as image file
+ "And the analysis."
+ )
+
+ # Template mode with placeholders
+ ctx = Content(
+ "Compare [IMAGE] with [IMAGE]:",
+ images=[img1, img2]
+ )
+
+ # Manual building
+ ctx = Content()
+ ctx.append("Here's the relevant diagram:")
+ ctx.append(ImageContent.build("diagram.png"))
+ """
+
+ def __init__(
+ self,
+ *args,
+ images: Optional[List[Any]] = None,
+ format: str = "PNG"
+ ):
+ """Initialize a Content from various input patterns.
+
+ Supports two usage modes:
+
+ **Mode 1: Variadic (images=None)**
+ Pass any mix of text and image sources as arguments.
+ Strings are auto-detected as text or image paths/URLs.
+
+ Content("Hello", some_image, "World")
+ Content("Check this:", "path/to/image.png")
+
+ **Mode 2: Template (images provided)**
+ Pass a template string with [IMAGE] placeholders and a list of images.
+
+ Content(
+ "Compare [IMAGE] with [IMAGE]",
+ images=[img1, img2]
+ )
+
+ Args:
+ *args: Variable arguments - text strings and/or image sources (Mode 1),
+ or a single template string (Mode 2)
+ images: Optional list of images for template mode. When provided,
+ expects exactly one template string in args.
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+
+ Raises:
+ ValueError: In template mode, if placeholder count doesn't match image count,
+ or if args is not a single template string.
+ """
+ # Initialize empty list first
+ super().__init__()
+
+ # Build content based on mode
+ if images is not None:
+ if len(args) != 1 or not isinstance(args[0], str):
+ raise ValueError(
+ "Template mode requires exactly one template string as the first argument. "
+ f"Got {len(args)} arguments."
+ )
+ self._build_from_template(args[0], images=images, format=format)
+ elif args:
+ self._build_from_variadic(*args)
+
+ def _build_from_variadic(self, *args) -> None:
+ """Populate self from variadic arguments.
+
+ Each argument is either text (str) or an image source.
+ Strings are auto-detected: if they look like image paths/URLs,
+ they're converted to ImageContent; otherwise treated as text.
+
+ Args:
+            *args: Mixed text and image sources, in any order
+            format: Image format used when an argument is converted to an image (PNG, JPEG, etc.)
+ """
+ for arg in args:
+            # For future expansion, we can check whether the string is any special content type
+            # by calling is_empty() on special ContentBlock subclasses
+            image_content = ImageContent.build(arg, format=format)
+ if not image_content.is_empty():
+ self.append(image_content)
+ else:
+ self.append(arg)
+
+ def _build_from_template(
+ self,
+ template: str,
+ images: List[Any],
+ format: str = "PNG"
+ ) -> None:
+ """Populate self from template with [IMAGE] placeholders.
+
+ The template string contains [IMAGE] placeholders that are replaced
+ by images from the images list in order.
+
+ Args:
+ template: Template string containing [IMAGE] placeholders
+ images: List of image sources to insert at placeholders
+ format: Image format for numpy arrays
+
+ Raises:
+ ValueError: If placeholder count doesn't match the number of images.
+ """
+ placeholder = DEFAULT_IMAGE_PLACEHOLDER
+
+ # Count placeholders
+ placeholder_count = template.count(placeholder)
+ if placeholder_count != len(images):
+ raise ValueError(
+ f"Number of {placeholder} placeholders ({placeholder_count}) "
+ f"does not match number of images ({len(images)})"
+ )
+
+ # Split template by placeholder and interleave with images
+ parts = template.split(placeholder)
+
+ for i, part in enumerate(parts):
+ if part: # Add text part if non-empty
+ self.append(part)
+
+ # Add image after each part except the last
+ if i < len(images):
+ image_content = ImageContent.build(images[i], format=format)
+                if image_content is None or image_content.is_empty():
+ raise ValueError(
+ f"Could not convert image at index {i} to ImageContent: {type(images[i])}"
+ )
+ self.append(image_content)
+
+
+class PromptTemplate:
+ """Template for building ContentBlockLists with {placeholder} support.
+
+ Similar to str.format(), but supports multimodal content (ContentBlockList).
+
+ Return type depends on values:
+ - All strings → returns str (backward compatible)
+ - Any multimodal content → returns ContentBlockList
+
+ Features:
+ - Multiple placeholders: {a}, {b}, {c}
+ - Escaping: {{ and }} for literal braces
+ - Missing placeholders: left as-is in text
+ - Extra kwargs: silently ignored (no error)
+ - Nested templates: if value is PromptTemplate, formats it first
+ - Mixed values: str, ContentBlockList, or objects with to_content_blocks()
+
+ Examples:
+ # Define template (can be class attribute)
+ user_prompt_template = PromptTemplate('''
+ Now you see problem instance:
+
+ ================================
+ {problem_instance}
+ ================================
+ ''')
+
+ # Format with ContentBlockList (may contain images)
+ content = user_prompt_template.format(
+ problem_instance=problem.to_content_blocks()
+ )
+ # Returns ContentBlockList: [TextContent("Now you see..."), *problem_blocks, TextContent("===...")]
+
+ # Multiple placeholders
+ template = PromptTemplate("User: {user}\\nAssistant: {assistant}")
+ result = template.format(user=user_blocks, assistant=assistant_blocks)
+
+ # Nested templates
+ outer = PromptTemplate("Header\\n{body}\\nFooter")
+ inner = PromptTemplate("Content: {data}")
+ result = outer.format(body=inner, data="some data") # inner gets same kwargs
+
+ # Escaping braces
+ template = PromptTemplate('JSON example: {{"key": "{value}"}}')
+ result = template.format(value="hello") # {"key": "hello"}
+
+ # Extra kwargs are ignored (no error)
+ result = template.format(value="hello", unused_key="ignored")
+
+ # Missing placeholders left as-is
+ template = PromptTemplate("Hello {name}, score: {score}")
+ result = template.format(name="Alice") # "Hello Alice, score: {score}"
+ """
+
+ # Regex to find {placeholder} but not {{ or }}
+ _PLACEHOLDER_PATTERN = None # Lazy compiled
+
+ def __init__(self, template: str):
+ """Initialize with a template string.
+
+ Args:
+ template: Template string with {placeholder} syntax.
+ """
+ self.template = template
+
+ @classmethod
+ def _get_pattern(cls):
+ """Lazily compile the placeholder regex pattern."""
+ if cls._PLACEHOLDER_PATTERN is None:
+ import re
+ # Match {name} but not {{ or }}
+ # Captures the placeholder name
+ cls._PLACEHOLDER_PATTERN = re.compile(r'\{(\w+)\}')
+ return cls._PLACEHOLDER_PATTERN
+
+ def format(self, **kwargs) -> Union[str, 'ContentBlockList']:
+ """Format the template with the given values.
+
+ Similar to str.format(), but supports multimodal content.
+ Extra kwargs are silently ignored.
+
+ If all values are strings, returns a str (backward compatible).
+ If any value is a ContentBlockList or multimodal, returns ContentBlockList.
+
+ Args:
+ **kwargs: Placeholder values. Each value can be:
+ - str: inserted as text
+ - ContentBlockList: blocks spliced in at that position
+ - PromptTemplate: formatted first, then spliced in
+ - Object with to_content_blocks(): method called, result spliced
+ - Other: converted to str
+
+ Returns:
+ str: If all values are strings (backward compatible behavior).
+ ContentBlockList: If any value is multimodal content.
+ """
+ # Check if all values are simple strings - if so, use simple string formatting
+ pattern = self._get_pattern()
+ placeholder_names = set(pattern.findall(self.template))
+
+ # Only check values for placeholders that exist in the template
+ relevant_values = {k: v for k, v in kwargs.items() if k in placeholder_names}
+
+ if all(isinstance(v, str) for v in relevant_values.values()):
+ # All strings: use simple string replacement, return str
+ # Handle escaping and missing placeholders
+ result = self.template.replace("{{", "\x00LBRACE\x00").replace("}}", "\x00RBRACE\x00")
+
+ for name in placeholder_names:
+ placeholder = "{" + name + "}"
+ if name in kwargs:
+ result = result.replace(placeholder, kwargs[name])
+ # Missing placeholders left as-is
+
+ result = result.replace("\x00LBRACE\x00", "{").replace("\x00RBRACE\x00", "}")
+ return result
+
+ # Multimodal content: build ContentBlockList
+ result = ContentBlockList()
+
+ # Handle escaping: replace {{ with a sentinel, }} with another
+ LBRACE_SENTINEL = "\x00LBRACE\x00"
+ RBRACE_SENTINEL = "\x00RBRACE\x00"
+
+ text = self.template.replace("{{", LBRACE_SENTINEL).replace("}}", RBRACE_SENTINEL)
+
+ last_end = 0
+
+ for match in pattern.finditer(text):
+ # Add text before this placeholder
+ prefix = text[last_end:match.start()]
+ if prefix:
+ # Restore escaped braces in prefix
+ prefix = prefix.replace(LBRACE_SENTINEL, "{").replace(RBRACE_SENTINEL, "}")
+ result.append(prefix)
+
+ # Get placeholder name and value
+ placeholder_name = match.group(1)
+
+ if placeholder_name in kwargs:
+ value = kwargs[placeholder_name]
+ # Convert value to ContentBlockList and splice in
+ content = self._value_to_content(value, **kwargs)
+ result.extend(content)
+ else:
+ # Missing placeholder: leave as-is (restore original {name})
+ result.append("{" + placeholder_name + "}")
+
+ last_end = match.end()
+
+ # Add remaining text after last placeholder
+ suffix = text[last_end:]
+ if suffix:
+ suffix = suffix.replace(LBRACE_SENTINEL, "{").replace(RBRACE_SENTINEL, "}")
+ result.append(suffix)
+
+ return result
+
+ def _value_to_content(self, value, **kwargs) -> 'ContentBlockList':
+ """Convert a value to ContentBlockList.
+
+ Args:
+ value: The value to convert
+            **kwargs: Passed to nested PromptTemplate.format()
+
+ Returns:
+ ContentBlockList: The value as content blocks.
+ """
+ if isinstance(value, ContentBlockList):
+ return value
+ elif isinstance(value, PromptTemplate):
+ # Nested template: format it with the same kwargs
+ return value.format(**kwargs)
+ elif hasattr(value, 'to_content_blocks'):
+ # Object with to_content_blocks method (e.g., ProblemInstance)
+ return value.to_content_blocks()
+ elif isinstance(value, str):
+ return ContentBlockList(value)
+ else:
+ # Fallback: convert to string
+ return ContentBlockList(str(value))
+
+ def __repr__(self) -> str:
+ """Return a preview of the template."""
+ preview = self.template[:50] + "..." if len(self.template) > 50 else self.template
+ return f"PromptTemplate({preview!r})"
+
+
+@dataclass
+class TextContent(ContentBlock):
+ """Text content block"""
+ type: Literal["text"] = "text"
+ text: str = ""
+
+ def __init__(self, text: str = ""):
+ super().__init__(text=text)
+
+ def is_empty(self) -> bool:
+ """Check if the text content is empty."""
+ return not self.text
+
+ @classmethod
+ def build(cls, value: Any = "", **kwargs) -> 'TextContent':
+ """Build a text content block from a value.
+
+ Args:
+ value: String or any value to convert to text
+ **kwargs: Unused, for compatibility with base class
+
+ Returns:
+ TextContent: Text content block with the value as text
+ """
+ if isinstance(value, str):
+ return cls(text=value)
+ return cls(text=str(value))
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary for serialization."""
+ return {"type": self.type, "text": self.text}
+
+ def to_litellm_format(self, role: str = "user") -> Dict[str, Any]:
+ """Convert to LiteLLM/OpenAI Response API compatible format.
+
+ Args:
+ role: The role context ("user" or "assistant") to determine the correct type
+
+ Returns dict in format:
+ - {"type": "input_text", "text": "..."} for user messages
+ - {"type": "output_text", "text": "..."} for assistant messages
+ """
+ text_type = "input_text" if role == "user" else "output_text"
+ return {"type": text_type, "text": self.text}
+
+ def __add__(self, other) -> 'TextContent':
+ """Concatenate text content with strings or other TextContent objects.
+
+ Args:
+ other: String or TextContent to concatenate
+
+ Returns:
+ TextContent: New TextContent with concatenated text
+ """
+ if isinstance(other, str):
+ return TextContent(text=self.text + " " + other)
+ elif isinstance(other, TextContent):
+ return TextContent(text=self.text + " " + other.text)
+ else:
+ return NotImplemented
+
+ def __radd__(self, other) -> 'TextContent':
+ """Right-side concatenation (when string is on the left).
+
+ Args:
+ other: String to concatenate
+
+ Returns:
+ TextContent: New TextContent with concatenated text
+ """
+ if isinstance(other, str):
+ return TextContent(text=other + " " + self.text)
+ else:
+ return NotImplemented
+
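+# Illustrative sketch of TextContent concatenation via the __add__/__radd__
+# overloads above; note that a single space is inserted between the pieces.
+#
+#     TextContent("Hello") + "world"        # -> TextContent(text="Hello world")
+#     "Prefix:" + TextContent("body")       # -> TextContent(text="Prefix: body")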
+
+@dataclass
+class ImageContent(ContentBlock):
+ """Image content block - supports URLs, base64, file paths, and numpy arrays.
+
+    OpenAI uses base64-encoded images stored in the image_data field, which are recombined into a data URL of the form `"image_url": f"data:image/jpeg;base64,{base64_image}"` when sending to the API.
+ Gemini uses raw bytes in the image_bytes field:
+ ```
+ types.Part.from_bytes(
+ data=image_bytes,
+ mime_type='image/jpeg',
+ )
+ ```
+
+ Supports multiple ways to create an ImageContent:
+ 1. Direct instantiation with image_url or image_data
+ 2. from_file/from_path: Load from local file path
+ 3. from_url: Create from HTTP/HTTPS URL
+ 4. from_array: Create from numpy array or array-like RGB image
+    5. build/autocast: Auto-detect and create from various formats
+ """
+ type: Literal["image"] = "image"
+ image_url: Optional[str] = None
+ image_data: Optional[str] = None # base64 encoded
+ image_bytes: Optional[bytes] = None
+ media_type: str = "image/jpeg" # image/jpeg, image/png, image/gif, image/webp
+ detail: Optional[str] = None # OpenAI: "auto", "low", "high"
+
+ def __init__(self, value: Any = None, format: str = "PNG", **kwargs):
+ """Initialize ImageContentBlock with auto-detection of input type.
+
+ Args:
+ value: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Data URL string (starting with 'data:image/')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ - PIL Image object
+ - Raw bytes
+ - None (empty image)
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+ **kwargs: Direct field values (image_url, image_data, media_type, detail)
+ """
+ # If explicit field values are provided, use them directly
+ if kwargs:
+ kwargs.setdefault('type', 'image')
+ kwargs.setdefault('media_type', 'image/jpeg')
+ super().__init__(**kwargs)
+ else:
+ # Use autocast to detect and convert the value
+ value_dict = self.autocast(value, format=format)
+ super().__init__(**value_dict)
+
+ def __str__(self) -> str:
+ # Truncate image_data and image_bytes for readability
+ image_data_str = f"{self.image_data[:10]}..." if self.image_data and len(self.image_data) > 10 else self.image_data
+ image_bytes_str = f"{str(self.image_bytes[:10])}..." if self.image_bytes and len(self.image_bytes) > 10 else self.image_bytes
+ return f"ImageContent(image_url={self.image_url}, image_data={image_data_str}, image_bytes={image_bytes_str}, media_type={self.media_type})"
+
+ def __repr__(self) -> str:
+ # Truncate image_data and image_bytes for readability
+ image_data_str = f"{self.image_data[:10]}..." if self.image_data and len(self.image_data) > 10 else self.image_data
+ image_bytes_str = f"{str(self.image_bytes[:10])}..." if self.image_bytes and len(self.image_bytes) > 10 else self.image_bytes
+ return f"ImageContent(image_url={self.image_url}, image_data={image_data_str}, image_bytes={image_bytes_str}, media_type={self.media_type})"
+
+ def is_empty(self) -> bool:
+ """Check if the image content is empty (no URL or data)."""
+ return not self.image_url and not self.image_data and not self.image_bytes
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary for serialization (not LiteLLM format).
+
+ For LiteLLM format, use to_litellm_format() instead.
+ """
+ result = {
+ "type": self.type,
+ "media_type": self.media_type
+ }
+ if self.image_url:
+ result["image_url"] = self.image_url
+ if self.image_data:
+ result["image_data"] = self.image_data
+ if self.image_bytes:
+ result["image_bytes"] = self.image_bytes
+ if self.detail:
+ result["detail"] = self.detail
+ return result
+
+ def to_litellm_format(self) -> Dict[str, Any]:
+ """Convert to LiteLLM Response API compatible format.
+
+ Returns dict in format:
+ {"type": "input_image", "image_url": {"url": "..."}}
+ """
+ # Determine the URL to use
+ if self.image_url:
+ url = self.image_url
+ elif self.image_data:
+ # Convert base64 data to data URL
+ url = f"data:{self.media_type};base64,{self.image_data}"
+ elif self.image_bytes:
+ # Convert bytes to base64 and then to data URL
+ import base64
+ b64_data = base64.b64encode(self.image_bytes).decode('utf-8')
+ url = f"data:{self.media_type};base64,{b64_data}"
+ else:
+ # Empty image
+ return {"type": "input_image", "image_url": ""}
+
+ # Build the result in Response API format
+ result = {
+ "type": "input_image",
+ "image_url": url
+ }
+
+ # Add detail if specified (OpenAI-specific)
+ if self.detail:
+ result["detail"] = self.detail
+
+ return result
+
+ @classmethod
+ def from_file(cls, filepath: str, media_type: Optional[str] = None):
+ """Load image from file path."""
+ path = Path(filepath)
+ if not media_type:
+ ext_to_type = {
+ '.jpg': 'image/jpeg',
+ '.jpeg': 'image/jpeg',
+ '.png': 'image/png',
+ '.gif': 'image/gif',
+ '.webp': 'image/webp'
+ }
+ media_type = ext_to_type.get(path.suffix.lower(), 'image/jpeg')
+
+ with open(filepath, 'rb') as f:
+ image_data = base64.b64encode(f.read()).decode('utf-8')
+
+ return cls(image_data=image_data, media_type=media_type)
+
+ @classmethod
+ def from_path(cls, filepath: str, media_type: Optional[str] = None):
+ """Load image from file path. Alias for from_file."""
+ return cls.from_file(filepath, media_type)
+
+ @classmethod
+ def from_url(cls, url: str, media_type: str = "image/jpeg"):
+ """Create ImageContent from an HTTP/HTTPS URL.
+
+ Args:
+ url: HTTP or HTTPS URL pointing to an image
+ media_type: MIME type of the image (default: image/jpeg)
+ """
+ return cls(image_url=url, media_type=media_type)
+
+ @classmethod
+ def from_array(cls, array: Any, format: str = "PNG"):
+ """Create ImageContent from a numpy array or array-like RGB image.
+
+ Args:
+ array: numpy array representing an image (H, W, C) with values in [0, 255] or [0, 1]
+ format: Image format (PNG, JPEG, etc.). Default: PNG
+
+ Returns:
+ ImageContent with base64-encoded image data
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ raise ImportError("numpy is required for from_array. Install with: pip install numpy")
+
+ try:
+ from PIL import Image
+ except ImportError:
+ raise ImportError("Pillow is required for from_array. Install with: pip install Pillow")
+
+ import io
+
+ # Convert to numpy array if not already
+ if not isinstance(array, np.ndarray):
+ array = np.array(array)
+
+ # Normalize to [0, 255] if needed
+ if array.dtype == np.float32 or array.dtype == np.float64:
+ if array.max() <= 1.0:
+ array = (array * 255).astype(np.uint8)
+ else:
+ array = array.astype(np.uint8)
+ elif array.dtype != np.uint8:
+ array = array.astype(np.uint8)
+
+ # Convert to PIL Image and encode
+ image = Image.fromarray(array)
+ buffer = io.BytesIO()
+ image.save(buffer, format=format.upper())
+ buffer.seek(0)
+
+ image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
+ media_type = f"image/{format.lower()}"
+
+ return cls(image_data=image_data, media_type=media_type)
+
+ @classmethod
+ def from_pil(cls, image: Any, format: str = "PNG"):
+ """Create ImageContent from a PIL Image.
+
+ Args:
+ image: PIL Image object
+ format: Image format (PNG, JPEG, etc.). Default: PNG
+
+ Returns:
+ ImageContent with base64-encoded image data
+ """
+ import io
+
+ buffer = io.BytesIO()
+ img_format = image.format or format.upper()
+ image.save(buffer, format=img_format)
+ buffer.seek(0)
+
+ image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
+ media_type = f"image/{img_format.lower()}"
+
+ return cls(image_data=image_data, media_type=media_type)
+
+ @classmethod
+ def from_bytes(cls, data: bytes, media_type: str = "image/jpeg"):
+ """Create ImageContent from raw image bytes.
+
+ Args:
+ data: Raw image bytes
+ media_type: MIME type of the image (default: image/jpeg)
+
+ Returns:
+ ImageContent with base64-encoded data
+ """
+ image_data = base64.b64encode(data).decode('utf-8')
+ return cls(image_data=image_data, media_type=media_type)
+
+ @classmethod
+ def from_base64(cls, b64_data: str, media_type: str = "image/jpeg"):
+ """Create ImageContent from base64-encoded string.
+
+ Args:
+ b64_data: Base64-encoded image data (without data URL prefix)
+ media_type: MIME type of the image (default: image/jpeg)
+
+ Returns:
+ ImageContent with the provided base64 data
+ """
+ return cls(image_data=b64_data, media_type=media_type)
+
+ @classmethod
+ def from_data_url(cls, data_url: str):
+ """Create ImageContent from a data URL (data:image/...;base64,...).
+
+ Args:
+            data_url: Data URL string in the format data:<media_type>;base64,<data>
+
+ Returns:
+ ImageContent with extracted base64 data and media type
+ """
+ try:
+ header, b64_data = data_url.split(',', 1)
+ media_type = header.split(':')[1].split(';')[0] # e.g., "image/png"
+ return cls(image_data=b64_data, media_type=media_type)
+ except (ValueError, IndexError):
+ # Fallback: assume the whole thing is base64 data
+ return cls(image_data=data_url.split(',')[-1], media_type="image/jpeg")
+
+ @staticmethod
+ def autocast(value: Any, format: str = "PNG") -> Dict[str, Any]:
+ """Auto-detect value type and return image field values.
+
+ Args:
+ value: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Data URL string (starting with 'data:image/')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ - PIL Image object
+ - Raw bytes
+ - None (empty image)
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+
+ Returns:
+ Dictionary with keys: image_url, image_data, image_bytes, media_type
+ """
+ # Handle None or empty
+ if value is None:
+ return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"}
+
+ # Handle ImageContentBlock instance
+ if isinstance(value, ImageContent):
+ return {
+ "image_url": value.image_url,
+ "image_data": value.image_data,
+ "image_bytes": value.image_bytes,
+ "media_type": value.media_type
+ }
+
+ # Handle string inputs
+ if isinstance(value, str):
+ if not value.strip():
+ return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"}
+
+ # Data URL
+ if value.startswith('data:image/'):
+ try:
+ header, b64_data = value.split(',', 1)
+ media_type = header.split(':')[1].split(';')[0]
+ return {"image_url": None, "image_data": b64_data, "image_bytes": None, "media_type": media_type}
+ except (ValueError, IndexError):
+ return {"image_url": None, "image_data": value.split(',')[-1], "image_bytes": None, "media_type": "image/jpeg"}
+
+ # HTTP/HTTPS URL
+ if value.startswith('http://') or value.startswith('https://'):
+ return {"image_url": value, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"}
+
+ # File path
+ path = Path(value)
+ if path.exists():
+ ext_to_type = {
+ '.jpg': 'image/jpeg',
+ '.jpeg': 'image/jpeg',
+ '.png': 'image/png',
+ '.gif': 'image/gif',
+ '.webp': 'image/webp'
+ }
+ media_type = ext_to_type.get(path.suffix.lower(), 'image/jpeg')
+ with open(value, 'rb') as f:
+ image_data = base64.b64encode(f.read()).decode('utf-8')
+ return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type}
+
+ # Handle bytes - store as base64 for portability
+ if isinstance(value, bytes):
+ image_data = base64.b64encode(value).decode('utf-8')
+ return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": "image/jpeg"}
+
+ # Handle PIL Image
+ try:
+ from PIL import Image
+ if isinstance(value, Image.Image):
+ import io
+ buffer = io.BytesIO()
+ img_format = value.format or format.upper()
+ value.save(buffer, format=img_format)
+ buffer.seek(0)
+ image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
+ media_type = f"image/{img_format.lower()}"
+ return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type}
+ except ImportError:
+ pass
+
+ # Handle numpy array or array-like
+ try:
+ import numpy as np
+ if isinstance(value, np.ndarray) or hasattr(value, '__array__'):
+ try:
+ from PIL import Image
+ except ImportError:
+ raise ImportError("Pillow is required for array conversion. Install with: pip install Pillow")
+
+ import io
+
+ if not isinstance(value, np.ndarray):
+ value = np.array(value)
+
+ # Normalize to [0, 255] if needed
+ if value.dtype == np.float32 or value.dtype == np.float64:
+ if value.max() <= 1.0:
+ value = (value * 255).astype(np.uint8)
+ else:
+ value = value.astype(np.uint8)
+ elif value.dtype != np.uint8:
+ value = value.astype(np.uint8)
+
+ image = Image.fromarray(value)
+ buffer = io.BytesIO()
+ image.save(buffer, format=format.upper())
+ buffer.seek(0)
+
+ image_data = base64.b64encode(buffer.getvalue()).decode('utf-8')
+ media_type = f"image/{format.lower()}"
+ return {"image_url": None, "image_data": image_data, "image_bytes": None, "media_type": media_type}
+ except ImportError:
+ pass
+
+ return {"image_url": None, "image_data": None, "image_bytes": None, "media_type": "image/jpeg"}
+
+ @classmethod
+ def build(cls, value: Any, format: str = "PNG") -> 'ImageContent':
+ """Auto-detect format and create ImageContent from various input types.
+
+ Args:
+ value: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Data URL string (starting with 'data:image/')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ - PIL Image object
+ - Raw bytes
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+
+ Returns:
+            ImageContent; the result is empty (is_empty() returns True) if the value cannot be converted
+ """
+ # Handle ImageContentBlock instance directly
+ if isinstance(value, cls):
+ return value
+
+ value_dict = cls.autocast(value, format=format)
+ return cls(**value_dict)
+
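+    # Hedged examples of the build() auto-detection above; the paths/URL are
+    # placeholders, and the numpy case assumes `import numpy as np` elsewhere.
+    #
+    #     ImageContent.build("https://example.com/cat.jpg")       # kept as image_url
+    #     ImageContent.build("plots/loss_curve.png")              # read from disk, base64-encoded
+    #     ImageContent.build(np.zeros((4, 4, 3)), format="PNG")   # array -> PIL -> base64
+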
+ def set_image(self, image: Any, format: str = "PNG") -> None:
+ """Set the image from various input formats (mutates self).
+
+ Args:
+ image: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Data URL string (starting with 'data:image/')
+ - Local file path (string)
+ - Numpy array or array-like RGB image
+ - PIL Image object
+ - Raw bytes
+ format: Image format for numpy arrays (PNG, JPEG, etc.). Default: PNG
+ """
+ result = ImageContent.build(image, format=format)
+ if result:
+ self.image_url = result.image_url
+ self.image_data = result.image_data
+ # Only copy image_bytes if it was explicitly set (e.g., from Google API)
+ if result.image_bytes:
+ self.image_bytes = result.image_bytes
+ self.media_type = result.media_type
+
+ def as_image(self) -> Image.Image:
+ """Convert the image to a PIL Image.
+
+ Fetches the image from URL if necessary (including HTTP/HTTPS URLs).
+
+ Returns:
+ PIL Image object
+
+ Raises:
+ ValueError: If no image data is available
+ requests.RequestException: If fetching from URL fails
+ """
+ # Try to get image bytes from any available source
+ image_bytes = self.get_bytes()
+
+ if image_bytes:
+ return Image.open(io.BytesIO(image_bytes))
+ elif self.image_url:
+ if self.image_url.startswith(('http://', 'https://')):
+ # Fetch image from URL
+ try:
+ import requests
+ response = requests.get(self.image_url, timeout=30)
+ response.raise_for_status()
+ return Image.open(io.BytesIO(response.content))
+ except ImportError:
+ # Fallback to urllib if requests is not available
+ from urllib.request import urlopen
+ with urlopen(self.image_url, timeout=30) as response:
+ return Image.open(io.BytesIO(response.read()))
+ else:
+ # If it's a local file path
+ return Image.open(self.image_url)
+ else:
+ raise ValueError("No image data available to convert to PIL Image")
+
+ def show(self) -> Image.Image:
+ """A convenience alias for as_image()"""
+ return self.as_image()
+
+ def get_bytes(self) -> Optional[bytes]:
+ """Get raw image bytes.
+
+ Returns image_bytes if available, otherwise decodes image_data from base64.
+
+ Returns:
+ Raw image bytes or None if no image data available
+ """
+ if self.image_bytes:
+ return self.image_bytes
+ elif self.image_data:
+ return base64.b64decode(self.image_data)
+ return None
+
+ def get_base64(self) -> Optional[str]:
+ """Get base64-encoded image data.
+
+ Returns image_data if available, otherwise encodes image_bytes to base64.
+
+ Returns:
+ Base64-encoded string or None if no image data available
+ """
+ if self.image_data:
+ return self.image_data
+ elif self.image_bytes:
+ return base64.b64encode(self.image_bytes).decode('utf-8')
+ return None
+
+ def ensure_bytes(self) -> None:
+ """Ensure image_bytes is populated (converts from image_data if needed)."""
+ if not self.image_bytes and self.image_data:
+ self.image_bytes = base64.b64decode(self.image_data)
+
+ def ensure_base64(self) -> None:
+ """Ensure image_data is populated (converts from image_bytes if needed)."""
+ if not self.image_data and self.image_bytes:
+ self.image_data = base64.b64encode(self.image_bytes).decode('utf-8')
+
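+# Minimal round-trip sketch for the byte/base64 accessors above; raw_png_bytes
+# stands in for any PNG byte string.
+#
+#     img = ImageContent.from_bytes(raw_png_bytes, media_type="image/png")
+#     img.get_base64()                  # base64 view of the same data
+#     img.get_bytes() == raw_png_bytes  # decodes back to the original bytes
+#     img.ensure_bytes()                # populates image_bytes in place
+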
+@dataclass
+class PDFContent(ContentBlock):
+ """PDF content block"""
+ type: Literal["pdf"] = "pdf"
+ pdf_url: Optional[str] = None
+ pdf_data: Optional[str] = None # base64 encoded
+ filename: Optional[str] = None
+
+ def __post_init__(self):
+ # Ensure type is always "pdf" (fixes issue when user passes positional arg)
+ object.__setattr__(self, 'type', 'pdf')
+
+ def is_empty(self) -> bool:
+ """Check if the PDF content is empty (no URL or data)."""
+ return not self.pdf_url and not self.pdf_data
+
+ @classmethod
+ def build(cls, value: Any, **kwargs) -> 'PDFContent':
+ """Build a PDF content block from a value.
+
+ Args:
+ value: Can be:
+ - URL string (starting with 'http://' or 'https://')
+ - Local file path (string)
+ - Raw bytes
+ **kwargs: Unused, for compatibility with base class
+
+ Returns:
+ PDFContent or None if the value cannot be converted
+ """
+ if isinstance(value, str):
+ # HTTP/HTTPS URL
+ if value.startswith('http://') or value.startswith('https://'):
+ return cls(pdf_url=value)
+ # Assume it's a file path
+ if Path(value).exists():
+ return cls.from_file(value)
+ return None
+
+ # Handle bytes
+ if isinstance(value, bytes):
+ pdf_data = base64.b64encode(value).decode('utf-8')
+ return cls(pdf_data=pdf_data)
+
+ return None
+
+ def to_dict(self) -> Dict[str, Any]:
+ if self.pdf_url:
+ return {
+ "type": "document",
+ "source": {"type": "url", "url": self.pdf_url},
+ "filename": self.filename
+ }
+ else:
+ return {
+ "type": "document",
+ "source": {
+ "type": "base64",
+ "media_type": "application/pdf",
+ "data": self.pdf_data
+ },
+ "filename": self.filename
+ }
+
+ @classmethod
+ def from_file(cls, filepath: str):
+ """Load PDF from file"""
+ path = Path(filepath)
+ with open(filepath, 'rb') as f:
+ pdf_data = base64.b64encode(f.read()).decode('utf-8')
+
+ return cls(pdf_data=pdf_data, filename=path.name)
+
+
+@dataclass
+class FileContent(ContentBlock):
+ """Generic file content block (for code, data files, etc.)"""
+ file_data: str # Could be text content or base64 for binary
+ filename: str
+ type: Literal["file"] = "file"
+ mime_type: str = "text/plain"
+ is_binary: bool = False
+
+ @classmethod
+ def build(cls, value: Any, **kwargs) -> 'FileContent':
+ """Build a file content block from a value.
+
+ Args:
+ value: Can be:
+ - Local file path (string)
+ - Tuple of (filename, content) where content is str or bytes
+ **kwargs: Additional arguments like mime_type
+
+ Returns:
+ FileContent or None if the value cannot be converted
+ """
+ mime_type = kwargs.get('mime_type')
+
+ if isinstance(value, str):
+ # Assume it's a file path
+ if Path(value).exists():
+ return cls.from_file(value, mime_type=mime_type)
+ return None
+
+ # Handle tuple of (filename, content)
+ if isinstance(value, tuple) and len(value) == 2:
+ filename, content = value
+ if isinstance(content, bytes):
+ file_data = base64.b64encode(content).decode('utf-8')
+ is_binary = True
+ else:
+ file_data = str(content)
+ is_binary = False
+ return cls(
+ file_data=file_data,
+ filename=filename,
+ mime_type=mime_type or 'application/octet-stream',
+ is_binary=is_binary
+ )
+
+ return None
+
+ def is_empty(self) -> bool:
+ """Check if the file content is empty (no data)."""
+ return not self.file_data
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "type": self.type,
+ "filename": self.filename,
+ "mime_type": self.mime_type,
+ "file_data": self.file_data,
+ "is_binary": self.is_binary
+ }
+
+ @classmethod
+ def from_file(cls, filepath: str, mime_type: Optional[str] = None):
+ """Load file from disk"""
+ path = Path(filepath)
+
+ # Try to read as text first
+ try:
+ with open(filepath, 'r', encoding='utf-8') as f:
+ file_data = f.read()
+ is_binary = False
+ except UnicodeDecodeError:
+ # Fall back to binary
+ with open(filepath, 'rb') as f:
+ file_data = base64.b64encode(f.read()).decode('utf-8')
+ is_binary = True
+
+ if not mime_type:
+ # Simple mime type detection
+ ext_to_type = {
+ '.py': 'text/x-python',
+ '.js': 'text/javascript',
+ '.json': 'application/json',
+ '.csv': 'text/csv',
+ '.txt': 'text/plain',
+ '.md': 'text/markdown',
+ '.html': 'text/html',
+ }
+ mime_type = ext_to_type.get(path.suffix.lower(), 'application/octet-stream')
+
+ return cls(
+ file_data=file_data,
+ filename=path.name,
+ mime_type=mime_type,
+ is_binary=is_binary
+ )
+
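+# Sketch of the two FileContent.build() input forms handled above (file path
+# vs. (filename, content) tuple); the names are illustrative.
+#
+#     FileContent.build("analysis.py")                        # read from disk, mime text/x-python
+#     FileContent.build(("data.bin", b"\x00\x01"),
+#                       mime_type="application/octet-stream")  # base64-encoded, is_binary=True
+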
+# Union type alias for common content types (for type hints)
+# Note: ContentBlock remains the abstract base class for inheritance
+ContentBlockUnion = Union[TextContent, ImageContent, PDFContent, FileContent]
+
+
+@dataclass
+class ToolCall(ContentBlock):
+ """Represents a tool call made by the LLM"""
+ id: str
+ type: str # "function", "web_search", etc.
+ name: Optional[str] = None # function name
+ arguments: Optional[Dict[str, Any]] = None # function arguments
+
+ def is_empty(self) -> bool:
+ """Check if the tool call is empty (no id)."""
+ return not self.id
+
+ def to_dict(self) -> Dict[str, Any]:
+ result = {"id": self.id, "type": self.type}
+ if self.name:
+ result["name"] = self.name
+ if self.arguments:
+ result["arguments"] = self.arguments
+ return result
+
+
+@dataclass
+class ToolResult(ContentBlock):
+ """Represents the result of a tool execution"""
+ tool_call_id: str
+ content: str # Result as string (can be JSON stringified)
+ is_error: bool = False
+
+ def is_empty(self) -> bool:
+ """Check if the tool result is empty (no tool_call_id)."""
+ return not self.tool_call_id
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "tool_call_id": self.tool_call_id,
+ "content": self.content,
+ "is_error": self.is_error
+ }
+
+
+@dataclass
+class ToolDefinition(ContentBlock):
+ """Defines a tool that the LLM can use"""
+ type: str # "function", "web_search", "file_search", etc.
+ name: Optional[str] = None
+ description: Optional[str] = None
+ parameters: Optional[Dict[str, Any]] = None
+ strict: bool = False # OpenAI strict mode
+ # Provider-specific fields
+ extra: Dict[str, Any] = field(default_factory=dict)
+
+ def is_empty(self) -> bool:
+ """Check if the tool definition is empty (no type)."""
+ return not self.type
+
+ def to_dict(self) -> Dict[str, Any]:
+ result = {"type": self.type}
+ if self.name:
+ result["name"] = self.name
+ if self.description:
+ result["description"] = self.description
+ if self.parameters:
+ result["parameters"] = self.parameters
+ if self.strict:
+ result["strict"] = self.strict
+ result.update(self.extra)
+ return result
+
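+# Illustrative ToolDefinition for a function tool; the weather schema is a
+# made-up example, only the field names come from the dataclass above.
+#
+#     get_weather = ToolDefinition(
+#         type="function",
+#         name="get_weather",
+#         description="Look up the current weather for a city",
+#         parameters={
+#             "type": "object",
+#             "properties": {"city": {"type": "string"}},
+#             "required": ["city"],
+#         },
+#     )
+#     get_weather.to_dict()  # -> {"type": "function", "name": "get_weather", ...}
+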
+@dataclass
+class UserTurn:
+ """Represents a user message turn in the conversation"""
+ role: str = "user"
+
+ content: ContentBlockList = field(default_factory=ContentBlockList)
+ tools: List[ToolDefinition] = field(default_factory=list)
+
+ # Provider-specific settings
+ temperature: Optional[float] = None
+ max_tokens: Optional[int] = None
+ top_p: Optional[float] = None
+
+ # Metadata
+ timestamp: Optional[str] = None
+ metadata: Dict[str, Any] = field(default_factory=dict)
+
+ def __init__(self, content=None, tools=None, **kwargs):
+ """
+ Initialize UserTurn with content and tools.
+
+ Four ways to initialize:
+ 1. Empty: UserTurn() - creates empty turn with defaults
+ 2. Copy: UserTurn(existing_turn) - creates a copy of an existing UserTurn
+ 3. Positional args: UserTurn(content, tools) - pass content and/or tools
+ 4. Keyword args: UserTurn(content=..., tools=..., temperature=...) - explicit fields
+
+ Args:
+ content: ContentBlockList, list of content blocks, UserTurn (for copying), or None
+ tools: List of ToolDefinition or None
+ **kwargs: Additional fields (temperature, max_tokens, top_p, timestamp, metadata)
+ """
+ self.output_contains_image = False
+
+ # Handle copy constructor: UserTurn(existing_turn)
+ if isinstance(content, UserTurn):
+ source = content
+ self.role = source.role
+ self.content = ContentBlockList(source.content) # Deep copy the content list
+ self.tools = list(source.tools) # Shallow copy the tools list
+ self.temperature = source.temperature
+ self.max_tokens = source.max_tokens
+ self.top_p = source.top_p
+ self.timestamp = source.timestamp
+ self.metadata = dict(source.metadata) # Copy the metadata dict
+ return
+
+ # Handle content
+ if content is None:
+ content = ContentBlockList()
+ elif not isinstance(content, ContentBlockList):
+ # If it's a list, wrap it in ContentBlockList
+ content = ContentBlockList(content) if isinstance(content, list) else ContentBlockList([content])
+
+ # Handle tools
+ if tools is None:
+ tools = []
+
+ # Set all fields
+ self.role = kwargs.get('role', "user")
+ self.content = content
+ self.tools = tools
+ self.temperature = kwargs.get('temperature', None)
+ self.max_tokens = kwargs.get('max_tokens', None)
+ self.top_p = kwargs.get('top_p', None)
+ self.timestamp = kwargs.get('timestamp', None)
+ self.metadata = kwargs.get('metadata', {})
+
+ def add_text(self, text: str) -> 'UserTurn':
+ """Add text content"""
+ self.content.append(TextContent(text=text))
+ return self
+
+ def add_image(self, url: Optional[str] = None, data: Optional[str] = None,
+ media_type: str = "image/jpeg") -> 'UserTurn':
+ """Add image content"""
+ self.content.append(ImageContent(
+ image_url=url,
+ image_data=data,
+ media_type=media_type
+ ))
+ return self
+
+ def add_image_file(self, filepath: str) -> 'UserTurn':
+ """Add image from file"""
+ self.content.append(ImageContent.from_file(filepath))
+ return self
+
+ def add_pdf(self, url: Optional[str] = None, data: Optional[str] = None) -> 'UserTurn':
+ """Add PDF content"""
+ self.content.append(PDFContent(pdf_url=url, pdf_data=data))
+ return self
+
+ def add_pdf_file(self, filepath: str) -> 'UserTurn':
+ """Add PDF from file"""
+ self.content.append(PDFContent.from_file(filepath))
+ return self
+
+ def add_file(self, filepath: str, mime_type: Optional[str] = None) -> 'UserTurn':
+ """Add file from disk"""
+ self.content.append(FileContent.from_file(filepath, mime_type))
+ return self
+
+ def add_tool(self, tool: ToolDefinition) -> 'UserTurn':
+ """Add a tool definition"""
+ self.tools.append(tool)
+ return self
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary format"""
+ return {
+ "role": "user",
+ "content": [c.to_dict() for c in self.content],
+ "tools": [t.to_dict() for t in self.tools] if self.tools else None,
+ "temperature": self.temperature,
+ "max_tokens": self.max_tokens,
+ "top_p": self.top_p,
+ "metadata": self.metadata
+ }
+
+ def enable_image_generation(self):
+ self.output_contains_image = True
+
+ def to_litellm_format(self) -> Dict[str, Any]:
+ """Convert to LiteLLM Response API format (OpenAI Response API compatible)"""
+ return {
+ "role": "user",
+ "content": self.content.to_litellm_format(role="user")
+ }
+
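+# Hedged sketch of the fluent builder methods above: each add_* call returns
+# self, so a turn can be assembled in one chain (file names are placeholders).
+#
+#     turn = (UserTurn()
+#             .add_text("Compare the two layouts below.")
+#             .add_image_file("layout_a.png")
+#             .add_image_file("layout_b.png"))
+#     turn.to_litellm_format()
+#     # -> {"role": "user", "content": [... input_text / input_image items ...]}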
+
+@dataclass
+class Turn:
+ def __init__(self, **kwargs):
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+
+
+@dataclass
+class AssistantTurn(Turn):
+ """Represents an assistant message turn in the conversation"""
+ role: str = "assistant"
+ content: ContentBlockList = field(default_factory=ContentBlockList)
+
+ # Tool usage (Option B: Everything in AssistantTurn)
+ tool_calls: List[ToolCall] = field(default_factory=list)
+ tool_results: List[ToolResult] = field(default_factory=list)
+
+ # Provider-specific features
+ reasoning: Optional[str] = None # OpenAI reasoning/thinking
+ finish_reason: Optional[str] = None # "stop", "length", "tool_calls", etc.
+
+ # Token usage
+ prompt_tokens: Optional[int] = None
+ completion_tokens: Optional[int] = None
+
+ # Metadata
+ model: Optional[str] = None
+ timestamp: Optional[str] = None
+ metadata: Dict[str, Any] = field(default_factory=dict)
+
+ def __init__(self, *args, **kwargs):
+ """
+ Initialize AssistantTurn from a raw response or with explicit fields.
+
+        Four ways to initialize:
+        1. Empty: AssistantTurn() - creates empty turn with defaults
+        2. Copy: AssistantTurn(existing_turn) - creates a copy of another AssistantTurn
+        3. From raw response: AssistantTurn(response) - autocasts the response
+        4. With fields: AssistantTurn(role="assistant", content=[...]) - explicit fields
+ """
+ if len(args) == 1 and isinstance(args[0], AssistantTurn):
+ # Case: Copy constructor - create a copy of another AssistantTurn
+ other = args[0]
+ super().__init__(
+ role=other.role,
+ content=ContentBlockList(other.content),
+ tool_calls=list(other.tool_calls),
+ tool_results=list(other.tool_results),
+ reasoning=other.reasoning,
+ finish_reason=other.finish_reason,
+ prompt_tokens=other.prompt_tokens,
+ completion_tokens=other.completion_tokens,
+ model=other.model,
+ timestamp=other.timestamp,
+ metadata=dict(other.metadata)
+ )
+ return
+
+ if len(args) > 0 and len(kwargs) == 0:
+ # Case 2: Single positional arg - autocast from raw response
+ value_dict = self.autocast(args[0])
+ super().__init__(**value_dict)
+ elif len(kwargs) > 0:
+ # Case 3: Keyword arguments - use them directly
+ super().__init__(**kwargs)
+ else:
+ # Case 1: No arguments - initialize with defaults
+ super().__init__(
+ role="assistant",
+ content=ContentBlockList(),
+ tool_calls=[],
+ tool_results=[],
+ reasoning=None,
+ finish_reason=None,
+ prompt_tokens=None,
+ completion_tokens=None,
+ model=None,
+ timestamp=None,
+ metadata={}
+ )
+
+ @staticmethod
+ def from_google_genai(value: Any) -> Dict[str, Any]:
+ """Parse a Google GenAI response into a dictionary of AssistantTurn fields.
+
+ Supports both the legacy generate_content API and the new Interactions API.
+
+ Args:
+ value: Raw response from Google GenAI API
+
+ Returns:
+ Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields
+ """
+ # Initialize the result dictionary with default values
+ result = {
+ "role": "assistant",
+ "content": ContentBlockList(),
+ "tool_calls": [],
+ "tool_results": [],
+ "reasoning": None,
+ "finish_reason": None,
+ "prompt_tokens": None,
+ "completion_tokens": None,
+ "model": None,
+ "timestamp": None,
+ "metadata": {}
+ }
+
+ # Check if this is a normalized response (from our GoogleGenAILLM)
+ if hasattr(value, 'raw_response'):
+ raw_response = value.raw_response
+ else:
+ raw_response = value
+
+ # Handle Interactions API format (new)
+ if hasattr(raw_response, 'outputs'):
+ # This is an Interaction object
+ interaction = raw_response
+
+ # Extract text from outputs
+ if interaction.outputs and len(interaction.outputs) > 0:
+ for output in interaction.outputs:
+ if hasattr(output, 'text') and output.text:
+ result["content"].append(TextContent(text=output.text))
+ # Handle other output types if they exist
+ elif hasattr(output, 'content'):
+ # Content could be a list of parts
+ if isinstance(output.content, list):
+ for part in output.content:
+ if hasattr(part, 'text') and part.text:
+ result["content"].append(TextContent(text=part.text))
+ else:
+ result["content"].append(TextContent(text=str(output.content)))
+
+ # Extract model info
+ if hasattr(interaction, 'model'):
+ result["model"] = interaction.model
+
+ # Extract status as finish_reason
+ if hasattr(interaction, 'status'):
+ result["finish_reason"] = interaction.status
+
+ # Extract token usage from Interactions API
+ if hasattr(interaction, 'usage'):
+ usage = interaction.usage
+ if hasattr(usage, 'input_tokens'):
+ result["prompt_tokens"] = usage.input_tokens
+ elif hasattr(usage, 'prompt_token_count'):
+ result["prompt_tokens"] = usage.prompt_token_count
+
+ if hasattr(usage, 'output_tokens'):
+ result["completion_tokens"] = usage.output_tokens
+ elif hasattr(usage, 'candidates_token_count'):
+ result["completion_tokens"] = usage.candidates_token_count
+
+ # Extract interaction ID as metadata
+ if hasattr(interaction, 'id'):
+ result["metadata"]['interaction_id'] = interaction.id
+
+ # Handle legacy generate_content API format
+ else:
+ # Extract thinking/reasoning (for Gemini 2.5+ models)
+ if hasattr(raw_response, 'thoughts') and raw_response.thoughts:
+ # Gemini's thinking budget feature
+ result["reasoning"] = str(raw_response.thoughts)
+
+ # Extract model info
+ if hasattr(raw_response, 'model_version'):
+ result["model"] = raw_response.model_version
+
+ # Extract token usage (if available)
+ if hasattr(raw_response, 'usage_metadata'):
+ usage = raw_response.usage_metadata
+ if hasattr(usage, 'prompt_token_count'):
+ result["prompt_tokens"] = usage.prompt_token_count
+ if hasattr(usage, 'candidates_token_count'):
+ result["completion_tokens"] = usage.candidates_token_count
+
+ # Handle multimodal content from Gemini (candidates with parts)
+ content_extracted = False
+ if hasattr(raw_response, 'candidates') and raw_response.candidates:
+ candidate = raw_response.candidates[0]
+
+ # Extract from parts (supports multimodal responses with text and images)
+ if hasattr(candidate, 'content') and hasattr(candidate.content, 'parts'):
+ for part in candidate.content.parts:
+ # Handle text parts
+ if hasattr(part, 'text') and part.text:
+ result["content"].append(TextContent(text=part.text))
+ content_extracted = True
+ # Handle inline data (images, generated images, etc.)
+ elif hasattr(part, 'inline_data'):
+ # Try to extract image data, preferring direct inline_data access
+ inline = part.inline_data
+ image_bytes = None
+ image_data = None
+ media_type = 'image/jpeg'
+
+ # Extract from inline_data Blob (most reliable method)
+ # Google's Blob.data should be raw bytes
+ if hasattr(inline, 'data'):
+ data = inline.data
+ # Check if it's bytes or string
+ if isinstance(data, bytes):
+ # Store raw bytes for Gemini compatibility
+ # (Gemini prefers raw bytes when sending images)
+ image_bytes = data
+ elif isinstance(data, str):
+ # Already base64-encoded string
+ image_data = data
+ # Don't decode to bytes - keep as base64 for portability
+
+ if hasattr(inline, 'mime_type'):
+ media_type = inline.mime_type
+
+ # If we got the data, create ImageContent
+ # Store image_bytes only if we got raw bytes from Google
+ if image_data or image_bytes:
+ result["content"].append(ImageContent(
+ image_data=image_data,
+                                image_bytes=image_bytes,  # only set when Google returned raw bytes
+ media_type=media_type
+ ))
+ content_extracted = True
+
+ # Extract finish reason
+ if hasattr(candidate, 'finish_reason'):
+ result["finish_reason"] = str(candidate.finish_reason)
+
+ # Fallback: Extract simple text content if no candidates/parts were found
+ if not content_extracted:
+ if hasattr(raw_response, 'text'):
+ result["content"].append(TextContent(text=raw_response.text))
+ elif hasattr(value, 'choices'):
+ # Fallback to normalized format
+ result["content"].append(TextContent(text=value.choices[0].message.content))
+
+ return result
+
+ @staticmethod
+ def from_litellm_openai_response_api(value: Any) -> Dict[str, Any]:
+ """Parse a LiteLLM/OpenAI-style response into a dictionary of AssistantTurn fields.
+
+ Handles both formats:
+ - New Responses API: Has 'output' field with ResponseOutputMessage objects
+ - Legacy Completion API: Has 'choices' field with message objects
+
+ Args:
+ value: Response from LiteLLM/OpenAI API (Responses API or Completion API)
+
+ Returns:
+ Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields
+ """
+ # Initialize the result dictionary with default values
+ result = {
+ "role": "assistant",
+ "content": ContentBlockList(),
+ "tool_calls": [],
+ "tool_results": [],
+ "reasoning": None,
+ "finish_reason": None,
+ "prompt_tokens": None,
+ "completion_tokens": None,
+ "model": None,
+ "timestamp": None,
+ "metadata": {}
+ }
+
+ # Handle Responses API format (new format with 'output' field)
+ if hasattr(value, 'output') and hasattr(value, 'object') and value.object == 'response':
+ # Extract metadata
+ if hasattr(value, 'id'):
+ result["metadata"]['response_id'] = value.id
+ if hasattr(value, 'created_at'):
+ result["timestamp"] = str(value.created_at)
+
+ # Extract model info
+ if hasattr(value, 'model'):
+ result["model"] = value.model
+
+ # Extract status as finish_reason
+ if hasattr(value, 'status'):
+ result["finish_reason"] = value.status
+
+ # Extract content from output
+ if value.output and len(value.output) > 0:
+ for output_item in value.output:
+ # Handle ImageGenerationCall
+ if hasattr(output_item, 'type') and output_item.type == 'image_generation_call':
+ # Extract generated image
+ if hasattr(output_item, 'result') and output_item.result:
+ # Determine media type from output_format
+ media_type = 'image/jpeg' # default
+ if hasattr(output_item, 'output_format'):
+ format_map = {
+ 'png': 'image/png',
+ 'jpeg': 'image/jpeg',
+ 'jpg': 'image/jpeg',
+ 'webp': 'image/webp',
+ 'gif': 'image/gif'
+ }
+ media_type = format_map.get(output_item.output_format.lower(), 'image/jpeg')
+
+ # Add image to content
+ result["content"].append(ImageContent(
+ image_data=output_item.result,
+ media_type=media_type
+ ))
+
+ # Store additional metadata about the image generation
+ if hasattr(output_item, 'revised_prompt') and output_item.revised_prompt:
+ if 'image_generation' not in result["metadata"]:
+ result["metadata"]['image_generation'] = []
+ result["metadata"]['image_generation'].append({
+ 'id': output_item.id if hasattr(output_item, 'id') else None,
+ 'revised_prompt': output_item.revised_prompt,
+ 'size': output_item.size if hasattr(output_item, 'size') else None,
+ 'quality': output_item.quality if hasattr(output_item, 'quality') else None,
+ 'status': output_item.status if hasattr(output_item, 'status') else None
+ })
+
+ # Handle ResponseOutputMessage
+ elif hasattr(output_item, 'type') and output_item.type == 'message':
+ # Extract role
+ if hasattr(output_item, 'role'):
+ result["role"] = output_item.role
+
+ # Extract status for this message
+ if hasattr(output_item, 'status') and not result["finish_reason"]:
+ result["finish_reason"] = output_item.status
+
+ # Extract content items
+ if hasattr(output_item, 'content') and output_item.content:
+ for content_item in output_item.content:
+ # Handle text content
+ if hasattr(content_item, 'type') and content_item.type == 'output_text':
+ if hasattr(content_item, 'text') and content_item.text:
+ result["content"].append(TextContent(text=content_item.text))
+ # Handle other content types as they become available
+ elif hasattr(content_item, 'text') and content_item.text:
+ result["content"].append(TextContent(text=str(content_item.text)))
+
+ # Extract reasoning (for models with reasoning capabilities)
+ if hasattr(value, 'reasoning'):
+ reasoning_parts = []
+ if isinstance(value.reasoning, dict):
+ if value.reasoning.get('summary'):
+ reasoning_parts.append(f"Summary: {value.reasoning['summary']}")
+ if value.reasoning.get('effort'):
+ reasoning_parts.append(f"Effort: {value.reasoning['effort']}")
+ if reasoning_parts:
+ result["reasoning"] = "\n".join(reasoning_parts)
+ elif value.reasoning:
+ result["reasoning"] = str(value.reasoning)
+
+ # Extract token usage (Responses API format)
+ if hasattr(value, 'usage'):
+ if hasattr(value.usage, 'input_tokens'):
+ result["prompt_tokens"] = value.usage.input_tokens
+ if hasattr(value.usage, 'output_tokens'):
+ result["completion_tokens"] = value.usage.output_tokens
+
+ # Handle legacy Completion API format (has 'choices' field)
+ elif hasattr(value, 'choices') and len(value.choices) > 0:
+ choice = value.choices[0]
+ message = choice.message if hasattr(choice, 'message') else choice
+
+ # Extract text content
+ if hasattr(message, 'content') and message.content:
+ result["content"].append(TextContent(text=str(message.content)))
+
+ # Extract tool calls
+ if hasattr(message, 'tool_calls') and message.tool_calls:
+ for tc in message.tool_calls:
+ tool_call = ToolCall(
+ id=tc.id if hasattr(tc, 'id') else None,
+ type=tc.type if hasattr(tc, 'type') else "function",
+ name=tc.function.name if hasattr(tc, 'function') else tc.name,
+ arguments=json.loads(tc.function.arguments) if hasattr(tc, 'function') and hasattr(tc.function, 'arguments') else {}
+ )
+ result["tool_calls"].append(tool_call)
+
+ # Extract finish reason
+ if hasattr(choice, 'finish_reason'):
+ result["finish_reason"] = choice.finish_reason
+
+ # Extract reasoning/thinking (for OpenAI o1/o3 models)
+ if hasattr(message, 'reasoning') and message.reasoning:
+ result["reasoning"] = message.reasoning
+
+ # Extract token usage (Completion API format)
+ if hasattr(value, 'usage'):
+ if hasattr(value.usage, 'prompt_tokens'):
+ result["prompt_tokens"] = value.usage.prompt_tokens
+ if hasattr(value.usage, 'completion_tokens'):
+ result["completion_tokens"] = value.usage.completion_tokens
+
+ # Extract model info
+ if hasattr(value, 'model'):
+ result["model"] = value.model
+
+ return result
+
+ @staticmethod
+ def autocast(value: Any) -> Dict[str, Any]:
+ """Automatically parse a response from any API into a dictionary of AssistantTurn fields.
+
+ Automatically detects the response format and uses the appropriate parser:
+ - Google GenAI (generate_content or Interactions API)
+ - LiteLLM/OpenAI Responses API (new format with 'output' field)
+ - LiteLLM/OpenAI Completion API (legacy format with 'choices' field)
+
+ Args:
+ value: Raw response from any supported API
+
+ Returns:
+ Dict[str, Any]: Dictionary with keys corresponding to AssistantTurn fields
+ """
+ # Check if this is a normalized response (from our GoogleGenAILLM)
+ raw_response = value.raw_response if hasattr(value, 'raw_response') else value
+
+ # Detect Google GenAI format (Interactions API or generate_content)
+ # Google GenAI has 'outputs' (Google Interactions API) or 'candidates' (generate_content)
+ # Note: 'outputs' is for Google's Interactions API, 'output' is for LiteLLM Responses API
+ if hasattr(raw_response, 'outputs') or \
+ (hasattr(raw_response, 'candidates') and not hasattr(value, 'choices')) or \
+ hasattr(raw_response, 'usage_metadata'):
+ return AssistantTurn.from_google_genai(value)
+
+ # Detect LiteLLM/OpenAI format (Responses API or Completion API)
+ # Responses API has 'output' field and object='response'
+ # Completion API has 'choices' field
+ elif hasattr(value, 'output') or hasattr(value, 'choices'):
+ return AssistantTurn.from_litellm_openai_response_api(value)
+
+ # Fallback: if has 'text' attribute, might be a simple Google response
+ elif hasattr(raw_response, 'text'):
+ return AssistantTurn.from_google_genai(value)
+
+ # Default to empty result if format is not recognized
+ else:
+ return {
+ "role": "assistant",
+ "content": ContentBlockList(),
+ "tool_calls": [],
+ "tool_results": [],
+ "reasoning": None,
+ "finish_reason": None,
+ "prompt_tokens": None,
+ "completion_tokens": None,
+ "model": None,
+ "timestamp": None,
+ "metadata": {}
+ }
+
+ def add_text(self, text: str) -> 'AssistantTurn':
+ """Add text content"""
+ self.content.append(text)
+ return self
+
+ def add_image(self, url: Optional[str] = None, data: Optional[str] = None,
+ media_type: str = "image/jpeg") -> 'AssistantTurn':
+ """Add image content (some models can generate images)"""
+ self.content.append(ImageContent(
+ image_url=url,
+ image_data=data,
+ media_type=media_type
+ ))
+ return self
+
+ def add_tool_call(self, tool_call: ToolCall) -> 'AssistantTurn':
+ """Add a tool call"""
+ self.tool_calls.append(tool_call)
+ return self
+
+ def add_tool_result(self, result: ToolResult) -> 'AssistantTurn':
+ """Add a tool result"""
+ self.tool_results.append(result)
+ return self
+
+ def to_text(self) -> str:
+ """Get all text content concatenated. Images will be presented as placeholder text."""
+ return self.content.to_text()
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary format"""
+ return {
+ "role": self.role,
+ "content": [c.to_dict() for c in self.content],
+ "tool_calls": [tc.to_dict() for tc in self.tool_calls] if self.tool_calls else None,
+ "tool_results": [tr.to_dict() for tr in self.tool_results] if self.tool_results else None,
+ "reasoning": self.reasoning,
+ "finish_reason": self.finish_reason,
+ "prompt_tokens": self.prompt_tokens,
+ "completion_tokens": self.completion_tokens,
+ "model": self.model,
+ "metadata": self.metadata
+ }
+
+ def get_text(self) -> ContentBlockList:
+ """Get all text content blocks.
+
+ Returns:
+ ContentBlockList: List containing only TextContent blocks
+ """
+ text_blocks = ContentBlockList()
+ for block in self.content:
+ if isinstance(block, TextContent):
+ text_blocks.append(block)
+ return text_blocks
+
+ def get_images(self) -> ContentBlockList:
+ """Get all image content blocks.
+
+ Returns:
+ ContentBlockList: List containing only ImageContent blocks
+ """
+ image_blocks = ContentBlockList()
+ for block in self.content:
+ if isinstance(block, ImageContent):
+ image_blocks.append(block)
+ return image_blocks
+
+ def to_litellm_format(self) -> Dict[str, Any]:
+ """Convert to LiteLLM Response API format (OpenAI Response API compatible)"""
+ result = {"role": self.role}
+
+ # Handle content blocks (text, images, etc.) - delegate to ContentBlockList
+ result["content"] = self.content.to_litellm_format(role=self.role)
+
+ if self.tool_calls:
+ result["tool_calls"] = [
+ {
+ "id": tc.id,
+ "type": tc.type,
+ "function": {
+ "name": tc.name,
+ "arguments": json.dumps(tc.arguments) if tc.arguments else "{}"
+ }
+ }
+ for tc in self.tool_calls
+ ]
+
+ return result
+
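+# Hedged sketch of constructing an AssistantTurn from a raw provider response;
+# `response` stands for whatever the provider SDK returned (the format is
+# detected by autocast()).
+#
+#     turn = AssistantTurn(response)
+#     turn.to_text()          # concatenated text, images shown as placeholders
+#     turn.get_images()       # ContentBlockList of ImageContent blocks
+#     turn.prompt_tokens      # usage fields, when the provider reports them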
+
+@dataclass
+class ConversationHistory:
+ """Manages conversation history across multiple turns using LiteLLM unified format"""
+ turns: List[Union[UserTurn, AssistantTurn]] = field(default_factory=list)
+ system_prompt: Optional[str] = None
+ protected_rounds: int = 0 # Initial rounds to never truncate (task definition)
+
+ def add_user_turn(self, turn: UserTurn) -> 'ConversationHistory':
+ """Add a user turn"""
+ self.turns.append(turn)
+ return self
+
+ def add_assistant_turn(self, turn: AssistantTurn) -> 'ConversationHistory':
+ """Add an assistant turn"""
+ self.turns.append(turn)
+ return self
+
+ def get_last_user_turn(self) -> Optional[UserTurn]:
+ """Get the most recent user turn"""
+ for turn in reversed(self.turns):
+ if isinstance(turn, UserTurn):
+ return turn
+ return None
+
+ def get_last_assistant_turn(self) -> Optional[AssistantTurn]:
+ """Get the most recent assistant turn"""
+ for turn in reversed(self.turns):
+ if isinstance(turn, AssistantTurn):
+ return turn
+ return None
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary format"""
+ return {
+ "system_prompt": self.system_prompt,
+ "protected_rounds": self.protected_rounds,
+ "turns": [turn.to_dict() for turn in self.turns]
+ }
+
+ def to_litellm_format(
+ self,
+ n: int = -1,
+ truncate_strategy: Literal["from_start", "from_end"] = "from_start",
+ protected_rounds: Optional[int] = None
+ ) -> List[Dict[str, Any]]:
+ """
+ Convert to LiteLLM messages format (OpenAI-compatible, works with all providers)
+
+ Args:
+ n: Number of historical rounds (user+assistant pairs) to include.
+ -1 means all history (default: -1).
+ The current (potentially incomplete) round is always included.
+ truncate_strategy: How to truncate when n is specified:
+ - "from_start": Remove oldest rounds, keep the most recent n rounds (default)
+ - "from_end": Remove newest rounds, keep the oldest n rounds
+ protected_rounds: Number of initial rounds to never truncate (task definition).
+ If None, uses self.protected_rounds. These rounds count towards n, so
+ if n=5 and protected_rounds=1, you get 1 protected + 4 truncatable rounds.
+
+ Returns:
+ List of message dictionaries in LiteLLM format
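+
+        Example (illustrative, assuming turns were added beforehand):
+            # keep the 1 protected round plus the 4 most recent rounds
+            messages = history.to_litellm_format(n=5, protected_rounds=1)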
+ """
+ # Determine protected rounds
+ n_protected = protected_rounds if protected_rounds is not None else self.protected_rounds
+ protected_turns = n_protected * 2 # Each round = user + assistant
+
+ # Apply truncation to turns
+ if n == -1:
+ selected_turns = self.turns
+ else:
+ # Protected rounds count towards N
+ # So if N=5 and protected_rounds=1, we keep 1 protected + 4 from truncatable
+ remaining_rounds = max(0, n - n_protected)
+
+ # Split into protected and truncatable turns
+ protected_part = self.turns[:protected_turns]
+ truncatable_part = self.turns[protected_turns:]
+
+ # remaining_rounds = number of rounds (pairs) from the truncatable part
+ # Each round = 2 turns (user + assistant)
+ # Plus include current incomplete round (if last turn is user, +1)
+ has_incomplete_round = len(truncatable_part) > 0 and isinstance(truncatable_part[-1], UserTurn)
+ n_turns = remaining_rounds * 2 + (1 if has_incomplete_round else 0)
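+            # Illustrative: n=5 with n_protected=1 gives remaining_rounds=4, so
+            # n_turns=8 (or 9 if the latest user turn has no assistant reply yet)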
+
+ if truncate_strategy == "from_start":
+ # Keep last n_turns from truncatable part (remove from start)
+ truncated_part = truncatable_part[-n_turns:] if n_turns > 0 else []
+ elif truncate_strategy == "from_end":
+ # Keep first n_turns from truncatable part (remove from end)
+ truncated_part = truncatable_part[:n_turns] if n_turns > 0 else []
+ else:
+ raise ValueError(f"Unknown truncate_strategy: {truncate_strategy}. Use 'from_start' or 'from_end'")
+
+ # Combine protected + truncated
+ selected_turns = protected_part + truncated_part
+
+ messages = []
+
+ if self.system_prompt:
+ messages.append({"role": "system", "content": self.system_prompt})
+
+ for turn in selected_turns:
+ messages.append(turn.to_litellm_format())
+
+ # Add tool results as separate messages in LiteLLM/OpenAI format
+ if isinstance(turn, AssistantTurn) and turn.tool_results:
+ for result in turn.tool_results:
+ messages.append({
+ "role": "tool",
+ "tool_call_id": result.tool_call_id,
+ "content": result.content
+ })
+
+ return messages
+
+ def to_messages(
+ self,
+ n: int = -1,
+ truncate_strategy: Literal["from_start", "from_end"] = "from_start",
+ protected_rounds: Optional[int] = None,
+ model_name: Optional[str] = None
+ ) -> List[Dict[str, Any]]:
+ """
+ Smart message format conversion that auto-detects the appropriate format.
+
+ This method automatically chooses between Gemini format and LiteLLM format based on
+ the model name. Detection priority:
+ 1. If model_name argument is provided and contains "gemini", uses Gemini format
+ 2. Otherwise, checks if any AssistantTurn has a model name containing "gemini"
+ 3. If no Gemini model detected, uses LiteLLM format (default)
+
+ Note: This detection may not work for custom LLM backends with Gemini model names.
+ In such cases, call to_gemini_format() or to_litellm_format() explicitly.
+
+ Args:
+ n: Number of historical rounds (user+assistant pairs) to include.
+ -1 means all history (default: -1).
+ The current (potentially incomplete) round is always included.
+ truncate_strategy: How to truncate when n is specified:
+ - "from_start": Remove oldest rounds, keep the most recent n rounds (default)
+ - "from_end": Remove newest rounds, keep the oldest n rounds
+ protected_rounds: Number of initial rounds to never truncate (task definition).
+ If None, uses self.protected_rounds. Counts towards n.
+ model_name: Optional model name to use for format detection. If provided and
+ contains "gemini" (case-insensitive), forces Gemini format.
+
+ Returns:
+ List of message dictionaries in the appropriate format
+
+ Example:
+ # Automatically uses Gemini format if model is Gemini
+ history = ConversationHistory()
+ history.system_prompt = "You are helpful."
+ history.add_user_turn(UserTurn().add_text("Hello"))
+
+ # Force Gemini format by providing model name
+ messages = history.to_messages(model_name="gemini-2.5-flash")
+
+ # Or be explicit:
+ messages = history.to_gemini_format() # Force Gemini format
+ messages = history.to_litellm_format() # Force LiteLLM format
+ """
+ # Check if model_name argument indicates Gemini (highest priority)
+ use_gemini_format = False
+ if model_name and 'gemini' in model_name.lower():
+ use_gemini_format = True
+ else:
+ # Check if any AssistantTurn has a Gemini model
+ for turn in self.turns:
+ if isinstance(turn, AssistantTurn) and turn.model:
+ if 'gemini' in turn.model.lower():
+ use_gemini_format = True
+ break
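+                    # e.g. (illustrative) a prior assistant turn recorded with
+                    # model="gemini-2.5-pro" switches the whole history to Gemini format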
+
+ # Use the appropriate format
+ if use_gemini_format:
+ return self.to_gemini_format(
+ n=n,
+ truncate_strategy=truncate_strategy,
+ protected_rounds=protected_rounds
+ )
+ else:
+ return self.to_litellm_format(
+ n=n,
+ truncate_strategy=truncate_strategy,
+ protected_rounds=protected_rounds
+ )
+
+ def to_gemini_format(
+ self,
+ n: int = -1,
+ truncate_strategy: Literal["from_start", "from_end"] = "from_start",
+ protected_rounds: Optional[int] = None
+ ) -> List[Dict[str, Any]]:
+ """
+ Convert to Google Gemini format (messages with 'model' role instead of 'assistant')
+
+ This method converts the conversation history to a format compatible with Google's
+ Gemini API. The main differences from LiteLLM format are:
+ - Uses 'model' instead of 'assistant' for role names
+ - Content is structured as 'parts' (list of text/image parts)
+ - System message (if present) remains as first message with role='system'
+
+ The GoogleGenAILLM class will extract the system message and convert it to
+ system_instruction when making the API call.
+
+ Args:
+ n: Number of historical rounds (user+assistant pairs) to include.
+ -1 means all history (default: -1).
+ The current (potentially incomplete) round is always included.
+ truncate_strategy: How to truncate when n is specified:
+ - "from_start": Remove oldest rounds, keep the most recent n rounds (default)
+ - "from_end": Remove newest rounds, keep the oldest n rounds
+ protected_rounds: Number of initial rounds to never truncate (task definition).
+ If None, uses self.protected_rounds. These rounds count towards n.
+
+ Returns:
+ List of message dictionaries in Gemini format with 'role' and 'parts'.
+ System message (if present) is included as first message with role='system'.
+
+ Example:
+ from opto.utils.llm import LLM
+ from opto.utils.backbone import ConversationHistory, UserTurn
+
+ # Create conversation
+ history = ConversationHistory()
+ history.system_prompt = "You are a helpful assistant."
+ history.add_user_turn(UserTurn().add_text("Hello!"))
+
+ # Convert to Gemini format
+ messages = history.to_gemini_format()
+
+ # Use with GoogleGenAILLM
+ llm = LLM(model="gemini-2.5-flash")
+ response = llm(messages=messages)
+ """
+ # Get the LiteLLM format messages first (handles truncation logic)
+ litellm_messages = self.to_litellm_format(
+ n=n,
+ truncate_strategy=truncate_strategy,
+ protected_rounds=protected_rounds
+ )
+
+ # Convert messages to Google GenAI format
+ gemini_messages = []
+
+ for msg in litellm_messages:
+ role = msg.get('role')
+ content = msg.get('content')
+
+ # Keep system messages as-is (will be extracted by GoogleGenAILLM)
+ if role == 'system':
+ gemini_messages.append({'role': 'system', 'content': content})
+ continue
+
+ # Map roles: user -> user, assistant -> model
+ if role == 'assistant':
+ role = 'model'
+ elif role == 'tool':
+ # Skip tool messages for now - Gemini handles these differently
+ # TODO: Handle tool results properly if needed
+ continue
+
+ # Handle content (can be string or list of content blocks)
+ if isinstance(content, str):
+ gemini_messages.append({'role': role, 'parts': [{'text': content}]})
+ elif isinstance(content, list):
+ # Convert content blocks to parts
+ parts = []
+ for block in content:
+ if block.get('type') == 'text':
+ parts.append({'text': block.get('text', '')})
+ elif block.get('type') == 'image':
+ # Handle image URLs
+ image_url = block.get('image_url', '')
+ if image_url.startswith('data:'):
+ # Extract base64 data
+ import re
+ match = re.match(r'data:([^;]+);base64,(.+)', image_url)
+ if match:
+ mime_type, data = match.groups()
+ parts.append({'inline_data': {'mime_type': mime_type, 'data': data}})
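+                                # e.g. (illustrative) "data:image/png;base64,iVBOR..." becomes
+                                # {'inline_data': {'mime_type': 'image/png', 'data': 'iVBOR...'}}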
+ else:
+ # External URL
+ parts.append({'file_data': {'file_uri': image_url}})
+ if parts:
+ gemini_messages.append({'role': role, 'parts': parts})
+
+ return gemini_messages
+
+ def save_to_file(self, filepath: str):
+ """Save conversation history to JSON file"""
+ with open(filepath, 'w') as f:
+ json.dump(self.to_dict(), f, indent=2)
+
+ @classmethod
+ def load_from_file(cls, filepath: str) -> 'ConversationHistory':
+ """Load conversation history from JSON file"""
+ with open(filepath, 'r') as f:
+ data = json.load(f)
+
+ # This is a simplified loader - you'd want more robust deserialization
+ history = cls(
+ system_prompt=data.get('system_prompt'),
+ protected_rounds=data.get('protected_rounds', 0)
+ )
+
+        # Note: Full deserialization (reconstructing the turn objects from their
+        # dicts) is not implemented here; only system_prompt and protected_rounds
+        # are restored.
+
+ return history
+
+ def clear(self):
+ """Clear all turns from history"""
+ self.turns.clear()
+
+ def get_token_count_estimate(self) -> int:
+ """Rough estimate of token count (actual count requires tokenizer)"""
+ total = 0
+ for turn in self.turns:
+ if isinstance(turn, (UserTurn, AssistantTurn)):
+ for block in turn.content:
+ if isinstance(block, TextContent):
+ # Very rough estimate: ~4 chars per token
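+                        # (illustrative: a 400-character block counts as ~100 tokens)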
+ total += len(block.text) // 4
+ return total
\ No newline at end of file
diff --git a/opto/utils/llm.py b/opto/utils/llm.py
index b6fbd4fe..ec42ce59 100644
--- a/opto/utils/llm.py
+++ b/opto/utils/llm.py
@@ -1,4 +1,12 @@
-from typing import List, Tuple, Dict, Any, Callable, Union
+"""
+When MM (multimodal) is enabled, we primarily use either:
+1. LiteLLM's Responses API, or
+2. Google's Interactions API (not supported by LiteLLM's Responses API at all).
+When MM is disabled, we use LiteLLM's Completion API for backward compatibility.
+"""
+
+from typing import List, Tuple, Dict, Any, Callable, Union, Optional
import os
import time
import json
@@ -6,11 +14,37 @@
import warnings
from .auto_retry import retry_with_exponential_backoff
+import openai
+from google import genai
+from google.genai import types
+
+# Import AssistantTurn and related types for mm_beta mode
+from .backbone import AssistantTurn, TextContent, ImageContent, ToolCall, ToolResult
+
try:
import autogen # We import autogen here to avoid the need of installing autogen
except ImportError:
pass
+
+def _is_image_generation_model(model_name: str) -> bool:
+ """Detect if a model is for image generation based on its name.
+
+ Detects:
+ - OpenAI: gpt-image-1, gpt-image-1.5, gpt-image-1-mini, dall-e-2, dall-e-3
+ - Gemini: gemini-2.5-flash-image, gemini-2.5-pro-image, etc.
+
+ Args:
+ model_name: The name of the model to check
+
+ Returns:
+ bool: True if the model is an image generation model, False otherwise
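+
+    Examples (illustrative):
+        >>> _is_image_generation_model("dall-e-3")
+        True
+        >>> _is_image_generation_model("gpt-4o")
+        False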
+ """
+ if model_name is None:
+ return False
+ model_lower = model_name.lower()
+ return 'image' in model_lower or 'dall-e' in model_lower
+
class AbstractModel:
"""Abstract base class for LLM model wrappers with automatic refreshing.
@@ -24,6 +58,12 @@ class AbstractModel:
reset_freq : int or None, optional
Number of seconds after which to refresh the model. If None, the model
is never refreshed.
+ mm_beta : bool, optional
+ If True, returns AssistantTurn objects with rich multimodal content.
+ If False (default), returns raw API responses in legacy format.
+ model_name : str or None, optional
+ The name of the model being used (e.g., "gpt-4o", "claude-3-5-sonnet-latest").
+ If None, no model name is stored.
Attributes
----------
@@ -31,13 +71,21 @@ class AbstractModel:
The factory function for creating model instances.
reset_freq : int or None
Refresh frequency in seconds.
+ mm_beta : bool
+ Whether to use multimodal beta mode.
+ model_name : str or None
+ The name of the model being used.
+ is_image_model : bool
+ Whether the model is for image generation (auto-detected from model name).
+
model : Any
Property that returns the current model instance.
Methods
-------
__call__(*args, **kwargs)
- Execute the model, refreshing if needed.
+ Execute the model, refreshing if needed. Returns AssistantTurn if mm_beta=True,
+ otherwise returns raw API response.
Notes
-----
@@ -45,8 +93,9 @@ class AbstractModel:
1. **Automatic Refreshing**: Recreates the model instance periodically
to prevent issues with long-running connections.
2. **Serialization**: Supports pickling by recreating the model on load.
- 3. **Consistent Interface**: Ensures responses are available at
- `response['choices'][0]['message']['content']`.
+ 3. **Response Formats**:
+ - Legacy (mm_beta=False): `response['choices'][0]['message']['content']`
+ - Multimodal (mm_beta=True): AssistantTurn object with .content, .tool_calls, etc.
Subclasses should override the `model` property to customize behavior.
@@ -56,32 +105,58 @@ class AbstractModel:
LiteLLM : Concrete implementation using LiteLLM
"""
- def __init__(self, factory: Callable, reset_freq: Union[int, None] = None) -> None:
+ def __init__(self, factory: Callable, reset_freq: Union[int, None] = None,
+ mm_beta: bool = False, model_name: Union[str, None] = None) -> None:
"""
Args:
factory: A function that takes no arguments and returns a model that is callable.
reset_freq: The number of seconds after which the model should be
refreshed. If None, the model is never refreshed.
+ mm_beta: If True, returns AssistantTurn objects with rich multimodal content.
+ If False (default), returns raw API responses in legacy format.
+ model_name: The name of the model being used (e.g., "gpt-4o", "claude-3-5-sonnet-latest").
+ If None, no model name is stored.
"""
self.factory = factory
self._model = self.factory()
self.reset_freq = reset_freq
self._init_time = time.time()
+ self.mm_beta = mm_beta
+ self.model_name = model_name
# Overwrite this `model` property when subclassing.
@property
def model(self):
"""When self.model is called, text responses should always be available at `response['choices'][0]['message']['content']`"""
return self._model
+
+ @property
+ def is_image_model(self) -> bool:
+ """Check if this model is for image generation based on model name.
+
+ Returns True if the model name contains 'image' or 'dall-e', False otherwise.
+ """
+ return _is_image_generation_model(self.model_name)
# This is the main API
def __call__(self, *args, **kwargs) -> Any:
""" The call function handles refreshing the model if needed.
+
+ Returns:
+ If mm_beta=False: Raw completion API response (backward compatible)
+ If mm_beta=True: AssistantTurn object with parsed multimodal content
"""
if self.reset_freq is not None and time.time() - self._init_time > self.reset_freq:
self._model = self.factory()
self._init_time = time.time()
- return self.model(*args, **kwargs)
+
+ response = self.model(*args, **kwargs)
+
+ # Parse to AssistantTurn if mm_beta mode is enabled
+ if self.mm_beta:
+ return AssistantTurn(response)
+
+ return response
def __getstate__(self):
state = self.__dict__.copy()
@@ -151,7 +226,8 @@ class AutoGenLLM(AbstractModel):
>>> response = llm(messages=[{"role": "user", "content": "Hello"}])
"""
- def __init__(self, config_list: List = None, filter_dict: Dict = None, reset_freq: Union[int, None] = None) -> None:
+ def __init__(self, config_list: List = None, filter_dict: Dict = None,
+ reset_freq: Union[int, None] = None, mm_beta: bool = False) -> None:
if config_list is None:
try:
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
@@ -163,8 +239,11 @@ def __init__(self, config_list: List = None, filter_dict: Dict = None, reset_fre
if filter_dict is not None:
config_list = autogen.filter_config(config_list, filter_dict)
+ # Extract model name from config_list if available
+        model_name = config_list[0].get('model') if config_list else None
+
factory = lambda *args, **kwargs: self._factory(config_list)
- super().__init__(factory, reset_freq)
+ super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model_name)
@classmethod
def _factory(cls, config_list):
@@ -243,16 +322,40 @@ class LiteLLM(AbstractModel):
This is an LLM backend supported by LiteLLM library.
https://docs.litellm.ai/docs/completion/input
+ https://docs.litellm.ai/docs/response_api
+ https://docs.litellm.ai/docs/image_generation
To use this, set the credentials through the environment variable as
instructed in the LiteLLM documentation. For convenience, you can set the
default model name through the environment variable TRACE_LITELLM_MODEL.
When using Azure models via token provider, you can set the Azure token
provider scope through the environment variable AZURE_TOKEN_PROVIDER_SCOPE.
+
+ This class now supports storing default completion parameters (like temperature,
+ top_p, max_tokens, etc.) that will be used for all calls unless overridden.
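+    For example (illustrative), LiteLLM(model="gpt-4o-mini", temperature=0.2)
+    applies temperature=0.2 to every call unless a per-call value overrides it.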
+
+ Text Generation:
+ When mm_beta=True, the Responses API is used for rich multimodal content.
+ When mm_beta=False (default), the Completion API is used for backward compatibility.
+
+ See: https://docs.litellm.ai/docs/response_api
+
+ Image Generation:
+ Automatically detects image generation models (containing 'image' or 'dall-e' in name).
+ Uses litellm.image_generation() API for models like:
+ - gpt-image-1, gpt-image-1.5, gpt-image-1-mini
+ - dall-e-2, dall-e-3
+
+ Image models require a single string prompt:
+ llm = LLM(model="gpt-image-1.5")
+ result = llm(prompt="A serene mountain landscape")
+
+ Check llm.is_image_model to determine if a model is for image generation.
"""
def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
- cache=True, max_retries=10, base_delay=1.0) -> None:
+ cache=True, max_retries=10, base_delay=1.0,
+ mm_beta: bool = False, **default_params) -> None:
if model is None:
model = os.environ.get('TRACE_LITELLM_MODEL')
if model is None:
@@ -261,38 +364,116 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None]
self.model_name = model
self.cache = cache
- factory = lambda: self._factory(self.model_name, max_retries=max_retries, base_delay=base_delay) # an LLM instance uses a fixed model
- super().__init__(factory, reset_freq)
+ self.default_params = default_params # Store default completion parameters
+
+ factory = lambda: self._factory(
+ self.model_name,
+ self.default_params,
+ mm_beta,
+ max_retries=max_retries,
+ base_delay=base_delay
+ )
+ super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model)
@classmethod
- def _factory(cls, model_name: str, max_retries=10, base_delay=1.0):
+ def _factory(cls, model_name: str, default_params: dict, mm_beta: bool,
+ max_retries=10, base_delay=1.0):
import litellm
+
+ # Check if this is an image generation model
+ is_image_model = _is_image_generation_model(model_name)
+
+ if is_image_model:
+ # Image generation API
+ api_func = litellm.image_generation
+ operation_name = "LiteLLM_image_generation"
+
+ # Standard image generation wrapper
+ def image_wrapper(prompt, **kwargs):
+ assert isinstance(prompt, str), (
+ f"Image generation requires a single string prompt. "
+ f"Got {type(prompt).__name__}. "
+ f"Usage: llm(prompt='your prompt here')"
+ )
+ return retry_with_exponential_backoff(
+ lambda: api_func(model=model_name, prompt=prompt, **{**default_params, **kwargs}),
+ max_retries=max_retries,
+ base_delay=base_delay,
+ operation_name=operation_name
+ )
+ return image_wrapper
+
+ # Use Responses API when mm_beta=True, otherwise use Completion API
+ api_func = litellm.responses if mm_beta else litellm.completion
+ operation_name = "LiteLLM_responses" if mm_beta else "LiteLLM_completion"
+
if model_name.startswith('azure/'): # azure model
azure_token_provider_scope = os.environ.get('AZURE_TOKEN_PROVIDER_SCOPE', None)
if azure_token_provider_scope is not None:
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
credential = get_bearer_token_provider(DefaultAzureCredential(), azure_token_provider_scope)
- return lambda *args, **kwargs: retry_with_exponential_backoff(
- lambda: litellm.completion(model_name, *args,
- azure_ad_token_provider=credential, **kwargs),
+ if mm_beta:
+ # Responses API: model as keyword argument, convert messages to input
+ def azure_responses_wrapper(*args, **kwargs):
+ # Convert 'messages' to 'input' for Responses API
+ if 'messages' in kwargs and 'input' not in kwargs:
+ kwargs['input'] = kwargs.pop('messages')
+ return retry_with_exponential_backoff(
+ lambda: api_func(model=model_name,
+ azure_ad_token_provider=credential, **{**default_params, **kwargs}),
+ max_retries=max_retries,
+ base_delay=base_delay,
+ operation_name=operation_name
+ )
+ return azure_responses_wrapper
+ else:
+ # Completion API: model as positional argument
+ return lambda *args, **kwargs: retry_with_exponential_backoff(
+ lambda: api_func(model_name, *args,
+ azure_ad_token_provider=credential, **{**default_params, **kwargs}),
+ max_retries=max_retries,
+ base_delay=base_delay,
+ operation_name=operation_name
+ )
+
+ if mm_beta:
+ # Responses API: model as keyword argument, convert messages to input
+ def responses_wrapper(*args, **kwargs):
+ # Convert 'messages' to 'input' for Responses API
+ if 'messages' in kwargs and 'input' not in kwargs:
+ kwargs['input'] = kwargs.pop('messages')
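+                    # e.g. (illustrative) llm(messages=[...]) is forwarded as
+                    # litellm.responses(model=..., input=[...])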
+ return retry_with_exponential_backoff(
+ lambda: api_func(model=model_name, **{**default_params, **kwargs}),
max_retries=max_retries,
base_delay=base_delay,
- operation_name="LiteLLM_completion"
+ operation_name=operation_name
)
- return lambda *args, **kwargs: retry_with_exponential_backoff(
- lambda: litellm.completion(model_name, *args, **kwargs),
- max_retries=max_retries,
- base_delay=base_delay,
- operation_name="LiteLLM_completion"
- )
+ return responses_wrapper
+ else:
+ # Completion API: model as positional argument
+ return lambda *args, **kwargs: retry_with_exponential_backoff(
+ lambda: api_func(model_name, *args, **{**default_params, **kwargs}),
+ max_retries=max_retries,
+ base_delay=base_delay,
+ operation_name=operation_name
+ )
@property
def model(self):
"""
- response = litellm.completion(
- model=self.model,
- messages=[{"content": message, "role": "user"}]
- )
+ Calls either litellm.completion() or litellm.responses() depending on mm_beta.
+
+ For completion API (mm_beta=False):
+ response = litellm.completion(
+ model=self.model,
+ messages=[{"content": message, "role": "user"}]
+ )
+
+ For responses API (mm_beta=True):
+ response = litellm.responses(
+ model=self.model,
+ input="Your input text"
+ )
"""
return lambda *args, **kwargs: self._model(*args, **kwargs)
@@ -304,7 +485,7 @@ class CustomLLM(AbstractModel):
"""
def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
- cache=True) -> None:
+ cache=True, mm_beta: bool = False) -> None:
if model is None:
model = os.environ.get('TRACE_CUSTOMLLM_MODEL', 'gpt-4o')
base_url = os.environ.get('TRACE_CUSTOMLLM_URL', 'http://xx.xx.xxx.xx:4000/')
@@ -315,7 +496,7 @@ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None]
self.model_name = model
self.cache = cache
factory = lambda: self._factory(base_url, server_api_key) # an LLM instance uses a fixed model
- super().__init__(factory, reset_freq)
+ super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model)
@classmethod
def _factory(cls, base_url: str, server_api_key: str):
@@ -332,83 +513,414 @@ def create(self, **config: Any):
config['model'] = self.model_name
return self._model.chat.completions.create(**config)
+class GoogleGenAILLM(AbstractModel):
+ """
+ This is an LLM backend using Google's GenAI SDK with the Interactions API.
+
+ https://ai.google.dev/gemini-api/docs/text-generation
+ https://ai.google.dev/gemini-api/docs/image-generation
+
+ The Interactions API is a unified interface for interacting with Gemini models,
+ similar to OpenAI's Response API. It provides better state management, tool
+ orchestration, and support for long-running tasks.
+
+ To use this, set the GEMINI_API_KEY environment variable with your API key.
+ For convenience, you can set the default model name through the environment
+ variable TRACE_GOOGLE_GENAI_MODEL.
+
+ Supported models:
+ - Text: gemini-2.5-flash, gemini-2.5-pro, gemini-2.5-flash-lite
+ - Image: gemini-2.5-flash-image, gemini-2.5-pro-image
+
+ This class supports storing default generation parameters (like temperature,
+ max_output_tokens, etc.) that will be used for all calls unless overridden.
+
+ Text Generation:
+ Use ConversationHistory.to_gemini_format() to convert conversation history
+ to the format expected by Google GenAI.
+
+ Example:
+ from opto.utils.llm import LLM
+ from opto.utils.backbone import ConversationHistory, UserTurn, AssistantTurn
+
+ # Initialize LLM
+ llm = LLM(model="gemini-2.5-flash")
+
+ # Create conversation history
+ history = ConversationHistory()
+ history.system_prompt = "You are a helpful assistant."
+ history.add_user_turn(UserTurn().add_text("What is AI?"))
+
+ # Convert to Gemini format and call LLM
+ messages = history.to_gemini_format()
+ response = llm(messages=messages, max_tokens=100)
+
+ # Parse response
+ at = AssistantTurn(response)
+ print(at.get_text())
+
+ Image Generation:
+ Automatically detects image generation models (containing 'image' in name).
+ Uses client.models.generate_images() API for models like gemini-2.5-flash-image.
+
+ Image models require a single string prompt:
+ llm = LLM(model="gemini-2.5-flash-image")
+ result = llm(prompt="A serene mountain landscape", number_of_images=2)
+
+ Check llm.is_image_model to determine if a model is for image generation.
+ """
+
+ def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
+ cache=True, mm_beta: bool = False, **default_params) -> None:
+ if model is None:
+ model = os.environ.get('TRACE_GOOGLE_GENAI_MODEL', 'gemini-2.5-flash')
+
+ self.model_name = model
+ self.cache = cache
+ self.default_params = default_params # Store default generation parameters
+ factory = lambda: self._factory(self.model_name, self.default_params)
+ super().__init__(factory, reset_freq, mm_beta=mm_beta, model_name=model)
+
+ @classmethod
+ def _factory(cls, model_name: str, default_params: dict):
+ """Create a Google GenAI client wrapper using the Interactions API."""
+ # Get API key from environment variable
+ api_key = os.environ.get('GEMINI_API_KEY')
+ if api_key:
+ client = genai.Client(api_key=api_key)
+ else:
+ # Try without API key (will use default credentials or fail gracefully)
+ client = genai.Client()
+
+ # Check if this is an image generation model
+ is_image_model = _is_image_generation_model(model_name)
+
+ if is_image_model:
+ # Image generation for Gemini
+ def image_api_func(prompt, **kwargs):
+ assert isinstance(prompt, str), (
+ f"Image generation requires a single string prompt. "
+ f"Got {type(prompt).__name__}. "
+ f"Usage: llm(prompt='your prompt here')"
+ )
+
+ # Gemini image generation API
+ # https://ai.google.dev/gemini-api/docs/image-generation
+ # Filter kwargs to only valid parameters for generate_images
+ valid_params = {
+ k: v for k, v in kwargs.items()
+ if k in ['number_of_images', 'aspect_ratio', 'safety_filter_level']
+ }
+ response = client.models.generate_images(
+ model=model_name,
+ prompt=prompt,
+ **valid_params
+ )
+ return response
+
+ return lambda *args, **kwargs: retry_with_exponential_backoff(
+ lambda: image_api_func(*args, **{**default_params, **kwargs}),
+ max_retries=5,
+ base_delay=1,
+ operation_name=f"{model_name}_image_gen"
+ )
+
+ # Build config if there are generation parameters
+ config_params = {}
+
+ # Handle thinking config for Gemini 2.5+ models
+ if 'thinking_budget' in default_params:
+ thinking_budget = default_params.pop('thinking_budget')
+ config_params['thinking_config'] = types.ThinkingConfig(
+ thinking_budget=thinking_budget
+ )
+
+ def api_func(model_name, *args, **kwargs):
+ # Extract system_instruction if present (needs to be at config level, not in kwargs)
+ system_instruction = kwargs.pop('system_instruction', None)
+
+ # Handle messages parameter (from history.to_gemini_format())
+ messages = kwargs.pop('messages', None)
+ contents = kwargs.pop('contents', None)
+
+ if messages:
+ # Extract system message if present and not explicitly overridden
+ if messages and messages[0].get('role') == 'system':
+ if system_instruction is None:
+ system_instruction = messages[0].get('content')
+ # Remove system message from contents
+ contents = messages[1:]
+ else:
+ contents = messages
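+            # Illustrative: [{'role': 'system', 'content': S}, {'role': 'user', ...}]
+            # yields system_instruction=S and contents=[{'role': 'user', ...}]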
+
+ # Use contents if provided, otherwise use positional args
+ contents_to_use = contents if contents is not None else (args[0] if args else None)
+
+ # Map max_tokens to max_output_tokens for Google GenAI
+ if 'max_tokens' in kwargs:
+ kwargs['max_output_tokens'] = kwargs.pop('max_tokens')
+
+ # Remove any other parameters that shouldn't go to GenerateContentConfig
+ # Keep only valid config parameters
+ valid_config_params = {
+ 'temperature', 'max_output_tokens', 'top_p', 'top_k',
+ 'stop_sequences', 'candidate_count', 'presence_penalty',
+ 'frequency_penalty', 'response_mime_type', 'response_schema'
+ }
+ config_kwargs = {k: v for k, v in kwargs.items() if k in valid_config_params}
+
+ if system_instruction:
+ config_params_with_system = {**config_params, 'system_instruction': system_instruction}
+ else:
+ config_params_with_system = config_params
+
+ response = client.models.generate_content(
+ model=model_name,
+ contents=contents_to_use,
+ config=types.GenerateContentConfig(**{**config_params_with_system, **config_kwargs})
+ )
+
+ return response
+
+ return lambda *args, **kwargs: retry_with_exponential_backoff(
+ lambda: api_func(model_name, *args, **{**default_params, **kwargs}),
+ max_retries=5,
+ base_delay=1,
+ operation_name=f"{model_name}"
+ )
+
+ @property
+ def model(self):
+ """
+ Wrapper that injects the model name into calls.
+
+ Example:
+ response = llm(contents="How does AI work?")
+ """
+ return lambda *args, **kwargs: self._model(model=self.model_name, *args, **kwargs)
+
# Registry of available backends
_LLM_REGISTRY = {
"LiteLLM": LiteLLM,
"AutoGen": AutoGenLLM,
"CustomLLM": CustomLLM,
+ "GoogleGenAI": GoogleGenAILLM,
}
class LLMFactory:
- """Factory for creating LLM instances with predefined profiles.
-
- The code comes with these built-in profiles:
-
- llm_default = LLM(profile="default") # gpt-4o-mini
- llm_premium = LLM(profile="premium") # gpt-4
- llm_cheap = LLM(profile="cheap") # gpt-4o-mini
- llm_fast = LLM(profile="fast") # gpt-3.5-turbo-mini
- llm_reasoning = LLM(profile="reasoning") # o1-mini
-
- You can override those built-in profiles:
-
- LLMFactory.register_profile("default", "LiteLLM", model="gpt-4o", temperature=0.5)
- LLMFactory.register_profile("premium", "LiteLLM", model="o1-preview", max_tokens=8000)
- LLMFactory.register_profile("cheap", "LiteLLM", model="gpt-3.5-turbo", temperature=0.9)
- LLMFactory.register_profile("fast", "LiteLLM", model="gpt-3.5-turbo", max_tokens=500)
- LLMFactory.register_profile("reasoning", "LiteLLM", model="o1-preview")
-
- An Example of using Different Backends
-
- # Register custom profiles for different use cases
- LLMFactory.register_profile("advanced_reasoning", "LiteLLM", model="o1-preview", max_tokens=4000)
- LLMFactory.register_profile("claude_sonnet", "LiteLLM", model="claude-3-5-sonnet-latest", temperature=0.3)
- LLMFactory.register_profile("custom_server", "CustomLLM", model="llama-3.1-8b")
-
- # Use in different contexts
- reasoning_llm = LLM(profile="advanced_reasoning") # For complex reasoning
- claude_llm = LLM(profile="claude_sonnet") # For Claude responses
- local_llm = LLM(profile="custom_server") # For local deployment
-
- # Single LLM optimizer with custom profile
- optimizer1 = OptoPrime(parameters, llm=LLM(profile="advanced_reasoning"))
+ """Factory for creating LLM instances with named profiles.
+
+ Profiles allow you to save and reuse LLM configurations with specific settings.
+ Each profile can include any LiteLLM-supported parameters like model, temperature,
+ top_p, max_tokens, etc.
+
+ The default profile uses 'gpt-4o-mini' with standard settings.
+
+ Basic Usage:
+ # Use default model (gpt-4o-mini)
+ llm = LLM()
+
+ # Specify a model directly
+ llm = LLM(model="gpt-4o")
+
+ # Use a named profile
+ llm = LLM(profile="my_profile")
+
+ Creating Custom Profiles:
+ # Register a profile with full LiteLLM configuration
+ LLMFactory.create_profile(
+ "creative_writer",
+ backend="LiteLLM",
+ model="gpt-4o",
+ temperature=0.9,
+ top_p=0.95,
+ max_tokens=2000,
+ presence_penalty=0.6
+ )
+
+ # Register a reasoning profile
+ LLMFactory.create_profile(
+ "deep_thinker",
+ backend="LiteLLM",
+ model="o1-preview",
+ max_completion_tokens=8000
+ )
+
+ # Register a profile with specific formatting
+ LLMFactory.create_profile(
+ "json_responder",
+ backend="LiteLLM",
+ model="gpt-4o-mini",
+ temperature=0.3,
+ response_format={"type": "json_object"}
+ )
- # Multi-LLM optimizer with multiple profiles
- optimizer2 = OptoPrimeMulti(parameters, llm_profiles=["cheap", "premium", "claude_sonnet"], generation_technique="multi_llm")
+ Using Profiles:
+ # Use your custom profile
+ llm = LLM(profile="creative_writer")
+
+ # In optimizers
+ optimizer = OptoPrime(parameters, llm=LLM(profile="deep_thinker"))
+
+ Profile Management:
+ # List all available profiles
+ profiles = LLMFactory.list_profiles()
+
+ # Get profile configuration
+ config = LLMFactory.get_profile_info("creative_writer")
+
+ # Override existing profile
+ LLMFactory.create_profile("default", "LiteLLM", model="gpt-4o", temperature=0.5)
+
+ Supported LiteLLM Parameters:
+ See https://docs.litellm.ai/docs/completion/input for full list:
+ - model: Model name (required)
+ - temperature: Sampling temperature (0-2)
+ - top_p: Nucleus sampling parameter
+ - max_tokens: Maximum tokens to generate
+ - max_completion_tokens: Upper bound for completion tokens
+ - presence_penalty: Penalize new tokens based on presence
+ - frequency_penalty: Penalize new tokens based on frequency
+ - stop: Stop sequences (string or list)
+ - stream: Enable streaming responses
+ - response_format: Output format specification
+ - seed: Deterministic sampling seed
+ - tools: Function calling tools
+ - tool_choice: Control function calling behavior
+ - logprobs: Return log probabilities
+ - top_logprobs: Number of most likely tokens to return
+ - n: Number of completions to generate
+ - and many more...
"""
- # Default profiles for different use cases
+ # Default profile - just gpt-4o-mini with no opinionated settings
_profiles = {
- 'default': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}},
- 'premium': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4'}},
- 'cheap': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}},
- 'fast': {'backend': 'LiteLLM', 'params': {'model': 'gpt-3.5-turbo-mini'}},
- 'reasoning': {'backend': 'LiteLLM', 'params': {'model': 'o1-mini'}},
+        'default': {'backend': 'LiteLLM', 'params': {'model': 'gpt-4o-mini'}},
}
-
@classmethod
- def get_llm(cls, profile: str = 'default') -> AbstractModel:
- """Get an LLM instance for the specified profile."""
+ def get_llm(cls, profile: str = 'default', model: str = None, mm_beta: bool = False, **kwargs) -> AbstractModel:
+ """Get an LLM instance for the specified profile or model.
+
+ Args:
+ profile: Name of the profile to use. Defaults to 'default'.
+ model: Model name to use directly. If provided, overrides profile.
+ mm_beta: If True, returns AssistantTurn objects with rich multimodal content.
+ If False (default), returns raw API responses in legacy format.
+ **kwargs: Additional parameters to pass to the backend (e.g., temperature, top_p).
+ These override profile settings if both are specified.
+
+ Returns:
+ An LLM instance configured according to the profile/model and parameters.
+
+ Examples:
+ # Use default profile
+ llm = LLMFactory.get_llm()
+
+ # Use specific model
+ llm = LLMFactory.get_llm(model="gpt-4o")
+
+ # Use named profile
+ llm = LLMFactory.get_llm(profile="creative_writer")
+
+ # Use model with custom parameters
+ llm = LLMFactory.get_llm(model="gpt-4o", temperature=0.7, max_tokens=1000)
+
+ # Override profile settings
+ llm = LLMFactory.get_llm(profile="creative_writer", temperature=0.5)
+
+ # Use mm_beta mode for multimodal responses
+ llm = LLMFactory.get_llm(model="gpt-4o", mm_beta=True)
+ """
+ # If model is specified directly, create a simple config
+ if model is not None:
+ backend = kwargs.pop('backend', None)
+
+ # Determine backend with priority: Gemini models > explicit backend > default
+ if model.startswith('gemini'):
+ # Gemini models use GoogleGenAILLM backend (highest priority)
+ backend_cls = _LLM_REGISTRY['GoogleGenAI']
+ # Strip 'gemini/' prefix if present (LiteLLM format: gemini/gemini-pro)
+ if model.startswith('gemini/'):
+ model = model[len('gemini/'):]
+ elif backend is not None:
+ # Explicit backend specified
+ backend_cls = _LLM_REGISTRY[backend]
+ else:
+ # Default to LiteLLM for other models
+ backend_cls = _LLM_REGISTRY['LiteLLM']
+
+ params = {'model': model, 'mm_beta': mm_beta, **kwargs}
+ return backend_cls(**params)
+ # Otherwise use profile
if profile not in cls._profiles:
- raise ValueError(f"Unknown profile '{profile}'. Available profiles: {list(cls._profiles.keys())}")
+ raise ValueError(
+ f"Unknown profile '{profile}'. Available profiles: {list(cls._profiles.keys())}. "
+ f"Use LLMFactory.create_profile() to create custom profiles, or pass model= directly."
+ )
- config = cls._profiles[profile]
+ config = cls._profiles[profile].copy()
backend_cls = _LLM_REGISTRY[config['backend']]
- return backend_cls(**config['params'])
+
+ # Merge profile params with any override kwargs
+ params = config['params'].copy()
+ params['mm_beta'] = mm_beta
+ params.update(kwargs)
+
+ return backend_cls(**params)
@classmethod
- def register_profile(cls, name: str, backend: str, **params):
- """Register a new LLM profile."""
+ def create_profile(cls, name: str, backend: str = 'LiteLLM', **params):
+ """Register a new LLM profile with custom configuration.
+
+ Args:
+ name: Profile name to register.
+            backend: Backend to use ('LiteLLM', 'AutoGen', 'CustomLLM', or 'GoogleGenAI'). Defaults to 'LiteLLM'.
+ **params: Configuration parameters for the backend. For LiteLLM, this can include
+ any parameters from https://docs.litellm.ai/docs/completion/input
+
+ Examples:
+ # Simple profile with just a model
+ LLMFactory.create_profile("gpt4", model="gpt-4o")
+
+ # Profile with temperature and token settings
+ LLMFactory.create_profile(
+ "creative",
+ model="gpt-4o",
+ temperature=0.9,
+ max_tokens=2000
+ )
+
+ # Profile with advanced settings
+ LLMFactory.create_profile(
+ "structured_json",
+ model="gpt-4o-mini",
+ temperature=0.3,
+ response_format={"type": "json_object"},
+ max_tokens=1500,
+ top_p=0.9
+ )
+ """
+ if backend not in _LLM_REGISTRY:
+ raise ValueError(
+ f"Unknown backend '{backend}'. Valid options: {list(_LLM_REGISTRY.keys())}"
+ )
cls._profiles[name] = {'backend': backend, 'params': params}
@classmethod
def list_profiles(cls):
- """List all available profiles."""
+ """List all available profile names."""
return list(cls._profiles.keys())
@classmethod
def get_profile_info(cls, profile: str = None):
- """Get information about a profile or all profiles."""
+ """Get configuration information about one or all profiles.
+
+ Args:
+ profile: Profile name to get info for. If None, returns all profiles.
+
+ Returns:
+ Dictionary with profile configuration(s).
+ """
if profile:
return cls._profiles.get(profile)
return cls._profiles
@@ -419,10 +931,12 @@ class DummyLLM(AbstractModel):
def __init__(self,
callable,
- reset_freq: Union[int, None] = None) -> None:
+ reset_freq: Union[int, None] = None,
+ mm_beta: bool = False,
+ model_name: Union[str, None] = None) -> None:
# self.message = message
self.callable = callable
- super().__init__(self._factory, reset_freq)
+ super().__init__(self._factory, reset_freq, mm_beta=mm_beta, model_name=model_name)
def _factory(self):
@@ -441,26 +955,183 @@ def __init__(self, content):
return lambda *args, **kwargs: Response(self.callable(*args, **kwargs))
-
class LLM:
"""
A unified entry point for all supported LLM backends.
- Usage:
- # pick by env var (default: LiteLLM)
- llm = LLM()
- # or override explicitly
- llm = LLM(backend="AutoGen", config_list=my_configs)
- # or use predefined profiles
- llm = LLM(profile="premium") # Use premium model
- llm = LLM(profile="cheap") # Use cheaper model
- llm = LLM(profile="reasoning") # Use reasoning/thinking model
+ The LLM class provides a simple interface for creating language model instances.
+ By default, it uses gpt-4o-mini through LiteLLM.
+
+ Basic Usage:
+ # Use default model (gpt-4o-mini)
+ llm = LLM()
+
+ # Specify a model directly
+ llm = LLM(model="gpt-4o")
+ llm = LLM(model="claude-3-5-sonnet-latest")
+ llm = LLM(model="o1-preview")
+
+ # Add LiteLLM parameters
+ llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=2000)
+ llm = LLM(model="gpt-4o-mini", temperature=0.3, top_p=0.9)
+
+ Image Generation:
+ # OpenAI image models (auto-detected by 'image' or 'dall-e' in name)
+ img_llm = LLM(model="gpt-image-1.5")
+ print(img_llm.is_image_model) # True
+ result = img_llm(prompt="A serene mountain landscape at sunset")
+
+ # With additional parameters
+ img_llm = LLM(model="gpt-image-1", size="1024x1024", quality="hd")
+ result = img_llm(prompt="A futuristic cityscape")
+
+ # DALL-E models
+ dalle = LLM(model="dall-e-3")
+ result = dalle(prompt="A cat astronaut in space", size="1024x1792")
+
+ # Gemini image models
+ gemini_img = LLM(model="gemini-2.5-flash-image")
+ result = gemini_img(prompt="Abstract art", number_of_images=2)
+
+ # Check if model generates images
+ if llm.is_image_model:
+ result = llm(prompt="Your prompt here")
+ else:
+ result = llm(messages=[{"role": "user", "content": "Your message"}])
+
+ Using Multimodal Beta Mode:
+ # Enable mm_beta for rich AssistantTurn responses
+ llm = LLM(model="gpt-4o", mm_beta=True)
+ response = llm(messages=[{"role": "user", "content": "Hello"}])
+ # response is now an AssistantTurn object with .content, .tool_calls, etc.
+
+ # Legacy mode (default, mm_beta=False)
+ llm = LLM(model="gpt-4o")
+ response = llm(messages=[{"role": "user", "content": "Hello"}])
+ # response is raw API response: response.choices[0].message.content
+
+ Using System Messages:
+
+ # LiteLLM (OpenAI, Anthropic, etc.) - Use messages array with role="system"
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+ response = llm(messages=[
+ {"role": "system", "content": "You are a helpful math tutor."},
+ {"role": "user", "content": "What is 2+2?"}
+ ])
+ print(response.get_text()) # AssistantTurn object
+
+ # LiteLLM Legacy mode (mm_beta=False)
+ llm = LLM(model="gpt-4o-mini")
+ response = llm(messages=[
+ {"role": "system", "content": "You are a pirate assistant."},
+ {"role": "user", "content": "Hello!"}
+ ])
+ print(response.choices[0].message.content) # Raw API response
+
+ # Google Gemini - Use system_instruction parameter (not in messages array)
+        llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True)
+ response = llm(
+ "Hello there",
+ system_instruction="You are a helpful assistant."
+ )
+ print(response.get_text()) # AssistantTurn object
+
+ # Gemini with messages format (system_instruction separate from messages)
+        llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True)
+ response = llm(
+ messages=[
+ {"role": "user", "content": "What is your purpose?"}
+ ],
+ system_instruction="You are a creative writing instructor."
+ )
+
+        # The Gemini wrapper also extracts the system instruction from the messages array if it is not passed explicitly
+ messages = [
+ {"role": "system", "content": "You are a Shakespearean poet."},
+ {"role": "user", "content": "Tell me about the sun."}
+ ]
+ response1 = llm(messages=messages)
+        messages.append({"role": "assistant", "content": response1.to_text()})
+ messages.append({"role": "user", "content": "And the moon?"})
+ response2 = llm(messages=messages) # System message still applies
+
+ Using Named Profiles:
+ # Use a saved profile
+ llm = LLM(profile="my_custom_profile")
+
+ # Create profiles with LLMFactory
+ LLMFactory.create_profile("creative", model="gpt-4o", temperature=0.9)
+ llm = LLM(profile="creative")
+
+ Using Different Backends:
+ # Explicitly specify backend (default: LiteLLM)
+ llm = LLM(backend="AutoGen", config_list=my_configs)
+ llm = LLM(backend="CustomLLM", model="llama-3.1-8b")
+        llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash")
+
+ # Or set via environment variable
+ # export TRACE_DEFAULT_LLM_BACKEND=AutoGen
+ llm = LLM()
+
+ Examples with LiteLLM Parameters:
+ # Structured output
+ llm = LLM(
+ model="gpt-4o-mini",
+ response_format={"type": "json_object"},
+ temperature=0.3
+ )
+
+ # High creativity
+ llm = LLM(
+ model="gpt-4o",
+ temperature=0.9,
+ top_p=0.95,
+ presence_penalty=0.6
+ )
+
+ # Deterministic responses
+ llm = LLM(
+ model="gpt-4o-mini",
+ temperature=0,
+ seed=42
+ )
+
+ Key Differences Between Backends:
+ LiteLLM (OpenAI, Anthropic, etc.):
+ - System message: Include in messages array with role="system"
+ - Format: messages=[{"role": "system", "content": "..."}]
+ - Works with: OpenAI, Anthropic, Cohere, etc.
+
+ Google Gemini:
+ - System instruction: Pass as system_instruction parameter
+ - Format: system_instruction="You are a helpful assistant."
+ - Separate from messages array
+ - Works with: gemini-2.5-flash, gemini-2.5-pro, etc.
+
+ See Also:
+ - LLMFactory: For managing named profiles
+ - AssistantTurn: Returned when mm_beta=True
+ - https://docs.litellm.ai/docs/completion/input: Full list of LiteLLM parameters
+ - https://ai.google.dev/gemini-api/docs/system-instructions: Gemini system instructions
"""
- def __new__(cls, *args, profile: str = None, backend: str = None, **kwargs):
- # New: if profile is specified, use LLMFactory
+ def __new__(cls, model: str = None, profile: str = 'default', backend: str = None,
+ mm_beta: bool = False, **kwargs):
+
+ if _is_image_generation_model(model):
+ mm_beta = True
+
+ # Priority 1: If model is specified, use LLMFactory with model
+ if model:
+ if backend is not None:
+ kwargs['backend'] = backend
+ return LLMFactory.get_llm(model=model, mm_beta=mm_beta, **kwargs)
+
+        # Priority 2: If profile is specified and no backend is explicitly requested, use LLMFactory
-        if profile:
-            return LLMFactory.get_llm(profile)
-        # Decide which backend to use
+        if profile and backend is None:
+            return LLMFactory.get_llm(profile=profile, mm_beta=mm_beta, **kwargs)
+
+        # Priority 3: Use backend-specific instantiation (for AutoGen, CustomLLM, etc.)
+        # This path is used when a backend is explicitly requested or profile is None
name = backend or os.getenv("TRACE_DEFAULT_LLM_BACKEND", "LiteLLM")
try:
backend_cls = _LLM_REGISTRY[name]
@@ -468,4 +1139,5 @@ def __new__(cls, *args, profile: str = None, backend: str = None, **kwargs):
raise ValueError(f"Unknown LLM backend: {name}. "
f"Valid options are: {list(_LLM_REGISTRY)}")
# Instantiate and return the chosen subclass
- return backend_cls(*args, **kwargs)
\ No newline at end of file
+ kwargs['mm_beta'] = mm_beta
+ return backend_cls(**kwargs)
\ No newline at end of file
diff --git a/setup.py b/setup.py
index dbd60be5..394d4046 100644
--- a/setup.py
+++ b/setup.py
@@ -11,9 +11,11 @@
install_requires = [
"graphviz>=0.20.1",
"pytest",
- "litellm==1.75.0",
+ "litellm==1.80.8",
+ "google-genai",
"black",
"scikit-learn",
+ "pillow",
"tensorboardX",
"tensorboard"
]
diff --git a/tests/llm_optimizers_tests/test_gepa_benchmark.py b/tests/llm_optimizers_tests/test_gepa_benchmark.py
index 2811d4ec..0b7d0906 100644
--- a/tests/llm_optimizers_tests/test_gepa_benchmark.py
+++ b/tests/llm_optimizers_tests/test_gepa_benchmark.py
@@ -66,12 +66,12 @@ def test_gepa_benchmark_gsm8k_real_llm():
train = ds["train"][:6]
train_dataset = {"inputs": train["question"], "infos": train["answer"]}
- # Teacher/judge with a low-cost profile
- guide = LLMJudge(llm=LLM(profile="cheap"))
+ # Teacher/judge with default model (gpt-4o-mini is cost-effective)
+ guide = LLMJudge(llm=LLM(model='gpt-4o-mini'))
- # Agent and optimizer (low-cost profile)
- agent = Learner(llm=LLM(profile="cheap"))
- optimizer = OptoPrimeV2(agent.parameters(), llm=LLM(profile="cheap"))
+ # Agent and optimizer (using default model)
+ agent = Learner(llm=LLM(model='gpt-4o-mini'))
+ optimizer = OptoPrimeV2(agent.parameters(), llm=LLM(model='gpt-4o-mini'))
algos = [
("GEPA-Base", GEPAAlgorithmBase(agent, optimizer=optimizer, logger=None, num_threads=2), dict(num_iters=2, train_batch_size=1, merge_every=2)),
diff --git a/tests/llm_optimizers_tests/test_optimizer.py b/tests/llm_optimizers_tests/test_optimizer.py
index aa278d8e..ccabdd73 100644
--- a/tests/llm_optimizers_tests/test_optimizer.py
+++ b/tests/llm_optimizers_tests/test_optimizer.py
@@ -84,7 +84,7 @@ def model_profile(request, monkeypatch):
# Register a runtime profile (does not modify source files)
# Use CustomLLM backend which uses OpenAI-compatible calls.
- LLMFactory.register_profile(profile_name, backend="CustomLLM", model=model_id)
+ LLMFactory.create_profile(profile_name, backend="CustomLLM", model=model_id)
return profile_name
diff --git a/tests/llm_optimizers_tests/test_optoprime_v2.py b/tests/llm_optimizers_tests/test_optoprime_v2.py
index b1032f28..ce29d92e 100644
--- a/tests/llm_optimizers_tests/test_optoprime_v2.py
+++ b/tests/llm_optimizers_tests/test_optoprime_v2.py
@@ -101,18 +101,18 @@ def multiply(num):
assert function_repr in part2, "Expected function representation to be present in part2"
def test_big_data_truncation():
- num_1 = node(1, trainable=True)
+ num_1 = node("**2", trainable=True)
- list_1 = node([1, 2, 3, 4, 5, 6, 7, 8, 9, 20] * 10, trainable=True)
+ list_1 = node("12345691912338" * 10, trainable=False)
- result = num_1 + list_1[30]
+ result = list_1 + num_1
- optimizer = OptoPrimeV2([num_1, list_1], use_json_object_format=False,
+ optimizer = OptoPrimeV2([num_1], use_json_object_format=False,
ignore_extraction_error=False,
include_example=True, initial_var_char_limit=10)
optimizer.zero_feedback()
- optimizer.backward(result, 'make this number bigger')
+ optimizer.backward(result, 'compute the expression')
summary = optimizer.summarize()
part1, part2 = optimizer.construct_prompt(summary)
@@ -120,11 +120,7 @@ def test_big_data_truncation():
part1 = optimizer.replace_symbols(part1, optimizer.prompt_symbols)
part2 = optimizer.replace_symbols(part2, optimizer.prompt_symbols)
- truncated_repr = """
-
-[1, 2, 3, ...(skipped due to length limit)
-
-"""
+ truncated_repr = """1234569191...(skipped due to length limit)"""
assert truncated_repr in part2, "Expected truncated list representation to be present in part2"
@@ -177,5 +173,5 @@ def test_extraction_pipeline():
assert 'variables' in suggestion, "Expected 'variables' in suggestion"
assert 'int0' in suggestion['variables'], "Expected 'int0' variable in suggestion"
assert 'int1' in suggestion['variables'], "Expected 'int1' variable in suggestion"
- assert suggestion['variables']['int0'] == 5, "Expected int0 to be incremented to 5"
- assert suggestion['variables']['int1'] == 5, "Expected int1 to be incremented to 5"
+ assert suggestion['variables']['int0'] == '5', "Expected int0 to be incremented to 5"
+ assert suggestion['variables']['int1'] == '5', "Expected int1 to be incremented to 5"
diff --git a/tests/llm_optimizers_tests/test_optoprime_v3.py b/tests/llm_optimizers_tests/test_optoprime_v3.py
new file mode 100644
index 00000000..36126114
--- /dev/null
+++ b/tests/llm_optimizers_tests/test_optoprime_v3.py
@@ -0,0 +1,509 @@
+import os
+import pytest
+from opto.trace import GRAPH
+from opto.utils.llm import LLM
+
+from opto.trace import node, bundle
+from opto.optimizers.optoprime_v3 import (
+ OptoPrimeV3, OptimizerPromptSymbolSet2, ProblemInstance,
+ OptimizerPromptSymbolSet, value_to_image_content
+)
+from opto.utils.backbone import TextContent, ImageContent
+
+# You can temporarily override to test a specific optimizer, e.g. ALL_OPTIMIZERS = [OptoPrime]
+
+# Skip tests if no API credentials are available
+SKIP_REASON = "No API credentials found"
+HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get(
+ "OPENAI_API_KEY") or os.environ.get("GEMINI_API_KEY")
+llm = LLM()
+
+
+@pytest.fixture(autouse=True)
+def clear_graph():
+ """Reset the graph before each test"""
+ GRAPH.clear()
+ yield
+ GRAPH.clear()
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_response_extraction():
+ pass
+
+
+def test_tag_template_change():
+ num_1 = node(1, trainable=True)
+ num_2 = node(2, trainable=True, description="<=5")
+ result = num_1 + num_2
+ optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False,
+ ignore_extraction_error=False,
+ include_example=True,
+ optimizer_prompt_symbol_set=OptimizerPromptSymbolSet2())
+
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'make this number bigger')
+
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ # system_prompt is a string, user_prompt is a ContentBlockList
+ system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols)
+
+ # Convert ContentBlockList to text for symbol replacement
+ user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent))
+ user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols)
+
+ assert """""" in system_prompt, "Expected tag to be present in system_prompt"
+ assert """""" in user_prompt_text, "Expected tag to be present in user_prompt"
+
+ print(system_prompt)
+ print(user_prompt_text)
+
+
+@bundle()
+def transform(num):
+ """Add number"""
+ return num + 1
+
+
+@bundle(trainable=True)
+def multiply(num):
+ return num * 5
+
+
+def test_function_repr():
+ num_1 = node(1, trainable=False)
+
+ result = multiply(transform(num_1))
+ optimizer = OptoPrimeV3([multiply.parameter], use_json_object_format=False,
+ ignore_extraction_error=False,
+ include_example=True)
+
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'make this number bigger')
+
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols)
+ # Convert ContentBlockList to text for symbol replacement
+ user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent))
+ user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols)
+
+ function_repr = """
+
+def multiply(num):
+ return num * 5
+
+
+The code should start with:
+def multiply(num):
+
+"""
+
+ assert function_repr in user_prompt_text, "Expected function representation to be present in user_prompt"
+
+def test_big_data_truncation():
+ num_1 = node("**2", trainable=True)
+
+ list_1 = node("12345691912338" * 10, trainable=False)
+
+ result = list_1 + num_1
+
+ optimizer = OptoPrimeV3([num_1], use_json_object_format=False,
+ ignore_extraction_error=False,
+ include_example=True, initial_var_char_limit=10)
+
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'compute the expression')
+
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ system_prompt = optimizer.replace_symbols(system_prompt, optimizer.prompt_symbols)
+ # Convert ContentBlockList to text for symbol replacement
+ user_prompt_text = "".join(block.text for block in user_prompt if isinstance(block, TextContent))
+ user_prompt_text = optimizer.replace_symbols(user_prompt_text, optimizer.prompt_symbols)
+
+ truncated_repr = """1234569191...(skipped due to length limit)"""
+
+ assert truncated_repr in user_prompt_text, "Expected truncated list representation to be present in user_prompt"
+
+def test_extraction_pipeline():
+ num_1 = node(1, trainable=True)
+ num_2 = node(2, trainable=True, description="<=5")
+ result = num_1 + num_2
+ optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False,
+ ignore_extraction_error=False,
+ include_example=True,
+ optimizer_prompt_symbol_set=OptimizerPromptSymbolSet2())
+
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'make this number bigger')
+
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ # Verify construct_prompt returns expected types
+ assert isinstance(system_prompt, str)
+ assert isinstance(user_prompt, list)
+
+ # Test extraction from a mock response
+ response = """
+The instruction suggests that the output, `add0`, needs to be made bigger than it currently is (3). The code performs an addition of `int0` and `int1` to produce `add0`. To increase `add0`, we can increase the values of `int0` or `int1`, or both. Given that `int1` has a constraint of being less than or equal to 5, we can set `int0` to a higher value, since it has no explicit constraint. By adjusting `int0` to a higher value, the output can be made larger in accordance with the feedback.
+
+
+
+int0
+
+5
+
+
+
+
+int1
+
+5
+
+"""
+ suggestion = optimizer.extract_llm_suggestion(response)
+
+ assert 'reasoning' in suggestion, "Expected 'reasoning' in suggestion"
+ assert 'variables' in suggestion, "Expected 'variables' in suggestion"
+ assert 'int0' in suggestion['variables'], "Expected 'int0' variable in suggestion"
+ assert 'int1' in suggestion['variables'], "Expected 'int1' variable in suggestion"
+ assert suggestion['variables']['int0'] == '5', "Expected int0 to be incremented to 5"
+ assert suggestion['variables']['int1'] == '5', "Expected int1 to be incremented to 5"
+
+
+# ==================== Multimodal / Content Block Tests ====================
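+
+# The tests below build ProblemInstance fields from a ContentBlockList that can
+# mix text and image blocks. A minimal sketch of the shape used here (taken
+# from the tests themselves, not from separate documentation):
+#
+#   ContentBlockList([
+#       TextContent(text="..."),
+#       ImageContent(image_url="https://example.com/test.jpg"),
+#   ])
+#
+# __repr__ renders image blocks as an "[IMAGE]" placeholder, while
+# to_content_blocks() preserves them so has_images() can report True.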
+
+def test_problem_instance_text_only():
+ """Test that ProblemInstance with text-only content works correctly."""
+ from opto.utils.backbone import ContentBlockList
+ symbol_set = OptimizerPromptSymbolSet()
+
+ instance = ProblemInstance(
+ instruction="Test instruction",
+ code="y = add(x=a, y=b)",
+ documentation="[add] Adds two numbers",
+ variables=ContentBlockList("5"),
+ inputs=ContentBlockList("3"),
+ others=ContentBlockList(),
+ outputs=ContentBlockList("8"),
+ feedback="Result should be 10",
+ context="Some context",
+ optimizer_prompt_symbol_set=symbol_set
+ )
+
+ # Test __repr__ returns string
+ text_repr = str(instance)
+ assert "Test instruction" in text_repr
+ assert "y = add(x=a, y=b)" in text_repr
+ assert "Result should be 10" in text_repr
+ assert "Some context" in text_repr
+
+ # Test to_content_blocks returns list
+ blocks = instance.to_content_blocks()
+ assert isinstance(blocks, list)
+ assert len(blocks) > 0
+ assert all(isinstance(b, (TextContent, ImageContent)) for b in blocks)
+
+ # Test has_images returns False for text-only
+ assert not instance.has_images()
+
+
+def test_problem_instance_with_content_blocks():
+ """Test ProblemInstance with ContentBlockList fields containing images."""
+ from opto.utils.backbone import ContentBlockList
+ symbol_set = OptimizerPromptSymbolSet()
+
+ # Create content blocks with an image
+ variables_blocks = ContentBlockList([
+ TextContent(text=""),
+ ImageContent(image_url="https://example.com/test.jpg"),
+ TextContent(text="")
+ ])
+
+ instance = ProblemInstance(
+ instruction="Analyze the image",
+ code="result = analyze(img)",
+ documentation="[analyze] Analyzes an image",
+ variables=variables_blocks,
+ inputs=ContentBlockList(),
+ others=ContentBlockList(),
+ outputs=ContentBlockList("cat"),
+ feedback="Result should be 'dog'",
+ context=None,
+ optimizer_prompt_symbol_set=symbol_set
+ )
+
+ # Test __repr__ handles content blocks (should show [IMAGE] placeholder)
+ text_repr = str(instance)
+ assert "Analyze the image" in text_repr
+ assert "[IMAGE]" in text_repr
+
+ # Test to_content_blocks includes the image
+ blocks = instance.to_content_blocks()
+ assert isinstance(blocks, list)
+
+ # Find the ImageContent block
+ image_blocks = [b for b in blocks if isinstance(b, ImageContent)]
+ assert len(image_blocks) == 1
+ assert image_blocks[0].image_url == "https://example.com/test.jpg"
+
+ # Test has_images returns True
+ assert instance.has_images()
+
+
+def test_problem_instance_mixed_content():
+ """Test ProblemInstance with mixed text and image content in multiple fields."""
+ from opto.utils.backbone import ContentBlockList
+ symbol_set = OptimizerPromptSymbolSet()
+
+ # Variables with image
+ variables_blocks = ContentBlockList([
+ TextContent(text="Hello\n"),
+ TextContent(text=""),
+ ImageContent(image_data="base64data", media_type="image/png"),
+ TextContent(text="")
+ ])
+
+ # Inputs with image
+ inputs_blocks = ContentBlockList([
+ TextContent(text=""),
+ ImageContent(image_url="https://example.com/ref.png"),
+ TextContent(text="")
+ ])
+
+ instance = ProblemInstance(
+ instruction="Compare images",
+ code="result = compare(img, reference)",
+ documentation="[compare] Compares two images",
+ variables=variables_blocks,
+ inputs=inputs_blocks,
+ others=ContentBlockList(),
+ outputs=ContentBlockList("0.8"),
+ feedback="Similarity should be higher",
+ context="Context text",
+ optimizer_prompt_symbol_set=symbol_set
+ )
+
+ # Test has_images
+ assert instance.has_images()
+
+ # Test to_content_blocks
+ blocks = instance.to_content_blocks()
+ image_blocks = [b for b in blocks if isinstance(b, ImageContent)]
+ assert len(image_blocks) == 2 # One from variables, one from inputs
+
+
+def test_value_to_image_content_url():
+ """Test value_to_image_content with URL strings."""
+ # Valid image URL
+ result = value_to_image_content("https://example.com/image.jpg")
+ assert result is not None
+ assert isinstance(result, ImageContent)
+ assert result.image_url == "https://example.com/image.jpg"
+
+ # Non-image URL (no image extension) - is_image returns False for pattern check
+ result = value_to_image_content("https://example.com/page.html")
+ assert result is None
+
+ # Non-URL string
+ result = value_to_image_content("just a regular string")
+ assert result is None
+
+
+def test_value_to_image_content_base64():
+ """Test value_to_image_content with base64 data URLs."""
+ # Valid base64 data URL
+ data_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg=="
+ result = value_to_image_content(data_url)
+ assert result is not None
+ assert isinstance(result, ImageContent)
+ assert result.image_data == "iVBORw0KGgoAAAANSUhEUg=="
+ assert result.media_type == "image/png"
+
+
+def test_value_to_image_content_non_image():
+ """Test value_to_image_content with non-image values."""
+ # Integer
+ assert value_to_image_content(42) is None
+
+ # List
+ assert value_to_image_content([1, 2, 3]) is None
+
+ # Dict
+ assert value_to_image_content({"key": "value"}) is None
+
+ # Regular string
+ assert value_to_image_content("hello world") is None
+
+
+def test_construct_prompt():
+ """Test construct_prompt returns ContentBlockList for multimodal support."""
+ num_1 = node(1, trainable=True)
+ num_2 = node(2, trainable=True)
+ result = num_1 + num_2
+
+ optimizer = OptoPrimeV3([num_1, num_2], use_json_object_format=False)
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'make this number bigger')
+
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ # system_prompt should be string, user_prompt should be ContentBlockList
+ assert isinstance(system_prompt, str)
+ assert isinstance(user_prompt, list)
+ assert all(isinstance(b, (TextContent, ImageContent)) for b in user_prompt)
+
+ # Check that text content contains expected info
+ text_parts = [b.text for b in user_prompt if isinstance(b, TextContent)]
+ full_text = "".join(text_parts)
+ assert "int0" in full_text or "int1" in full_text
+
+
+def test_repr_node_value_as_content_blocks():
+ """Test repr_node_value_as_content_blocks method."""
+ num_1 = node(1, trainable=True)
+ result = num_1 + 1
+
+ optimizer = OptoPrimeV3([num_1], use_json_object_format=False)
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'test')
+
+ # Test with non-image nodes
+ summary = optimizer.summarize()
+ blocks = optimizer.repr_node_value_as_content_blocks(
+ summary.variables,
+ node_tag=optimizer.optimizer_prompt_symbol_set.variable_tag,
+ value_tag=optimizer.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=optimizer.optimizer_prompt_symbol_set.constraint_tag
+ )
+
+ assert isinstance(blocks, list)
+ assert len(blocks) > 0
+ assert all(isinstance(b, TextContent) for b in blocks) # No images in this case
+
+
+def test_repr_node_value_compact_as_content_blocks():
+ """Test repr_node_value_compact_as_content_blocks method."""
+ long_string = "x" * 5000 # Long string that will be truncated
+ str_node = node(long_string, trainable=True)
+ result = str_node + "!"
+
+ optimizer = OptoPrimeV3([str_node], use_json_object_format=False, initial_var_char_limit=100)
+ optimizer.zero_feedback()
+ optimizer.backward(result, 'test')
+
+ summary = optimizer.summarize()
+ blocks = optimizer.repr_node_value_compact_as_content_blocks(
+ summary.inputs,
+ node_tag=optimizer.optimizer_prompt_symbol_set.node_tag,
+ value_tag=optimizer.optimizer_prompt_symbol_set.value_tag,
+ constraint_tag=optimizer.optimizer_prompt_symbol_set.constraint_tag
+ )
+
+ # Should be truncated
+ text_parts = [b.text for b in blocks if isinstance(b, TextContent)]
+ full_text = "".join(text_parts)
+ assert "skipped due to length limit" in full_text or len(full_text) < len(long_string)
+
+
+# ==================== Real LLM Call Tests ====================
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_optimizer_step_real_llm_call():
+ """Test a real optimization step with LLM call."""
+ # Create a simple optimization problem
+ greeting = node("Hello", trainable=True, description="A greeting message")
+
+ @bundle()
+ def make_sentence(word):
+ """Create a sentence from a word."""
+ return f"{word}, how are you today?"
+
+ result = make_sentence(greeting)
+
+ # Create optimizer
+ optimizer = OptoPrimeV3(
+ [greeting],
+ use_json_object_format=False,
+ ignore_extraction_error=True,
+ include_example=False,
+ )
+
+ # Setup feedback
+ optimizer.zero_feedback()
+ optimizer.backward(result, "The greeting should be more formal and professional")
+
+ # Execute optimization step - this makes a real LLM call
+ update_dict = optimizer.step(verbose=True)
+
+ # Verify the optimizer produced a suggestion
+ print(f"Update dict: {update_dict}")
+
+ # The LLM should have suggested a new value
+ # We don't assert specific content since LLM output varies
+ # but we verify the step completed without error
+ assert optimizer.log is not None
+ assert len(optimizer.log) > 0
+
+ # Check that the log contains the expected structure
+ last_log = optimizer.log[-1]
+ assert "system_prompt" in last_log
+ assert "user_prompt" in last_log
+ assert "response" in last_log
+
+ print(f"LLM Response: {last_log['response'][:500]}...")
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_optimizer_step_with_content_blocks():
+ """Test optimization step using content blocks (multimodal mode)."""
+ # Create trainable parameters
+ num_1 = node(5, trainable=True, description="A number to optimize")
+ num_2 = node(3, trainable=True, description="Another number")
+
+ result = num_1 + num_2
+
+ # Create optimizer
+ optimizer = OptoPrimeV3(
+ [num_1, num_2],
+ use_json_object_format=False,
+ ignore_extraction_error=True,
+ include_example=False,
+ )
+
+ # Setup feedback
+ optimizer.zero_feedback()
+ optimizer.backward(result, "The sum should be exactly 100")
+
+ # Test that construct_prompt returns ContentBlockList
+ summary = optimizer.summarize()
+ system_prompt, user_prompt = optimizer.construct_prompt(summary)
+
+ # Verify content blocks structure
+ from opto.utils.backbone import ContentBlockList
+ assert isinstance(user_prompt, ContentBlockList)
+ assert len(user_prompt) > 0
+
+ # Verify text is merged (should be fewer blocks than if not merged)
+ text_blocks = [b for b in user_prompt if isinstance(b, TextContent)]
+ print(f"Number of text blocks after merging: {len(text_blocks)}")
+
+ # Execute the step (this makes a real LLM call)
+ update_dict = optimizer.step(verbose=True)
+
+ print(f"Update dict: {update_dict}")
+
+ # Verify the step completed
+ assert optimizer.log is not None
+ assert len(optimizer.log) > 0
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_optimizer_multimodal_parameter_update():
+ pass
\ No newline at end of file
diff --git a/tests/unit_tests/test_llm.py b/tests/unit_tests/test_llm.py
index 9435bf33..244bbf2e 100644
--- a/tests/unit_tests/test_llm.py
+++ b/tests/unit_tests/test_llm.py
@@ -1,8 +1,22 @@
-from opto.utils.llm import LLM
+from opto.utils.llm import LLM, LLMFactory
from opto.optimizers.utils import print_color
import os
+import pytest
+from opto.utils.backbone import (
+ ConversationHistory,
+ UserTurn,
+ AssistantTurn
+)
+
+# Skip tests if no API credentials are available
+SKIP_REASON = "No API credentials found"
+HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get(
+ "OPENAI_API_KEY") or os.environ.get("GEMINI_API_KEY")
+
+
def test_llm_init():
+ """Test basic LLM initialization with legacy mode (mm_beta=False)"""
if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
llm = LLM()
system_prompt = 'You are a helpful assistant.'
@@ -22,3 +36,433 @@ def test_llm_init():
print_color(f'System: {system_prompt}', 'red')
print_color(f'User: {user_prompt}', 'blue')
print_color(f'LLM: {response}', 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestLLMMMBetaMode:
+ """Test suite for LLM class with mm_beta=True and mm_beta=False modes"""
+
+ def test_mm_beta_false_legacy_response_format(self):
+ """Test that mm_beta=False returns raw API response (legacy format)"""
+ llm = LLM(mm_beta=False)
+ messages = [{"role": "user", "content": "Say 'test' and nothing else."}]
+
+ response = llm(messages=messages)
+
+ # Legacy mode should return raw API response with .choices attribute
+ assert hasattr(response, 'choices'), "Legacy mode should return raw API response"
+ assert hasattr(response.choices[0], 'message'), "Response should have message attribute"
+ assert hasattr(response.choices[0].message, 'content'), "Message should have content attribute"
+
+ # Should NOT be an AssistantTurn object
+ assert not isinstance(response, AssistantTurn), "Legacy mode should not return AssistantTurn"
+
+ content = response.choices[0].message.content
+ assert isinstance(content, str), "Content should be a string"
+ assert len(content) > 0, "Content should not be empty"
+
+ print_color(f"✓ Legacy mode (mm_beta=False) returns raw API response", 'green')
+
+ def test_mm_beta_true_assistant_turn_response(self):
+ """Test that mm_beta=True returns AssistantTurn object"""
+ llm = LLM(mm_beta=True)
+ messages = [{"role": "user", "content": "Say 'test' and nothing else."}]
+
+ response = llm(messages=messages)
+
+ # mm_beta mode should return AssistantTurn object
+ assert isinstance(response, AssistantTurn), "mm_beta mode should return AssistantTurn object"
+
+ # Check AssistantTurn attributes
+ assert hasattr(response, 'content'), "AssistantTurn should have content attribute"
+ assert hasattr(response, 'tool_calls'), "AssistantTurn should have tool_calls attribute"
+ assert hasattr(response, 'role'), "AssistantTurn should have role attribute"
+ assert response.role == "assistant", "Role should be 'assistant'"
+
+ # Content should be accessible
+ assert response.content is not None, "Content should not be None"
+
+ print_color(f"✓ Multimodal mode (mm_beta=True) returns AssistantTurn object", 'green')
+
+ def test_mm_beta_with_explicit_model(self):
+ """Test mm_beta parameter works with explicit model specification"""
+ # Test with mm_beta=False
+ llm_legacy = LLM(model="gpt-4o-mini", mm_beta=False)
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response_legacy = llm_legacy(messages=messages)
+ assert hasattr(response_legacy, 'choices'), "Should return raw API response"
+ assert not isinstance(response_legacy, AssistantTurn), "Should not be AssistantTurn"
+
+ # Test with mm_beta=True
+ llm_mm = LLM(model="gpt-4o-mini", mm_beta=True)
+ response_mm = llm_mm(messages=messages)
+ assert isinstance(response_mm, AssistantTurn), "Should return AssistantTurn"
+
+ print_color(f"✓ mm_beta parameter works correctly with explicit model", 'green')
+
+ def test_mm_beta_with_profile(self):
+ """Test mm_beta parameter works with profile-based instantiation"""
+ # Create a test profile
+ LLMFactory.create_profile("test_profile", backend="LiteLLM", model="gpt-4o-mini", temperature=0.7)
+
+ # Test with mm_beta=False
+ llm_legacy = LLM(profile="test_profile", mm_beta=False)
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response_legacy = llm_legacy(messages=messages)
+ assert hasattr(response_legacy, 'choices'), "Profile with mm_beta=False should return raw API response"
+
+ # Test with mm_beta=True
+ llm_mm = LLM(profile="test_profile", mm_beta=True)
+ response_mm = llm_mm(messages=messages)
+ assert isinstance(response_mm, AssistantTurn), "Profile with mm_beta=True should return AssistantTurn"
+
+ print_color(f"✓ mm_beta parameter works correctly with profiles", 'green')
+
+ def test_mm_beta_with_litellm_parameters(self):
+ """Test mm_beta works with various LiteLLM parameters"""
+ # Test with temperature and max_tokens
+ llm = LLM(
+ model="gpt-4o-mini",
+ mm_beta=True,
+ temperature=0.3,
+ max_tokens=100
+ )
+
+ messages = [{"role": "user", "content": "Say hello"}]
+ response = llm(messages=messages)
+
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn with LiteLLM params"
+ assert response.content is not None, "Should have content"
+
+ print_color(f"✓ mm_beta works with LiteLLM parameters", 'green')
+
+ def test_mm_beta_default_is_false(self):
+ """Test that mm_beta defaults to False for backward compatibility"""
+ llm = LLM() # No mm_beta specified
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response = llm(messages=messages)
+
+ # Default should be legacy mode (mm_beta=False)
+ assert hasattr(response, 'choices'), "Default should be legacy mode"
+ assert not isinstance(response, AssistantTurn), "Default should not return AssistantTurn"
+
+ print_color(f"✓ mm_beta defaults to False (backward compatible)", 'green')
+
+ def test_mm_beta_content_accessibility(self):
+ """Test that content is accessible in both modes"""
+ messages = [{"role": "user", "content": "Say 'hello'"}]
+
+ # Legacy mode
+ llm_legacy = LLM(mm_beta=False)
+ response_legacy = llm_legacy(messages=messages)
+ content_legacy = response_legacy.choices[0].message.content
+ assert isinstance(content_legacy, str), "Legacy content should be string"
+ assert len(content_legacy) > 0, "Legacy content should not be empty"
+
+ # mm_beta mode
+ llm_mm = LLM(mm_beta=True)
+ response_mm = llm_mm(messages=messages)
+ # AssistantTurn content is a list of ContentBlock objects
+ assert response_mm.content is not None, "mm_beta content should not be None"
+
+ print_color(f"✓ Content accessible in both modes", 'green')
+
+ def test_mm_beta_with_different_backends(self):
+ """Test mm_beta parameter with different backend specifications"""
+ # Test with explicit LiteLLM backend
+ llm = LLM(backend="LiteLLM", model="gpt-4o-mini", mm_beta=True)
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response = llm(messages=messages)
+ assert isinstance(response, AssistantTurn), "LiteLLM backend with mm_beta=True should return AssistantTurn"
+
+ print_color(f"✓ mm_beta works with explicit backend specification", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestLLMConstructorPriorities:
+ """Test the priority logic in LLM constructor"""
+
+ def test_priority_profile_over_default(self):
+ """Test that profile parameter takes priority"""
+ LLMFactory.create_profile("priority_test", backend="LiteLLM", model="gpt-4o-mini", temperature=0.5)
+
+ llm = LLM(profile="priority_test", mm_beta=True)
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response = llm(messages=messages)
+ assert isinstance(response, AssistantTurn), "Profile-based LLM should respect mm_beta"
+
+ print_color(f"✓ Profile parameter takes priority", 'green')
+
+ def test_priority_model_over_profile(self):
+ """Test that model parameter takes priority over default profile"""
+ # When model is specified, it should use that model regardless of default profile
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response = llm(messages=messages)
+ assert isinstance(response, AssistantTurn), "Model-based LLM should respect mm_beta"
+
+ print_color(f"✓ Model parameter creates correct LLM instance", 'green')
+
+ def test_backend_fallback(self):
+ """Test that backend parameter works when neither profile nor model specified"""
+ # This tests the Priority 3 path in __new__
+ llm = LLM(backend="LiteLLM", mm_beta=True, model="gpt-4o-mini")
+ messages = [{"role": "user", "content": "Hi"}]
+
+ response = llm(messages=messages)
+ assert isinstance(response, AssistantTurn), "Backend-based LLM should respect mm_beta"
+
+ print_color(f"✓ Backend parameter works correctly", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestLLMDocumentationExamples:
+ """Test examples from LLM class documentation"""
+
+ def test_basic_usage_default_model(self):
+ """Test: llm = LLM()"""
+ llm = LLM()
+ messages = [{"role": "user", "content": "Hi"}]
+ response = llm(messages=messages)
+
+ # Default is mm_beta=False
+ assert hasattr(response, 'choices'), "Default usage should return raw API response"
+ print_color(f"✓ Basic usage with default model works", 'green')
+
+ def test_specify_model_directly(self):
+ """Test: llm = LLM(model='gpt-4o')"""
+ llm = LLM(model="gpt-4o-mini") # Using mini for cost efficiency
+ messages = [{"role": "user", "content": "Hi"}]
+ response = llm(messages=messages)
+
+ assert hasattr(response, 'choices'), "Model specification should work"
+ print_color(f"✓ Model specification works", 'green')
+
+ def test_multimodal_beta_mode_example(self):
+ """Test example from 'Using Multimodal Beta Mode' section"""
+ # Enable mm_beta for rich AssistantTurn responses
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+ response = llm(messages=[{"role": "user", "content": "Hello"}])
+
+ # response is now an AssistantTurn object with .content, .tool_calls, etc.
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn"
+ assert hasattr(response, 'content'), "Should have content attribute"
+ assert hasattr(response, 'tool_calls'), "Should have tool_calls attribute"
+
+ print_color(f"✓ Multimodal beta mode example works as documented", 'green')
+
+ def test_legacy_mode_example(self):
+ """Test example from 'Legacy mode' section"""
+ # Legacy mode (default, mm_beta=False)
+ llm = LLM(model="gpt-4o-mini")
+ response = llm(messages=[{"role": "user", "content": "Hello"}])
+
+ # response is raw API response: response.choices[0].message.content
+ assert hasattr(response, 'choices'), "Should return raw API response"
+ content = response.choices[0].message.content
+ assert isinstance(content, str), "Content should be string"
+
+ print_color(f"✓ Legacy mode example works as documented", 'green')
+
+ def test_litellm_parameters_example(self):
+ """Test examples with LiteLLM parameters"""
+ # High creativity example
+ llm = LLM(
+ model="gpt-4o-mini",
+ temperature=0.9,
+ top_p=0.95,
+ presence_penalty=0.6
+ )
+ messages = [{"role": "user", "content": "Hi"}]
+ response = llm(messages=messages)
+
+ assert hasattr(response, 'choices'), "LiteLLM parameters should work"
+
+ print_color(f"✓ LiteLLM parameters example works", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_mm_beta_integration_with_conversation():
+ """Test mm_beta mode with a multi-turn conversation"""
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+
+ # First turn
+ messages = [
+ {"role": "user", "content": "My name is Alice."}
+ ]
+ response1 = llm(messages=messages)
+ assert isinstance(response1, AssistantTurn), "First response should be AssistantTurn"
+
+ # Second turn - reference previous context
+ messages.append({"role": "assistant", "content": str(response1.content)})
+ messages.append({"role": "user", "content": "What is my name?"})
+
+ response2 = llm(messages=messages)
+ assert isinstance(response2, AssistantTurn), "Second response should be AssistantTurn"
+
+ print_color(f"✓ mm_beta mode works with multi-turn conversations", 'green')
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+class TestSystemMessages:
+ """Test suite for system message handling in different LLM backends"""
+
+ def test_litellm_completion_api_system_message(self):
+ """Test system message with LiteLLM Completion API (mm_beta=False)"""
+ llm = LLM(model="gpt-4o-mini", mm_beta=False)
+
+ messages = [
+ {"role": "system", "content": "You are a cat. Your name is Neko. Always respond as a cat would."},
+ {"role": "user", "content": "What is your name?"}
+ ]
+
+ response = llm(messages=messages)
+
+ # Legacy mode should return raw API response
+ assert hasattr(response, 'choices'), "Should return raw API response"
+ content = response.choices[0].message.content
+ assert isinstance(content, str), "Content should be a string"
+ assert len(content) > 0, "Content should not be empty"
+
+ # Check that the response reflects the system message (should mention being a cat or Neko)
+ content_lower = content.lower()
+ assert 'neko' in content_lower or 'cat' in content_lower, \
+ f"Response should reflect system message about being a cat named Neko. Got: {content}"
+
+ print_color(f"✓ LiteLLM Completion API handles system messages correctly", 'green')
+
+ def test_litellm_responses_api_system_message(self):
+ """Test system message with LiteLLM Responses API (mm_beta=True)"""
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+
+ messages = [
+ {"role": "system", "content": "You are a helpful math tutor. Always explain concepts clearly."},
+ {"role": "user", "content": "What is 2+2?"}
+ ]
+
+ response = llm(messages=messages)
+
+ # mm_beta mode should return AssistantTurn
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn object"
+ assert response.content is not None, "Content should not be None"
+
+ # Get text content
+ text_content = response.to_text()
+ assert isinstance(text_content, str), "Text content should be a string"
+ assert len(text_content) > 0, "Text content should not be empty"
+ assert '4' in text_content, f"Response should contain the answer '4'. Got: {text_content}"
+
+ print_color(f"✓ LiteLLM Responses API handles system messages correctly", 'green')
+
+ @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found")
+ def test_gemini_system_instruction_legacy_mode(self):
+ """Test system_instruction with Gemini API in legacy mode (mm_beta=False)"""
+ llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=False)
+
+ # For Gemini, system_instruction is passed as a parameter
+ response = llm(
+ "Hello there",
+ system_instruction="You are a cat. Your name is Neko. Always respond as a cat would."
+ )
+
+ # Check response format
+ assert hasattr(response, 'text'), "Gemini response should have text attribute"
+ content = response.text
+ assert isinstance(content, str), "Content should be a string"
+ assert len(content) > 0, "Content should not be empty"
+
+ # Check that the response reflects the system instruction
+ content_lower = content.lower()
+ assert 'neko' in content_lower or 'cat' in content_lower or 'meow' in content_lower, \
+ f"Response should reflect system instruction about being a cat named Neko. Got: {content}"
+
+ print_color(f"✓ Gemini API handles system_instruction correctly (legacy mode)", 'green')
+
+ @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found")
+ def test_gemini_system_instruction_mm_beta_mode(self):
+ """Test system_instruction with Gemini API in mm_beta mode"""
+ llm = LLM(backend="GoogleGenAI", model="gemini-2.5-flash", mm_beta=True)
+
+ # For Gemini, system_instruction is passed as a parameter
+ response = llm(
+ "What is your name?",
+ system_instruction="You are a helpful assistant named Claude. Always introduce yourself."
+ )
+
+ # mm_beta mode should return AssistantTurn
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn object"
+ assert response.content is not None, "Content should not be None"
+
+ # Get text content
+ text_content = response.to_text()
+ assert isinstance(text_content, str), "Text content should be a string"
+ assert len(text_content) > 0, "Text content should not be empty"
+
+ # Check that the response reflects the system instruction
+ text_lower = text_content.lower()
+ assert 'claude' in text_lower or 'assistant' in text_lower, \
+ f"Response should reflect system instruction about being Claude. Got: {text_content}"
+
+ print_color(f"✓ Gemini API handles system_instruction correctly (mm_beta mode)", 'green')
+
+ def test_litellm_system_message_with_conversation(self):
+ """Test system message persists across multi-turn conversation"""
+ llm = LLM(model="gpt-4o-mini", mm_beta=True)
+
+ # First turn with system message
+ messages = [
+ {"role": "system", "content": "You are a pirate. Always talk like a pirate."},
+ {"role": "user", "content": "Hello"}
+ ]
+
+ response1 = llm(messages=messages)
+ assert isinstance(response1, AssistantTurn), "First response should be AssistantTurn"
+ text1 = response1.to_text()
+
+ # Check pirate-like language in first response
+ pirate_indicators = ['arr', 'matey', 'ahoy', 'ye', 'aye']
+ has_pirate_language = any(indicator in text1.lower() for indicator in pirate_indicators)
+ assert has_pirate_language, f"First response should use pirate language. Got: {text1}"
+
+ # Second turn - system message should still apply
+ messages.append({"role": "assistant", "content": text1})
+ messages.append({"role": "user", "content": "What's the weather like?"})
+
+ response2 = llm(messages=messages)
+ assert isinstance(response2, AssistantTurn), "Second response should be AssistantTurn"
+ text2 = response2.to_text()
+
+ # Check pirate-like language persists
+ has_pirate_language_2 = any(indicator in text2.lower() for indicator in pirate_indicators)
+ assert has_pirate_language_2, f"Second response should still use pirate language. Got: {text2}"
+
+ print_color(f"✓ System message persists across conversation turns", 'green')
+
+ @pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No Gemini API key found")
+ def test_gemini_system_instruction_with_config_params(self):
+ """Test system_instruction works with other config parameters"""
+ llm = LLM(
+ backend="GoogleGenAI",
+ model="gemini-2.5-flash",
+ mm_beta=True,
+ temperature=0.7,
+ max_output_tokens=100
+ )
+
+ response = llm(
+ "Tell me a short joke",
+ system_instruction="You are a comedian who tells very short jokes."
+ )
+
+ assert isinstance(response, AssistantTurn), "Should return AssistantTurn object"
+ text_content = response.to_text()
+ assert len(text_content) > 0, "Should have content"
+
+ print_color(f"✓ Gemini system_instruction works with other config parameters", 'green')
+
diff --git a/tests/unit_tests/test_optimizer_backbone.py b/tests/unit_tests/test_optimizer_backbone.py
new file mode 100644
index 00000000..dfb4a2e3
--- /dev/null
+++ b/tests/unit_tests/test_optimizer_backbone.py
@@ -0,0 +1,701 @@
+"""
+Comprehensive tests for optimizer backbone components (ConversationHistory, UserTurn, AssistantTurn)
+Tests include: truncation strategies, multimodal content, and conversation management
+
+We need to test a few things:
+1. Various use cases of ContentBlock and specialized ones
+2. UserTurn, AssistantTurn and conversation manager
+3. Multi-modal use of conversation manager, including multi-turn and image as output
+"""
+import os
+import base64
+import pytest
+from opto.utils.backbone import (
+ ConversationHistory,
+ UserTurn,
+ AssistantTurn
+)
+
+# Skip tests if no API credentials are available
+SKIP_REASON = "No API credentials found"
+HAS_CREDENTIALS = os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get(
+ "OPENAI_API_KEY")
+
+
+# ============================================================================
+# Test Fixtures
+# ============================================================================
+
+def create_sample_conversation():
+ """Create a sample conversation with multiple rounds"""
+ history = ConversationHistory(system_prompt="You are a helpful assistant.")
+
+ # Round 1
+ user1 = UserTurn().add_text("Hello, what's the weather?")
+ assistant1 = AssistantTurn().add_text("The weather is sunny today.")
+ history.add_user_turn(user1).add_assistant_turn(assistant1)
+
+ # Round 2
+ user2 = UserTurn().add_text("What about tomorrow?")
+ assistant2 = AssistantTurn().add_text("Tomorrow will be rainy.")
+ history.add_user_turn(user2).add_assistant_turn(assistant2)
+
+ # Round 3
+ user3 = UserTurn().add_text("Should I bring an umbrella?")
+ assistant3 = AssistantTurn().add_text("Yes, definitely bring an umbrella.")
+ history.add_user_turn(user3).add_assistant_turn(assistant3)
+
+ # Round 4
+ user4 = UserTurn().add_text("Thanks for the advice!")
+ assistant4 = AssistantTurn().add_text("You're welcome! Stay dry!")
+ history.add_user_turn(user4).add_assistant_turn(assistant4)
+
+ return history
+
+
+# ============================================================================
+# Truncation Tests
+# ============================================================================
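+
+# Truncation semantics as exercised by the tests below (inferred from these
+# tests, not from separate documentation): `n` counts conversation rounds
+# (one user turn plus the assistant reply), "from_start" drops the oldest
+# rounds and keeps the last n, "from_end" keeps the first n, n=-1 (the
+# default) returns everything, and the system message is always retained.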
+
+def test_default_all_history():
+ """Test default behavior (n=-1) returns all history"""
+ history = create_sample_conversation()
+
+ messages = history.to_messages()
+
+ # Should have: system + 8 turns (4 user + 4 assistant)
+ assert len(messages) == 9 # 1 system + 8 messages
+ assert messages[0]["role"] == "system"
+ assert messages[0]["content"] == "You are a helpful assistant."
+ assert messages[-1]["role"] == "assistant"
+
+
+def test_truncate_from_start():
+ """Test truncate_from_start strategy - keeps last N rounds"""
+ history = create_sample_conversation()
+
+ # Keep last 2 rounds (4 turns)
+ messages = history.to_messages(n=2, truncate_strategy="from_start")
+
+ # Should have: system + 2 rounds (4 turns)
+ assert len(messages) == 5 # 1 system + 4 messages
+ assert messages[0]["role"] == "system"
+
+ # Should have the last 2 rounds (round 3 and round 4)
+ # Round 3: user3 (umbrella question), assistant3 (umbrella answer)
+ # Round 4: user4 (thanks), assistant4 (welcome)
+ assert messages[1]["role"] == "user"
+ assert "umbrella" in messages[1]["content"][0]["text"]
+ assert messages[2]["role"] == "assistant"
+ # Content is now a list of dicts with type and text fields
+ assert any("umbrella" in item.get("text", "") for item in messages[2]["content"])
+ assert messages[3]["role"] == "user"
+ assert "Thanks" in messages[3]["content"][0]["text"]
+ assert messages[4]["role"] == "assistant"
+ # Content is now a list of dicts with type and text fields
+ assert any("welcome" in item.get("text", "") for item in messages[4]["content"])
+
+
+def test_truncate_from_end():
+ """Test truncate_from_end strategy - keeps first N rounds"""
+ history = create_sample_conversation()
+
+ # Keep first 2 rounds (4 turns)
+ messages = history.to_messages(n=2, truncate_strategy="from_end")
+
+ # Should have: system + 2 rounds (4 turns)
+ assert len(messages) == 5 # 1 system + 4 messages
+ assert messages[0]["role"] == "system"
+
+ # Should have the first 2 rounds (round 1 and round 2)
+ # Round 1: user1 (weather), assistant1 (sunny)
+ # Round 2: user2 (tomorrow), assistant2 (rainy)
+ assert messages[1]["role"] == "user"
+ assert "Hello" in messages[1]["content"][0]["text"]
+ assert messages[2]["role"] == "assistant"
+ # Content is now a list of dicts with type and text fields
+ assert any("sunny" in item.get("text", "") for item in messages[2]["content"])
+ assert messages[3]["role"] == "user"
+ assert "tomorrow" in messages[3]["content"][0]["text"]
+ assert messages[4]["role"] == "assistant"
+ # Content is now a list of dicts with type and text fields
+ assert any("rainy" in item.get("text", "") for item in messages[4]["content"])
+
+
+def test_truncate_zero_turns():
+ """Test truncating to 0 turns"""
+ history = create_sample_conversation()
+
+ messages = history.to_messages(n=0, truncate_strategy="from_start")
+
+ # Should only have system message
+ assert len(messages) == 1
+ assert messages[0]["role"] == "system"
+
+
+def test_truncate_more_than_available():
+ """Test requesting more turns than available"""
+ history = create_sample_conversation()
+
+ # Request 100 turns but only have 8
+ messages = history.to_messages(n=100, truncate_strategy="from_start")
+
+ # Should return all available
+ assert len(messages) == 9 # 1 system + 8 messages
+
+
+def test_empty_conversation():
+ """Test truncation on empty conversation"""
+ history = ConversationHistory(system_prompt="Test")
+
+ messages = history.to_messages(n=5)
+
+ assert len(messages) == 1 # Just system
+ assert messages[0]["role"] == "system"
+
+
+def test_to_litellm_format_with_truncation():
+ """Test to_litellm_format() also supports truncation"""
+ history = create_sample_conversation()
+
+ # n=2 means 2 rounds (4 turns), from_end keeps first 2 rounds
+ messages = history.to_litellm_format(n=2, truncate_strategy="from_end")
+
+ # Should have: system + 2 rounds (4 turns)
+ assert len(messages) == 5
+ assert messages[0]["role"] == "system"
+ assert messages[1]["role"] == "user"
+ assert messages[2]["role"] == "assistant"
+ assert messages[3]["role"] == "user"
+ assert messages[4]["role"] == "assistant"
+
+
+def test_invalid_strategy():
+ """Test that invalid strategy raises error"""
+ history = create_sample_conversation()
+
+ with pytest.raises(ValueError, match="Unknown truncate_strategy"):
+ history.to_messages(n=2, truncate_strategy="invalid_strategy")
+
+
+def test_negative_n_values():
+ """Test that n=-1 returns all history"""
+ history = create_sample_conversation()
+
+ # n=-1 should return all
+ messages_all = history.to_messages(n=-1)
+ assert len(messages_all) == 9
+
+ # Verify it's the same as not passing n at all
+ messages_default = history.to_messages()
+ assert len(messages_all) == len(messages_default)
+
+
+# ============================================================================
+# Multimodal / Multi-Image Tests
+# ============================================================================
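+
+# Per-block shape expected after to_litellm_format(), as asserted in the tests
+# below (the field names are exactly what these tests check for):
+#   {"type": "input_text", "text": "..."}
+#   {"type": "input_image", "image_url": "https://..."}  # or "data:image/<type>;base64,..."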
+
+def test_user_turn_multiple_images():
+ """Test that a user turn can have multiple images"""
+ history = ConversationHistory()
+
+ # Create a user turn with text and multiple images (like the OpenAI example)
+ user_turn = (UserTurn()
+ .add_text("What are in these images? Is there any difference between them?")
+ .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg")
+ .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg"))
+
+ history.add_user_turn(user_turn)
+
+ # Convert to LiteLLM format
+ messages = history.to_litellm_format()
+
+ # Should have 1 message
+ assert len(messages) == 1
+
+ user_msg = messages[0]
+ assert user_msg["role"] == "user"
+
+ # Content should be a list with 3 items: 1 text + 2 images
+ assert len(user_msg["content"]) == 3
+
+ # Check first item is text
+ assert user_msg["content"][0]["type"] == "input_text"
+ assert user_msg["content"][0]["text"] == "What are in these images? Is there any difference between them?"
+
+ # Check second item is first image
+ assert user_msg["content"][1]["type"] == "input_image"
+ assert user_msg["content"][1]["image_url"] == "https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg"
+
+ # Check third item is second image
+ assert user_msg["content"][2]["type"] == "input_image"
+ assert user_msg["content"][2]["image_url"] == "https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg"
+
+
+def test_assistant_turn_multiple_images():
+ """Test that an assistant turn can also have multiple images (for models that generate images)"""
+ history = ConversationHistory()
+
+ # Assistant turn with text and multiple images
+ assistant_turn = (AssistantTurn()
+ .add_text("Here are two generated images based on your request:")
+ .add_image(url="https://example.com/generated1.png")
+ .add_image(url="https://example.com/generated2.png"))
+
+ history.add_assistant_turn(assistant_turn)
+
+ # Convert to LiteLLM format
+ messages = history.to_litellm_format()
+
+ assert len(messages) == 1
+ assert messages[0]["role"] == "assistant"
+
+ # Assistant should have text content (now in list format)
+ assert any("Here are two generated images" in item.get("text", "") for item in messages[0]["content"])
+
+
+def test_mixed_content_types_in_turn():
+ """Test mixing text, images, and other content types in a single turn"""
+ history = ConversationHistory()
+
+ # Create a complex turn with multiple content types
+ user_turn = (UserTurn()
+ .add_text("Please analyze these images and this document:")
+ .add_image(url="https://example.com/chart1.png")
+ .add_image(url="https://example.com/chart2.png")
+ .add_text("What patterns do you see?"))
+
+ history.add_user_turn(user_turn)
+
+ messages = history.to_litellm_format()
+
+ assert len(messages) == 1
+ user_msg = messages[0]
+
+ # Should have 4 content blocks: text, image, image, text
+ assert len(user_msg["content"]) == 4
+ assert user_msg["content"][0]["type"] == "input_text"
+ assert user_msg["content"][1]["type"] == "input_image"
+ assert user_msg["content"][2]["type"] == "input_image"
+ assert user_msg["content"][3]["type"] == "input_text"
+
+
+def test_multiple_images_with_base64():
+ """Test multiple images using base64 encoding"""
+ history = ConversationHistory()
+
+ # Create fake base64 image data
+ fake_image_data1 = base64.b64encode(b"fake image 1").decode('utf-8')
+ fake_image_data2 = base64.b64encode(b"fake image 2").decode('utf-8')
+
+ user_turn = (UserTurn()
+ .add_text("Compare these two images:")
+ .add_image(data=fake_image_data1, media_type="image/png")
+ .add_image(data=fake_image_data2, media_type="image/jpeg"))
+
+ history.add_user_turn(user_turn)
+
+ messages = history.to_litellm_format()
+
+ assert len(messages) == 1
+ user_msg = messages[0]
+
+ # Should have 3 content blocks
+ assert len(user_msg["content"]) == 3
+
+ # Check base64 data URLs are properly formatted
+ assert user_msg["content"][1]["type"] == "input_image"
+ assert user_msg["content"][1]["image_url"].startswith("data:image/png;base64,")
+
+ assert user_msg["content"][2]["type"] == "input_image"
+ assert user_msg["content"][2]["image_url"].startswith("data:image/jpeg;base64,")
+
+
+def test_conversation_with_multiple_multi_image_turns():
+ """Test a full conversation where multiple turns each have multiple images"""
+ history = ConversationHistory(system_prompt="You are a helpful image analysis assistant.")
+
+ # User turn 1: Multiple images
+ user1 = (UserTurn()
+ .add_text("What's the difference between these flowers?")
+ .add_image(url="https://example.com/rose.jpg")
+ .add_image(url="https://example.com/tulip.jpg"))
+ history.add_user_turn(user1)
+
+ # Assistant response
+ assistant1 = AssistantTurn().add_text("The first is a rose with layered petals, the second is a tulip with a cup shape.")
+ history.add_assistant_turn(assistant1)
+
+ # User turn 2: More images
+ user2 = (UserTurn()
+ .add_text("Now compare these landscapes:")
+ .add_image(url="https://example.com/mountain.jpg")
+ .add_image(url="https://example.com/beach.jpg")
+ .add_image(url="https://example.com/forest.jpg"))
+ history.add_user_turn(user2)
+
+ messages = history.to_litellm_format()
+
+ # Should have: system + user1 + assistant1 + user2
+ assert len(messages) == 4
+
+ # Check user1 has 3 content blocks (1 text + 2 images)
+ assert len(messages[1]["content"]) == 3
+
+ # Check user2 has 4 content blocks (1 text + 3 images)
+ assert len(messages[3]["content"]) == 4
+
+
+# ============================================================================
+# Integration Tests - Truncation + Multimodal
+# ============================================================================
+
+def test_truncate_multimodal_conversation():
+ """Test truncation works correctly with multimodal content"""
+ history = ConversationHistory(system_prompt="You are a vision assistant.")
+
+ # Add several turns with images (5 rounds = 10 turns)
+ for i in range(5):
+ user = (UserTurn()
+ .add_text(f"Analyze image {i}")
+ .add_image(url=f"https://example.com/image{i}.jpg"))
+ assistant = AssistantTurn().add_text(f"Analysis of image {i}")
+ history.add_user_turn(user).add_assistant_turn(assistant)
+
+ # Truncate to last 2 rounds (4 turns)
+ messages = history.to_messages(n=2, truncate_strategy="from_start")
+
+ # Should have system + 2 rounds (4 turns)
+ assert len(messages) == 5
+
+ # Check that multimodal content is preserved
+ assert len(messages[1]["content"]) == 2 # text + image
+ assert messages[1]["content"][1]["type"] == "input_image"
+
+# ============================================================================
+# Real LLM Call Tests with Images
+# ============================================================================
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_real_llm_call_with_multiple_images():
+ """Test sending real images to GPT and getting a response.
+
+ This test sends two flower images to GPT-4 Vision and asks it to compare them.
+ """
+ from opto.utils.llm import LLM
+
+ # Create conversation with images
+ history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.")
+
+ # Create a user turn with text and two real flower images
+ user_turn = (UserTurn()
+ .add_text("What are in these images? Is there any difference between them? Please describe each image briefly.")
+ .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg")
+ .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg"))
+
+ history.add_user_turn(user_turn)
+
+ # Get messages in LiteLLM format
+ messages = history.to_litellm_format()
+
+ print("\n" + "="*80)
+ print("REAL LLM CALL WITH MULTIPLE IMAGES")
+ print("="*80)
+ print(f"\nSending {len(user_turn.content)} content blocks (1 text + 2 images)...")
+
+ # Make the LLM call with mm_beta=True for Response API format
+ llm = LLM(mm_beta=True)
+ response = llm(messages=messages, max_tokens=500)
+
+ # response is now an AssistantTurn object
+ response_content = response.to_text()
+
+ print("\n📷 User Query:")
+ print(" What are in these images? Is there any difference between them?")
+ print("\n🤖 GPT Response:")
+ print("-" * 40)
+ print(response_content)
+ print("-" * 40)
+
+ # Store assistant response in history
+ history.add_assistant_turn(response)
+
+ # Verify we got a meaningful response
+ assert response_content is not None
+ assert len(response_content) > 50 # Should have some substantial content
+
+ # The response should mention something about flowers/images
+ response_lower = response_content.lower()
+ assert any(word in response_lower for word in ["flower", "image", "picture", "rose", "pink", "red", "petal"]), \
+ f"Response doesn't seem to describe the flower images: {response_content[:200]}..."
+
+ print("\n✅ Successfully received and validated GPT response about the images!")
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_real_llm_multi_turn_with_images():
+ """Test a multi-turn conversation with images.
+
+ First turn: Ask about images
+ Second turn: Follow-up question about the same images
+ """
+ from opto.utils.llm import LLM
+
+ history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.")
+ llm = LLM(mm_beta=True)
+
+ print("\n" + "="*80)
+ print("MULTI-TURN CONVERSATION WITH IMAGES")
+ print("="*80)
+
+ # Turn 1: Send images and ask about them
+ user_turn1 = (UserTurn()
+ .add_text("What type of flowers are shown in these images?")
+ .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg")
+ .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg"))
+
+ history.add_user_turn(user_turn1)
+ messages = history.to_litellm_format()
+
+ print("\n📷 Turn 1 - User:")
+ print(" What type of flowers are shown in these images? [+ 2 images]")
+
+ response1 = llm(messages=messages, max_tokens=300)
+ response1_content = response1.to_text()
+
+ print("\n🤖 Turn 1 - Assistant:")
+ print(f" {response1_content[:200]}...")
+
+ history.add_assistant_turn(response1)
+
+ # Turn 2: Follow-up question (no new images, but context from previous turn)
+ user_turn2 = UserTurn().add_text("Which of these flowers would be better for a romantic gift and why?")
+ history.add_user_turn(user_turn2)
+
+ messages = history.to_litellm_format()
+
+ print("\n📷 Turn 2 - User:")
+ print(" Which of these flowers would be better for a romantic gift and why?")
+
+ response2 = llm(messages=messages, max_tokens=300)
+ response2_content = response2.to_text()
+
+ print("\n🤖 Turn 2 - Assistant:")
+ print(f" {response2_content[:200]}...")
+
+ # Verify responses
+ assert response1_content is not None and len(response1_content) > 20
+ assert response2_content is not None and len(response2_content) > 20
+
+ # Turn 2 should reference the context from turn 1
+ response2_lower = response2_content.lower()
+ assert any(word in response2_lower for word in ["flower", "rose", "romantic", "gift", "love"]), \
+ "Turn 2 response doesn't seem to reference the flower context"
+
+ print("\n✅ Multi-turn conversation with images completed successfully!")
+
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_real_llm_multi_turn_with_images_updated_assistant_turn():
+ """Test a multi-turn conversation with images.
+
+ First turn: Ask about images
+ Second turn: Follow-up question about the same images
+ """
+ from opto.utils.llm import LLM
+
+ history = ConversationHistory(system_prompt="You are a helpful assistant that can analyze images.")
+ llm = LLM(mm_beta=True)
+
+ print("\n" + "="*80)
+ print("MULTI-TURN CONVERSATION WITH IMAGES")
+ print("="*80)
+
+ # Turn 1: Send images and ask about them
+ user_turn1 = (UserTurn()
+ .add_text("What type of flowers are shown in these images?")
+ .add_image(url="https://images.pexels.com/photos/736230/pexels-photo-736230.jpeg")
+ .add_image(url="https://images.contentstack.io/v3/assets/bltcedd8dbd5891265b/blt134818d279038650/6668df6434f6fb5cd48aac34/beautiful-flowers-rose.jpeg"))
+
+ history.add_user_turn(user_turn1)
+ messages = history.to_litellm_format()
+
+ print("\n📷 Turn 1 - User:")
+ print(" What type of flowers are shown in these images? [+ 2 images]")
+
+ at = llm(messages=messages, max_tokens=300)
+
+ print("\n🤖 Turn 1 - Assistant:")
+ print(f" {at.to_text()[:200]}...")
+
+ history.add_assistant_turn(at)
+
+ # Turn 2: Follow-up question (no new images, but context from previous turn)
+ user_turn2 = UserTurn().add_text("Which of these flowers would be better for a romantic gift and why?")
+ history.add_user_turn(user_turn2)
+
+ messages = history.to_litellm_format()
+
+ print("\n📷 Turn 2 - User:")
+ print(" Which of these flowers would be better for a romantic gift and why?")
+
+ response2 = llm(messages=messages, max_tokens=300)
+ response2_content = response2.to_text()
+
+ print("\n🤖 Turn 2 - Assistant:")
+ print(f" {response2_content[:200]}...")
+
+ # Verify responses
+ assert at.to_text() is not None and len(at.to_text()) > 20
+ assert response2_content is not None and len(response2_content) > 20
+
+ # Turn 2 should reference the context from turn 1
+ response2_lower = response2_content.lower()
+ assert any(word in response2_lower for word in ["flower", "rose", "romantic", "gift", "love"]), \
+ "Turn 2 response doesn't seem to reference the flower context"
+
+ print("\n✅ Multi-turn conversation with images completed successfully!")
+
+@pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No GEMINI_API_KEY found")
+def test_real_google_genai_multi_turn_with_images_updated():
+ """Test multi-turn conversation with images using Google Gemini image generation model"""
+ from opto.utils.llm import LLM
+
+ print("\n" + "="*80)
+ print("Testing Multi-turn Conversation with Gemini Image Generation")
+ print("="*80)
+
+ # Initialize conversation history
+ history = ConversationHistory()
+ history.system_prompt = "You are a helpful assistant that can generate and discuss images."
+
+ # Use a Gemini model that supports image generation
+ model = "gemini-2.5-flash-image"
+ llm = LLM(model=model, mm_beta=True)
+
+ print("="*80)
+
+ # Turn 1: Ask to generate an image
+ user_turn1 = UserTurn().add_text("Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.")
+
+ history.add_user_turn(user_turn1)
+
+ print("\n📷 Turn 1 - User:")
+ print(" Generate an image of a serene mountain landscape at sunrise with a lake in the foreground.")
+
+ # For image generation models, pass the prompt directly instead of messages
+ prompt = user_turn1.content.to_text()
+ response1 = llm(prompt=prompt, max_tokens=300)
+ at = AssistantTurn(response1)
+
+ print("\n🤖 Turn 1 - Assistant:")
+ print(f" {at.to_text()[:200] if at.to_text() else '[Image generated]'}...")
+
+ history.add_assistant_turn(at)
+
+ # Turn 2: Follow-up question about the generated image
+ user_turn2 = UserTurn().add_text("Can you describe the colors and mood of the image you just generated?")
+ history.add_user_turn(user_turn2)
+
+ messages = history.to_gemini_format()
+
+ print("\n📷 Turn 2 - User:")
+ print(" Can you describe the colors and mood of the image you just generated?")
+
+ response2 = llm(messages=messages, max_tokens=300)
+ at2 = AssistantTurn(response2)
+ response2_content = at2.to_text()
+
+ print("\n🤖 Turn 2 - Assistant:")
+ print(f" {response2_content[:200]}...")
+
+ # Verify responses
+ assert at.content is not None and len(at.content) > 0
+ assert response2_content is not None and len(response2_content) > 20
+
+ # Turn 2 should reference the context from turn 1
+ response2_lower = response2_content.lower()
+ assert any(word in response2_lower for word in ["mountain", "sunrise", "lake", "color", "mood", "landscape"]), \
+ "Turn 2 response doesn't seem to reference the image generation context"
+
+ print("\n✅ Multi-turn conversation with Gemini image generation completed successfully!")
+
+# ==== Testing the Automatic Raw Response Parsing into AssistantTurn ===
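+# These tests pass raw provider responses (litellm.responses(...) and the
+# google-genai generate_content(...) result) straight to AssistantTurn(); the
+# point being checked is that parsing into .content blocks happens automatically.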
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_automatic_openai_raw_response_parsing_into_assistant_turn():
+ import litellm
+ import base64
+
+ # Simple OpenAI text generation
+ response = litellm.responses(
+ model="openai/gpt-4o",
+ input="Hello, how are you?"
+ )
+ assistant_turn = AssistantTurn(response)
+ assert "Hello" in assistant_turn.content[0].text
+
+ print(assistant_turn)
+
+@pytest.mark.skipif(not HAS_CREDENTIALS, reason=SKIP_REASON)
+def test_automatic_openai_multimodal_raw_response_parsing_into_assistant_turn():
+ import litellm
+ import base64
+
+ # OpenAI models require tools parameter for image generation
+ response = litellm.responses(
+ model="openai/gpt-4o",
+ input="Generate a futuristic city at sunset and describe it in a sentence.",
+ tools=[{"type": "image_generation"}]
+ )
+
+ assistant_turn = AssistantTurn(response)
+ print(assistant_turn)
+
+
+@pytest.mark.skipif(not os.environ.get("GEMINI_API_KEY"), reason="No GEMINI_API_KEY found")
+def test_automatic_google_generate_content_raw_response_parsing_into_assistant_turn():
+ from google import genai
+ from google.genai import types
+
+ client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
+
+ response = client.models.generate_content(
+ model="gemini-2.5-flash-image",
+ contents="A kawaii-style sticker of a happy red panda wearing a tiny bamboo hat. It's munching on a green bamboo leaf. The design features bold, clean outlines, simple cel-shading, and a vibrant color palette. The background must be white.",
+ )
+
+ assistant_turn = AssistantTurn(response)
+ print(assistant_turn)
+
+ assert not assistant_turn.content[1].is_empty()
+
+
+
+if __name__ == '__main__':
+ import litellm
+ import base64
+
+ # Gemini image generation models don't require tools parameter
+ response = litellm.responses(
+ model="gemini/gemini-2.5-flash-image",
+ input="Generate a cute cat playing with yarn"
+ )
+
+ # Access generated images from output
+ for item in response.output:
+ if item.type == "image_generation_call":
+ # item.result contains pure base64 (no data: prefix)
+ image_bytes = base64.b64decode(item.result)
+
+ # Save the image
+ with open(f"generated_{item.id}.png", "wb") as f:
+ f.write(image_bytes)
+
+ print(f"Image saved: generated_{response.output[0].id}.png")
+
+    # Scratch left from manual experimentation: sets up a google-genai chat
+    # client but is not exercised by the assertions above.
+    from google import genai
+
+ client = genai.Client()
+ chat = client.chats.create(model="gemini-2.5-flash")
+
+
diff --git a/tests/unit_tests/test_priority_search.py b/tests/unit_tests/test_priority_search.py
index a7ff24d3..bc215523 100644
--- a/tests/unit_tests/test_priority_search.py
+++ b/tests/unit_tests/test_priority_search.py
@@ -130,6 +130,15 @@ def _llm_callable(messages, **kwargs):
A dummy LLM callable that simulates a response.
"""
problem = messages[1]['content']
+    # in newer LLM APIs (LiteLLM, OpenAI client, etc.), the user message content is a list of typed messages:
+ # [{'type': 'text', 'text': '...'}, {'type': 'image', 'image_url': '...'}]
+ # this expansion is necessary for multi-modal inputs
+
+    if isinstance(problem, list):
+ for typed_message in problem:
+ if typed_message['type'] == 'text':
+ problem = typed_message['text']
+ break
# extract name from
name = re.findall(r"", problem)