Changes from all commits (25 commits)
d0d0e12  Merge pull request #1 from jswope00/main (jonippolito, Apr 17, 2025)
d5365d7  Create picture-hanger.py (jonippolito, Apr 17, 2025)
cb87105  Rename picture-hanger.py to app_picture_hanger.py (jonippolito, Apr 17, 2025)
4364b23  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
cf1ffe7  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
2f0bdba  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
5aeb65d  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
e27cd86  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
d279b28  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
f12a885  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
f9d1aa6  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
702059e  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
0a7b9f0  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
6ce412a  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
2f14a53  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
4147de7  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
8509754  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
5015e6a  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
4f3635f  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
77ae5bf  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
aa31c5b  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
728da00  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
fa12292  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
a5624f7  Update app_picture_hanger.py (jonippolito, Apr 17, 2025)
132d4f7  Add new 4.1 models and update chat completions to more recent responses (jswope00, Apr 18, 2025)
161 changes: 161 additions & 0 deletions app_picture_hanger.py
@@ -0,0 +1,161 @@
PUBLISHED = False
APP_URL = "https://voting-decision-tree.streamlit.app"

APP_TITLE = "🖼 Hang a Picture"
APP_INTRO = """🔨 This app helps you hang a painting, framed photo, or other flat artwork on the wall of a home or gallery."""

APP_HOW_IT_WORKS = """
This app helps you hang a picture by telling you where to hammer your nail based on the size of your wall and the frame.\n\nIt uses OpenAI behind the scenes to calculate this position based on the fields you complete and generates instructions accordingly.
"""

SHARED_ASSET = {
}

HTML_BUTTON = {
"button_text": "Learn to make an AI MicroApp like this",
"url": "https://www.youtube.com/watch?v=jqpEJ2995IA"
}

SYSTEM_PROMPT = """Acting as an expert in curating displays of art in homes, galleries, and museums, you will generate concise instructions in plain language understandable by a lay person on how to hang a picture, specifically where to place a nail in accordance with the measurements of the wall and picture supplied in the prompt. Your calculations will depend on the following fields from the user:

viewer_height_inches: This will be the typical standing viewer's height, and will be used to calculate the average eye level.

picture_height: This will be the vertical dimension of the picture.

drop_to_hardware: This will be the distance from the top of the picture to the wire, cleat, or other hanging hardware attached to the back of the picture. In almost every case, the hardware will be screwed or adhered to the back of the picture so as to be invisible to the viewer; the viewer will not see the nail or wire sticking out above the picture frame.

available_wall_width: This is the running length of wall space available to mount the picture; it is bounded by obstacles to the left and right such as wall corners, furniture, or other hangings.

When asked to perform a calculation, please use Python to perform the calculation and follow the provided instructions EXACTLY.
"""

PHASES = {
"dimension_calculations": {
"name": "Calculate your nail's position",
"fields": {
"info": {
"type": "markdown",
"body": "Use a tape measure to find the following dimensions."
},
"measurement_units": {
"type": "selectbox",
"options": ['inches', 'centimeters'],
"label": "Are you measuring dimensions in inches or centimeters?",
"help": "Whole numbers without fractions or decimals will suffice",
},
"viewer_height_inches": {
"type": "slider",
"min_value": 60,
"max_value": 75,
"value": 66,
"label": "How tall is your average viewer in inches? (The average American height is 66 in.)",
"showIf": {"measurement_units": "inches"},
},
"viewer_height_centimeters": {
"type": "slider",
"min_value": 155,
"max_value": 180,
"value": 168,
"label": "How tall is your average viewer in centimeters? (The average American height is 168 cm.)",
"showIf": {"measurement_units": "centimeters"},
},
"picture_height": {
"type": "number_input",
"step": 1,
"label": "What's the height of your picture, including the frame? (You can just type a whole number)",
},
"drop_to_hardware": {
"type": "number_input",
"step": 1,
"label": "How far below the top of the picture is the hanger on the back?",
"help": "Measure down from the top to the place where the nail will go, whether a hook or a wire held taut, in the units you chose (inches or cm).",
},
"available_wall_width": {
"type": "number_input",
"step": 1,
"label": "How much horizontal wall space is available?",
"help": "Include the total span in the units you chose (inches or cm), eg between adjacent walls, nearby furniture, or pictures to the left and right.",
},
"picture_weight": {
"type": "selectbox",
"options": ['light (under 5 pounds)', 'medium (5-20 pounds)', 'heavy (over 20 pounds)'],
"label": "How heavy is the picture?",
"help": "Include the frame and glazing (glass or Plexi front}, if any"
},
"wall_type": {
"type": "selectbox",
"options": ['normal', 'reinforced'],
"label": "What type of wall are you hanging on?",
"help": "Most American rooms have normal drywall, but some galleries are reinforced with plywood backing"
},
},
"user_prompt": [
{
"condition": {},
"prompt": "Please use {measurement_units} in all of your output for this prompt."
},
{
"condition": {},
"prompt": """
Use Python to set [height] = ((.93 * {viewer_height_inches}) + ({picture_height}/2) - {drop_to_hardware}). Set [height] to EXACTLY the result of this calculation, with no further assumptions or calculations. Show me step by step reasoning for the calculation.
Use Python to set [distance] = ({available_wall_width}/2)
- Now tell the user to place the nail at a height of [height] off the floor and a horizontal distance of [distance] in {measurement_units} from the left obstacle. Do not tell them how to do the calculations; just do the calculations yourself and tell the user the results.\n
""",
},
{
"condition": {"$and":[{"picture_weight": "light"},{"wall_type": "normal"}]},
"prompt": "- Tell the user a simple nail or adhesive hook should suffice for a lightweight picture.\n",
},
{
"condition": {"$and":[{"picture_weight": "medium"},{"wall_type": "normal"}]},
"prompt": "- Recommend a picture hook or wall anchor to compensate for a medium-weight picture. Also mention that you can place painter's tape on the wall where you plan to drill or hammer to prevent the wall from chipping and making dust. \n",
},
{
"condition": {"$and":[{"picture_weight": "heavy"},{"wall_type": "normal"}]},
"prompt": "- Recommend for a heavy picture that the user hammer one or more nails into one of the vertical wooden studs behind the wallboard, adding guidance that American homes are usually built with studs placed every 16 inches on-center.\n",
},
{
"condition": {"wall_type": "reinforced"},
"prompt": "- Explain that you can hang any reasonably sized picture by hammering one or more nails into the plywood behind the wallboard.\n",
},
{
"condition": {},
"prompt": "- After typing out the preceding information, think of a prompt that can be entered in ChatGPT to generate a diagram illustrating the measurements supplied by the user, with labeled arrows to indicate the appropriate dimensions. This schematic image should include a small nail icon or graphic positioned [height] {dimension_units} off the floor and a distance of [distance] {measurement_units} from the nearest left obstacle. The latter dimension is the distance from the nail to the nearest left obstacle; it is not the distance between the edge of the picture and the nail location. Your prompt should also draw a dashed rectangle corresponding to the picture frame, showing that the picture has a height of {picture_height} and another labeled arrow showing the picture has a hardware drop of {hardware_drop}. The lower end of the hardware drop should line up horizontally with the position of the nail. Your prompt should ask ChatGPT to draw this in the style of an architectural blueprint with white lines and text on a blue background. Your prompt should clarify that the diagram should be as easy to follow as possible, with no extraneous text or imagery. Finally, type a message to the user suggesting entering this prompt into ChatGPT.com to generate a useful diagram.\n",
},
],
"ai_response": True,
"allow_skip": False,
"show_prompt": True,
"allow_revisions": True,
#"read_only_prompt": False
}
}

PREFERRED_LLM = "gpt-4o-mini"
LLM_CONFIG_OVERRIDE = {"gpt-4o-mini": {
"family": "openai",
"model": "gpt-4o-mini",
"max_tokens": 1000,
"temperature": 0.5,
"top_p": 1.0,
}
}

PAGE_CONFIG = {
"page_title": "Hang a Picture",
"page_icon": "️🖼",
"layout": "centered",
"initial_sidebar_state": "collapsed"
}

SIDEBAR_HIDDEN = True

SCORING_DEBUG_MODE = True
DISPLAY_COST = True

COMPLETION_MESSAGE = "⚠️ Chatbots can generate incorrect results; your eye is the best judge of the results. We hope this app makes it easier to hang art in your room!"
COMPLETION_CELEBRATION = False

from core_logic.main import main
if __name__ == "__main__":
main(config=globals())
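For reference, the nail-position arithmetic that the dimension_calculations prompt asks the model to perform can be reproduced directly in Python. This is only an illustrative sketch: the measurement values below are hypothetical, and 0.93 × viewer height is the prompt's proxy for eye level.

# Sketch of the calculation described in the user_prompt above (hypothetical inputs).
viewer_height = 66          # typical standing viewer, in inches
picture_height = 30         # height of the frame
drop_to_hardware = 4        # top of frame down to the wire or cleat
available_wall_width = 80   # clear wall span between left and right obstacles

eye_level = 0.93 * viewer_height                              # approximate eye level
height = eye_level + picture_height / 2 - drop_to_hardware    # nail height off the floor
distance = available_wall_width / 2                           # centered in the available span

print(f"Nail height: {height:.1f} in, distance from left obstacle: {distance:.1f} in")
# Nail height: 72.4 in, distance from left obstacle: 40.0 in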
63 changes: 45 additions & 18 deletions core_logic/handlers.py
@@ -1,4 +1,4 @@
import openai
from openai import OpenAI
import anthropic
import google.generativeai as genai
from core_logic import rag_pipeline
@@ -42,39 +42,66 @@ def format_chat_history(chat_history, family):
return formatted_history

# openai llm handler

def handle_openai(context):
"""Handle requests for OpenAI models."""
"""Handle requests for OpenAI models using the new Responses API."""
if not context["supports_image"] and context.get("image_urls"):
return "Images are not supported by selected model."

try:
openai.api_key = get_api_key("openai")
# 1. Initialize the new client
client = OpenAI(api_key=get_api_key("openai"))

# 2. Build a single instructions string
instructions = "\n".join([
context["SYSTEM_PROMPT"],
context["phase_instructions"]
])

messages = format_chat_history(context["chat_history"], "openai") + [
{"role": "system", "content": context["SYSTEM_PROMPT"]},
{"role": "assistant", "content": context["phase_instructions"]},
# 3. Prepare the input messages array
inputs = format_chat_history(context["chat_history"], "openai") + [
{"role": "user", "content": context["user_prompt"]}
]

if context["supports_image"] and context["image_urls"]:
messages.insert(2, {"role": "user", "content": [{"type": "image_url", "image_url": {"url": url}} for url in
context["image_urls"]]})

response = openai.chat.completions.create(
# Responses API expects input_image objects in the same list
img_msgs = {
"role": "user",
"content": [
{"type": "input_image", "image_url": url}
for url in context["image_urls"]
]
}
# insert after history but before user prompt
inputs.insert(
len(format_chat_history(context["chat_history"], "openai")),
img_msgs
)

# 4. Call the Responses endpoint
response = client.responses.create(
model=context["model"],
messages=messages,
instructions=instructions,
input=inputs,
temperature=context["temperature"],
max_tokens=context["max_tokens"],
top_p=context["top_p"],
frequency_penalty=context["frequency_penalty"],
presence_penalty=context["presence_penalty"]
max_output_tokens=context["max_tokens"],
# stream=True # if you want streaming responses
)
input_price = int(getattr(response.usage, 'prompt_tokens', 0)) * context["price_input_token_1M"] / 1000000
output_price = int(getattr(response.usage, 'completion_tokens', 0)) * context["price_output_token_1M"] / 1000000

# 5. Pricing: the Responses API reports usage as input_tokens / output_tokens
input_toks = getattr(response.usage, "input_tokens", 0)
output_toks = getattr(response.usage, "output_tokens", 0)
input_price = input_toks * context["price_input_token_1M"] / 1_000_000
output_price = output_toks * context["price_output_token_1M"] / 1_000_000
execution_price = input_price + output_price
return response.choices[0].message.content, execution_price

# 6. Extract the assistant’s reply
return response.output_text, execution_price

except Exception as e:
return f"Unexpected error while handling OpenAI request: {e}"

# claude llm handler
def handle_claude(context):
"""Handle requests for Claude models."""
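The handler above replaces openai.chat.completions with the newer Responses API. A minimal standalone sketch of that call pattern follows, assuming an openai Python SDK recent enough to expose client.responses; the model name and prompts are placeholders.

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
    model="gpt-4o-mini",                                    # placeholder model
    instructions="System prompt plus phase instructions",   # replaces the old system message
    input=[
        {"role": "user", "content": "Where should the nail go?"},
        # Image input, for models that support it, uses input_image parts:
        # {"role": "user", "content": [{"type": "input_image", "image_url": "https://example.com/wall.jpg"}]},
    ],
    temperature=0.5,
    max_output_tokens=200,
)

print(response.output_text)                                       # concatenated assistant text
print(response.usage.input_tokens, response.usage.output_tokens)  # token counts for pricing

Token usage comes back as input_tokens and output_tokens rather than the Chat Completions prompt_tokens and completion_tokens, which is why the pricing step reads those fields.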
42 changes: 22 additions & 20 deletions core_logic/llm_config.py
@@ -6,32 +6,36 @@
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": False,
"price_input_token_1M": 0.15,
"price_output_token_1M": 0.60
},
"gpt-4-turbo": {
"gpt-4.1": {
"family": "openai",
"model": "gpt-4-turbo",
"model": "gpt-4.1",
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": True,
"price_input_token_1M": 10,
"price_output_token_1M": 30
"price_input_token_1M": 2,
"price_output_token_1M": 8
},
"gpt-4.1-mini": {
"family": "openai",
"model": "gpt-4.1-mini",
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 1.0,
"supports_image": True,
"price_input_token_1M": .4,
"price_output_token_1M": 1.6
},
"rag-with-gpt-4o": {
"family": "rag",
"model": "gpt-4-turbo",
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": True,
"price_input_token_1M": 10,
"price_output_token_1M": 30
@@ -42,35 +46,33 @@
"max_tokens": 2000,
"temperature": 1.0,
"top_p": 1.0,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": True,
"price_input_token_1M": 2.5,
"price_output_token_1M": 10
},
"gemini-2.0-flash-lite": {
"gemini-pro": {
"family": "gemini",
"model": "gemini-2.0-flash-lite",
"model": "gemini-pro",
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 0.95,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": False,
"price_input_token_1M": 0.15,
"price_output_token_1M": 0.60
"price_input_token_1M": 2.5,
"price_output_token_1M": 10.00
},
"gemini-2.0-flash": {
"gemini-pro-vision": {
"family": "gemini",
"model": "gemini-2.0-flash",
"model": "gemini-pro-vision",
"max_tokens": 1000,
"temperature": 1.0,
"top_p": 0.95,
"frequency_penalty": 0,
"presence_penalty": 0,
"supports_image": True,
"price_input_token_1M": 2.5,
"price_output_token_1M": 10.00
"price_input_token_1M": 0.15,
"price_output_token_1M": 0.60
},
"claude-3.5-sonnet": {
"family": "claude",
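As a sanity check on the per-million-token prices above, the cost arithmetic used by the handlers works out as follows for a hypothetical gpt-4.1-mini request (token counts are made up):

# Hypothetical request against gpt-4.1-mini ($0.40 in / $1.60 out per 1M tokens)
price_input_token_1M = 0.4
price_output_token_1M = 1.6
input_tokens = 2_000
output_tokens = 500

input_price = input_tokens * price_input_token_1M / 1_000_000      # 0.0008 USD
output_price = output_tokens * price_output_token_1M / 1_000_000   # 0.0008 USD
execution_price = input_price + output_price                       # 0.0016 USD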
4 changes: 2 additions & 2 deletions core_logic/main.py
@@ -214,8 +214,8 @@ def execute_llm_completions(SYSTEM_PROMPT,selected_llm, phase_instructions, user
"max_tokens": model_config["max_tokens"],
"temperature": model_config["temperature"],
"top_p": model_config["top_p"],
"frequency_penalty": model_config["frequency_penalty"],
"presence_penalty": model_config["presence_penalty"],
"frequency_penalty": model_config.get("frequency_penalty", 0),
"presence_penalty": model_config.get("presence_penalty", 0),
"price_input_token_1M": model_config["price_input_token_1M"],
"price_output_token_1M": model_config["price_output_token_1M"],
"TOTAL_PRICE": 0,
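Because the OpenAI entries in llm_config.py no longer define frequency_penalty or presence_penalty, the dict.get fallbacks above keep the context construction from raising a KeyError. A minimal illustration, with a hypothetical config dict:

# Hypothetical model_config without penalty keys, as with the new gpt-4.1 entries
model_config = {"max_tokens": 1000, "temperature": 1.0, "top_p": 1.0}

frequency_penalty = model_config.get("frequency_penalty", 0)  # falls back to 0
presence_penalty = model_config.get("presence_penalty", 0)    # falls back to 0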