Streamlit

import streamlit as st

# Hide Streamlit's default chrome (hamburger menu, footer, deploy button,
# top decoration bar) and pull the page content up.
st.markdown("""
    <style>
        .reportview-container {
            margin-top: -2em;
        }
        #MainMenu {visibility: hidden;}
        .stDeployButton {display: none;}
        footer {visibility: hidden;}
        #stDecoration {display: none;}
    </style>
""", unsafe_allow_html=True)

Authentication

import yaml
from yaml.loader import SafeLoader
import streamlit as st
import streamlit_authenticator as stauth

# Load the credentials and cookie settings from config.yml
with open('./config.yml') as file:
    config = yaml.load(file, Loader=SafeLoader)
    
authenticator = stauth.Authenticate(
    config['credentials'],
    config['cookie']['name'],
    config['cookie']['key'],
    config['cookie']['expiry_days'],
)

name, auth_status, username = authenticator.login()

if auth_status:
    # Authenticated: render a logout button and run the application below
    authenticator.logout()
elif auth_status is False:
    st.error("Username or password is incorrect")
else:
    st.warning("Please enter your username and password")
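
The loader above expects config.yml to contain the credentials block and the cookie settings passed to the constructor. A minimal sketch that writes such a file, assuming streamlit-authenticator's documented schema (the username, email, and hash are placeholders; passwords must be stored as bcrypt hashes, which the library's Hasher utility can generate):

import yaml

config = {
    "credentials": {
        "usernames": {
            "demo_user": {                      # placeholder username
                "email": "demo@example.com",
                "name": "Demo User",
                "password": "$2b$12$placeholderhash",  # bcrypt hash, never plain text
            }
        }
    },
    "cookie": {
        "name": "ellon_chat_cookie",            # placeholder cookie name
        "key": "some_random_signature_key",     # placeholder signing key
        "expiry_days": 30,
    },
}

with open("config.yml", "w") as file:
    yaml.safe_dump(config, file, sort_keys=False)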

Chat Application

import os
import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.chat_message_histories import SQLChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.output_parsers import StrOutputParser
from dotenv import load_dotenv

# Load environment variables (e.g. the Google API key and CONNECTION_STRING) from .env
load_dotenv(".env", override=True)
CONNECTION_STRING = os.getenv("CONNECTION_STRING")

# Inject the chat stylesheet (design.css, reproduced at the end of this page)
with open('design.css') as source:
    st.markdown(f"<style>{source.read()}</style>", unsafe_allow_html=True)

def get_session_history(session_id):
    # Messages for each session are persisted in the SQL database behind CONNECTION_STRING
    chat_history = SQLChatMessageHistory(
        session_id=session_id,
        connection=CONNECTION_STRING
    )
    return chat_history
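
# Sanity-check helper (illustrative only, never called by the app). Assumes
# CONNECTION_STRING is a SQLAlchemy URL such as "sqlite:///chat_history.db";
# the session id below is a placeholder.
def _smoke_test_history(session_id="smoke-test-session"):
    history = get_session_history(session_id)
    history.add_user_message("Hello")
    history.add_ai_message("Hi there!")
    return [(msg.type, msg.content) for msg in history.messages]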

def get_response(question, chat_session_id, model_name,
                 temperature, max_tokens, system_prompt):
    # Build the model with the settings chosen in the sidebar
    llm = ChatGoogleGenerativeAI(model=model_name,
                                 temperature=temperature,
                                 max_tokens=max_tokens)
    parser = StrOutputParser()
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="history"),
            ("human", "{question}"),
        ]
    )
    runnable = prompt | llm | parser
    # Wrap the chain so previous turns are injected into the "history" placeholder
    # and new turns are written back to the SQL store automatically.
    with_message_history = RunnableWithMessageHistory(
        runnable,
        get_session_history,
        input_messages_key="question",
        history_messages_key="history",
    )
    return with_message_history.stream(
                {"question": question},
                config={
                    "configurable": {"session_id": chat_session_id}
                })
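
# Example (illustrative only, never called by the app): consuming the stream
# outside Streamlit. Every argument value below is a placeholder.
def _demo_stream():
    for chunk in get_response(
        question="What is Streamlit?",
        chat_session_id="smoke-test-session",
        model_name="gemini-1.5-flash",
        temperature=0.7,
        max_tokens=1024,
        system_prompt="Answer the user question",
    ):
        print(chunk, end="", flush=True)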

def main():                                   
    with st.sidebar:
        model_name = st.selectbox(
            "Select AI Model",
            ("gemini-1.5-flash", "gemini-1.5-pro"),
            index=0,
        )
        temperature = st.slider('Temperature', min_value=0.0, max_value=1.0, value=1.0, step=0.01)
        max_tokens = st.slider("Max Tokens", min_value=128, max_value=4096, value=1024, step=128)
        instruction_prompt = st.text_area(
            "Instructions",
            "Answer the user question",
            height=120
        )
    
    system_prompt = "..."  # omitted; the sidebar instruction_prompt can be folded in here

    # Assumption: a fixed session id for illustration; the original app may key
    # the history on the authenticated username instead.
    chat_session_id = "default-session"
    chat_history = get_session_history(chat_session_id)

    # Replay the stored conversation so it survives Streamlit reruns
    for msg in chat_history.messages:
        st.chat_message(msg.type).write(msg.content)
        
    if prompt := st.chat_input("Ask Ellon Chat AI"):
        with st.chat_message("user"):
            st.write(prompt)
    
        with st.chat_message("ai"):
            message = get_response(...)
            st.write_stream(message)
            
main()

design.css

/* Chat container */
.stChatMessageContainer {
    display: flex;
    flex-direction: column;
    width: 100%;
    max-width: 800px;
    margin: 0 auto;
    padding: 10px;
    box-sizing: border-box;
}

/* Base styles for all messages */
div.stChatMessage {
    width: 100%;
    max-width: 450px; /* Reduced from 600px */
    min-width: 200px;
    padding: 10px 15px;
    margin-bottom: 10px;
    border-radius: 20px;
    word-wrap: break-word;
    box-sizing: border-box;
}

/* Styles for received messages */
div.stChatMessage.st-emotion-cache-4oy321.eeusbqq4 {
    background-color: #efeeee;
    align-self: flex-start;
}

/* Styles for sent messages */
div.stChatMessage.st-emotion-cache-1c7y2kd.eeusbqq4 {
    background-color: #d3dfed;
    color: black;
    align-self: flex-end;
}

/* Media query for large screens */
@media (min-width: 1200px) {
    .stChatMessageContainer {
        max-width: 700px; /* Reduced from 1000px */
    }
    
    div.stChatMessage {
        max-width: 40%; /* Reduced from 70% */
        min-width: 550px;
    }
}

/* Media query for medium-sized screens */
@media (max-width: 768px) {
    div.stChatMessage {
        max-width: 60%; /* Reduced from 80% */
    }
}

/* Media query for small screens */
@media (max-width: 480px) {
    div.stChatMessage {
        max-width: 85%; /* Reduced from 90% */
        min-width: 150px;
    }
    
    .stChatMessageContainer {
        padding: 5px;
    }
}

/* Media query for very small screens */
@media (max-width: 320px) {
    div.stChatMessage {
        max-width: 95%;
        min-width: 100px;
    }
}
