import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

st.set_page_config(page_title="Qwen Chatbot", layout="centered")
st.title("🧠 Qwen3-0.6B Chatbot")
@st.cache_resource  # cache so the model loads once per process, not on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
    # float32 keeps inference numerically safe on CPU-only hardware
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen3-0.6B", torch_dtype=torch.float32
    )
    model.eval()
    return tokenizer, model

tokenizer, model = load_model()
# Chat state persists across Streamlit reruns in session_state
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# st.chat_input yields the submitted message exactly once (None on other reruns),
# so a rerun cannot re-append or re-answer the previous message
user_input = st.chat_input("Say something")
if user_input:
    st.session_state.chat_history.append(("You", user_input))

    # Build the prompt from the full conversation so far
    context = ""
    for speaker, msg in st.session_state.chat_history:
        context += f"{speaker}: {msg}\n"
    context += "Bot:"

    inputs = tokenizer(context, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=100, do_sample=True, top_p=0.9, temperature=0.7
        )

    # Decode only the newly generated tokens; generate() echoes the prompt ids
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    bot_msg = tokenizer.decode(new_tokens, skip_special_tokens=True)
    # Trim any hallucinated follow-on turn the model writes for the user
    bot_msg = bot_msg.split("You:")[0].strip()
    st.session_state.chat_history.append(("Bot", bot_msg))
# Display the conversation
for speaker, msg in st.session_state.chat_history:
    st.markdown(f"**{speaker}:** {msg}")
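
# Usage note (a sketch, not part of the original Space): save this file as
# app.py and run it locally with
#   streamlit run app.py
# On a Hugging Face Streamlit Space the same file serves as app.py, with a
# requirements.txt listing at least transformers and torch. The first request
# downloads the Qwen/Qwen3-0.6B checkpoint from the Hub, so the initial reply
# is slow; later replies reuse the cached model.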