diff --git a/.env.local.example b/.env.local.example index 16e5d10..efc9ea5 100644 --- a/.env.local.example +++ b/.env.local.example @@ -27,6 +27,11 @@ NEXT_PUBLIC_DYNAMIC_ENVIRONMENT_ID=your_dynamic_environment_id_here # Get your API key from: https://www.alchemy.com/ ALCHEMY_API_KEY=your_alchemy_api_key +# Enable testnet chains (Sepolia) for development/testing +# Set to 'true' to enable Sepolia testnet support +# IMPORTANT: Set this in your .env.local for development! +NEXT_PUBLIC_ENABLE_TESTNET_CHAINS=false + # EVM payment wallet address (same address used across all EVM chains) # This is where users send their stablecoin payments on Ethereum, Base, Arbitrum, Optimism, Polygon # NEXT_PUBLIC_ prefix is required because it's displayed in the browser UI @@ -76,6 +81,16 @@ NEXT_PUBLIC_DAI_CONTRACT_ARBITRUM=0xDA10009cBd5D07dd0CeCc66161FC93D7c9000da1 NEXT_PUBLIC_DAI_CONTRACT_OPTIMISM=0xDA10009cBd5D07dd0CeCc66161FC93D7c9000da1 NEXT_PUBLIC_DAI_CONTRACT_POLYGON=0x8f3Cf7ad23Cd3CaDbD9735AFf958023239c6A063 +# Sepolia Testnet (DEV MODE ONLY - NODE_ENV=development) +# Get testnet tokens from Sepolia faucets (see STABLECOIN_PAYMENTS.md) +USDC_CONTRACT_SEPOLIA=0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238 +USDT_CONTRACT_SEPOLIA=0xaA8E23Fb1079EA71e0a56F48a2aA51851D8433D0 +DAI_CONTRACT_SEPOLIA=0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357 + +NEXT_PUBLIC_USDC_CONTRACT_SEPOLIA=0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238 +NEXT_PUBLIC_USDT_CONTRACT_SEPOLIA=0xaA8E23Fb1079EA71e0a56F48a2aA51851D8433D0 +NEXT_PUBLIC_DAI_CONTRACT_SEPOLIA=0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357 + # ============================================================================= # STRIPE PAYMENTS - Credit/Debit Card Payments # ============================================================================= diff --git a/STABLECOIN_PAYMENTS.md b/STABLECOIN_PAYMENTS.md index 4c608d6..c57d9af 100644 --- a/STABLECOIN_PAYMENTS.md +++ b/STABLECOIN_PAYMENTS.md @@ -73,13 +73,16 @@ 
NEXT_PUBLIC_DYNAMIC_ENVIRONMENT_ID=your_dynamic_environment_id ### 2. Supported Chains -The system supports 5 EVM chains: +The system supports 5 EVM chains (production): - **Ethereum** - **Base** - **Arbitrum** - **Optimism** - **Polygon** +The system also supports **Sepolia testnet** when explicitly enabled: +- **Sepolia** (Ethereum testnet) - Enable by setting `NEXT_PUBLIC_ENABLE_TESTNET_CHAINS=true` in `.env.local` + **USDC**, **USDT**, and **DAI** are accepted on all chains. ### 3. Pricing Model @@ -212,10 +215,39 @@ Response: ### Testnet Testing (Recommended) -1. Get testnet stablecoins from faucets (Sepolia, Base Sepolia, etc.) -2. Configure testnet payment wallet in `.env.local` -3. Send test transaction -4. Verify subscription activates +**Enable Sepolia testnet by adding this to your `.env.local` file:** +```bash +NEXT_PUBLIC_ENABLE_TESTNET_CHAINS=true +``` + +Then restart your development server (`npm run dev`). + +1. **Get testnet ETH for gas fees:** + - [Alchemy Sepolia Faucet](https://sepoliafaucet.com/) + - [Infura Sepolia Faucet](https://www.infura.io/faucet/sepolia) + +2. **Get testnet stablecoins:** + - **USDC (Sepolia)**: [Circle Testnet Faucet](https://faucet.circle.com/) - Use address `0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238` + - **USDT (Sepolia)**: Deploy your own or use testnet faucet at `0xaA8E23Fb1079EA71e0a56F48a2aA51851D8433D0` + - **DAI (Sepolia)**: [MakerDAO Testnet Faucet](https://app.spark.fi/faucet/) - Use address `0xFF34B3d4Aee8ddCd6F9AFFFB6Fe49bD371b8a357` + +3. **Configure payment wallet:** + - Set `NEXT_PUBLIC_EVM_PAYMENT_WALLET_ADDRESS` in `.env.local` to your test wallet address + - This is where testnet payments will be sent + +4. **Switch wallet to Sepolia network:** + - Connect wallet via Dynamic.xyz + - Switch to Sepolia network in your wallet + - The UI will show "๐Ÿงช TESTNET" indicator when connected to Sepolia + +5. 
**Send test transaction:** + - Choose amount (e.g., $1 USD = 1 USDC) + - Select stablecoin (USDC, USDT, or DAI) + - Confirm transaction in wallet + +6. **Verify subscription activates:** + - Wait ~30 seconds for transaction confirmation + - Refresh page to check subscription status ### Mainnet Testing (Small Amount) diff --git a/app/api/check-subscription/route.ts b/app/api/check-subscription/route.ts index 030862c..848bee5 100644 --- a/app/api/check-subscription/route.ts +++ b/app/api/check-subscription/route.ts @@ -1,3 +1,6 @@ +// CRITICAL: Apply fetch polyfill BEFORE any other imports +import '@/lib/fetch-polyfill'; + import { NextRequest, NextResponse } from 'next/server'; import { checkCombinedSubscription } from '@/lib/subscription-manager'; diff --git a/app/components/LLMChatInline.tsx b/app/components/LLMChatInline.tsx index df7991b..f3bb1c5 100644 --- a/app/components/LLMChatInline.tsx +++ b/app/components/LLMChatInline.tsx @@ -9,25 +9,41 @@ import { useAuth } from "./AuthProvider"; import ReactMarkdown from 'react-markdown'; import remarkGfm from 'remark-gfm'; import { callLLM, callLLMStream, getLLMDescription } from "@/lib/llm-client"; +import { getLLMConfig } from "@/lib/llm-config"; import { RobotIcon } from "./Icons"; import { trackLLMQuestionAsked } from "@/lib/analytics"; +type AttachmentType = 'text' | 'pdf' | 'csv' | 'tsv'; + +type Attachment = { + name: string; + content: string; + type: AttachmentType; + size: number; // in bytes +}; + type Message = { - role: 'user' | 'assistant'; + role: 'user' | 'assistant' | 'system'; content: string; timestamp: Date; studiesUsed?: SavedResult[]; + attachments?: Attachment[]; }; const CONSENT_STORAGE_KEY = "nilai_llm_chat_consent_accepted"; const MAX_CONTEXT_RESULTS = 500; +const MAX_FILE_SIZE = 1 * 1024 * 1024; // 1MB in bytes +const ALLOWED_FILE_TYPES = ['.txt', '.pdf', '.csv', '.tsv']; +const MAX_ATTACHMENTS = 5; const EXAMPLE_QUESTIONS = [ "Which traits should I pay attention to?", + "How's my sleep 
profile?", "Which sports are ideal for me?", "What kinds of foods do you think I will like best?", "On a scale of 1 - 10, how risk seeking am I?", - "Can you tell me which learning styles work best for me?" + "Can you tell me which learning styles work best for me?", + "What can you guess about my appearance?" ]; const FOLLOWUP_SUGGESTIONS = [ @@ -55,8 +71,13 @@ export default function AIChatInline() { const [hasPromoAccess, setHasPromoAccess] = useState(false); const [showPersonalizationPrompt, setShowPersonalizationPrompt] = useState(false); const [expandedMessageIndex, setExpandedMessageIndex] = useState(null); + const [attachedFiles, setAttachedFiles] = useState([]); + const [attachmentError, setAttachmentError] = useState(null); + const [expandedAttachmentIndex, setExpandedAttachmentIndex] = useState(null); + const [showProviderTip, setShowProviderTip] = useState(true); const inputRef = useRef(null); + const fileInputRef = useRef(null); useEffect(() => { setMounted(true); @@ -142,6 +163,182 @@ export default function AIChatInline() { return `${score.toFixed(2)}x`; }; + const getProviderTip = () => { + if (!mounted) return null; + + const config = getLLMConfig(); + + if (config.provider === 'nilai' || config.provider === 'ollama') { + return { + icon: '๐Ÿ”’', + type: 'privacy', + message: config.provider === 'nilai' + ? 'You\'re using nilAI (privacy-preserving TEE) - your data is maximally protected!' + : 'You\'re using Ollama (local processing) - your data never leaves your device!', + tip: 'Want more advanced models? Use the โš™๏ธ LLM button (top right) to switch to HuggingFace for better performance. Note: HuggingFace requires creating your own account and involves some privacy tradeoffs.', + }; + } else if (config.provider === 'huggingface') { + return { + icon: 'โšก', + type: 'performance', + message: 'You\'re using HuggingFace - maximizing model performance!', + tip: 'Want maximum privacy? 
Use the โš™๏ธ LLM button (top right) to switch to nilAI for privacy-preserving processing in a Trusted Execution Environment.', + }; + } + + return null; + }; + + const handleAttachmentClick = () => { + setAttachmentError(null); + fileInputRef.current?.click(); + }; + + const validateFile = (file: File): string | null => { + // Check file size + if (file.size > MAX_FILE_SIZE) { + return `File "${file.name}" is too large. Maximum size is 1MB.`; + } + + // Check file type + const extension = '.' + file.name.split('.').pop()?.toLowerCase(); + if (!ALLOWED_FILE_TYPES.includes(extension)) { + return `File "${file.name}" has an unsupported format. Allowed: ${ALLOWED_FILE_TYPES.join(', ')}`; + } + + return null; + }; + + const handleFileSelect = async (e: React.ChangeEvent) => { + const files = e.target.files; + if (!files || files.length === 0) return; + + setAttachmentError(null); + + // Check total number of attachments + if (attachedFiles.length + files.length > MAX_ATTACHMENTS) { + setAttachmentError(`Maximum ${MAX_ATTACHMENTS} files can be attached at once.`); + return; + } + + const newFiles: File[] = []; + for (let i = 0; i < files.length; i++) { + const file = files[i]; + const error = validateFile(file); + if (error) { + setAttachmentError(error); + return; + } + newFiles.push(file); + } + + setAttachedFiles(prev => [...prev, ...newFiles]); + // Reset input so same file can be selected again + e.target.value = ''; + }; + + const handleRemoveAttachment = (index: number) => { + setAttachedFiles(prev => prev.filter((_, i) => i !== index)); + setAttachmentError(null); + }; + + const processAttachments = async (files: File[]): Promise => { + const attachments: Attachment[] = []; + + for (const file of files) { + const extension = file.name.split('.').pop()?.toLowerCase(); + + try { + if (extension === 'pdf') { + // For PDF, we'll need to use pdfjs-dist + const content = await extractTextFromPDF(file); + attachments.push({ + name: file.name, + content, + type: 'pdf', 
+ size: file.size + }); + } else { + // For text, csv, tsv - read as text + const content = await file.text(); + attachments.push({ + name: file.name, + content, + type: extension as AttachmentType, + size: file.size + }); + } + } catch (err) { + console.error(`Failed to process file ${file.name}:`, err); + throw new Error(`Failed to process file "${file.name}"`); + } + } + + return attachments; + }; + + const extractTextFromPDF = async (file: File): Promise => { + try { + // Load PDF.js from CDN dynamically to avoid webpack issues + if (typeof window !== 'undefined' && !(window as any).pdfjsLib) { + await new Promise((resolve, reject) => { + const script = document.createElement('script'); + script.src = 'https://cdnjs.cloudflare.com/ajax/libs/pdf.js/3.11.174/pdf.min.js'; + script.onload = () => resolve(); + script.onerror = () => reject(new Error('Failed to load PDF.js')); + document.head.appendChild(script); + }); + } + + const pdfjsLib = (window as any).pdfjsLib; + if (!pdfjsLib) { + throw new Error('PDF.js library not loaded'); + } + + // Set worker + pdfjsLib.GlobalWorkerOptions.workerSrc = 'https://cdnjs.cloudflare.com/ajax/libs/pdf.js/3.11.174/pdf.worker.min.js'; + + // Read file as ArrayBuffer + const arrayBuffer = await file.arrayBuffer(); + + // Load PDF document + const loadingTask = pdfjsLib.getDocument({ data: arrayBuffer }); + const pdf = await loadingTask.promise; + + let fullText = ''; + + // Extract text from each page + for (let pageNum = 1; pageNum <= pdf.numPages; pageNum++) { + const page = await pdf.getPage(pageNum); + const textContent = await page.getTextContent(); + const pageText = textContent.items + .map((item: any) => item.str) + .join(' '); + fullText += pageText + '\n\n'; + } + + return fullText.trim() || 'PDF file contains no extractable text'; + } catch (error) { + console.error('Failed to extract text from PDF:', error); + throw new Error(`Failed to extract text from PDF file: ${error instanceof Error ? 
error.message : 'Unknown error'}`); + } + }; + + const formatAttachmentsForMessage = (attachments: Attachment[], query: string): string => { + let message = `User question:\n${query}`; + + if (attachments.length > 0) { + message += '\n\n---\n'; + for (const attachment of attachments) { + const sizeKB = (attachment.size / 1024).toFixed(1); + message += `\nAttached file: ${attachment.name} (${attachment.type.toUpperCase()}, ${sizeKB}KB):\n${attachment.content}\n`; + } + message += '---'; + } + + return message; + }; + const handleSendMessage = async () => { const query = inputValue.trim(); if (!query) return; @@ -171,19 +368,29 @@ export default function AIChatInline() { return; } - const userMessage: Message = { - role: 'user', - content: query, - timestamp: new Date() - }; - setMessages(prev => [...prev, userMessage]); setInputValue(""); setIsLoading(true); setError(null); + setAttachmentError(null); // Track LLM question trackLLMQuestionAsked(); + // Process attachments if any + let processedAttachments: Attachment[] = []; + if (attachedFiles.length > 0) { + try { + setLoadingStatus("๐Ÿ“Ž Processing attachments..."); + processedAttachments = await processAttachments(attachedFiles); + console.log(`[LLM Chat] Processed ${processedAttachments.length} attachments`); + } catch (err) { + const errorMessage = err instanceof Error ? err.message : "Failed to process attachments"; + setError(errorMessage); + setIsLoading(false); + return; + } + } + try { let relevantResults: SavedResult[] = []; @@ -264,13 +471,32 @@ Consider how this user's background, lifestyle factors (smoking, alcohol, diet), } } - const conversationHistory = messages.map(m => ({ - role: m.role, - content: m.content - })); - const llmDescription = getLLMDescription(); - const systemPrompt = `You are an expert genetic counselor LLM assistant providing personalized, holistic insights about GWAS results. 
${llmDescription} + + // Conversational system prompt for follow-up questions + const conversationalSystemPrompt = `You are continuing a conversation about the user's genetic results. ${llmDescription} + +CONTEXT: +- You previously provided a detailed analysis of their GWAS data +- The user is now asking follow-up questions about that analysis +- All the detailed genetic findings were already discussed in your first response + +INSTRUCTIONS FOR FOLLOW-UP RESPONSES: +⚠️ CRITICAL - REFUSE QUESTIONS NOT RELATED TO USER DATA: +- Still refuse to answer questions not related to the user's results +- Just answer their question directly based on the conversation history +- CRITICAL: Remind the user any recommendations are based on LLM training data and may be subject to hallucinations and errors so they should consult a physician if they have real health concerns. + +RESPONSE STYLE: +- Answer naturally and conversationally (NO rigid 5-section structure needed) +- Keep responses focused and concise (200-400 words unless more detail is specifically requested) +- Reference your previous detailed analysis when relevant +- Maintain the same helpful, educational tone as before +- NO need for comprehensive action plans or structured sections unless specifically asked + +Remember: This is educational, not medical advice. The detailed disclaimers were already provided in your initial response.`; + + const systemPrompt = `You are an expert providing personalized, holistic insights about GWAS results. ${llmDescription} IMPORTANT CONTEXT: - The user has uploaded their DNA file and analyzed it against thousands of GWAS studies @@ -285,10 +511,9 @@ USER'S SPECIFIC QUESTION: "${query}" ⚠️ CRITICAL - STAY ON TOPIC: +- Refuse to answer questions not related to the user's genetic data such as general knowledge or trivia to prevent the abuse of this system.
- Answer ONLY the specific trait/condition the user asked about in their question - Do NOT discuss other traits or conditions from the RAG context unless directly relevant to their question -- If they ask about "heart disease", focus ONLY on cardiovascular traits - ignore diabetes, cancer, etc. -- If they ask about "diabetes", focus ONLY on metabolic/diabetes traits - ignore heart, cancer, etc. - If this is a follow-up question, continue the conversation about the SAME topic from previous messages - Do NOT use the RAG context to go off on tangents about unrelated health topics - The RAG context is provided for reference, but answer ONLY what the user specifically asked about @@ -300,24 +525,11 @@ CRITICAL INSTRUCTIONS - COMPLETE RESPONSES: 4. If running low on space, wrap up your current section properly and provide a brief conclusion 5. Every response MUST have a clear ending with actionable takeaways -HOW TO PRESENT FINDINGS - AVOID STUDY-BY-STUDY LISTS: -โŒ DO NOT create tables listing individual SNPs/studies one by one -โŒ DO NOT list rs numbers with individual interpretations -โŒ DO NOT organize findings by individual genetic variants -โŒ DO NOT restate the user's personal information (age, ethnicity, medical history, smoking, alcohol, diet, etc.) - they already know it - -โœ… INSTEAD, synthesize findings into THEMES and PATTERNS: -- Group related variants into biological themes (e.g., "Cardiovascular Protection", "Metabolic Risk", "Inflammatory Response") -- Describe the OVERALL pattern across multiple variants (e.g., "You have 8 protective variants and 3 risk variants for heart disease, suggesting...") -- Focus on the BIG PICTURE and what the collection of findings means together -- Mention specific genes/pathways only when illustrating a broader point - PERSONALIZED HOLISTIC ADVICE FRAMEWORK: 1. Synthesize ALL findings into a coherent story about their health landscape 2. 
Explain how their genetic profile interacts with their background factors (without restating what those factors are) +3. Identify both strengths (protective factors) and areas to monitor (risk factors) -4. Connect different body systems (e.g., how cardiovascular + metabolic + inflammatory factors relate) -5. Provide specific, actionable recommendations tailored to THEIR situation +4. Provide specific, actionable recommendations tailored to THEIR situation ⚠️ CRITICAL GWAS LIMITATIONS & MEDICAL RECOMMENDATIONS: @@ -328,9 +540,9 @@ UNDERSTANDING GWAS LIMITATIONS: - Environment, lifestyle, and chance play MUCH LARGER roles than genetics - This app is for EDUCATIONAL PURPOSES ONLY - not clinical diagnosis - Results should NEVER be used to make medical decisions without professional consultation +- Any health recommendations are based on LLM training data and may be subject to hallucinations and errors so they should consult a physician if they have real health concerns. MEDICAL REFERRAL THRESHOLD - EXTREMELY HIGH BAR: - Focus 95% of recommendations on lifestyle, diet, exercise, sleep, stress management, and self-monitoring - ONLY suggest medical consultation if MULTIPLE high-risk variants + family history + existing symptoms align - NEVER routinely say "consult a genetic counselor" or "see your doctor" or "get tested" - Do NOT recommend medical tests, lab work, or screening unless findings are TRULY exceptional (e.g., multiple high-risk variants for serious hereditary conditions) @@ -382,32 +594,66 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug console.log('System Prompt:', systemPrompt); console.log('User Query:', query); console.log('Relevant Results Count:', relevantResults.length); + console.log('Attachments Count:', processedAttachments.length); console.log('======================'); + // Format the user query with attachments for LLM + const userQueryWithAttachments = processedAttachments.length > 0 + ?
formatAttachmentsForMessage(processedAttachments, query) + : query; + + // Build the message history to send to LLM FIRST (before updating state) + // For first message: [system, user] + // For follow-ups: [conversational system, user1, assistant1, ..., userN] + const messagesToSend = shouldIncludeContext + ? [ + { role: "system" as const, content: systemPrompt }, + { role: "user" as const, content: userQueryWithAttachments } + ] + : [ + // Use conversational system prompt for follow-ups (replace the detailed one from history) + { role: "system" as const, content: conversationalSystemPrompt }, + // Include all user/assistant messages from history (filter out old system message) + ...messages.filter(m => m.role !== 'system').map(m => ({ + role: m.role as 'user' | 'assistant', + content: m.content + })), + // Add the new user question + { role: "user" as const, content: userQueryWithAttachments } + ]; + + // Now add messages to state for UI display + // Add system message to conversation history (only for first message) + if (shouldIncludeContext) { + const systemMessage: Message = { + role: 'system', + content: systemPrompt, + timestamp: new Date(), + studiesUsed: relevantResults + }; + setMessages(prev => [...prev, systemMessage]); + } + + // Add user message to conversation history + const userMessage: Message = { + role: 'user', + content: query, + timestamp: new Date(), + attachments: processedAttachments.length > 0 ? processedAttachments : undefined + }; + setMessages(prev => [...prev, userMessage]); + // Create an initial assistant message with empty content const assistantMessage: Message = { role: 'assistant', content: '', timestamp: new Date(), - studiesUsed: relevantResults + studiesUsed: shouldIncludeContext ? 
relevantResults : undefined }; setMessages(prev => [...prev, assistantMessage]); // Call LLM with streaming - const stream = callLLMStream([ - { - role: "system", - content: systemPrompt - }, - ...conversationHistory.map(m => ({ - role: m.role as 'system' | 'user' | 'assistant', - content: m.content - })), - { - role: "user", - content: query - } - ], { + const stream = callLLMStream(messagesToSend, { maxTokens: 5000, temperature: 0.7, reasoningEffort: 'medium', @@ -441,6 +687,9 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug throw new Error("No response generated from LLM"); } + // Clear attachments after successful send + setAttachedFiles([]); + } catch (err) { console.error('[LLM Chat] Error:', err); @@ -686,6 +935,27 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug )} + {/* Provider tip banner */} + {showProviderTip && (() => { + const tip = getProviderTip(); + if (!tip) return null; + + return ( +
+
+ {tip.icon} +
+
{tip.message}
+
{tip.tip}
+
+
+ +
+ ); + })()} +
{messages.length === 0 && (
@@ -714,13 +984,26 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug ))} -

- โš ๏ธ This is for educational purposes only. Always consult healthcare professionals for medical advice. -

+
+

โš ๏ธ Important Disclaimer:

+
    +
  • LLMs can report incorrect information based on their training data
  • +
  • LLMs can hallucinate and make up information that sounds plausible but is false
  • +
  • LLMs can sound authoritative and confident even though they are not medical experts
  • +
  • This is for educational purposes only. Always consult healthcare professionals for medical advice.
  • +
+
)} - {messages.map((message, idx) => ( + {messages + .filter(message => message.role !== 'system') // Hide system messages from UI + .map((message, idx, filteredMessages) => { + // Check if this is the last assistant message in the filtered array + const isLastAssistantMessage = message.role === 'assistant' && + idx === filteredMessages.length - 1; + + return (
{message.role === 'user' ? '๐Ÿ‘ค' : '๐Ÿค–'} @@ -744,7 +1027,7 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug > ๐Ÿ“‹ Copy - {idx === messages.length - 1 && !isLoading && ( + {isLastAssistantMessage && !isLoading && (
๐Ÿ’ก Try asking:
@@ -796,12 +1079,40 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug )}
)} + {message.role === 'user' && message.attachments && message.attachments.length > 0 && ( +
+ + {expandedAttachmentIndex === idx && ( +
+ {message.attachments.map((attachment, attIdx) => ( +
+
+ ๐Ÿ“„ {attachment.name} + + {attachment.type.toUpperCase()} โ€ข {(attachment.size / 1024).toFixed(1)}KB + +
+
+
{attachment.content.substring(0, 500)}{attachment.content.length > 500 ? '...' : ''}
+
+
+ ))} +
+ )} +
+ )}
{message.timestamp.toLocaleTimeString()}
- ))} + ); + })} {isLoading && (
@@ -825,6 +1136,43 @@ Remember: You have plenty of space. Use ALL of it to provide a complete, thoroug
+ {/* Hidden file input */} + + + {/* Attachment preview chips */} + {attachedFiles.length > 0 && ( +
+ {attachedFiles.map((file, idx) => ( +
+ ๐Ÿ“Ž + {file.name} + ({(file.size / 1024).toFixed(1)}KB) + +
+ ))} +
+ )} + + {/* Attachment error display */} + {attachmentError && ( +
+ โš ๏ธ {attachmentError} +
+ )} +