diff --git a/.cursor/environment.json b/.cursor/environment.json new file mode 100644 index 000000000..de78b5e1d --- /dev/null +++ b/.cursor/environment.json @@ -0,0 +1,10 @@ +{ + "agentCanUpdateSnapshot": true, + "install": "pnpm install", + "terminals": [ + { + "name": "Run dev server", + "command": "pnpm dev" + } + ] +} diff --git a/.cursor/rules/_general-rules.mdc b/.cursor/rules/_general-rules.mdc new file mode 100644 index 000000000..51ec67326 --- /dev/null +++ b/.cursor/rules/_general-rules.mdc @@ -0,0 +1,16 @@ +--- +description: +globs: +alwaysApply: true +--- + +# General rules + +- This is the documentation for the Langfuse website. +- We use Nextra.site, v3, docs: https://nextra-v2-7hslbun8z-shud.vercel.app/ +- All pages are in the /pages folder and rendered by Nextra.site. +- Reusable markdown components are in the /components-mdx folder. + +## Frontend + +- When using tailwindcss, never use explicit colors, always use the default semantic color tokens introduced by shadcn/ui. diff --git a/.cursor/rules/available-internal-links.mdc b/.cursor/rules/available-internal-links.mdc new file mode 100644 index 000000000..659d51be0 --- /dev/null +++ b/.cursor/rules/available-internal-links.mdc @@ -0,0 +1,145 @@ +--- +description: +globs: +alwaysApply: true +--- + +Please make sure that all relative links match to what is available. + +// Start of LLMs.txt + +# Langfuse + +> Langfuse is an **open-source LLM engineering platform** ([GitHub](https://github.com/langfuse/langfuse)) that helps teams collaboratively debug, analyze, and iterate on their LLM applications. All platform features are natively integrated to accelerate the development workflow. + +## Langfuse Docs MCP Server + +Connect to the Langfuse Docs MCP server to access documentation directly in your AI editor: + +- **Endpoint**: `/api/mcp` +- **Transport**: `streamableHttp` +- **Documentation**: [Langfuse Docs MCP Server](/docs/docs-mcp) + +The MCP server provides tools to search Langfuse documentation, GitHub issues, and discussions. See the [installation guide](/docs/docs-mcp) for setup instructions in Cursor, VS Code, Claude Desktop, and other MCP clients. 
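+
+For example, a minimal MCP client configuration (shown here in the `mcp.json` format used by Cursor; the server name `langfuse-docs` and the production host are assumptions for illustration) might look like:
+
+```json
+{
+  "mcpServers": {
+    "langfuse-docs": {
+      "url": "https://langfuse.com/api/mcp"
+    }
+  }
+}
+```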
+ +## Docs + +- [Docs](/docs) +- [Audit Logs](/docs/administration/audit-logs) +- [Data Deletion](/docs/administration/data-deletion) +- [Data Retention](/docs/administration/data-retention) +- [Llm Connection](/docs/administration/llm-connection) +- [Rbac](/docs/administration/rbac) +- [Scim And Org Api](/docs/administration/scim-and-org-api) +- [Usage Alerts](/docs/administration/usage-alerts) +- [Export From Ui](/docs/api-and-data-platform/features/export-from-ui) +- [Export To Blob Storage](/docs/api-and-data-platform/features/export-to-blob-storage) +- [Fine Tuning](/docs/api-and-data-platform/features/fine-tuning) +- [Public Api](/docs/api-and-data-platform/features/public-api) +- [Query Via Sdk](/docs/api-and-data-platform/features/query-via-sdk) +- [Overview](/docs/api-and-data-platform/overview) +- [Ask Ai](/docs/ask-ai) +- [Demo](/docs/demo) +- [Docs Mcp](/docs/docs-mcp) +- [Data Model](/docs/evaluation/dataset-runs/data-model) +- [Datasets](/docs/evaluation/dataset-runs/datasets) +- [Native Run](/docs/evaluation/dataset-runs/native-run) +- [Remote Run](/docs/evaluation/dataset-runs/remote-run) +- [Annotation](/docs/evaluation/evaluation-methods/annotation) +- [Custom Scores](/docs/evaluation/evaluation-methods/custom-scores) +- [Data Model](/docs/evaluation/evaluation-methods/data-model) +- [Llm As A Judge](/docs/evaluation/evaluation-methods/llm-as-a-judge) +- [Overview](/docs/evaluation/overview) +- [Troubleshooting And Faq](/docs/evaluation/troubleshooting-and-faq) +- [Custom Dashboards](/docs/metrics/features/custom-dashboards) +- [Metrics Api](/docs/metrics/features/metrics-api) +- [Overview](/docs/metrics/overview) +- [Data Model](/docs/observability/data-model) +- [Agent Graphs](/docs/observability/features/agent-graphs) +- [Comments](/docs/observability/features/comments) +- [Environments](/docs/observability/features/environments) +- [Log Levels](/docs/observability/features/log-levels) +- [Masking](/docs/observability/features/masking) +- [Metadata](/docs/observability/features/metadata) +- [Multi Modality](/docs/observability/features/multi-modality) +- [Queuing Batching](/docs/observability/features/queuing-batching) +- [Releases And Versioning](/docs/observability/features/releases-and-versioning) +- [Sampling](/docs/observability/features/sampling) +- [Sessions](/docs/observability/features/sessions) +- [Tags](/docs/observability/features/tags) +- [Token And Cost Tracking](/docs/observability/features/token-and-cost-tracking) +- [Trace Ids And Distributed Tracing](/docs/observability/features/trace-ids-and-distributed-tracing) +- [Url](/docs/observability/features/url) +- [Users](/docs/observability/features/users) +- [Get Started](/docs/observability/get-started) +- [Overview](/docs/observability/overview) +- [Overview](/docs/observability/sdk/overview) +- [Decorators](/docs/observability/sdk/python/decorators) +- [Example](/docs/observability/sdk/python/example) +- [Low Level Sdk](/docs/observability/sdk/python/low-level-sdk) +- [Sdk V3](/docs/observability/sdk/python/sdk-v3) +- [Example Notebook](/docs/observability/sdk/typescript/example-notebook) +- [Guide](/docs/observability/sdk/typescript/guide) +- [Guide Web](/docs/observability/sdk/typescript/guide-web) +- [Troubleshooting And Faq](/docs/observability/troubleshooting-and-faq) +- [Data Model](/docs/prompt-management/data-model) +- [A B Testing](/docs/prompt-management/features/a-b-testing) +- [Caching](/docs/prompt-management/features/caching) +- 
[Composability](/docs/prompt-management/features/composability) +- [Config](/docs/prompt-management/features/config) +- [Folders](/docs/prompt-management/features/folders) +- [Github Integration](/docs/prompt-management/features/github-integration) +- [Guaranteed Availability](/docs/prompt-management/features/guaranteed-availability) +- [Link To Traces](/docs/prompt-management/features/link-to-traces) +- [Mcp Server](/docs/prompt-management/features/mcp-server) +- [Message Placeholders](/docs/prompt-management/features/message-placeholders) +- [N8n Node](/docs/prompt-management/features/n8n-node) +- [Playground](/docs/prompt-management/features/playground) +- [Prompt Version Control](/docs/prompt-management/features/prompt-version-control) +- [Webhooks Slack Integrations](/docs/prompt-management/features/webhooks-slack-integrations) +- [Get Started](/docs/prompt-management/get-started) +- [Overview](/docs/prompt-management/overview) +- [Troubleshooting And Faq](/docs/prompt-management/troubleshooting-and-faq) +- [Roadmap](/docs/roadmap) +- [Security And Guardrails](/docs/security-and-guardrails) + +## Optional + +- [Self Hosting](/self-hosting) +- [Authentication And Sso](/self-hosting/authentication-and-sso) +- [Automated Access Provisioning](/self-hosting/automated-access-provisioning) +- [Aws](/self-hosting/aws) +- [Azure](/self-hosting/azure) +- [Background Migrations](/self-hosting/background-migrations) +- [Backups](/self-hosting/backups) +- [Caching Features](/self-hosting/caching-features) +- [Configuration](/self-hosting/configuration) +- [Custom Base Path](/self-hosting/custom-base-path) +- [Deployment Strategies](/self-hosting/deployment-strategies) +- [Docker Compose](/self-hosting/docker-compose) +- [Encryption](/self-hosting/encryption) +- [Gcp](/self-hosting/gcp) +- [Headless Initialization](/self-hosting/headless-initialization) +- [Blobstorage](/self-hosting/infrastructure/blobstorage) +- [Cache](/self-hosting/infrastructure/cache) +- [Clickhouse](/self-hosting/infrastructure/clickhouse) +- [Containers](/self-hosting/infrastructure/containers) +- [Llm Api](/self-hosting/infrastructure/llm-api) +- [Postgres](/self-hosting/infrastructure/postgres) +- [Kubernetes Helm](/self-hosting/kubernetes-helm) +- [License Key](/self-hosting/license-key) +- [Networking](/self-hosting/networking) +- [Organization Creators](/self-hosting/organization-creators) +- [Organization Management Api](/self-hosting/organization-management-api) +- [Railway](/self-hosting/railway) +- [Scaling](/self-hosting/scaling) +- [Transactional Emails](/self-hosting/transactional-emails) +- [Troubleshooting](/self-hosting/troubleshooting) +- [Ui Customization](/self-hosting/ui-customization) +- [Upgrade](/self-hosting/upgrade) +- [Upgrade V1 To V2](/self-hosting/upgrade-guides/upgrade-v1-to-v2) +- [Upgrade V2 To V3](/self-hosting/upgrade-guides/upgrade-v2-to-v3) +- [V2](/self-hosting/v2) +- [Deployment Guide](/self-hosting/v2/deployment-guide) +- [Docker Compose](/self-hosting/v2/docker-compose) +- [Versioning](/self-hosting/versioning) diff --git a/.cursor/rules/changelog-posts.mdc b/.cursor/rules/changelog-posts.mdc new file mode 100644 index 000000000..524880421 --- /dev/null +++ b/.cursor/rules/changelog-posts.mdc @@ -0,0 +1,70 @@ +--- +description: How we do changelog posts and format them +globs: pages/changelog/** +alwaysApply: false +--- +# Changelog Posts Guidelines + +## Overview +Changelog posts document updates, new features, bug fixes, and improvements. 
They serve as a historical record of changes and provide clarity for users and developers alike.
+
+## File Placement
+- Place all changelog posts in the `/pages/changelog` directory.
+- Use a clear naming convention (e.g., include the date and a brief description, such as `2023-10-01-new-feature.md`).
+- Make sure to use today's date. If unsure, get today's date from the terminal.
+
+## Frontmatter Configuration Options
+Each changelog post should define the following frontmatter fields:
+
+- **title** (string, required):
+  The title of the changelog entry, displayed prominently in both the header and index.
+
+- **description** (string, required):
+  A concise summary of the changes covered in the post.
+
+- **date** (string, required):
+  The release date of the update. Use a date format that can be parsed by JavaScript (e.g., YYYY-MM-DD). This is formatted and displayed in the changelog header and index.
+
+- **author** (string, required):
+  The name or identifier of the author responsible for the changelog entry.
+
+- **showOgInHeader** (boolean, optional):
+  Set to false to hide the media in the header. If omitted or true, media will be rendered as described below.
+
+- **ogCloudflareVideo** (string, optional):
+  The video ID for a Cloudflare-hosted video. When provided, this video will be embedded in the header.
+
+- **ogVideo** (string, optional):
+  A fallback video URL to use if no Cloudflare video is provided. Point it to an mp4 file.
+
+- **ogImage** (string, optional):
+  The URL of an image to display if there is no video. If both an image and a GIF are provided, the GIF may be prioritized.
+
+- **gif** (string, optional):
+  URL for an animated GIF. This can be used in place of a static image if desired.
+
+*Note*: The display components (`ChangelogHeader.tsx` and `ChangelogIndex.tsx`) use these options in the following priority order:
+1. Cloudflare Video (`ogCloudflareVideo`)
+2. Fallback Video (`ogVideo`)
+3. Image (`ogImage`), optionally replaced by `gif` if provided
+
+## Content Structure
+- Always add the `ChangelogHeader` component immediately after the frontmatter:
+  ```tsx
+  import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
+
+  <ChangelogHeader />
+  ```
+- After the header, write your post content in Markdown.
+- Structure your content with clear headings and sections, such as:
+  - New Features
+  - Improvements
+  - Bug Fixes
+  - Miscellaneous
+- Use bullet points and concise paragraphs for readability.
+- Maintain a consistent tone and style that aligns with existing changelog examples.
+
+## Examples in Use
+- Refer to existing changelog posts in the `/pages/changelog` directory to see real-world examples of how these frontmatter options are applied.
+- Notice how the posts integrate media and text to create an engaging changelog experience.
diff --git a/.cursor/rules/documentation-pages.mdc b/.cursor/rules/documentation-pages.mdc
new file mode 100644
index 000000000..5f19fff08
--- /dev/null
+++ b/.cursor/rules/documentation-pages.mdc
@@ -0,0 +1,73 @@
+---
+alwaysApply: true
+---
+
+Guidelines for creating documentation pages for Langfuse, based on the structure and content style of the existing documentation:
+
+### Langfuse Documentation Creation Guide
+
+#### General Structure
+
+Each documentation page typically follows a structured format to ensure consistency and ease of understanding. Here's a breakdown of the common elements:
+
+1. **Metadata Block**: At the top of each document, include YAML front matter that specifies metadata such as title, description, and category. This helps with SEO and content categorization.
+
+   ```markdown
+   ---
+   title: Title of the Document
+   description: A brief description of what the document covers.
+   category: Category Name
+   ---
+   ```
+
+2. **Introduction**: Start with a brief introduction that outlines what the document will cover and why it's important. This section sets the context for the readers.
+
+3. **Prerequisites**: If applicable, list any prerequisites that readers need to fulfill before they can effectively use the documentation. This could include software installations, account setups, or background knowledge.
+
+4. **Step-by-Step Instructions**: Provide detailed, actionable steps for the users to follow. Use numbered lists for sequences that need to be followed in order.
+
+5. **Code Snippets**: Include code examples where necessary. Use syntax highlighting to improve readability. Ensure that any placeholders are clearly indicated.
+
+   ```python
+   from langfuse import Langfuse
+
+   # Replace the placeholder keys with your actual API keys
+   langfuse = Langfuse(public_key="pk-lf-...", secret_key="sk-lf-...")
+   ```
+
+6. **Images and Diagrams**: Use images and diagrams to explain concepts that are difficult to convey through text alone. Wrap images in a `<Frame>` component to maintain styling consistency.
+
+   ```markdown
+   <Frame>
+   ![Alt text for image](mdc:path/to/image.png)
+   </Frame>
+   ```
+
+7. **Tips and Notes**: Use callouts to highlight tips, warnings, or important notes. This can be done using markdown syntax or specific components if available.
+
+   ```markdown
+   > **Tip:** This is a helpful tip.
+   ```
+
+8. **FAQs and Troubleshooting**: End the document with a FAQ or troubleshooting section to address common issues or questions related to the topic.
+
+9. **Further Resources**: Provide links to related documents, external resources, or further reading to help users deepen their understanding.
+
+#### Writing Style
+
+- **Clarity and Conciseness**: Use clear and concise language. Avoid jargon unless it is commonly understood in the context of the documentation.
+- **Active Voice**: Use active voice to make the content more engaging.
+- **Second Person**: Address the reader directly using second person ("you") to make the text more user-friendly.
+
+#### Formatting
+
+- **Headers**: Use headers to organize content into logical sections. Headers should be hierarchical (`#`, `##`, `###`).
+- **Lists**: Use bullet points for unordered lists and numbers for ordered lists.
+- **Links**: Always use descriptive link texts instead of generic texts like "click here".
+
+#### Accessibility
+
+- **Alt Texts for Images**: Provide descriptive alt texts for all images to improve accessibility.
+- **Readable Fonts and Colors**: Ensure that the text is readable by using sufficient contrast between text and background colors.
+
+By adhering to these guidelines, the documentation will not only maintain a consistent style but also enhance the user experience, making it easier for readers to find, understand, and use the information they need.
diff --git a/.cursor/rules/embed-videos.mdc b/.cursor/rules/embed-videos.mdc
new file mode 100644
index 000000000..0b9785637
--- /dev/null
+++ b/.cursor/rules/embed-videos.mdc
@@ -0,0 +1,44 @@
+---
+description: Follow this rule when embedding videos or gifs in the documentation files.
+globs:
+alwaysApply: false
+---
+## General rules
+
+We use the components from [Video.tsx](mdc:components/Video.tsx) across our documentation to embed videos in the docs.
+
+You can use the `gifStyle` flag for short videos; they will autoplay and look like gifs.
+
+Always include a placeholder for the dimensions so the user can add them to reduce layout shift.
+
+`<CloudflareVideo>` is available globally without adding an import.
+
+## Recording Videos
+
+- Use ScreenStudio to record videos
+- 16:9 aspect ratio
+- Light mode (not dark mode)
+- Do not use a frame or border around the video. `<CloudflareVideo>` will do this automatically.
+
+## Examples
+
+Example
+
+```
+<CloudflareVideo
+  videoId="<video-id>"
+  aspectRatio={16 / 9}
+  title="<title>"
+/>
+```
+
+Example gifmode
+
+```
+<CloudflareVideo
+  videoId="<video-id>"
+  aspectRatio={16 / 9}
+  gifStyle
+/>
+```
diff --git a/.cursor/rules/integration-page.mdc b/.cursor/rules/integration-page.mdc
new file mode 100644
index 000000000..7be37528e
--- /dev/null
+++ b/.cursor/rules/integration-page.mdc
@@ -0,0 +1,146 @@
+---
+description: How we create integration pages and format them
+globs: /cookbook
+---
+
+# Integration Pages Guidelines
+
+## Overview
+
+Integration pages document how to integrate Langfuse with various frameworks, tools, and platforms. They are written as Jupyter notebooks so that users can easily run and test the integrations themselves. These pages serve as practical, executable guides for users looking to implement Langfuse in their existing tech stack.
+
+## File Placement
+
+- Place all integration pages in the `/cookbook` directory
+- Use a clear naming convention (e.g., `integration_[framework_name].ipynb`)
+
+## Frontmatter Configuration Options
+
+Each integration page should define the following frontmatter fields:
+
+- **title** (string, required):
+  The title of the integration, displayed prominently in both the header and index.
+
+- **description** (string, required):
+  A concise, SEO-optimized summary of the integration covered in the guide.
+
+## Content Structure
+
+Each integration page should follow this specific structure:
+
+**H1 Title**
+
+- A short, clear title for the integration
+- Example: "Integrate Langfuse with LlamaIndex Workflows"
+
+**Introduction**
+
+- 1-2 sentences explaining what this cookbook is about
+- Example: "This guide shows you how to trace LlamaIndex Workflows with Langfuse to monitor and evaluate your AI application."
+
+**Framework/Tool Introduction**
+
+- Brief explanation of the framework/tool being integrated
+- Include a link to the official documentation
+- Example:
+
+```markdown
+> **What are LlamaIndex Workflows?** [LlamaIndex Workflows](mdc:https:/docs.llamaindex.ai/en/stable/module_guides/workflow) is a flexible, event-driven framework designed to build robust AI agents. In LlamaIndex, workflows are created by chaining together multiple steps—each defined and validated using the `@step` decorator. Every step processes specific event types, allowing you to orchestrate complex processes such as AI agent collaboration, RAG flows, data extraction, and more.
+```
+
+**Langfuse Introduction**
+
+- Brief explanation of Langfuse and its benefits
+- Example:
+
+```markdown
+> **What is Langfuse?** [Langfuse](mdc:https:/langfuse.com) is the open source LLM engineering platform. It helps teams to collaboratively manage prompts, trace applications, debug problems, and evaluate their LLM system in production.
+```
+
+**Implementation Guide**
+
+- Break down the integration process into clear steps:
+
+  - **Step 1: Install Dependencies**
+    - Code cell with pip install commands
+  - **Step 2: Set Up Environment Variables**
+    - Example:
+
+      ```python
+      import os
+
+      # Get keys for your project from the project settings page
+      # https://cloud.langfuse.com
+      os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
+      os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
+      os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region
+      # os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region
+
+      # Get your Together.ai API key from the project settings page
+      os.environ["TOGETHER_API_KEY"] = "..."
+      ```
+
+  - **Step 3: Initialize Integration**
+    - Code cell showing how to initialize Langfuse with the framework
+  - **Step 4: Run an Example**
+    - A complete working example that demonstrates the integration
+  - **Step 5: Enhance Tracing (Optional)**
+    You can enhance your traces:
+
+    - Add [metadata](mdc:https:/langfuse.com/docs/tracing-features/metadata), [tags](mdc:https:/langfuse.com/docs/tracing-features/tags), [log levels](mdc:https:/langfuse.com/docs/tracing-features/log-levels) and [user IDs](mdc:https:/langfuse.com/docs/tracing-features/users) to traces
+    - Group traces by [sessions](mdc:https:/langfuse.com/docs/tracing-features/sessions)
+    - Use the [`@observe()` decorator](mdc:https:/langfuse.com/docs/sdk/python/decorators) to trace additional application logic
+    - Use [Langfuse Prompt Management](mdc:https:/langfuse.com/docs/prompts/get-started) and link prompts to traces
+    - Add [scores](mdc:https:/langfuse.com/docs/evaluation/features/evaluation-methods/custom-scores) to traces
+
+    Visit the [OpenAI SDK cookbook](mdc:https:/langfuse.com/integrations/model-providers/openai-py) for more examples of passing additional parameters.
+    Find out more about Langfuse Evaluations and Prompt Management in the [Langfuse documentation](mdc:https:/langfuse.com/docs).
+
+  - **Step 6: See Traces in Langfuse**
+    - Screenshot of Langfuse UI showing example traces
+    - Link to a public example trace if available
+    - Example:
+
+      ```markdown
+      After running your workflow, log in to Langfuse to explore the generated traces. You will see logs for each workflow step along with metrics such as token counts, latencies, and execution paths.
+
+      ![Langfuse Trace Example](mdc:https:/langfuse.com/images/cookbook/integration-llamaindex-workflows/llamaindex-workflows-example-trace.png)
+
+      _[Public example trace in Langfuse](mdc:https:/cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f2bb3e13-aafb-41a0-a852-efd20f12a4f4)_
+      ```
+
+**Resources**
+
+- Link to the documentation page of the framework used
+- Link to 1-2 relevant pages in the Langfuse documentation
+
+## Notebook Format
+
+- Make sure all code cells are executable without modification (except for API keys)
+- Use markdown cells for explanations and code cells for executable code
+- Include output examples where appropriate
+
+## User Message
+
+Please print the following message to the Cursor user:
+"As Cursor currently cannot create .ipynb files, please create a new file without the .ipynb extension and paste in the generated notebook code. Then rename the file to integration_name.ipynb. You might have to remove the file extension and rename it again for Cursor to recognize it as a notebook file."
+ +## Best Practices + +- Visit the documentation pages of the framework discussed to get more context +- Keep explanations concise but thorough +- Use consistent formatting throughout +- Ensure all dependencies are clearly listed +- Provide troubleshooting tips for common issues + +## Examples in Use + +- Refer to existing integration notebooks in the `/cookbook` directory such as: + - `integration_llamaindex_workflows.ipynb` + - Other successful integration examples diff --git a/.cursor/rules/ipynb-files.mdc b/.cursor/rules/ipynb-files.mdc new file mode 100644 index 000000000..1b6dabd38 --- /dev/null +++ b/.cursor/rules/ipynb-files.mdc @@ -0,0 +1,55 @@ +--- +description: +globs: +alwaysApply: false +--- +# Cursor Rules for Handling Jupyter Notebooks + +## Overview +This file contains rules and prompts for handling Jupyter Notebook (.ipynb) files in Cursor IDE, which currently doesn't have native support for .ipynb files. + +## System Rules + +1. When a user requests to create or edit a .ipynb file: + - Use template.py as the base for generating the notebook + - Create a Python file that uses the template functions to generate the desired notebook + - Execute the Python file to generate the .ipynb file + +2. For notebook creation: + - Create a new Python file using the template functions + - Define the notebook structure using add_markdown_cell() and add_code_cell() + - Use save_notebook() to generate the .ipynb file + +3. For notebook editing: + - Load the existing .ipynb file using load_notebook() + - Make the requested modifications + - Save the updated notebook using save_notebook() + +4. For notebook to Python conversion: + - Use notebook_to_python() function to convert .ipynb to .py + - This is useful for version control and editing in Cursor + +## Example Prompts + +1. Creating a new notebook: + "Please create a new Jupyter notebook with a markdown cell explaining the project and a code cell with a simple plot." + +2. Editing an existing notebook: + "Please modify the existing notebook to add a new code cell that performs data analysis." + +3. Converting a notebook: + "Please convert this notebook to a Python file so I can edit it in Cursor." + +## Implementation Notes + +1. Always use the template.py functions for notebook manipulation +2. Keep the Python file and .ipynb file in sync +3. Use proper error handling when working with files +4. Maintain consistent formatting in the generated notebooks + +## Best Practices + +1. Document all notebook cells with clear markdown explanations +2. Use proper code organization in code cells +3. Include necessary imports at the beginning of the notebook +4. Save intermediate results when working with large datasets \ No newline at end of file diff --git a/.cursor/rules/ipynb-files.py b/.cursor/rules/ipynb-files.py new file mode 100644 index 000000000..a7a9f94ce --- /dev/null +++ b/.cursor/rules/ipynb-files.py @@ -0,0 +1,118 @@ +import nbformat as nbf +import json +import os + +def create_notebook(cells=None, metadata=None): + """ + Create a new Jupyter notebook with the given cells and metadata. 
+ + Args: + cells (list): List of cell dictionaries + metadata (dict): Notebook metadata + + Returns: + nbformat.notebooknode.NotebookNode: The created notebook + """ + if cells is None: + cells = [] + if metadata is None: + metadata = { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.0" + } + } + + nb = nbf.v4.new_notebook(metadata=metadata) + nb.cells = cells + return nb + +def add_code_cell(notebook, source, execution_count=None): + """ + Add a code cell to the notebook. + + Args: + notebook: The notebook to add the cell to + source (str): The source code + execution_count (int): The execution count + """ + cell = nbf.v4.new_code_cell(source=source, execution_count=execution_count) + notebook.cells.append(cell) + +def add_markdown_cell(notebook, source): + """ + Add a markdown cell to the notebook. + + Args: + notebook: The notebook to add the cell to + source (str): The markdown text + """ + cell = nbf.v4.new_markdown_cell(source=source) + notebook.cells.append(cell) + +def save_notebook(notebook, filename): + """ + Save the notebook to a file. + + Args: + notebook: The notebook to save + filename (str): The output filename + """ + with open(filename, 'w', encoding='utf-8') as f: + nbf.write(notebook, f) + +def load_notebook(filename): + """ + Load a notebook from a file. + + Args: + filename (str): The notebook filename + + Returns: + nbformat.notebooknode.NotebookNode: The loaded notebook + """ + with open(filename, 'r', encoding='utf-8') as f: + return nbf.read(f, as_version=4) + +def notebook_to_python(notebook, output_file): + """ + Convert a notebook to a Python file. 
+ + Args: + notebook: The notebook to convert + output_file (str): The output Python filename + """ + with open(output_file, 'w', encoding='utf-8') as f: + for cell in notebook.cells: + if cell.cell_type == 'code': + f.write(cell.source) + f.write('\n\n') + elif cell.cell_type == 'markdown': + f.write('# ' + cell.source.replace('\n', '\n# ')) + f.write('\n\n') + +if __name__ == '__main__': + # Example usage + nb = create_notebook() + + # Add a markdown cell + add_markdown_cell(nb, "# My Jupyter Notebook\nThis is an example notebook.") + + # Add a code cell + add_code_cell(nb, "# This is a simple code cell\nprint('Hello, World!')") + + # Save the notebook + save_notebook(nb, 'example.ipynb') \ No newline at end of file diff --git a/.editorconfig b/.editorconfig index 96c99acd5..d7ea973ce 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,3 +1,15 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +indent_style = space +indent_size = 2 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + # Specific settings for the .mdx file you want to format as JavaScript [pages/_app.mdx] language = javascript \ No newline at end of file diff --git a/.env.template b/.env.template index 3b475a59c..d89972a52 100644 --- a/.env.template +++ b/.env.template @@ -4,6 +4,10 @@ LOOPS_API_KEY= NEXT_PUBLIC_POSTHOG_HOST= NEXT_PUBLIC_POSTHOG_KEY= +NEXT_PUBLIC_PLAIN_APP_ID= + +WEBSITE_FEEDBACK_WEBHOOK= + # For qa chatbot OPENAI_API_KEY= SUPABASE_URL= diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..87bb722fb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,7 @@ +contact_links: + - name: 💡 Feature Request + url: https://github.com/orgs/langfuse/discussions/new?category=ideas + about: Suggest any ideas you have using our discussion forums. + - name: 🤗 Get Help + url: https://github.com/orgs/langfuse/discussions/new?category=support + about: If you can’t get something to work the way you expect, open a question in our discussion forums. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..bb72b97a9 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,4 @@ +## Review Needed? Checklist +- [ ] Did you create or **move a section with headline** (h2, h3, etc...) 
→ *review by Jannik or Felix required* +- [ ] Does this include a **changelog post** → *review by Jannik or Felix required* +- [ ] Do you feel having a review would be good → *review by Jannik or Felix required* \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..fa0e3b412 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,191 @@ +permissions: + contents: read +name: "CI" + +on: + pull_request: + merge_group: + +jobs: + # Add pre-job to skip duplicate actions in merge queues + pre-job: + runs-on: ubuntu-latest + outputs: + should_skip: ${{ steps.skip_check.outputs.should_skip }} + timeout-minutes: 15 + steps: + - id: skip_check + uses: fkirc/skip-duplicate-actions@v5 + with: + do_not_skip: '["workflow_dispatch"]' + + check_h1: + needs: + - pre-job + if: needs.pre-job.outputs.should_skip != 'true' + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" # Use a current LTS version + + - name: Run H1 heading check + run: node scripts/check-h1-headings.js + + # check-notebook-docs-sync: + # needs: + # - pre-job + # if: needs.pre-job.outputs.should_skip != 'true' + # runs-on: ubuntu-latest + # timeout-minutes: 15 + # steps: + # - name: Checkout code + # uses: actions/checkout@v4 + + # - name: Install uv + # uses: astral-sh/setup-uv@v4 + # with: + # version: "latest" + + # - name: Check if notebook docs are up to date + # run: | + # # Run the update script + # bash scripts/update_cookbook_docs.sh + + # # Check if any files changed (staged, unstaged, or untracked) + # if [[ -n $(git status --porcelain) ]]; then + # echo "❌ Repository has changes after regenerating cookbook documentation!" + # echo "The following files have changes:" + # git status --porcelain + # echo "" + # echo "Please run 'bash scripts/update_cookbook_docs.sh' locally and commit the changes." + # echo "" + # echo "Detailed diff:" + # git diff + # git diff --staged + # exit 1 + # else + # echo "✅ Repository is up to date after regenerating cookbook documentation" + # fi + + build-and-check-links: + needs: + - pre-job + if: needs.pre-job.outputs.should_skip != 'true' + runs-on: ubuntu-latest + timeout-minutes: 20 + env: + GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: pnpm/action-setup@v4 + id: pnpm-install + with: + version: 9.5.0 + run_install: false + + - uses: actions/setup-node@v4 + with: + node-version: 20 + + - run: pnpm install + + - name: Build next.js app + run: pnpm build + + - name: Start server and check links + run: | + # Start the server in the background and capture its PID + pnpm start & SERVER_PID=$! + + # Wait for the server to be ready (max 30 seconds) + timeout 30 bash -c 'until curl -s http://localhost:3333 > /dev/null; do sleep 1; done' + + # Run the link checker + pnpm link-check + + # Store the exit code + LINK_CHECK_EXIT=$? 
+ + # Kill the server using the captured PID + kill $SERVER_PID || true + + # Exit with the link checker's exit code + exit $LINK_CHECK_EXIT + + check-sitemap-links: + needs: + - pre-job + if: needs.pre-job.outputs.should_skip != 'true' + runs-on: ubuntu-latest + timeout-minutes: 20 + env: + GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - uses: pnpm/action-setup@v4 + id: pnpm-install + with: + version: 9.5.0 + run_install: false + + - uses: actions/setup-node@v4 + with: + node-version: 20 + + - run: pnpm install + + - name: Build next.js app + run: pnpm build + + - name: Start server and check sitemap links + run: | + # Start the server in the background and capture its PID + pnpm start & SERVER_PID=$! + + # Wait for the server to be ready (max 30 seconds) + timeout 30 bash -c 'until curl -s http://localhost:3333 > /dev/null; do sleep 1; done' + + # Run the sitemap checker + pnpm sitemap-check + + # Store the exit code + SITEMAP_CHECK_EXIT=$? + + # Kill the server using the captured PID + kill $SERVER_PID || true + + # Exit with the sitemap checker's exit code + exit $SITEMAP_CHECK_EXIT + + # Summary job that depends on all other jobs + # This allows you to require only this single check in branch protection rules + all-checks-pass: + runs-on: ubuntu-latest + needs: [ + pre-job, + check_h1, + # check-notebook-docs-sync, + build-and-check-links, + check-sitemap-links, + ] + if: always() + steps: + - name: Successful CI + if: ${{ !(contains(needs.*.result, 'failure')) }} + run: exit 0 + working-directory: . + - name: Failing CI + if: ${{ contains(needs.*.result, 'failure') }} + run: exit 1 + working-directory: . diff --git a/.github/workflows/dependabot-merge.yml b/.github/workflows/dependabot-merge.yml.txt similarity index 100% rename from .github/workflows/dependabot-merge.yml rename to .github/workflows/dependabot-merge.yml.txt diff --git a/.github/workflows/dependabot-rebase-stale.yml b/.github/workflows/dependabot-rebase-stale.yml.txt similarity index 100% rename from .github/workflows/dependabot-rebase-stale.yml rename to .github/workflows/dependabot-rebase-stale.yml.txt diff --git a/.github/workflows/nextjs_bundle_analysis.yml b/.github/workflows/nextjs_bundle_analysis.yml index 304bdccb5..300a5b835 100644 --- a/.github/workflows/nextjs_bundle_analysis.yml +++ b/.github/workflows/nextjs_bundle_analysis.yml @@ -61,13 +61,13 @@ jobs: run: npx -p nextjs-bundle-analysis report - name: Upload bundle - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: bundle path: .next/analyze/__bundle_analysis.json - name: Download base branch bundle stats - uses: dawidd6/action-download-artifact@v2 + uses: dawidd6/action-download-artifact@v8 if: success() && github.event.number with: workflow: nextjs_bundle_analysis.yml @@ -106,11 +106,11 @@ jobs: id: fc with: issue-number: ${{ github.event.number }} - body-includes: "" + body-includes: " Web + Web --> Postgres + Web -.->|"optional for playground"| LLM +``` diff --git a/components-mdx/architecture-diagram-v3.mdx b/components-mdx/architecture-diagram-v3.mdx new file mode 100644 index 000000000..02e1fd9a3 --- /dev/null +++ b/components-mdx/architecture-diagram-v3.mdx @@ -0,0 +1,26 @@ +```mermaid +flowchart TB + User["UI, API, SDKs"] + subgraph vpc["VPC"] + Web["Web Server
(langfuse/langfuse)"] + Worker["Async Worker
(langfuse/worker)"] + Postgres@{ img: "/images/logos/postgres_icon.svg", label: "Postgres - OLTP\n(Transactional Data)", pos: "b", w: 60, h: 60, constraint: "on" } + Cache@{ img: "/images/logos/redis_icon.png", label: "Redis\n(Cache, Queue)", pos: "b", w: 60, h: 60, constraint: "on" } + Clickhouse@{ img: "/images/logos/clickhouse_icon.svg", label: "Clickhouse - OLAP\n(Observability Data)", pos: "b", w: 60, h: 60, constraint: "on" } + S3@{ img: "/images/logos/s3_icon.svg", label: "S3 / Blob Storage\n(Raw events, multi-modal attachments)", pos: "b", w: 60, h: 60, constraint: "on" } + end + LLM["LLM API/Gateway
(optional; BYO; can be same VPC or VPC-peered)"] + + User --> Web + Web --> S3 + Web --> Postgres + Web --> Cache + Web --> Clickhouse + Web -..->|"optional for playground"| LLM + + Cache --> Worker + Worker --> Clickhouse + Worker --> Postgres + Worker --> S3 + Worker -..->|"optional for evals"| LLM +``` diff --git a/components-mdx/changes-v3-architecture-changes-short-description.mdx b/components-mdx/changes-v3-architecture-changes-short-description.mdx new file mode 100644 index 000000000..bab02c2b1 --- /dev/null +++ b/components-mdx/changes-v3-architecture-changes-short-description.mdx @@ -0,0 +1,10 @@ +Langfuse has gained significant traction over the last months, both in our Cloud environment and in self-hosted setups. +With Langfuse v3 we introduce changes that allow our backend to handle hundreds of events per second with higher reliability. +To achieve this scale, we introduce a second Langfuse container and additional storage services like S3/Blob store, Clickhouse, and Redis which are better suited for the required workloads than our previous Postgres-based setup. + +In short, Langfuse v3 adds: + +- A new worker container that processes events asynchronously. +- A new S3/Blob store for storing large objects. +- A new Clickhouse instance for storing traces, observations, and scores. +- Redis/Valkey for queuing events and caching data. diff --git a/components-mdx/changes-v3-component-reasoning.mdx b/components-mdx/changes-v3-component-reasoning.mdx new file mode 100644 index 000000000..33622ef6f --- /dev/null +++ b/components-mdx/changes-v3-component-reasoning.mdx @@ -0,0 +1,84 @@ + + +Learn more about the v2 to v3 evolution and architectural decisions in our [technical blog post](/blog/2024-12-langfuse-v3-infrastructure-evolution). + + + +
+1. Why Clickhouse
+
+We made the strategic decision to migrate our traces, observations, and scores tables from Postgres to Clickhouse.
+Both we and our self-hosters observed bottlenecks in Postgres when dealing with millions of rows of tracing data,
+on both ingestion and retrieval.
+Our core requirement was a database that could handle massive volumes of trace and event data with exceptional query speed and efficiency
+while also being available for free to self-hosters.
+
+**Limitations of Postgres**
+
+Initially, Postgres was an excellent choice due to its robustness, flexibility, and the extensive tooling available.
+As our platform grew, we encountered performance bottlenecks with complex aggregations and time-series data.
+The row-based storage model of PostgreSQL becomes increasingly inefficient when dealing with billions of rows of tracing data,
+leading to slow query times and high resource consumption.
+
+**Our requirements**
+
+- Analytical queries: all queries for our dashboards (e.g. sum of LLM tokens consumed over time).
+- Table queries: finding tracing data based on filtering and ordering selected via tables in our UI.
+- Select by ID: quickly locating a specific trace by its ID.
+- High write throughput while allowing for updates: our tracing data can be updated from the SDKs, so we need an option to update rows in the database.
+- Self-hosting: we needed a database that is free to use for self-hosters, avoiding dependencies on specific cloud providers.
+- Low operational effort: as a small team, we focus on building features for our users and try to keep operational effort as low as possible.
+
+**Why Clickhouse is great**
+
+- Optimized for analytical queries: ClickHouse is a modern OLAP database capable of ingesting data at high rates and querying it with low latency. It handles billions of rows efficiently.
+- Rich feature set: Clickhouse offers different table engines, materialized views, different types of indices, and many integrations, which help us build quickly and achieve low-latency read queries.
+- Our self-hosters can use the official Clickhouse Helm charts and Docker images to deploy in the cloud infrastructure of their choice.
+- Clickhouse Cloud: Clickhouse Cloud is a managed SaaS offering that allows us to reduce operational effort on our side.
+
+When talking to other companies and looking at their code bases, we learned that Clickhouse is a popular choice these days for analytical workloads.
+Many modern observability tools, such as [Signoz](https://signoz.io/) or [Posthog](https://posthog.com/), as well as established companies like [Cloudflare](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/), use Clickhouse for their analytical workloads.
+
+**Clickhouse vs. others**
+
+We think there are many great OLAP databases out there and are sure that we could have chosen an alternative and succeeded with it as well. However, here are some thoughts on alternatives:
+
+- Druid: Unlike Druid's [modular architecture](https://posthog.com/blog/clickhouse-vs-druid), ClickHouse provides a more straightforward, unified instance approach. Hence, it is easier for teams to manage Clickhouse in production, as there are fewer moving parts. This reduces the operational burden, especially for our self-hosters.
+- StarRocks: We think StarRocks is great but still early. The breadth of features in Clickhouse helps us stay flexible with our requirements while benefiting from the performance of an OLAP database.
+
+**Building an adapter and supporting multiple databases**
+
+We explored building a multi-database adapter to support Postgres for smaller self-hosted deployments.
+After talking to engineers and reviewing some of PostHog's [Clickhouse implementation](https://github.com/PostHog/posthog),
+we decided against this path due to its complexity and maintenance overhead.
+This allows us to focus our resources on building user features instead.
+
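+
+To make the "updates via inserts" point concrete: ClickHouse engines like `ReplacingMergeTree` collapse rows that share the same ordering key to the newest version at merge time, so a high-throughput write path can double as an update path. The sketch below is illustrative only (the table name and columns are invented for the example, not Langfuse's actual schema):
+
+```python
+import clickhouse_connect
+
+client = clickhouse_connect.get_client(host="localhost")
+
+# Rows sharing the ORDER BY key (project_id, id) are collapsed to the version
+# with the highest event_ts during background merges, so updates coming from
+# the SDKs can be written as plain inserts.
+client.command("""
+CREATE TABLE IF NOT EXISTS traces (
+    project_id String,
+    id String,
+    name String,
+    input String,
+    output String,
+    event_ts DateTime64(3)
+) ENGINE = ReplacingMergeTree(event_ts)
+ORDER BY (project_id, id)
+""")
+```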
+ +
+2. Why Redis
+
+We added a Redis instance to serve cache and queue use-cases within our stack.
+With its open source license, broad native support by major cloud vendors, and ubiquity in the industry, Redis was a natural choice for us.
+
+ +
+3. Why S3/Blob Store
+
+Observability data for LLM applications tends to contain large, semi-structured bodies of data that represent inputs and outputs.
+We chose S3/Blob Store as a scalable, secure, and cost-effective solution to store these large objects.
+It allows us to store all incoming events for further processing and acts as a native backup solution, as the full state
+can be restored based on the events stored there.
+
+ +
+4. Why Worker Container
+
+When processing observability data for LLM applications, there are many CPU-heavy operations that block the main loop in our Node.js backend,
+e.g. tokenization and other parsing of event bodies.
+To achieve high availability and low latencies across client applications, we decided to move the heavy processing into an asynchronous worker container.
+It accepts events from a Redis queue and ensures that they are eventually upserted into Clickhouse.
+
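+
+An illustrative sketch of this queue-to-ClickHouse pattern (Langfuse's actual worker is TypeScript-based; the queue name, table, and batch size here are assumptions for the example):
+
+```python
+import json
+
+import clickhouse_connect
+import redis
+
+r = redis.Redis(host="localhost", port=6379)
+ch = clickhouse_connect.get_client(host="localhost")
+
+batch = []
+while True:
+    # Block until the next raw event arrives on the ingestion queue.
+    _, raw = r.blpop("ingestion-queue")
+    batch.append(json.loads(raw))
+    if len(batch) >= 100:
+        # Inserts double as upserts when the target table deduplicates row
+        # versions (e.g. via ReplacingMergeTree).
+        ch.insert(
+            "observations",
+            [[e["id"], e["trace_id"], json.dumps(e)] for e in batch],
+            column_names=["id", "trace_id", "payload"],
+        )
+        batch.clear()
+```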
diff --git a/components-mdx/datasets-create-dataset-item.mdx b/components-mdx/datasets-create-dataset-item.mdx new file mode 100644 index 000000000..ab7549b67 --- /dev/null +++ b/components-mdx/datasets-create-dataset-item.mdx @@ -0,0 +1,80 @@ + + + +```python +langfuse.create_dataset_item( + dataset_name="", + # any python object or value, optional + input={ + "text": "hello world" + }, + # any python object or value, optional + expected_output={ + "text": "hello world" + }, + # metadata, optional + metadata={ + "model": "llama3", + } +) +``` + +_See [Python SDK v3](/docs/sdk/python/sdk-v3) docs for details on how to initialize the Python client._ + + + + +```ts +langfuse.createDatasetItem({ + datasetName: "", + // any JS object or value + input: { + text: "hello world", + }, + // any JS object or value, optional + expectedOutput: { + text: "hello world", + }, + // metadata, optional + metadata: { + model: "llama3", + }, +}); +``` + +_See [JS/TS SDK](/docs/sdk/typescript/guide) docs for details on how to initialize the JS/TS client._ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/components-mdx/datasets-create-dataset.mdx b/components-mdx/datasets-create-dataset.mdx new file mode 100644 index 000000000..46c7096cc --- /dev/null +++ b/components-mdx/datasets-create-dataset.mdx @@ -0,0 +1,50 @@ + + + +```python +langfuse.create_dataset( + name="", + # optional description + description="My first dataset", + # optional metadata + metadata={ + "author": "Alice", + "date": "2022-01-01", + "type": "benchmark" + } +) +``` + +_See [Python SDK](/docs/sdk/python/sdk-v3) docs for details on how to initialize the Python client._ + + + + +```ts +langfuse.createDataset({ + name: "", + // optional description + description: "My first dataset", + // optional metadata + metadata: { + author: "Alice", + date: "2022-01-01", + type: "benchmark", + }, +}); +``` + + + + + +1. **Navigate to** `Your Project` > `Datasets` +2. **Click on** `+ New dataset` to create a new dataset. + + +![Create dataset](/images/docs/create_dataset.png) + + + + + diff --git a/components-mdx/datasets-datamodel.mdx b/components-mdx/datasets-datamodel.mdx new file mode 100644 index 000000000..71c3d00f3 --- /dev/null +++ b/components-mdx/datasets-datamodel.mdx @@ -0,0 +1,189 @@ +## Datasets +Datasets are a collection of inputs and, optionally, expected outputs that can be during Dataset runs. + +`Dataset`s are a collection of `DatasetItem`s. + +
+ +```mermaid +classDiagram +direction LR + class Dataset { + name + description + metadata + } + + class DatasetItem { + datasetName + input + expectedOutput + metadata + sourceTraceId + sourceObservationId + id + status + } + + Dataset "1" --> "n" DatasetItem +``` + +
+ +### Dataset object + +| Attribute | Type | Required | Description | +| ------------- | ------ | -------- | --------------------------------------------------------------------------- | +| `name` | string | Yes | Name of the dataset | +| `description` | string | No | Description of the dataset | +| `metadata` | object | No | Additional metadata for the dataset | + +### DatasetItem object + +| Attribute | Type | Required | Description | +| ------------------ | -------------- | -------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `datasetName` | string | Yes | Name of the dataset to add the item to | +| `input` | object | No | Input data for the dataset item | +| `expectedOutput` | object | No | Expected output data for the dataset item | +| `metadata` | object | No | Additional metadata for the dataset item | +| `sourceTraceId` | string | No | ID of the source trace to link this dataset item to | +| `sourceObservationId` | string | No | ID of the source observation to link this dataset item to | +| `id` | string | No | Unique identifier for the dataset item. Dataset items are upserted on their id. Id needs to be unique (project-level) and cannot be reused across datasets. | +| `status` | DatasetStatus | No | Status of the dataset item. Defaults to ACTIVE for newly created items. Possible values: `ACTIVE`, `ARCHIVED` | + + + + + + +## DatasetRun +Dataset runs are used to run a dataset through your LLM application and optionally apply evaluation methods to the results. + +
+
+ +```mermaid +classDiagram +direction LR + class DatasetRun { + name + description + metadata + } + + DatasetRun "1" --> "n" DatasetRunItem + + class DatasetRunItem { + runName + runDescription + metadata + datasetItemId + traceId + observationId + + } +``` + +
+### DatasetRun object
+
+| Attribute | Type | Required | Description |
+| ------------- | ------ | -------- | ----------------------- |
+| `datasetName` | string | Yes | Name of the dataset |
+| `runName` | string | Yes | Name of the dataset run |
+
+### DatasetRunItem object
+
+| Attribute | Type | Required | Description |
+| ---------------- | ------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `runName` | string | Yes | Name of the dataset run to add the item to |
+| `runDescription` | string | No | Description of the run. If the run exists, its description will be updated |
+| `metadata` | object | No | Metadata of the dataset run; updates the run if it already exists |
+| `datasetItemId` | string | Yes | ID of the dataset item to link to this run |
+| `observationId` | string | No | ID of the observation to link to this run |
+| `traceId` | string | No | ID of the trace to link to this run. `traceId` should always be provided. For compatibility with older SDK versions, it can also be inferred from the provided `observationId`. |
+
+
+Most of the time, we recommend that DatasetRunItems reference trace IDs directly. The reference to an observation ID exists for backwards compatibility with older SDK versions.
+
+
+## End-to-end data relations
+
+Dataset runs combine several Langfuse objects:
+- `DatasetRun`s are created by looping through all or selected `DatasetItem`s of a `Dataset` with your LLM application.
+- For each `DatasetItem` passed into the LLM application as input, a `DatasetRunItem` and a `Trace` are created.
+- Optionally, `Score`s can be added to the `Trace`s to evaluate the output of the LLM application during the `DatasetRun`, as the sketch below shows.
+
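+
+A rough sketch of this loop using the Python SDK (this assumes the v3 `item.run()` helper; `my_app` and the score name are placeholders, not part of the data model):
+
+```python
+from langfuse import get_client
+
+langfuse = get_client()
+dataset = langfuse.get_dataset("<dataset_name>")
+
+for item in dataset.items:
+    # Each iteration creates a DatasetRunItem plus a Trace for this run.
+    with item.run(run_name="experiment-v1") as root_span:
+        output = my_app(item.input)  # placeholder for your LLM application
+        root_span.update_trace(input=item.input, output=output)
+        # Optionally attach a Score to the trace.
+        root_span.score_trace(
+            name="exact_match",
+            value=float(output == item.expected_output),
+        )
+
+langfuse.flush()
+```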
+ +
+ +```mermaid +classDiagram +direction LR + namespace Datasets { + class Dataset { + } + class DatasetItem { + } + } + namespace DatasetRuns { + class DatasetRun { + } + class DatasetRunItem { + } + } + namespace Observability { + class Trace { + } + class Observation { + } + } + namespace Evals { + class Score { + } + } + + + class DatasetRun { + } + + class DatasetRunItem { + } + + class Dataset { + } + + class DatasetItem { + } + + class Trace { + input + output + } + + class Observation { + input + output + } + + class Score { + name + value + comment + } + + Dataset "1" --> "n" DatasetItem + Dataset "1" --> "n" DatasetRun + DatasetRun "1" --> "n" DatasetRunItem + DatasetRunItem "1" --> "1" DatasetItem + Trace "1" --> "n" Observation + DatasetRunItem "1" --> "1" Trace + DatasetRunItem "1" --> "0..1" Observation + Observation "1" --> "n" Score + Trace "1" --> "n" Score +``` + +
+ diff --git a/components-mdx/datasets-overview-gif.mdx b/components-mdx/datasets-overview-gif.mdx new file mode 100644 index 000000000..493c7a52b --- /dev/null +++ b/components-mdx/datasets-overview-gif.mdx @@ -0,0 +1,37 @@ +import { CloudflareVideo } from "@/components/Video"; + + + + +Collaboratively manage datasets via UI, API, or SDKs. + + + + + + +Run experiments on datasets. Compare metrics across experiments, and see results side-by-side. + + + + + + +Directly add new items from production traces to datasets to improve your datasets over time. + + + + + diff --git a/components-mdx/dependents/README.md b/components-mdx/dependents/README.md new file mode 100644 index 000000000..eab824b2c --- /dev/null +++ b/components-mdx/dependents/README.md @@ -0,0 +1 @@ +Update via `bash scripts/update_package_usage.sh` diff --git a/components-mdx/dependents/js.md b/components-mdx/dependents/js.md new file mode 100644 index 000000000..9c2206a32 --- /dev/null +++ b/components-mdx/dependents/js.md @@ -0,0 +1,60 @@ +### Dependents stats for langfuse/langfuse-js + +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by&message=106&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-js/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(public)&message=106&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-js/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(private)&message=-106&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-js/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(stars)&message=88586&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-js/network/dependents) + +| Repository | Stars | +| :-------- | -----: | +|   [lobehub](https://github.com/lobehub) / [lobe-chat](https://github.com/lobehub/lobe-chat) | 56800 | +|   [FlowiseAI](https://github.com/FlowiseAI) / [Flowise](https://github.com/FlowiseAI/Flowise) | 35760 | +|   [chatchat-space](https://github.com/chatchat-space) / [Langchain-Chatchat](https://github.com/chatchat-space/Langchain-Chatchat) | 33855 | +|   [twentyhq](https://github.com/twentyhq) / [twenty](https://github.com/twentyhq/twenty) | 26681 | +|   [formbricks](https://github.com/formbricks) / [formbricks](https://github.com/formbricks/formbricks) | 10163 | +|   [anthropics](https://github.com/anthropics) / [courses](https://github.com/anthropics/courses) | 9504 | +|   [langfuse](https://github.com/langfuse) / [langfuse](https://github.com/langfuse/langfuse) | 9019 | +|   [promptfoo](https://github.com/promptfoo) / [promptfoo](https://github.com/promptfoo/promptfoo) | 5686 | +|   [superagent-ai](https://github.com/superagent-ai) / [superagent](https://github.com/superagent-ai/superagent) | 5641 | +|   [pingcap](https://github.com/pingcap) / [autoflow](https://github.com/pingcap/autoflow) | 2394 | +|   [alishobeiri](https://github.com/alishobeiri) / [thread](https://github.com/alishobeiri/thread) | 1089 | +|   [plastic-labs](https://github.com/plastic-labs) / [tutor-gpt](https://github.com/plastic-labs/tutor-gpt) | 795 | +|   [AIDotNet](https://github.com/AIDotNet) / [fast-wiki](https://github.com/AIDotNet/fast-wiki) | 435 | +|   [inferablehq](https://github.com/inferablehq) / [inferable](https://github.com/inferablehq/inferable) | 338 | +|   
[vespperhq](https://github.com/vespperhq) / [vespper](https://github.com/vespperhq/vespper) | 332 | +|   [souzatharsis](https://github.com/souzatharsis) / [tamingLLMs](https://github.com/souzatharsis/tamingLLMs) | 292 | +|   [ElectricCodeGuy](https://github.com/ElectricCodeGuy) / [SupabaseAuthWithSSR](https://github.com/ElectricCodeGuy/SupabaseAuthWithSSR) | 222 | +|   [LibreChat-AI](https://github.com/LibreChat-AI) / [librechat.ai](https://github.com/LibreChat-AI/librechat.ai) | 201 | +|   [i-am-alice](https://github.com/i-am-alice) / [3rd-devs](https://github.com/i-am-alice/3rd-devs) | 185 | +|   [babelcloud](https://github.com/babelcloud) / [LLM-RGB](https://github.com/babelcloud/LLM-RGB) | 153 | +|   [cofacts](https://github.com/cofacts) / [rumors-api](https://github.com/cofacts/rumors-api) | 115 | +|   [awslabs](https://github.com/awslabs) / [backstage-plugins-for-aws](https://github.com/awslabs/backstage-plugins-for-aws) | 87 | +|   [langfuse](https://github.com/langfuse) / [langfuse-docs](https://github.com/langfuse/langfuse-docs) | 86 | +|   [lucagrippa](https://github.com/lucagrippa) / [obsidian-ai-tagger](https://github.com/lucagrippa/obsidian-ai-tagger) | 85 | +|   [cofacts](https://github.com/cofacts) / [rumors-site](https://github.com/cofacts/rumors-site) | 84 | +|   [chokiproai](https://github.com/chokiproai) / [ChatGPT-Plugins](https://github.com/chokiproai/ChatGPT-Plugins) | 74 | +|   [Davis-Media](https://github.com/Davis-Media) / [weights-ai](https://github.com/Davis-Media/weights-ai) | 69 | +|   [indexnetwork](https://github.com/indexnetwork) / [index](https://github.com/indexnetwork/index) | 52 | +|   [empirical-run](https://github.com/empirical-run) / [appwright](https://github.com/empirical-run/appwright) | 52 | +|   [giselles-ai](https://github.com/giselles-ai) / [giselle](https://github.com/giselles-ai/giselle) | 51 | +|   [SuveenE](https://github.com/SuveenE) / [codenames-ai](https://github.com/SuveenE/codenames-ai) | 47 | +|   [AmineDjeghri](https://github.com/AmineDjeghri) / [generative-ai-project-template](https://github.com/AmineDjeghri/generative-ai-project-template) | 47 | +|   [Chainlit](https://github.com/Chainlit) / [literalai-cookbooks](https://github.com/Chainlit/literalai-cookbooks) | 45 | +|   [langfuse](https://github.com/langfuse) / [langfuse-js](https://github.com/langfuse/langfuse-js) | 43 | +|   [Future-House](https://github.com/Future-House) / [LAB-Bench](https://github.com/Future-House/LAB-Bench) | 36 | +|   [Ozamatash](https://github.com/Ozamatash) / [deep-research-mcp](https://github.com/Ozamatash/deep-research-mcp) | 30 | +|   [orsonteodoro](https://github.com/orsonteodoro) / [oiledmachine-overlay](https://github.com/orsonteodoro/oiledmachine-overlay) | 25 | +|   [ChristophHandschuh](https://github.com/ChristophHandschuh) / [chatbot-ui](https://github.com/ChristophHandschuh/chatbot-ui) | 23 | +|   [bluewind-ai](https://github.com/bluewind-ai) / [bluewind](https://github.com/bluewind-ai/bluewind) | 23 | +|   [checkly](https://github.com/checkly) / [srebot](https://github.com/checkly/srebot) | 17 | +|   [kalviumcommunity](https://github.com/kalviumcommunity) / [compilerd](https://github.com/kalviumcommunity/compilerd) | 17 | +|   [microsoft](https://github.com/microsoft) / [vscode-azureapicenter](https://github.com/microsoft/vscode-azureapicenter) | 16 | +|   [langfuse](https://github.com/langfuse) / [mcp-server-langfuse](https://github.com/langfuse/mcp-server-langfuse) | 15 | +|   [minorun365](https://github.com/minorun365) / 
[aws-level-checker](https://github.com/minorun365/aws-level-checker) | 15 | +|   [promptfoo](https://github.com/promptfoo) / [promptfoo-action](https://github.com/promptfoo/promptfoo-action) | 15 | +|   [justUmen](https://github.com/justUmen) / [Bjornulf_lobe-chat](https://github.com/justUmen/Bjornulf_lobe-chat) | 14 | +|   [langfuse](https://github.com/langfuse) / [ai-chatbot](https://github.com/langfuse/ai-chatbot) | 14 | +|   [anti-work](https://github.com/anti-work) / [helper](https://github.com/anti-work/helper) | 13 | +|   [find-xposed-magisk](https://github.com/find-xposed-magisk) / [lobe-chat](https://github.com/find-xposed-magisk/lobe-chat) | 11 | + +_Generated using [github-dependents-info](https://github.com/nvuillam/github-dependents-info), by [Nicolas Vuillamy](https://github.com/nvuillam)_ \ No newline at end of file diff --git a/components-mdx/dependents/python.md b/components-mdx/dependents/python.md new file mode 100644 index 000000000..4e3ef6eb7 --- /dev/null +++ b/components-mdx/dependents/python.md @@ -0,0 +1,119 @@ +### Dependents stats for langfuse/langfuse-python + +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by&message=108&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-python/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(public)&message=108&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-python/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(private)&message=-108&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-python/network/dependents) +[![Generated by github-dependents-info](https://img.shields.io/static/v1?label=Used%20by%20(stars)&message=50276&color=informational&logo=slickpic)](https://github.com/langfuse/langfuse-python/network/dependents) + +| Repository | Stars | +| :-------- | -----: | +|   [open-webui](https://github.com/open-webui) / [open-webui](https://github.com/open-webui/open-webui) | 80355 | +|   [langgenius](https://github.com/langgenius) / [dify](https://github.com/langgenius/dify) | 76869 | +|   [langflow-ai](https://github.com/langflow-ai) / [langflow](https://github.com/langflow-ai/langflow) | 50121 | +|   [run-llama](https://github.com/run-llama) / [llama_index](https://github.com/run-llama/llama_index) | 39485 | +|   [QuivrHQ](https://github.com/QuivrHQ) / [quivr](https://github.com/QuivrHQ/quivr) | 37432 | +|   [mindsdb](https://github.com/mindsdb) / [mindsdb](https://github.com/mindsdb/mindsdb) | 27274 | +|   [BerriAI](https://github.com/BerriAI) / [litellm](https://github.com/BerriAI/litellm) | 18293 | +|   [GreyDGL](https://github.com/GreyDGL) / [PentestGPT](https://github.com/GreyDGL/PentestGPT) | 7876 | +|   [Canner](https://github.com/Canner) / [WrenAI](https://github.com/Canner/WrenAI) | 6767 | +|   [superagent-ai](https://github.com/superagent-ai) / [superagent](https://github.com/superagent-ai/superagent) | 5641 | +|   [bRAGAI](https://github.com/bRAGAI) / [bRAG-langchain](https://github.com/bRAGAI/bRAG-langchain) | 2501 | +|   [pingcap](https://github.com/pingcap) / [autoflow](https://github.com/pingcap/autoflow) | 2394 | +|   [open-webui](https://github.com/open-webui) / [pipelines](https://github.com/open-webui/pipelines) | 1499 | +|   [topoteretes](https://github.com/topoteretes) / [cognee](https://github.com/topoteretes/cognee) | 1335 | +|   [MLSysOps](https://github.com/MLSysOps) 
/ [MLE-agent](https://github.com/MLSysOps/MLE-agent) | 1238 | +|   [dynamiq-ai](https://github.com/dynamiq-ai) / [dynamiq](https://github.com/dynamiq-ai/dynamiq) | 751 | +|   [opslane](https://github.com/opslane) / [opslane](https://github.com/opslane/opslane) | 694 | +|   [dmayboroda](https://github.com/dmayboroda) / [minima](https://github.com/dmayboroda/minima) | 546 | +|   [theopenconversationkit](https://github.com/theopenconversationkit) / [tock](https://github.com/theopenconversationkit/tock) | 538 | +|   [andysingal](https://github.com/andysingal) / [llm-course](https://github.com/andysingal/llm-course) | 459 | +|   [phospho-app](https://github.com/phospho-app) / [phospho](https://github.com/phospho-app/phospho) | 417 | +|   [sentient-engineering](https://github.com/sentient-engineering) / [agent-q](https://github.com/sentient-engineering/agent-q) | 399 | +|   [aorwall](https://github.com/aorwall) / [moatless-tools](https://github.com/aorwall/moatless-tools) | 362 | +|   [duoyang666](https://github.com/duoyang666) / [ai_novel](https://github.com/duoyang666/ai_novel) | 277 | +|   [YFGaia](https://github.com/YFGaia) / [dify-plus](https://github.com/YFGaia/dify-plus) | 259 | +|   [sql-agi](https://github.com/sql-agi) / [DB-GPT](https://github.com/sql-agi/DB-GPT) | 242 | +|   [zenml-io](https://github.com/zenml-io) / [zenml-projects](https://github.com/zenml-io/zenml-projects) | 238 | +|   [bklieger-groq](https://github.com/bklieger-groq) / [mathtutor-on-groq](https://github.com/bklieger-groq/mathtutor-on-groq) | 215 | +|   [RobotecAI](https://github.com/RobotecAI) / [rai](https://github.com/RobotecAI/rai) | 201 | +|   [plastic-labs](https://github.com/plastic-labs) / [honcho](https://github.com/plastic-labs/honcho) | 168 | +|   [8090-inc](https://github.com/8090-inc) / [xrx-sample-apps](https://github.com/8090-inc/xrx-sample-apps) | 156 | +|   [langfuse](https://github.com/langfuse) / [langfuse-python](https://github.com/langfuse/langfuse-python) | 142 | +|   [deepset-ai](https://github.com/deepset-ai) / [haystack-core-integrations](https://github.com/deepset-ai/haystack-core-integrations) | 134 | +|   [ai-shifu](https://github.com/ai-shifu) / [ai-shifu](https://github.com/ai-shifu/ai-shifu) | 118 | +|   [kenshiro-o](https://github.com/kenshiro-o) / [nagato-ai](https://github.com/kenshiro-o/nagato-ai) | 113 | +|   [bmd1905](https://github.com/bmd1905) / [ChatOpsLLM](https://github.com/bmd1905/ChatOpsLLM) | 109 | +|   [i-dot-ai](https://github.com/i-dot-ai) / [redbox](https://github.com/i-dot-ai/redbox) | 107 | +|   [zozoheir](https://github.com/zozoheir) / [tinyllm](https://github.com/zozoheir/tinyllm) | 100 | +|   [pavanjava](https://github.com/pavanjava) / [bootstrap-rag](https://github.com/pavanjava/bootstrap-rag) | 88 | +|   [ansari-project](https://github.com/ansari-project) / [ansari-backend](https://github.com/ansari-project/ansari-backend) | 87 | +|   [chatchat-space](https://github.com/chatchat-space) / [LangGraph-Chatchat](https://github.com/chatchat-space/LangGraph-Chatchat) | 80 | +|   [elenagalun](https://github.com/elenagalun) / [difygit](https://github.com/elenagalun/difygit) | 68 | +|   [veragloo](https://github.com/veragloo) / [dify](https://github.com/veragloo/dify) | 67 | +|   [jayita13](https://github.com/jayita13) / [GenerativeAI](https://github.com/jayita13/GenerativeAI) | 58 | +|   [MSNP1381](https://github.com/MSNP1381) / [kaggle-Agent](https://github.com/MSNP1381/kaggle-Agent) | 51 | +|   [longevity-genie](https://github.com/longevity-genie) / 
[just-agents](https://github.com/longevity-genie/just-agents) | 49 | +|   [gnosis](https://github.com/gnosis) / [prediction-market-agent](https://github.com/gnosis/prediction-market-agent) | 45 | +|   [opendatahub-io-contrib](https://github.com/opendatahub-io-contrib) / [workbench-images](https://github.com/opendatahub-io-contrib/workbench-images) | 44 | +|   [agentcoinorg](https://github.com/agentcoinorg) / [predictionprophet](https://github.com/agentcoinorg/predictionprophet) | 43 | +|   [Coding-Crashkurse](https://github.com/Coding-Crashkurse) / [Udemy-Advanced-LangChain](https://github.com/Coding-Crashkurse/Udemy-Advanced-LangChain) | 43 | +|   [holunda-io](https://github.com/holunda-io) / [bpm-ai-connectors-camunda-8](https://github.com/holunda-io/bpm-ai-connectors-camunda-8) | 38 | +|   [hustyichi](https://github.com/hustyichi) / [dify-eval](https://github.com/hustyichi/dify-eval) | 36 | +|   [guyernest](https://github.com/guyernest) / [advanced-rag](https://github.com/guyernest/advanced-rag) | 35 | +|   [jakobap](https://github.com/jakobap) / [graph2nosql](https://github.com/jakobap/graph2nosql) | 35 | +|   [valory-xyz](https://github.com/valory-xyz) / [mech](https://github.com/valory-xyz/mech) | 34 | +|   [UNOAWORLD](https://github.com/UNOAWORLD) / [unoa-agent](https://github.com/UNOAWORLD/unoa-agent) | 33 | +|   [jakobap](https://github.com/jakobap) / [graphrag-light](https://github.com/jakobap/graphrag-light) | 33 | +|   [garyzava](https://github.com/garyzava) / [chat-to-database-chatbot](https://github.com/garyzava/chat-to-database-chatbot) | 32 | +|   [gnosis](https://github.com/gnosis) / [prediction-market-agent-tooling](https://github.com/gnosis/prediction-market-agent-tooling) | 31 | +|   [Shekswess](https://github.com/Shekswess) / [synthgenai](https://github.com/Shekswess/synthgenai) | 30 | +|   [IDinsight](https://github.com/IDinsight) / [ask-a-question](https://github.com/IDinsight/ask-a-question) | 30 | +|   [SSK-14](https://github.com/SSK-14) / [WizSearch](https://github.com/SSK-14/WizSearch) | 30 | +|   [thomasnormal](https://github.com/thomasnormal) / [fewshot](https://github.com/thomasnormal/fewshot) | 28 | +|   [i-dot-ai](https://github.com/i-dot-ai) / [themefinder](https://github.com/i-dot-ai/themefinder) | 27 | +|   [pingcap](https://github.com/pingcap) / [LinguFlow](https://github.com/pingcap/LinguFlow) | 26 | +|   [GreyDGL](https://github.com/GreyDGL) / [ShareGPTs](https://github.com/GreyDGL/ShareGPTs) | 26 | +|   [syzhy113](https://github.com/syzhy113) / [Engineering-Code-Analysis](https://github.com/syzhy113/Engineering-Code-Analysis) | 25 | +|   [yaitec](https://github.com/yaitec) / [langflow-streamlit](https://github.com/yaitec/langflow-streamlit) | 25 | +|   [kaymen99](https://github.com/kaymen99) / [AI-Sales-agent](https://github.com/kaymen99/AI-Sales-agent) | 25 | +|   [SuperGalaxy0901](https://github.com/SuperGalaxy0901) / [Streamlit-OpenAI-Chatbot](https://github.com/SuperGalaxy0901/Streamlit-OpenAI-Chatbot) | 23 | +|   [dimagi](https://github.com/dimagi) / [open-chat-studio](https://github.com/dimagi/open-chat-studio) | 22 | +|   [mayflower](https://github.com/mayflower) / [langchain_agents](https://github.com/mayflower/langchain_agents) | 22 | +|   [cab938](https://github.com/cab938) / [jupyter_tool](https://github.com/cab938/jupyter_tool) | 21 | +|   [jayrinaldime](https://github.com/jayrinaldime) / [ollama-straico-apiproxy](https://github.com/jayrinaldime/ollama-straico-apiproxy) | 21 | +|   [PersonaFlow](https://github.com/PersonaFlow) / 
[agentstack](https://github.com/PersonaFlow/agentstack) | 21 | +|   [bearlike](https://github.com/bearlike) / [Personal-Assistant](https://github.com/bearlike/Personal-Assistant) | 21 | +|   [asteroidai](https://github.com/asteroidai) / [asteroid-python-sdk](https://github.com/asteroidai/asteroid-python-sdk) | 19 | +|   [betagouv](https://github.com/betagouv) / [ComparIA](https://github.com/betagouv/ComparIA) | 18 | +|   [Coding-Crashkurse](https://github.com/Coding-Crashkurse) / [LangChain-in-Production-with-Langfuse](https://github.com/Coding-Crashkurse/LangChain-in-Production-with-Langfuse) | 18 | +|   [beaubeas](https://github.com/beaubeas) / [lung-cancer-detection](https://github.com/beaubeas/lung-cancer-detection) | 17 | +|   [blacksmithop](https://github.com/blacksmithop) / [LLM-Graph-Builder](https://github.com/blacksmithop/LLM-Graph-Builder) | 17 | +|   [Hyperspawn](https://github.com/Hyperspawn) / [Dropbear](https://github.com/Hyperspawn/Dropbear) | 16 | +|   [shreyashankar](https://github.com/shreyashankar) / [spade-experiments](https://github.com/shreyashankar/spade-experiments) | 16 | +|   [ogabrielluiz](https://github.com/ogabrielluiz) / [langflow-railway](https://github.com/ogabrielluiz/langflow-railway) | 16 | +|   [minorun365](https://github.com/minorun365) / [aws-level-checker](https://github.com/minorun365/aws-level-checker) | 15 | +|   [aimclub](https://github.com/aimclub) / [ProtoLLM](https://github.com/aimclub/ProtoLLM) | 15 | +|   [Coding-Crashkurse](https://github.com/Coding-Crashkurse) / [LangGraph-Visualizer](https://github.com/Coding-Crashkurse/LangGraph-Visualizer) | 14 | +|   [ldilab](https://github.com/ldilab) / [ArchCode](https://github.com/ldilab/ArchCode) | 14 | +|   [balajivis](https://github.com/balajivis) / [modernaipro](https://github.com/balajivis/modernaipro) | 14 | +|   [diicellman](https://github.com/diicellman) / [dynamite-dogs](https://github.com/diicellman/dynamite-dogs) | 14 | +|   [shivaraj-bh](https://github.com/shivaraj-bh) / [ollama-flake](https://github.com/shivaraj-bh/ollama-flake) | 13 | +|   [maxritter](https://github.com/maxritter) / [aws-bedrock-multi-agent-blueprint](https://github.com/maxritter/aws-bedrock-multi-agent-blueprint) | 12 | +|   [krflorian](https://github.com/krflorian) / [planeswalker_companion](https://github.com/krflorian/planeswalker_companion) | 12 | +|   [wangxj03](https://github.com/wangxj03) / [ai-cookbook](https://github.com/wangxj03/ai-cookbook) | 12 | +|   [dida-do](https://github.com/dida-do) / [public](https://github.com/dida-do/public) | 12 | +|   [georgian-io](https://github.com/georgian-io) / [GAL](https://github.com/georgian-io/GAL) | 12 | +|   [Coding-Crashkurse](https://github.com/Coding-Crashkurse) / [RAG-Evaluation-with-Ragas](https://github.com/Coding-Crashkurse/RAG-Evaluation-with-Ragas) | 12 | +|   [phitrann](https://github.com/phitrann) / [arXivRAG](https://github.com/phitrann/arXivRAG) | 11 | +|   [aimclub](https://github.com/aimclub) / [FEDOT.LLM](https://github.com/aimclub/FEDOT.LLM) | 11 | +|   [c00cjz00](https://github.com/c00cjz00) / [llmservice_ip](https://github.com/c00cjz00/llmservice_ip) | 11 | +|   [zby](https://github.com/zby) / [answerbot](https://github.com/zby/answerbot) | 11 | +|   [himanshu-skid19](https://github.com/himanshu-skid19) / [Inter-IIT-12-Devrev-AI-Agent-007](https://github.com/himanshu-skid19/Inter-IIT-12-Devrev-AI-Agent-007) | 11 | +|   [YeonwooSung](https://github.com/YeonwooSung) / [MLOps](https://github.com/YeonwooSung/MLOps) | 11 | +|   
[stackitcloud](https://github.com/stackitcloud) / [rag-template](https://github.com/stackitcloud/rag-template) | 10 | +|   [CeS-3](https://github.com/CeS-3) / [pwn.hust.college](https://github.com/CeS-3/pwn.hust.college) | 10 | +|   [CdC-SI](https://github.com/CdC-SI) / [ZAS-EAK-CopilotGPT](https://github.com/CdC-SI/ZAS-EAK-CopilotGPT) | 10 | +|   [PhiBrandon](https://github.com/PhiBrandon) / [offer-generator-lightrag](https://github.com/PhiBrandon/offer-generator-lightrag) | 10 | +|   [brylie](https://github.com/brylie) / [langflow-fastapi-htmx](https://github.com/brylie/langflow-fastapi-htmx) | 10 | + +_Generated using [github-dependents-info](https://github.com/nvuillam/github-dependents-info), by [Nicolas Vuillamy](https://github.com/nvuillam)_ \ No newline at end of file diff --git a/components-mdx/docs-mcp-server-installation.mdx b/components-mdx/docs-mcp-server-installation.mdx new file mode 100644 index 000000000..ab8780670 --- /dev/null +++ b/components-mdx/docs-mcp-server-installation.mdx @@ -0,0 +1,151 @@ +import { Button } from "@/components/ui/button"; +import Link from "next/link"; + + + + + +Add Langfuse Docs MCP to Cursor via the one-click install: + +
+ +
+ +
+Manual configuration + +Add the following to your `mcp.json`: + +```json +{ + "mcpServers": { + "langfuse-docs": { + "url": "https://langfuse.com/api/mcp" + } + } +} +``` + +
+ +
+ + + +Add Langfuse Docs MCP to Copilot in VS Code via the following steps: + +1. Open the Command Palette (⌘+Shift+P) +2. Open "MCP: Add Server..." +3. Select `HTTP` +4. Paste `https://langfuse.com/api/mcp` +5. Select a name (e.g. `langfuse-docs`) and choose whether to save it in user or workspace settings +6. You're all set! The MCP server is now available in Agent mode + + + + + +Add Langfuse Docs MCP to Claude Code via the CLI: + +```bash +claude mcp add \ + --transport http \ + langfuse-docs \ + https://langfuse.com/api/mcp \ + --scope user +``` + +
+Manual configuration + +Alternatively, add the following to your settings file: + +- **User scope**: `~/.claude/settings.json` +- **Project scope**: `your-repo/.claude/settings.json` +- **Local scope**: `your-repo/.claude/settings.local.json` + +```json +{ + "mcpServers": { + "langfuse-docs": { + "transportType": "http", + "url": "https://langfuse.com/api/mcp", + "verifySsl": true + } + } +} +``` + +**One-liner JSON import** + +```bash +claude mcp add-json langfuse-docs \ + '{"type":"http","url":"https://langfuse.com/api/mcp"}' +``` + +Once added, start a Claude Code session (`claude`) and type `/mcp` to confirm the connection. + +
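To double-check the registration from the shell, the CLI's own inspection commands can help (a sketch; subcommand availability may vary across Claude Code versions):

```bash
# List all configured MCP servers and confirm langfuse-docs appears
claude mcp list

# Inspect the stored configuration for this server
claude mcp get langfuse-docs
```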
+ +
+ + + +Add Langfuse Docs MCP to Windsurf via the following steps: + +1. Open Command Palette (⌘+Shift+P) +2. Open "MCP Configuration Panel" +3. Select `Add custom server` +4. Add the following configuration: + + ```json + { + "mcpServers": { + "langfuse-docs": { + "command": "npx", + "args": ["mcp-remote", "https://langfuse.com/api/mcp"] + } + } + } + ``` + + + + + +Langfuse uses the `streamableHttp` protocol to communicate with the MCP server. This is supported by most clients. + +```json +{ + "mcpServers": { + "langfuse-docs": { + "url": "https://langfuse.com/api/mcp" + } + } +} +``` + +If you use a client that does not support `streamableHttp` (e.g. Windsurf), you can use the `mcp-remote` command as a local proxy. + +```json +{ + "mcpServers": { + "langfuse-docs": { + "command": "npx", + "args": ["mcp-remote", "https://langfuse.com/api/mcp"] + } + } +} +``` + + + +
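If your client is not listed above, you can sanity-check that the endpoint is reachable with a raw JSON-RPC `initialize` request. This is a connectivity probe only, not a full MCP handshake; the `protocolVersion` value and the response framing are assumptions that depend on the server:

```bash
curl -s -X POST https://langfuse.com/api/mcp \
  -H "Content-Type: application/json" \
  -H "Accept: application/json, text/event-stream" \
  -d '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "initialize",
    "params": {
      "protocolVersion": "2025-03-26",
      "capabilities": {},
      "clientInfo": { "name": "curl-probe", "version": "0.0.0" }
    }
  }'
```

A response listing the server's capabilities confirms that the endpoint speaks MCP over HTTP.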
diff --git a/components-mdx/env-js.mdx b/components-mdx/env-js.mdx index b513e4ca2..16dea0c70 100644 --- a/components-mdx/env-js.mdx +++ b/components-mdx/env-js.mdx @@ -1,6 +1,6 @@ ```bash filename=".env" -LANGFUSE_SECRET_KEY="sk-lf-..." -LANGFUSE_PUBLIC_KEY="pk-lf-..." -LANGFUSE_BASEURL="https://cloud.langfuse.com" # 🇪🇺 EU region -# LANGFUSE_BASEURL="https://us.cloud.langfuse.com" # 🇺🇸 US region +LANGFUSE_SECRET_KEY = "sk-lf-..." +LANGFUSE_PUBLIC_KEY = "pk-lf-..." +LANGFUSE_BASEURL = "https://cloud.langfuse.com" # 🇪🇺 EU region +# LANGFUSE_BASEURL = "https://us.cloud.langfuse.com" # 🇺🇸 US region ``` diff --git a/components-mdx/env-python-os.mdx b/components-mdx/env-python-os.mdx index 522d7ac77..9ef962018 100644 --- a/components-mdx/env-python-os.mdx +++ b/components-mdx/env-python-os.mdx @@ -1,8 +1,8 @@ ```python import os -os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..." -os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..." -os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region +os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..." +os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..." +os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region # os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region ``` diff --git a/components-mdx/env-python.mdx b/components-mdx/env-python.mdx index 4f360c521..199b793ff 100644 --- a/components-mdx/env-python.mdx +++ b/components-mdx/env-python.mdx @@ -1,6 +1,6 @@ ```bash filename=".env" -LANGFUSE_SECRET_KEY="sk-lf-..." -LANGFUSE_PUBLIC_KEY="pk-lf-..." -LANGFUSE_HOST="https://cloud.langfuse.com" # 🇪🇺 EU region -# LANGFUSE_HOST="https://us.cloud.langfuse.com" # 🇺🇸 US region +LANGFUSE_SECRET_KEY = "sk-lf-..." +LANGFUSE_PUBLIC_KEY = "pk-lf-..." +LANGFUSE_HOST = "https://cloud.langfuse.com" # 🇪🇺 EU region +# LANGFUSE_HOST = "https://us.cloud.langfuse.com" # 🇺🇸 US region ``` diff --git a/components-mdx/evaluation-overview-gifs.mdx b/components-mdx/evaluation-overview-gifs.mdx new file mode 100644 index 000000000..96735ff93 --- /dev/null +++ b/components-mdx/evaluation-overview-gifs.mdx @@ -0,0 +1,80 @@ +import { CloudflareVideo } from "@/components/Video"; + + + + +Plot evaluation results in the Langfuse Dashboard. + + + + + + +Collect feedback from your users. It can be captured in the frontend via our Browser SDK or server-side via the SDKs or API. The video includes an example application. + + + + + + +Run fully managed LLM-as-a-judge evaluations on production or development traces. They can be applied to any step within your application for step-wise evaluations. + + + + + + +Evaluate prompts and models on datasets directly in the user interface. No custom code is needed. + + + + + + + + +Baseline your evaluation workflow with human annotations via Annotation Queues. + + + + + + +Add custom evaluation results; supports numeric, boolean, and categorical values. + +```bash +POST /api/public/scores +``` + +Add scores via Python or JS SDK. 
+ +```python filename="Example (Python)" +langfuse.score( + trace_id="123", + name="my_custom_evaluator", + value=0.5, +) +``` + + + diff --git a/components-mdx/get-started-general.mdx b/components-mdx/get-started-general.mdx new file mode 100644 index 000000000..e1a7e6766 --- /dev/null +++ b/components-mdx/get-started-general.mdx @@ -0,0 +1,88 @@ +import { BookOpen, UserPlus, Video as VideoIcon } from "lucide-react"; + +import IntegrationsGrid from '@/components/home/IntegrationsGrid'; + +--- + +**Learn more about Langfuse** + + + } + /> + } + /> + } + /> + + +**Integrate Langfuse with your favorite framework** + + + + +```python /@observe()/ /from langfuse.openai import openai/ filename="main.py" +from langfuse import observe +from langfuse.openai import openai # OpenAI integration + +@observe() +def story(): + return openai.chat.completions.create( + model="gpt-4o", + messages=[{"role": "user", "content": "Once upon a time in a galaxy far, far away..."}], + ).choices[0].message.content + +@observe() +def main(): + return story() + +main() +``` + + + +```ts filename="server.ts" +import { Langfuse } from "langfuse"; + +const langfuse = new Langfuse(); + +const trace = langfuse.trace({ + name: "my-AI-application-endpoint", +}); + +// Example generation creation +const generation = trace.generation({ + name: "chat-completion", + model: "gpt-4o", + modelParameters: { + }, + input: messages, +}); + +// Application code +const chatCompletion = await llm.respond(prompt); + +// End generation - sets endTime +generation.end({ + output: chatCompletion, +}); +``` + + + + + + + + + + +--- \ No newline at end of file diff --git a/components-mdx/get-started-langchain-js-constructor-args.mdx b/components-mdx/get-started-langchain-js-constructor-args.mdx index 0e4880a0c..6759c7e07 100644 --- a/components-mdx/get-started-langchain-js-constructor-args.mdx +++ b/components-mdx/get-started-langchain-js-constructor-args.mdx @@ -2,7 +2,7 @@ npm i langfuse-langchain ``` -```typescript +```ts import { CallbackHandler } from "langfuse-langchain"; // Deno: import CallbackHandler from "https://esm.sh/langfuse-langchain"; diff --git a/components-mdx/get-started-langchain-js-env.mdx b/components-mdx/get-started-langchain-js-env.mdx index d29c13ba6..832643c86 100644 --- a/components-mdx/get-started-langchain-js-env.mdx +++ b/components-mdx/get-started-langchain-js-env.mdx @@ -6,7 +6,7 @@ npm i langfuse-langchain -```typescript +```ts import { CallbackHandler } from "langfuse-langchain"; // Deno: import CallbackHandler from "https://esm.sh/langfuse-langchain"; diff --git a/components-mdx/get-started-llamaindex-python-constructor-args.mdx b/components-mdx/get-started-llamaindex-python-constructor-args.mdx deleted file mode 100644 index 59e6208df..000000000 --- a/components-mdx/get-started-llamaindex-python-constructor-args.mdx +++ /dev/null @@ -1,12 +0,0 @@ -```python -from llama_index.core import Settings -from llama_index.core.callbacks import CallbackManager -from langfuse.llama_index import LlamaIndexCallbackHandler - -langfuse_callback_handler = LlamaIndexCallbackHandler( - public_key="pk-lf-...", - secret_key="sk-lf-...", - host="https://cloud.langfuse.com" -) -Settings.callback_manager = CallbackManager([langfuse_callback_handler]) -``` diff --git a/components-mdx/get-started-llamaindex-python-env.mdx b/components-mdx/get-started-llamaindex-python-env.mdx deleted file mode 100644 index aff72c148..000000000 --- a/components-mdx/get-started-llamaindex-python-env.mdx +++ /dev/null @@ -1,12 +0,0 @@ -import Env from 
"./env-python.mdx"; - - - -```python -from llama_index.core import Settings -from llama_index.core.callbacks import CallbackManager -from langfuse.llama_index import LlamaIndexCallbackHandler - -langfuse_callback_handler = LlamaIndexCallbackHandler() -Settings.callback_manager = CallbackManager([langfuse_callback_handler]) -``` diff --git a/components-mdx/get-started-python-decorator-any-llm.mdx b/components-mdx/get-started-python-decorator-any-llm.mdx index 9bec79242..5f3d55b60 100644 --- a/components-mdx/get-started-python-decorator-any-llm.mdx +++ b/components-mdx/get-started-python-decorator-any-llm.mdx @@ -22,7 +22,7 @@ def anthropic_completion(**kwargs): # See docs for more details on token counts and usd cost in Langfuse # https://langfuse.com/docs/model-usage-and-cost langfuse_context.update_current_observation( - usage={ + usage_details={ "input": response.usage.input_tokens, "output": response.usage.output_tokens } diff --git a/components-mdx/get-started-python-decorator-openai.mdx b/components-mdx/get-started-python-decorator-openai.mdx index 8ca67078c..604dbc204 100644 --- a/components-mdx/get-started-python-decorator-openai.mdx +++ b/components-mdx/get-started-python-decorator-openai.mdx @@ -5,8 +5,7 @@ from langfuse.openai import openai # OpenAI integration @observe() def story(): return openai.chat.completions.create( - model="gpt-3.5-turbo", - max_tokens=100, + model="gpt-4o", messages=[ {"role": "system", "content": "You are a great storyteller."}, {"role": "user", "content": "Once upon a time in a galaxy far, far away..."} diff --git a/components-mdx/get-started/js-langchain.mdx b/components-mdx/get-started/js-langchain.mdx new file mode 100644 index 000000000..abc82e3c5 --- /dev/null +++ b/components-mdx/get-started/js-langchain.mdx @@ -0,0 +1,23 @@ +```bash +npm i langfuse-langchain +``` + +import EnvJS from "@/components-mdx/env-js.mdx"; + +Add your Langfuse credentials to your environment variables. Make sure that you have a `.env` file in your project root and a package like `dotenv` to load the variables. + + + +Initialize the Langfuse callback handler and add it to your chain. + +```ts {9} +import { CallbackHandler } from "langfuse-langchain"; +// Deno: import CallbackHandler from "https://esm.sh/langfuse-langchain"; + +const langfuseHandler = new CallbackHandler(); + +// Your Langchain code + +// Add Langfuse handler as callback to `run` or `invoke` +await chain.invoke({ input: "" }, { callbacks: [langfuseHandler] }); +``` \ No newline at end of file diff --git a/components-mdx/get-started/js-openai-sdk.mdx b/components-mdx/get-started/js-openai-sdk.mdx new file mode 100644 index 000000000..c98ade18f --- /dev/null +++ b/components-mdx/get-started/js-openai-sdk.mdx @@ -0,0 +1,24 @@ +```sh +npm install langfuse openai +``` + +Add your Langfuse credentials to your environment variables. Make sure that you have a `.env` file in your project root and a package like `dotenv` to load the variables. + +import EnvJS from "@/components-mdx/env-js.mdx"; + + + +With your environment configured, call OpenAI SDK methods as usual from the wrapped client. + +```ts +import OpenAI from "openai"; +import { observeOpenAI } from "langfuse"; + +const openai = observeOpenAI(new OpenAI()); + +const res = await openai.chat.completions.create({ + messages: [{ role: "system", content: "Tell me a story about a dog." 
}], + model: "gpt-4o", + max_tokens: 300, +}); +``` \ No newline at end of file diff --git a/components-mdx/get-started/js-sdk.mdx b/components-mdx/get-started/js-sdk.mdx new file mode 100644 index 000000000..6c86ac88f --- /dev/null +++ b/components-mdx/get-started/js-sdk.mdx @@ -0,0 +1,68 @@ +```bash +npm i langfuse +``` + + + +```bash filename=".env" +LANGFUSE_SECRET_KEY = "sk-lf-..." +LANGFUSE_PUBLIC_KEY = "pk-lf-..." +LANGFUSE_BASEURL = "https://cloud.langfuse.com" # 🇪🇺 EU region +# LANGFUSE_BASEURL = "https://us.cloud.langfuse.com" # 🇺🇸 US region +``` + +```ts +import { Langfuse } from "langfuse"; // or "langfuse-node" + +const langfuse = new Langfuse(); +``` + + + + +```ts +import { Langfuse } from "langfuse"; // or "langfuse-node" + +const langfuse = new Langfuse({ + secretKey: "sk-lf-...", + publicKey: "pk-lf-...", + baseUrl: "https://cloud.langfuse.com", // 🇪🇺 EU region + // baseUrl: "https://us.cloud.langfuse.com", // 🇺🇸 US region + + // optional + release: "v1.0.0", + requestTimeout: 10000, + enabled: true, // set to false to disable sending events +}); +``` + + + + + +```ts filename="server.ts" +const trace = langfuse.trace({ + name: "my-AI-application-endpoint", +}); + +// Example generation creation +const generation = trace.generation({ + name: "chat-completion", + model: "gpt-4o", + input: messages, +}); + +// Application code +const chatCompletion = await llm.respond(prompt); + +// End generation - sets endTime +generation.end({ + output: chatCompletion, +}); +``` + + + In short-lived environments (e.g. serverless functions), make sure to always + call `langfuse.shutdownAsync()` at the end to await all pending requests. + ([Learn more](#lambda)) + \ No newline at end of file diff --git a/components-mdx/get-started/langchain.mdx b/components-mdx/get-started/langchain.mdx new file mode 100644 index 000000000..293f4f9f6 --- /dev/null +++ b/components-mdx/get-started/langchain.mdx @@ -0,0 +1,32 @@ +import Env from "@/components-mdx/env-python.mdx"; + +```bash +pip install langfuse langchain-openai +``` + +Add your Langfuse credentials as environment variables. + + + +Initialize the Langfuse callback handler. + +```python +from langfuse.langchain import CallbackHandler + +langfuse_handler = CallbackHandler() +``` + +Add the Langfuse callback handler to your chain. 
+ +```python {10} +from langchain_openai import ChatOpenAI +from langchain_core.prompts import ChatPromptTemplate + +llm = ChatOpenAI(model_name="gpt-4o") +prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}") +chain = prompt | llm + +response = chain.invoke( + {"topic": "cats"}, + config={"callbacks": [langfuse_handler]}) +``` \ No newline at end of file diff --git a/components-mdx/get-started/next-steps.mdx b/components-mdx/get-started/next-steps.mdx new file mode 100644 index 000000000..b1fce8386 --- /dev/null +++ b/components-mdx/get-started/next-steps.mdx @@ -0,0 +1,43 @@ +import { Tabs, Cards } from "nextra/components"; +import { + FileText, + ClipboardCheck, + Scale, + Database, + LayoutDashboard, + TestTube, +} from "lucide-react"; + +{/* without defining num, the cards will be rendered as three columns, which is hard to read */} + + } + /> + } + /> + } + /> + } + /> + } + /> + } + /> + \ No newline at end of file diff --git a/components-mdx/get-started/openai-sdk.mdx b/components-mdx/get-started/openai-sdk.mdx new file mode 100644 index 000000000..559ccc65a --- /dev/null +++ b/components-mdx/get-started/openai-sdk.mdx @@ -0,0 +1,28 @@ +import Env from "@/components-mdx/env-python.mdx"; + +```bash +pip install langfuse +``` + +Add your Langfuse credentials as environment variables. + + + +Change the import to use the OpenAI drop-in replacement. + +```python +from langfuse.openai import openai +``` + +Use the OpenAI SDK as usual. + +```python +completion = openai.chat.completions.create( + name="test-chat", + model="gpt-4o", + messages=[ + {"role": "system", "content": "You are a very accurate calculator. You output only the result of the calculation."}, + {"role": "user", "content": "1 + 1 = "}], + metadata={"someMetadataKey": "someValue"}, +) +``` \ No newline at end of file diff --git a/components-mdx/get-started/python-sdk.mdx b/components-mdx/get-started/python-sdk.mdx new file mode 100644 index 000000000..df1969c7d --- /dev/null +++ b/components-mdx/get-started/python-sdk.mdx @@ -0,0 +1,80 @@ + + + +The `@observe` decorator is the simplest way to instrument your application. It is a function decorator that can be applied to any function. + +It sets the current span in the context for automatic nesting of child spans and automatically ends it when the function returns. It also automatically captures the function name, arguments, and return value. + +```python +from langfuse import observe, get_client + +@observe +def my_function(): + return "Hello, world!" # Input/output and timings are automatically captured + +my_function() + +# Flush events in short-lived applications +langfuse = get_client() +langfuse.flush() +``` + + + + +Context managers are the recommended way to instrument chunks of work in your application as they automatically handle the start and end of spans, and set the current span in the context for automatic nesting of child spans. They provide more control than the `@observe` decorator. 
+ +```python +from langfuse import get_client + +langfuse = get_client() + +# Create a span using a context manager +with langfuse.start_as_current_span(name="process-request") as span: + # Your processing logic here + span.update(output="Processing complete") + + # Create a nested generation for an LLM call + with langfuse.start_as_current_generation(name="llm-response", model="gpt-3.5-turbo") as generation: + # Your LLM call logic here + generation.update(output="Generated response") + +# All spans are automatically closed when exiting their context blocks + + +# Flush events in short-lived applications +langfuse.flush() +``` + + + + +Manual observations give you control over when spans start and end and do not set the current span in the context for automatic nesting of child spans. You must explicitly call `.end()` when they're complete. + +```python +from langfuse import get_client + +langfuse = get_client() + +# Create a span without a context manager +span = langfuse.start_span(name="user-request") + +# Your processing logic here +span.update(output="Request processed") + +# Child spans must be created using the parent span object +nested_span = span.start_span(name="nested-span") +nested_span.update(output="Nested span output") + +# Important: Manually end the span +nested_span.end() + +# Important: Manually end the parent span +span.end() + +# Flush events in short-lived applications +langfuse.flush() +``` + + + \ No newline at end of file diff --git a/components-mdx/github-cta.mdx b/components-mdx/github-cta.mdx new file mode 100644 index 000000000..2a0ee6d9d --- /dev/null +++ b/components-mdx/github-cta.mdx @@ -0,0 +1,10 @@ + + +
+ Stay updated: Star Langfuse on [GitHub](https://github.com/langfuse/langfuse) to get instant notifications about new releases. + + Langfuse GitHub stars + +
+ +
+ \ No newline at end of file diff --git a/components-mdx/integration-learn-more.mdx b/components-mdx/integration-learn-more.mdx new file mode 100644 index 000000000..64993430f --- /dev/null +++ b/components-mdx/integration-learn-more.mdx @@ -0,0 +1,85 @@ +import { Tabs, Cards } from "nextra/components"; +import { + FileText, + ClipboardCheck, + Scale, + Database, + LayoutDashboard, + TestTube, +} from "lucide-react"; + +## Interoperability with the Python SDK + +You can use this integration together with the Langfuse [Python SDK](/docs/sdk/python/sdk-v3) to add additional attributes to the trace. + + + + +The [`@observe()` decorator](/docs/sdk/python/sdk-v3#observe-decorator) provides a convenient way to automatically wrap your instrumented code and add additional attributes to the trace. + +```python +from langfuse import observe, get_client + +langfuse = get_client() + +@observe() +def my_instrumented_function(input): + output = my_llm_call(input) + + langfuse.update_current_trace( + input=input, + output=output, + user_id="user_123", + session_id="session_abc", + tags=["agent", "my-trace"], + metadata={"email": "user@langfuse.com"}, + version="1.0.0" + ) + + return output +``` + +Learn more about using the Decorator in the [Python SDK](/docs/sdk/python/sdk-v3#observe-decorator) docs. + + + + +The [Context Manager](/docs/sdk/python/sdk-v3#context-managers) allows you to wrap your instrumented code using context managers (with `with` statements), which allows you to add additional attributes to the trace. + +```python +from langfuse import get_client + +langfuse = get_client() + +with langfuse.start_as_current_span(name="my-trace") as span: + + # Run your application here + output = my_llm_call(input) + + # Pass additional attributes to the span + span.update_trace( + input=input, + output=output, + user_id="user_123", + session_id="session_abc", + tags=["agent", "my-trace"], + metadata={"email": "user@langfuse.com"}, + version="1.0.0" + ) + +# Flush events in short-lived applications +langfuse.flush() +``` + +Learn more about using the Context Manager in the [Python SDK](/docs/sdk/python/sdk-v3#context-managers) docs. + + + + +## Next Steps + +Once you have instrumented your code, you can manage, evaluate and debug your application: + +import NextSteps from "@/components-mdx/get-started/next-steps.mdx"; + + diff --git a/components-mdx/litellm-about.mdx b/components-mdx/litellm-about.mdx new file mode 100644 index 000000000..dd38ea18d --- /dev/null +++ b/components-mdx/litellm-about.mdx @@ -0,0 +1,17 @@ +## Learn more about LiteLLM + +### What is LiteLLM? + +[LiteLLM](https://litellm.ai) is an open source proxy server to manage auth, load balancing, and spend tracking across more than 100 LLMs. LiteLLM has grown to be a popular utility for developers working with LLMs and is widely regarded as a useful abstraction. + +### Is LiteLLM an Open Source project? + +Yes, LiteLLM is open source. The majority of its code is permissively MIT-licensed. You can find the open source LiteLLM repository on [GitHub](https://github.com/BerriAI/litellm). + +### Can I use LiteLLM with Ollama and local models? + +Yes, you can use LiteLLM with Ollama and other local models. LiteLLM supports all models from Ollama, and it provides a Docker image for an OpenAI API-compatible server for local LLMs like llama2, mistral, and codellama. + +### How does LiteLLM simplify API calls across multiple LLM providers? + +LiteLLM provides a unified interface for calling models such as OpenAI, Anthropic, Cohere, Ollama and others. 
This means you can call any supported model using a consistent method, such as `completion(model, messages)`, and expect a uniform response format. The library does away with the need for if/else statements or provider-specific code, making it easier to manage and debug LLM interactions in your application. diff --git a/components-mdx/multi-modal-image-gallery.mdx b/components-mdx/multi-modal-image-gallery.mdx new file mode 100644 index 000000000..7bcd81c39 --- /dev/null +++ b/components-mdx/multi-modal-image-gallery.mdx @@ -0,0 +1,17 @@ + + + + ![Trace in Langfuse UI](/images/docs/multi-modal-trace-image.jpg) + + + + + ![Trace in Langfuse UI](/images/docs/multi-modal-trace-audio.png) + + + + + ![Trace in Langfuse UI](/images/docs/multi-modal-trace-attachment.png) + + + diff --git a/components-mdx/observability-core-features.mdx b/components-mdx/observability-core-features.mdx new file mode 100644 index 000000000..79e875f66 --- /dev/null +++ b/components-mdx/observability-core-features.mdx @@ -0,0 +1,52 @@ +import { + Users, + Tag, + MessagesSquare, + Images, + Braces, + GitGraph, + Globe, + Database, + FileDigit, + GitCompare, + MapPin, + BarChart3, + Filter, + BadgeDollarSign, + EyeOff, + MessageCircle, +} from "lucide-react"; + + + } + arrow + /> + } + arrow + /> + } + arrow + /> + } arrow /> + } + arrow + /> + } + arrow + /> + \ No newline at end of file diff --git a/components-mdx/prompt-create.mdx b/components-mdx/prompt-create.mdx new file mode 100644 index 000000000..a6c79f5cf --- /dev/null +++ b/components-mdx/prompt-create.mdx @@ -0,0 +1,183 @@ + + + +Use the Langfuse UI to create a new prompt or update an existing one. + + + + + + +import Env from "@/components-mdx/env-python.mdx"; + +```bash +pip install langfuse +``` + +Add your Langfuse credentials as environment variables. + + + +Use the Python SDK to create a new prompt or update an existing one. + +```python +# Create a text prompt +langfuse.create_prompt( + name="movie-critic", + type="text", + prompt="As a {{criticlevel}} movie critic, do you like {{movie}}?", + labels=["production"], # directly promote to production + config={ + "model": "gpt-4o", + "temperature": 0.7, + "supported_languages": ["en", "fr"], + }, # optionally, add configs (e.g. model parameters or model tools) or tags +) + +# Create a chat prompt +langfuse.create_prompt( + name="movie-critic-chat", + type="chat", + prompt=[ + { "role": "system", "content": "You are an {{criticlevel}} movie critic" }, + { "role": "user", "content": "Do you like {{movie}}?" }, + ], + labels=["production"], # directly promote to production + config={ + "model": "gpt-4o", + "temperature": 0.7, + "supported_languages": ["en", "fr"], + }, # optionally, add configs (e.g. model parameters or model tools) or tags +) +``` + +If you already have a prompt with the same `name`, the prompt will be added as a new version. 
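Once created, a prompt is typically fetched and compiled at runtime. A minimal Python sketch building on the example above (the values passed to `compile` are illustrative assumptions):

```python
from langfuse import get_client

langfuse = get_client()

# Fetch the current production version of the prompt
prompt = langfuse.get_prompt("movie-critic")

# Fill in the {{variables}} defined in the prompt template
compiled_prompt = prompt.compile(criticlevel="expert", movie="Dune 2")

# The config stored alongside the prompt is available as well
model = prompt.config["model"]  # "gpt-4o"
```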
+ + + + + +```bash +npm i langfuse +``` + + + +```bash filename=".env" +LANGFUSE_SECRET_KEY = "sk-lf-..." +LANGFUSE_PUBLIC_KEY = "pk-lf-..." +LANGFUSE_BASEURL = "https://cloud.langfuse.com" # 🇪🇺 EU region +# LANGFUSE_BASEURL = "https://us.cloud.langfuse.com" # 🇺🇸 US region +``` + +```ts +import { Langfuse } from "langfuse"; // or "langfuse-node" + +const langfuse = new Langfuse(); +``` + + + + +```ts +import { Langfuse } from "langfuse"; // or "langfuse-node" + +const langfuse = new Langfuse({ + secretKey: "sk-lf-...", + publicKey: "pk-lf-...", + baseUrl: "https://cloud.langfuse.com", // 🇪🇺 EU region + // baseUrl: "https://us.cloud.langfuse.com", // 🇺🇸 US region + + // optional + release: "v1.0.0", + requestTimeout: 10000, + enabled: true, // set to false to disable sending events +}); +``` + + + + +Use the JS/TS SDK to create a new prompt or update an existing one. + +```ts +// Create a text prompt +await langfuse.createPrompt({ + name: "movie-critic", + type: "text", + prompt: "As a {{criticlevel}} movie critic, do you like {{movie}}?", + labels: ["production"], // directly promote to production + config: { + model: "gpt-4o", + temperature: 0.7, + supported_languages: ["en", "fr"], + }, // optionally, add configs (e.g. model parameters or model tools) or tags +}); + +// Create a chat prompt +await langfuse.createPrompt({ + name: "movie-critic-chat", + type: "chat", + prompt: [ + { role: "system", content: "You are an {{criticlevel}} movie critic" }, + { role: "user", content: "Do you like {{movie}}?" }, + ], + labels: ["production"], // directly promote to production + config: { + model: "gpt-4o", + temperature: 0.7, + supported_languages: ["en", "fr"], + }, // optionally, add configs (e.g. model parameters or model tools) or tags +}); +``` + +If you already have a prompt with the same `name`, the prompt will be added as a new version. + + + + + +Use the [Public API](https://api.reference.langfuse.com/#tag/prompts/post/api/public/v2/prompts) to create a new prompt or update an existing one. + +```bash +curl https://cloud.langfuse.com/api/public/v2/prompts \ + --request POST \ + --header 'Content-Type: application/json' \ + --data '{ + "type": "chat", + "name": "", + "prompt": [ + { + "type": "chatmessage", + "role": "", + "content": "" + } + ], + "config": null, + "labels": [ + "" + ], + "tags": [ + "" + ], + "commitMessage": null +}' + +``` + + + + + + + + + diff --git a/components-mdx/prompt-linking.mdx b/components-mdx/prompt-linking.mdx new file mode 100644 index 000000000..3562f6c51 --- /dev/null +++ b/components-mdx/prompt-linking.mdx @@ -0,0 +1,258 @@ + + + +**Decorators** + +```python +from langfuse import observe, get_client + +langfuse = get_client() + +@observe(as_type="generation") +def nested_generation(): + prompt = langfuse.get_prompt("movie-critic") + + langfuse.update_current_generation( + prompt=prompt, + ) + +@observe() +def main(): + nested_generation() + +main() +``` + +**Context Managers** + +```python +from langfuse import get_client + +langfuse = get_client() + +prompt = langfuse.get_prompt("movie-critic") + +with langfuse.start_as_current_generation( + name="movie-generation", + model="gpt-4o", + prompt=prompt +) as generation: + # Your LLM call here + generation.update(output="LLM response") +``` + + + + + +```diff +langfuse.generation({ + ... ++ prompt: prompt + ... 
+}) +``` + + + + + +```python /langfuse_prompt=prompt/ +from langfuse.openai import openai +from langfuse import get_client + +langfuse = get_client() + +prompt = langfuse.get_prompt("calculator") + +openai.chat.completions.create( + model="gpt-4o", + messages=[ + {"role": "system", "content": prompt.compile(base=10)}, + {"role": "user", "content": "1 + 1 = "}], + langfuse_prompt=prompt +) +``` + + + + + +```ts /langfusePrompt,/ +import { observeOpenAI } from "langfuse"; +import OpenAI from "openai"; + +const langfusePrompt = await langfuse.getPrompt("prompt-name"); // Fetch a previously created prompt + +const res = await observeOpenAI(new OpenAI(), { + langfusePrompt, +}).completions.create({ + prompt: langfusePrompt.prompt, + model: "gpt-4o", + max_tokens: 300, +}); +``` + + + + + +```python +from langfuse import get_client +from langfuse.langchain import CallbackHandler +from langchain_core.prompts import ChatPromptTemplate, PromptTemplate +from langchain_openai import ChatOpenAI, OpenAI + +langfuse = get_client() + +# Initialize the Langfuse handler +langfuse_handler = CallbackHandler() +``` + +**Text prompts** + +```python /"langfuse_prompt"/ +langfuse_text_prompt = langfuse.get_prompt("movie-critic") + +## Pass the langfuse_text_prompt to the PromptTemplate as metadata to link it to generations that use it +langchain_text_prompt = PromptTemplate.from_template( + langfuse_text_prompt.get_langchain_prompt(), + metadata={"langfuse_prompt": langfuse_text_prompt}, +) + +## Use the text prompt in a Langchain chain +llm = OpenAI() +completion_chain = langchain_text_prompt | llm + +completion_chain.invoke({"movie": "Dune 2", "criticlevel": "expert"}, config={"callbacks": [langfuse_handler]}) +``` + +**Chat prompts** + +```python /"langfuse_prompt"/ +langfuse_chat_prompt = langfuse.get_prompt("movie-critic-chat", type="chat") + +## Manually set the metadata on the langchain_chat_prompt to link it to generations that use it +langchain_chat_prompt = ChatPromptTemplate.from_messages( + langfuse_chat_prompt.get_langchain_prompt() +) + +langchain_chat_prompt.metadata = {"langfuse_prompt": langfuse_chat_prompt} + +## or use the ChatPromptTemplate constructor directly. +## Note that using ChatPromptTemplate.from_template led to issues in the past +## See: https://github.com/langfuse/langfuse/issues/5374 +langchain_chat_prompt = ChatPromptTemplate( + langfuse_chat_prompt.get_langchain_prompt(), + metadata={"langfuse_prompt": langfuse_chat_prompt} +) + +## Use the chat prompt in a Langchain chain +chat_llm = ChatOpenAI() +chat_chain = langchain_chat_prompt | chat_llm + +chat_chain.invoke({"movie": "Dune 2", "criticlevel": "expert"}, config={"callbacks": [langfuse_handler]}) +``` + + + If you use the `with_config` method on the PromptTemplate to create a new + Langchain Runnable with updated config, please make sure to pass the + `langfuse_prompt` in the `metadata` key as well. + + + + Set the `langfuse_prompt` metadata key only on PromptTemplates and not + additionally on the LLM calls or elsewhere in your chains. 
+ + + + + + +```ts +import { Langfuse } from "langfuse"; +import { PromptTemplate } from "@langchain/core/prompts"; +import { ChatOpenAI, OpenAI } from "@langchain/openai"; +import { CallbackHandler } from "langfuse-langchain"; + +const langfuseHandler = new CallbackHandler({ + secretKey: "sk-lf-...", + publicKey: "pk-lf-...", + baseUrl: "https://cloud.langfuse.com", // 🇪🇺 EU region + // baseUrl: "https://us.cloud.langfuse.com", // 🇺🇸 US region +}); + +const langfuse = new Langfuse(); +``` + +**Text prompts** + +```ts /metadata: { langfusePrompt:/ +const langfuseTextPrompt = await langfuse.getPrompt("movie-critic"); // Fetch a previously created text prompt + +// Pass the langfuseTextPrompt to the PromptTemplate as metadata to link it to generations that use it +const langchainTextPrompt = PromptTemplate.fromTemplate( + langfuseTextPrompt.getLangchainPrompt() +).withConfig({ + metadata: { langfusePrompt: langfuseTextPrompt }, +}); + +const model = new OpenAI(); +const chain = langchainTextPrompt.pipe(model); + +await chain.invoke({ movie: "Dune 2", criticlevel: "expert" }, { callbacks: [langfuseHandler] }); + +``` + +**Chat prompts** + +```ts /metadata: { langfusePrompt:/ +const langfuseChatPrompt = await langfuse.getPrompt( + "movie-critic-chat", + undefined, + { + type: "chat", + } +); // type option infers the prompt type as chat (default is 'text') + +const langchainChatPrompt = ChatPromptTemplate.fromMessages( + langfuseChatPrompt.getLangchainPrompt().map((m) => [m.role, m.content]) +).withConfig({ + metadata: { langfusePrompt: langfuseChatPrompt }, +}); + +const chatModel = new ChatOpenAI(); +const chatChain = langchainChatPrompt.pipe(chatModel); + +await chatChain.invoke({ movie: "Dune 2", criticlevel: "expert" }, { callbacks: [langfuseHandler] }); +``` + + + + + +Link Langfuse prompts to Vercel AI SDK generations by setting the `langfusePrompt` property in the `metadata` field: + +```typescript /langfusePrompt: fetchedPrompt.toJSON()/ +import { generateText } from "ai"; +import { Langfuse } from "langfuse"; + +const langfuse = new Langfuse(); + +const fetchedPrompt = await langfuse.getPrompt("my-prompt"); + +const result = await generateText({ + model: openai("gpt-4o"), + prompt: fetchedPrompt.prompt, + experimental_telemetry: { + isEnabled: true, + metadata: { + langfusePrompt: fetchedPrompt.toJSON(), + }, + }, +}); +``` + + + + \ No newline at end of file diff --git a/components-mdx/prompt-overview-gifs.mdx b/components-mdx/prompt-overview-gifs.mdx new file mode 100644 index 000000000..d32227267 --- /dev/null +++ b/components-mdx/prompt-overview-gifs.mdx @@ -0,0 +1,82 @@ +import { CloudflareVideo } from "@/components/Video"; + + + + + +Create a new prompt via UI, SDKs, or API. + + + + + + + +Collaboratively version and edit prompts via UI, API, or SDKs. + + + + + + +Deploy prompts to production or any environment via labels - without any code changes. + + + + + + +Compare latency, cost, and evaluation metrics across different versions of your prompts. + + + + + + +Instantly test your prompts in the playground. + + + + + + +Link prompts with traces to understand how they perform in the context of your LLM application. + + + + + +Track changes to your prompts to understand how they evolve over time. 
+ + + + + diff --git a/components-mdx/public-metrics.mdx b/components-mdx/public-metrics.mdx index e92a90841..e249fb47c 100644 --- a/components-mdx/public-metrics.mdx +++ b/components-mdx/public-metrics.mdx @@ -1,8 +1,8 @@ diff --git a/components-mdx/self-host-features.mdx b/components-mdx/self-host-features.mdx new file mode 100644 index 000000000..86fcf5c9d --- /dev/null +++ b/components-mdx/self-host-features.mdx @@ -0,0 +1,77 @@ +import { + Lock, + Shield, + Network, + Users, + Brush, + Workflow, + UserCog, + Route, + Mail, + ServerCog, +} from "lucide-react"; + +import { Cards } from "nextra/components"; + + + } + title="Authentication & SSO" + href="/self-hosting/authentication-and-sso" + arrow + /> + } + title="Automated Access Provisioning" + href="/self-hosting/automated-access-provisioning" + arrow + /> + } + title="Custom Base Path" + href="/self-hosting/custom-base-path" + arrow + /> + } + title="Encryption" + href="/self-hosting/encryption" + arrow + /> + } + title="Headless Initialization" + href="/self-hosting/headless-initialization" + arrow + /> + } + title="Networking" + href="/self-hosting/networking" + arrow + /> + } + title="Organization Creators (EE)" + href="/self-hosting/organization-creators" + arrow + /> + } + title="Organization Management API (EE)" + href="/self-hosting/organization-management-api" + arrow + /> + } + title="Transactional Emails" + href="/self-hosting/transactional-emails" + arrow + /> + } + title="UI Customization (EE)" + href="/self-hosting/ui-customization" + arrow + /> + diff --git a/components-mdx/self-host-help-footer.mdx b/components-mdx/self-host-help-footer.mdx new file mode 100644 index 000000000..ebda44ab0 --- /dev/null +++ b/components-mdx/self-host-help-footer.mdx @@ -0,0 +1,7 @@ +## Support + +If you experience any issues, please create an [issue on GitHub](/issues) or contact the maintainers ([support](/support)). + +For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [talk to us](/talk-to-us). + +Alternatively, you may consider using Langfuse Cloud, which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/security). diff --git a/components-mdx/tracing-overview-gifs.mdx b/components-mdx/tracing-overview-gifs.mdx new file mode 100644 index 000000000..b354f3f1e --- /dev/null +++ b/components-mdx/tracing-overview-gifs.mdx @@ -0,0 +1,73 @@ +import { CloudflareVideo } from "@/components/Video"; + + + + +Traces allow you to track every LLM call and other relevant logic in your app. + + + + + + +Sessions allow you to track multi-step conversations or agentic workflows. + + + + + + +Debug latency issues by inspecting the timeline view. + + + + + + + +Add your own `userId` to monitor costs and usage for each user. Optionally, create a deep link to this view in your systems. + + + + + + +LLM agents can be visualized as a graph to illustrate the flow of complex agentic workflows. + + + + + + +See quality, cost, and latency metrics in the dashboard to monitor your LLM application. 
+ + + + + + + diff --git a/components/Authors.tsx b/components/Authors.tsx index 864560a3e..bbbaf8008 100644 --- a/components/Authors.tsx +++ b/components/Authors.tsx @@ -1,68 +1,27 @@ +import { cn } from "@/lib/utils"; import Image from "next/image"; +import authorsData from "../data/authors.json"; -export const allAuthors = { - maxdeichmann: { - firstName: "Max", - name: "Max Deichmann", - image: "/images/people/maxdeichmann.jpg", - twitter: "maxdeichmann", - }, - marcklingen: { - firstName: "Marc", - name: "Marc Klingen", - image: "/images/people/marcklingen.jpg", - twitter: "marcklingen", - }, - clemensrawert: { - firstName: "Clemens", - name: "Clemens Rawert", - image: "/images/people/clemensrawert.jpg", - twitter: "rawert", - }, - hassiebpakzad: { - firstName: "Hassieb", - name: "Hassieb Pakzad", - image: "/images/people/hassiebpakzad.jpg", - twitter: "hassiebpakzad", - }, - richardkruemmel: { - firstName: "Richard", - name: "Richard Krümmel", - image: "/images/people/richardkruemmel.jpg", - twitter: "RichardKrue", - }, - marliesmayerhofer: { - firstName: "Marlies", - name: "Marlies Mayerhofer", - image: "/images/people/marliesmayerhofer.jpg", - twitter: "marliessophie", - }, - lydiayou: { - firstName: "Lydia", - name: "Lydia You", - image: "/images/people/lydiayou.jpg", - }, - jannikmaierhoefer: { - firstName: "Jannik", - name: "Jannik Maierhöfer", - image: "/images/people/jannikmaierhoefer.jpg", - }, -} as const; +export const allAuthors = authorsData; -export const Authors = (props: { authors: (keyof typeof allAuthors)[] }) => { - const authors = props.authors.filter((author) => author in allAuthors); +export const Authors = (props: { authors?: (keyof typeof allAuthors)[] }) => { + const authors = props.authors?.filter((author) => author in allAuthors) ?? []; if (authors.length === 0) return null; return ( -
+
{authors.map((author) => ( - + 2} + /> ))}
); }; -export const Author = (props: { author: string }) => { +export const Author = (props: { author: string; hideLastName?: boolean }) => { const author = allAuthors[props.author] ?? Object.values(allAuthors).find( @@ -87,8 +46,12 @@ export const Author = (props: { author: string }) => { className="rounded-full" alt={`Picture ${author.name}`} /> - - {author.name} + + {props.hideLastName ? author.firstName : author.name}
diff --git a/components/CTACard.tsx b/components/CTACard.tsx new file mode 100644 index 000000000..1cb15df79 --- /dev/null +++ b/components/CTACard.tsx @@ -0,0 +1,53 @@ +import React from "react"; +import { Card, CardContent } from "@/components/ui/card"; +import { Button } from "@/components/ui/button"; +import { ArrowRight } from "lucide-react"; +import { cn } from "@/lib/utils"; + +interface CTACardProps { + title: string; + description: string; + children?: React.ReactNode; + className?: string; + showArrow?: boolean; +} + +export function CTACard({ title, description, children, className, showArrow = false }: CTACardProps) { + return ( + + +
+    <Card className={cn("bg-card", className)}>
+      <CardContent className="flex flex-col gap-4 p-6">
+        <div className="flex flex-col gap-1">
+          <h3 className="text-lg font-semibold">
+            {title}
+          </h3>
+          <p className="text-sm text-muted-foreground">
+            {description}
+          </p>
+        </div>
+        {children && (
+          <div className="flex flex-wrap items-center gap-2">
+            {showArrow ? (
+              React.Children.map(children, (child) => {
+                if (
+                  React.isValidElement(child) &&
+                  (child.type === Button || child.props.asChild)
+                ) {
+                  return React.cloneElement(child, {
+                    children: (
+                      <span className="flex items-center gap-1">
+                        {child.props.children}
+                        <ArrowRight className="h-4 w-4" />
+                      </span>
+                    ),
+                  } as any);
+                }
+                return child;
+              })
+            ) : (
+              children
+            )}
+          </div>
+        )}
+      </CardContent>
+    </Card>
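+  /* Usage sketch (assumption — href and copy are placeholders):
+     <CTACard title="Questions?" description="We are happy to help." showArrow>
+       <Button asChild>
+         <Link href="/support">Get support</Link>
+       </Button>
+     </CTACard>
+     With showArrow, Button (or asChild) children get an ArrowRight icon
+     appended via React.cloneElement. */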
+ ); +} \ No newline at end of file diff --git a/components/CalComScheduleDemo.tsx b/components/CalComScheduleDemo.tsx new file mode 100644 index 000000000..b175d3069 --- /dev/null +++ b/components/CalComScheduleDemo.tsx @@ -0,0 +1,32 @@ +import Cal, { getCalApi } from "@calcom/embed-react"; +import { useEffect } from "react"; + +export function ScheduleDemo({ + className, + region, +}: { + className?: string; + region: "us" | "eu"; +}) { + useEffect(() => { + (async function () { + const cal = await getCalApi(); + cal("ui", { + styles: { branding: { brandColor: "#000000" } }, + hideEventTypeDetails: false, + layout: "month_view", + }); + })(); + }, []); + + const calLink = region === "us" ? "team/langfuse/intro" : "team/langfuse/intro-eu"; + + return ( + + ); +} diff --git a/components/CookbookIndex.tsx b/components/CookbookIndex.tsx index 08f92013a..e4667ec08 100644 --- a/components/CookbookIndex.tsx +++ b/components/CookbookIndex.tsx @@ -1,9 +1,9 @@ import { getPagesUnderRoute } from "nextra/context"; import { type Page } from "nextra"; -import { Card, Cards } from "nextra-theme-docs"; +import { Cards } from "nextra/components"; import { FileCode } from "lucide-react"; -export const CookbookIndex = () => ( +export const CookbookIndex = ({ categories }: { categories?: string[] }) => ( <> {Object.entries( ( @@ -12,6 +12,7 @@ export const CookbookIndex = () => ( > ) .filter((page) => page.route !== "/cookbook") + .filter((page) => page.route !== "/guides/cookbook") .reduce((acc, page) => { const category = page.frontMatter?.category || "Other"; if (!acc[category]) acc[category] = []; @@ -20,26 +21,43 @@ export const CookbookIndex = () => ( }, {} as Record>) ) .sort(([categoryA], [categoryB]) => { + // if categories are provided, use the order of the provided categories + if (categories) { + const indexA = categories.indexOf(categoryA); + const indexB = categories.indexOf(categoryB); + if (indexA === -1) return 1; + if (indexB === -1) return -1; + return indexA - indexB; + } + + // if categories are not provided, use the default order, Other last if (categoryA === "Other") return 1; if (categoryB === "Other") return -1; return categoryA.localeCompare(categoryB); }) + .filter(([category]) => !categories || categories.includes(category)) .map(([category, pages]) => (
-

+

{category}

            {pages.map((page) => (
-              <Card
-                href={page.route}
-                title={page.frontMatter?.title || page.name}
-                icon={<FileCode />}
-                arrow
-              >
-                {""}
-              </Card>
+              <Cards.Card
+                href={page.route}
+                title={
+                  page.frontMatter?.title ||
+                  page.name
+                    .replace(/[-_]/g, " ")
+                    .split(" ")
+                    .map(
+                      (word) => word.charAt(0).toUpperCase() + word.slice(1)
+                    )
+                    .join(" ")
+                }
+                icon={<FileCode />}
+                arrow
+              >
+                {""}
+              </Cards.Card>
            ))}
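+            {/* Title fallback: a page file named "evaluate-rag-pipeline"
+                (a hypothetical name) renders as "Evaluate Rag Pipeline"
+                when frontMatter.title is not set. */}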
diff --git a/components/DocsContributors.tsx b/components/DocsContributors.tsx new file mode 100644 index 000000000..11258c455 --- /dev/null +++ b/components/DocsContributors.tsx @@ -0,0 +1,112 @@ +import { useRouter } from "next/router"; +import { useState, useEffect } from "react"; +import { allAuthors } from "./Authors"; +import contributorsData from "../data/generated/contributors.json"; +import Image from "next/image"; + +const getContributors = (path: string): string[] => { + // Try exact path first, then with/without /index suffix + const variants = [ + path, + path.endsWith("/index") ? path.slice(0, -6) : `${path}/index`, + ]; + + for (const variant of variants) { + const contributors = contributorsData[variant]; + if (contributors?.length > 0) return contributors; + } + + return []; +}; + +const processContributor = (username: string) => { + const author = Object.values(allAuthors).find( + (author) => author.github === username + ); + + if (author) { + // Internal contributor + return { + username, + name: author.name, + title: author.title || "Team Member", + image: author.image, + profileUrl: + "twitter" in author + ? `https://twitter.com/${author.twitter}` + : `https://github.com/${author.github}`, + }; + } + + // External contributor + return { + username, + name: username, + title: "Contributor", + image: `https://github.com/${username}.png?size=64`, + profileUrl: `https://github.com/${username}`, + }; +}; + +export const DocsContributors = () => { + const router = useRouter(); + const currentPath = router.asPath.split("#")[0].split("?")[0]; + const [showAll, setShowAll] = useState(false); + + // Reset showAll when the page changes + useEffect(() => { + setShowAll(false); + }, [currentPath]); + + const contributors = getContributors(currentPath); + if (contributors.length === 0) return null; + + const processedContributors = contributors.map(processContributor); + const displayedContributors = showAll + ? processedContributors + : processedContributors.slice(0, 3); + const remainingCount = Math.max(0, processedContributors.length - 3); + + return ( +
+    <div className="mt-8 border-t pt-4">
+      <div className="mb-3 text-sm font-semibold">
+        Contributors
+      </div>
+      <div className="flex flex-col gap-2">
+        {displayedContributors.map((contributor) => (
+          <a
+            key={contributor.username}
+            href={contributor.profileUrl}
+            target="_blank"
+            rel="noopener noreferrer"
+            className="flex items-center gap-2"
+          >
+            <Image
+              src={contributor.image}
+              alt={contributor.name}
+              width={32}
+              height={32}
+              className="rounded-full"
+            />
+            <div className="flex flex-col">
+              <span className="text-sm">
+                {contributor.name}
+              </span>
+              <span className="text-xs text-muted-foreground">
+                {contributor.title}
+              </span>
+            </div>
+          </a>
+        ))}
+        {remainingCount > 0 && !showAll && (
+          <button
+            type="button"
+            onClick={() => setShowAll(true)}
+            className="text-left text-xs text-muted-foreground hover:text-primary"
+          >
+            +{remainingCount} more
+          </button>
+        )}
+      </div>
+    </div>
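+    /* Assumed shape of data/generated/contributors.json, inferred from
+       getContributors above (route and usernames are placeholders):
+       { "/docs/get-started": ["marcklingen", "external-contributor"] } */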
+ ); +}; diff --git a/components/EnterpriseLogos.tsx b/components/EnterpriseLogos.tsx new file mode 100644 index 000000000..994622aaf --- /dev/null +++ b/components/EnterpriseLogos.tsx @@ -0,0 +1,9 @@ +import { EnterpriseLogoGrid } from "./shared/EnterpriseLogoGrid"; + +export const EnterpriseLogos = () => { + return ( +
+    <div className="w-full">
+      <EnterpriseLogoGrid />
+    </div>
+ ); +}; diff --git a/components/FeatureOverview.tsx b/components/FeatureOverview.tsx new file mode 100644 index 000000000..57a81719c --- /dev/null +++ b/components/FeatureOverview.tsx @@ -0,0 +1,95 @@ +import { + TextQuote, + GitPullRequestArrow, + ThumbsUp, + Database, +} from "lucide-react"; + +const features = [ + { + icon: TextQuote, + title: "Tracing", + items: [ + "Log traces", + "Lowest level transparency", + "Understand cost and latency", + ], + }, + { + icon: GitPullRequestArrow, + title: "Prompts", + items: [ + "Version control and deploy", + "Collaborate on prompts", + "Test prompts and models", + ], + }, + { + icon: ThumbsUp, + title: "Evals", + items: [ + "Measure output quality", + "Monitor production health", + "Test changes in development", + ], + }, +]; + +const platformFeature = { + icon: Database, + title: "Platform", + items: [ + "API-first architecture", + "Data exports to blob storage", + "Enterprise security and administration", + ], +}; + +export const FeatureOverview = () => { + return ( +
+    <div className="flex w-full flex-col gap-4">
+      {/* Top 3 cards */}
+      <div className="grid gap-4 md:grid-cols-3">
+        {features.map((feature) => (
+          <div
+            key={feature.title}
+            className="rounded-lg border bg-card p-4"
+          >
+            <div className="flex items-center gap-2">
+              <feature.icon className="h-5 w-5 text-muted-foreground" />
+              <span className="font-semibold">{feature.title}</span>
+            </div>
+            <ul className="mt-3 list-inside list-disc space-y-1 text-sm text-muted-foreground">
+              {feature.items.map((item, itemIndex) => (
+                <li key={itemIndex}>
+                  <span>{item}</span>
+                </li>
+              ))}
+            </ul>
+          </div>
+        ))}
+      </div>
+
+      {/* Platform card - full width */}
+      <div className="rounded-lg border bg-card p-4">
+        <div className="flex items-center gap-2">
+          <platformFeature.icon className="h-5 w-5 text-muted-foreground" />
+          <span className="font-semibold">{platformFeature.title}</span>
+        </div>
+        <ul className="mt-3 list-inside list-disc space-y-1 text-sm text-muted-foreground">
+          {platformFeature.items.map((item, itemIndex) => (
+            <li key={itemIndex}>
+              <span>{item}</span>
+            </li>
+          ))}
+        </ul>
+      </div>
+    </div>
+ ); +}; diff --git a/components/FooterMenu.tsx b/components/FooterMenu.tsx index a5acae2bc..e5634ca83 100644 --- a/components/FooterMenu.tsx +++ b/components/FooterMenu.tsx @@ -1,8 +1,12 @@ import Link from "next/link"; +import InkeepChatButton from "./inkeep/InkeepChatButton"; const menuItems: { heading: string; - items: { name: string; href: string; notificationCount?: number }[]; + items: ( + | { name: string; href: string; notificationCount?: number } + | "separator" + )[]; }[] = [ { heading: "Platform", @@ -20,7 +24,7 @@ const menuItems: { href: "/docs/scores/overview", }, { - name: "Manual Annotation", + name: "Human Annotation", href: "/docs/scores/annotation", }, { @@ -42,7 +46,7 @@ const menuItems: { items: [ { name: "Python SDK", - href: "/docs/sdk/python", + href: "/docs/sdk/python/sdk-v3", }, { name: "JS/TS SDK", @@ -50,47 +54,43 @@ const menuItems: { }, { name: "OpenAI SDK", - href: "/docs/integrations/openai/get-started", + href: "/integrations/model-providers/openai-py", }, { name: "Langchain", - href: "/docs/integrations/langchain/tracing", + href: "/integrations/frameworks/langchain", }, { name: "Llama-Index", - href: "/docs/integrations/llama-index/get-started", + href: "/integrations/frameworks/llamaindex", }, { name: "Litellm", - href: "/docs/integrations/litellm", + href: "/integrations/gateways/litellm", }, { name: "Dify", - href: "/docs/integrations/dify", + href: "/integrations/no-code/dify", }, { name: "Flowise", - href: "/docs/integrations/flowise", + href: "/integrations/no-code/flowise", }, { name: "Langflow", - href: "/docs/integrations/langflow", + href: "/integrations/no-code/langflow", }, { name: "Vercel AI SDK", - href: "/docs/sdk/typescript/example-vercel-ai", + href: "/integrations/frameworks/vercel-ai-sdk", }, { name: "Instructor", - href: "/docs/integrations/instructor", - }, - { - name: "Mirascope", - href: "/docs/integrations/mirascope", + href: "/integrations/frameworks/instructor", }, { name: "API", - href: "https://api.reference.langfuse.com/", + href: "/docs/api", }, ], }, @@ -103,8 +103,8 @@ const menuItems: { href: "/demo", }, { - name: "Video demo (3 min)", - href: "/video", + name: "Video demo (10 min)", + href: "/watch-demo", }, { name: "Changelog", @@ -124,37 +124,65 @@ const menuItems: { }, { name: "Self-hosting", - href: "/docs/deployment/self-host", + href: "/self-hosting", }, { name: "Open Source", - href: "/docs/open-source", + href: "/open-source", }, { name: "Why Langfuse?", href: "/why" }, + { + name: "AI Engineering Library", + href: "/library", + }, { name: "Status", href: "https://status.langfuse.com", }, + { + name: "🇯🇵 Japanese", + href: "/jp", + }, + { + name: "🇰🇷 Korean", + href: "/kr", + }, + { + name: "🇨🇳 Chinese", + href: "/cn", + }, ], }, { heading: "About", items: [ { name: "Blog", href: "/blog" }, - { name: "Careers", href: "/careers", notificationCount: 2 }, + { name: "Careers", href: "/careers" }, { name: "About us", href: "/about", }, + { + name: "Customers", + href: "/customers", + }, { name: "Support", href: "/support" }, { - name: "Schedule Demo", - href: "/schedule-demo", + name: "Talk to us", + href: "/talk-to-us", }, { name: "OSS Friends", href: "/oss-friends", }, + { + name: "Twitter", + href: "https://x.com/langfuse", + }, + { + name: "LinkedIn", + href: "https://www.linkedin.com/company/langfuse/", + }, ], }, @@ -171,6 +199,23 @@ const menuItems: { name: "Privacy", href: "/privacy", }, + "separator", + { + name: "SOC 2 Type II", + href: "/security/soc2", + }, + { + name: "ISO 27001", + href: 
"/security/iso27001", + }, + { + name: "GDPR", + href: "/security/gdpr", + }, + { + name: "HIPAA", + href: "/security/hipaa", + }, ], }, ]; @@ -185,21 +230,30 @@ const FooterMenu = () => { {menu.heading}

-          {menu.items.map((item) => (
-            <li key={item.name}>
-              <Link
-                href={item.href}
-                className="text-sm text-muted-foreground hover:text-primary"
-              >
-                {item.name}
-              </Link>
-              {item.notificationCount > 0 && (
-                <span className="ml-2 rounded-full bg-primary px-1.5 text-xs text-primary-foreground">
-                  {item.notificationCount}
-                </span>
-              )}
-            </li>
-          ))}
+          {menu.items.map((item, index) => {
+            if (item === "separator") {
+              return (
+                <li key={`separator-${index}`} className="py-1">
+                  <hr className="border-border" />
+                </li>
+              );
+            }
+            return (
+              <li key={item.name}>
+                <Link
+                  href={item.href}
+                  className="text-sm text-muted-foreground hover:text-primary"
+                >
+                  {item.name}
+                </Link>
+                {item.notificationCount > 0 && (
+                  <span className="ml-2 rounded-full bg-primary px-1.5 text-xs text-primary-foreground">
+                    {item.notificationCount}
+                  </span>
+                )}
+              </li>
+            );
+          })}
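+            {/* "separator" entries render as a horizontal rule; used above to
+                set off the compliance links (SOC 2, ISO 27001, GDPR, HIPAA)
+                from the other legal links. */}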
))} @@ -209,6 +263,7 @@ const FooterMenu = () => { © 2022-{new Date().getFullYear()} Langfuse GmbH / Finto Technologies Inc. + ); }; diff --git a/components/Frame.tsx b/components/Frame.tsx index cfeda42e3..7c9e00b4b 100644 --- a/components/Frame.tsx +++ b/components/Frame.tsx @@ -1,4 +1,49 @@ import { cn } from "@/lib/utils"; +import { useEffect, useRef, useState } from "react"; + +// Image Zoom Modal Component +const ImageZoomModal = ({ src, alt, onClose }: { src: string; alt: string; onClose: () => void }) => { + useEffect(() => { + const handleEscape = (e: KeyboardEvent) => { + if (e.key === 'Escape') { + onClose(); + } + }; + + document.addEventListener('keydown', handleEscape); + document.body.style.overflow = 'hidden'; // Prevent background scrolling + + return () => { + document.removeEventListener('keydown', handleEscape); + document.body.style.overflow = 'unset'; + }; + }, [onClose]); + + return ( +
+    <div
+      className="fixed inset-0 z-50 flex items-center justify-center bg-background/90 p-4"
+      onClick={onClose}
+    >
+      <img
+        src={src}
+        alt={alt}
+        className="max-h-full max-w-full object-contain"
+        onClick={(e) => e.stopPropagation()}
+      />
+      <button
+        type="button"
+        aria-label="Close"
+        className="absolute right-4 top-4 text-2xl"
+        onClick={onClose}
+      >
+        ✕
+      </button>
+    </div>
+ ); +}; export const Frame = ({ children, @@ -12,24 +57,90 @@ export const Frame = ({ border?: boolean; fullWidth?: boolean; transparent?: boolean; -}) => ( -
-
*]:mt-0", - fullWidth && "max-w-full", - transparent && "bg-transparent", - border && "[&>*]:-mb-1" +}) => { + const frameRef = useRef(null); + const [zoomedImage, setZoomedImage] = useState<{ src: string; alt: string } | null>(null); + + useEffect(() => { + const frame = frameRef.current; + if (!frame) return; + + const handleImageClick = (e: Event) => { + const target = e.target as HTMLImageElement; + if (target.tagName === 'IMG') { + // Only handle clicks on desktop (screens wider than 500px) + if (window.innerWidth <= 500) { + return; + } + + e.preventDefault(); + e.stopPropagation(); + const src = target.src; + const alt = target.alt || 'Image'; + if (src) { + setZoomedImage({ src, alt }); + } + } + }; + + const updateImageCursors = () => { + const images = frame.querySelectorAll('img'); + images.forEach(img => { + if (window.innerWidth > 500) { + img.style.cursor = 'pointer'; + img.style.transition = 'opacity 0.2s ease'; + } else { + img.style.cursor = 'default'; + img.style.transition = 'none'; + } + }); + }; + + // Add click event listener to the frame + frame.addEventListener('click', handleImageClick); + + // Initial cursor setup + updateImageCursors(); + + // Add resize listener to update cursors when screen size changes + window.addEventListener('resize', updateImageCursors); + + return () => { + frame.removeEventListener('click', handleImageClick); + window.removeEventListener('resize', updateImageCursors); + }; + }, []); + + return ( + <> +
+
*]:mt-0 [&>*]:mb-0 [&>img]:block [&>img]:w-full [&>img]:h-auto [&>img]:leading-none [&>img]:align-top",
+          fullWidth && "max-w-full",
+          transparent && "bg-transparent",
+          border && "[&>*]:-mb-1"
+        )}
+      >
+        {children}
+      </div>
+    </div>
+
+      {zoomedImage && (
+        <ImageZoomModal
+          src={zoomedImage.src}
+          alt={zoomedImage.alt}
+          onClose={() => setZoomedImage(null)}
+        />
+      )}
-    >
-      {children}
-    </div>
-  </div>
-); + + ); +}; diff --git a/components/GitHubBadge.tsx b/components/GitHubBadge.tsx index bd60bfd47..6ec3dca9d 100644 --- a/components/GitHubBadge.tsx +++ b/components/GitHubBadge.tsx @@ -1,4 +1,4 @@ -import { useEffect, useState } from "react"; +import { getGitHubStars } from "@/lib/github-stars"; import IconGithub from "./icons/github"; export const GithubMenuBadge = () => ( @@ -19,23 +19,14 @@ export const GithubMenuBadge = () => ( ); export const StarCount = () => { - const [stars, setStars] = useState(null); + const stars = getGitHubStars(); - useEffect(() => { - if (!stars) - fetch("/api/stargazer-count") - .then((data) => - data.json().then((json) => setStars(json.stargazers_count)) - ) - .catch((err) => console.error("Error while loading GitHub stars", err)); - }, []); - - return stars ? ( + return ( - {(stars as number).toLocaleString("en-US", { + {stars.toLocaleString("en-US", { compactDisplay: "short", notation: "compact", })} - ) : null; + ); }; diff --git a/components/Header.tsx b/components/Header.tsx index ae97daffd..489a02f50 100644 --- a/components/Header.tsx +++ b/components/Header.tsx @@ -13,7 +13,7 @@ export const Header = ({ }: { title?: React.ReactNode; description?: React.ReactNode; - buttons?: { href: string; text: string }[]; + buttons?: { href: string; text: string; target?: string }[]; h?: "h1" | "h2" | "h3" | "h4" | "h5" | "h6"; className?: string; }) => { @@ -26,7 +26,7 @@ export const Header = ({ )} > {title && ( - + {title} )} @@ -39,9 +39,15 @@ export const Header = ({
        {buttons.map((button) => (
-          <Button key={button.href} variant="secondary" asChild>
-            <Link href={button.href}>{button.text}</Link>
-          </Button>
+          <Button key={button.href} variant="secondary" asChild>
+            <Link href={button.href} target={button.target}>
+              {button.text}
+            </Link>
+          </Button>
        ))}
diff --git a/components/LogoContextMenu.tsx b/components/LogoContextMenu.tsx index fbab097ab..f826272d8 100644 --- a/components/LogoContextMenu.tsx +++ b/components/LogoContextMenu.tsx @@ -1,3 +1,4 @@ +import { DropdownMenuPortal } from "@radix-ui/react-dropdown-menu"; import { DropdownMenuContent, DropdownMenuTrigger, @@ -11,39 +12,75 @@ const LogoContextMenu: React.FC<{ open: boolean; setOpen: (open: boolean) => void; }> = ({ open, setOpen }) => { + const handleAction = ( + e: React.MouseEvent, + url: string, + isDownload: boolean + ) => { + e.preventDefault(); + + if (isDownload) { + const link = document.createElement("a"); + link.href = url; + link.download = url.split("/").pop() || ""; // Get filename from URL + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + } else { + window.open(url, "_blank", "noopener,noreferrer"); + } + }; + return ( - - { - e.preventDefault(); - window.open("/", "_blank"); - }} - > - - Open in new tab - - - { - e.preventDefault(); - window.open("/langfuse_logo.png", "_blank"); - }} - > - - Logo (png) - - { - e.preventDefault(); - window.open("/langfuse_logo.svg", "_blank"); - }} - > - - Logo (svg) - - + + + handleAction(e, "/", false)}> + + Open in new tab + + + handleAction(e, "/langfuse_logo.png", true)} + > + + Logo (png) + + handleAction(e, "/langfuse_logo.svg", true)} + > + + Logo (svg) + + + handleAction(e, "/langfuse_logo_white.png", true)} + > + + Logo white (png) + + handleAction(e, "/langfuse_logo_white.svg", true)} + > + + Logo white (svg) + + + handleAction(e, "/langfuse_icon.png", true)} + > + + Icon (png) + + handleAction(e, "/langfuse_icon.svg", true)} + > + + Icon (svg) + + + ); }; diff --git a/components/MainContentWrapper.tsx b/components/MainContentWrapper.tsx index 521adaf06..f87c721fd 100644 --- a/components/MainContentWrapper.tsx +++ b/components/MainContentWrapper.tsx @@ -1,43 +1,193 @@ import { useRouter } from "next/router"; -import { useState } from "react"; +import { useState, useEffect, useRef } from "react"; +import { useConfig } from "nextra-theme-docs"; import { Button } from "./ui/button"; import { - Calendar, - Mail, - MessageSquare, + Copy as CopyIcon, + Check as CheckIcon, + LifeBuoy, ThumbsDown, ThumbsUp, } from "lucide-react"; import { Textarea } from "./ui/textarea"; -import { openChat } from "./supportChat"; import { Background } from "./Background"; import { NotebookBanner } from "./NotebookBanner"; -import { ProductUpdateSignup } from "./productUpdateSignup"; import { COOKBOOK_ROUTE_MAPPING } from "@/lib/cookbook_route_mapping"; -import IconGithub from "./icons/github"; +import { cn } from "@/lib/utils"; +import Image from "next/image"; +import Link from "next/link"; +import { Dialog, DialogContent } from "./ui/dialog"; +import { CustomerStoryCTA } from "./customers/CustomerStoryCTA"; -const pathsWithoutFooterWidgets = ["/imprint", "/blog"]; +const pathsWithoutFooterWidgets = [ + "/imprint", + "/blog", + "/customers", + "/careers", +]; +const pathsWithCopyAsMarkdownButton = [ + "/docs", + "/self-hosting", + "/guide", + "/faq", + "/integrations", +]; +const isCustomerStory = (pathname: string) => + pathname.startsWith("/customers/"); + +const CopyMarkdownButton = () => { + const router = useRouter(); + const [copyState, setCopyState] = useState< + "idle" | "loading" | "copied" | "error" + >("idle"); + const [errorMessage, setErrorMessage] = useState("Error Copying"); + const timeoutIdRef = useRef(null); + + // Cleanup timeout on unmount + useEffect(() => { + return () => { + if 
(timeoutIdRef.current) { + clearTimeout(timeoutIdRef.current); + } + }; + }, []); + + const handleCopy = async () => { + // Clear any existing timeout before starting a new operation + if (timeoutIdRef.current) { + clearTimeout(timeoutIdRef.current); + timeoutIdRef.current = null; + } + + setCopyState("loading"); + setErrorMessage(""); + + let basePath = router.pathname; + if (basePath.startsWith("/")) basePath = basePath.substring(1); + if (basePath.endsWith("/")) basePath = basePath.slice(0, -1); + if (!basePath) basePath = "index"; // Handle root index page + + const mdUrl = `/${basePath}.md`; + + try { + const response = await fetch(mdUrl, { + headers: { Accept: "text/markdown" }, + }); + if (!response.ok) { + if (response.status === 404) { + setErrorMessage("Source Files Not Found"); + } else { + setErrorMessage("Fetch Error"); + } + setCopyState("error"); + timeoutIdRef.current = setTimeout(() => { + setCopyState("idle"); + timeoutIdRef.current = null; + }, 3000); + return; + } + const markdown = await response.text(); + + await navigator.clipboard.writeText(markdown); + setCopyState("copied"); + timeoutIdRef.current = setTimeout(() => { + setCopyState("idle"); + timeoutIdRef.current = null; + }, 2000); + } catch (error: any) { + console.error("Failed to copy markdown:", error); + if ( + error?.name === "NotAllowedError" || + error?.name === "SecurityError" + ) { + setErrorMessage("Clipboard Permission Denied"); + } else { + setErrorMessage("Copy Error"); + } + setCopyState("error"); + timeoutIdRef.current = setTimeout(() => { + setCopyState("idle"); + timeoutIdRef.current = null; + }, 3000); + } + }; + + let buttonText = "Copy as Markdown"; + let ButtonIcon = CopyIcon; + if (copyState === "loading") { + buttonText = "Copying..."; + } else if (copyState === "copied") { + buttonText = "Copied!"; + ButtonIcon = CheckIcon; + } else if (copyState === "error") { + buttonText = errorMessage; + } + + const isDisabled = copyState === "loading" || copyState === "copied"; + + return ( + + ); +}; export const MainContentWrapper = (props) => { const router = useRouter(); + const { frontMatter } = useConfig(); const cookbook = COOKBOOK_ROUTE_MAPPING.find( (cookbook) => cookbook.path === router.pathname ); + const versionLabel = frontMatter.label; + + const shouldShowCopyButton = pathsWithCopyAsMarkdownButton.some((prefix) => + router.pathname.startsWith(prefix) + ); + return ( <> + {(versionLabel || shouldShowCopyButton) && ( +
+        <div className="mb-4 flex items-center justify-between gap-2">
+          {versionLabel && (
+            <span className="rounded-md border px-2 py-0.5 text-xs text-muted-foreground">
+              {versionLabel}
+            </span>
+          )}
+          {shouldShowCopyButton && <CopyMarkdownButton />}
+        </div>
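+        /* The copy button fetches the current route as /<route>.md with an
+           Accept: text/markdown header and writes the response to the
+           clipboard — this assumes the site serves a markdown rendition of
+           each page under the prefixes listed above. */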
+        )}
      {cookbook ? (
        <NotebookBanner src={cookbook.ipynbPath} />
      ) : null}
      {props.children}
-      {!pathsWithoutFooterWidgets.includes(router.pathname) ? (
+      {isCustomerStory(router.pathname) && <CustomerStoryCTA />}
+      {!pathsWithoutFooterWidgets.some(
+        (path) =>
+          router.pathname === path || router.pathname.startsWith(path + "/")
+      ) ? (
-
) : null} @@ -45,46 +195,15 @@ export const MainContentWrapper = (props) => { ); }; -export const DocsSubscribeToUpdates = () => { - return ( -
-    <div>
-      <h3>
-        Subscribe to updates
-      </h3>
-      <div>
-        <ProductUpdateSignup />
-      </div>
-    </div>
-  );
-};
-

- Questions? We're here to help -

-
- - - - -
+ ); }; @@ -96,9 +215,18 @@ export const DocsFeedback = () => { >(null); const [feedbackComment, setFeedbackComment] = useState(""); const [submitting, setSubmitting] = useState(false); + const [commentSubmitting, setCommentSubmitting] = useState(false); + + // Controls the prominent follow-up dialog shown after any rating + const [dialogOpen, setDialogOpen] = useState(false); const handleFeedbackSelection = (newSelection: "positive" | "negative") => { + // For both positive and negative feedback, open dialog immediately + setSelected(newSelection); + setDialogOpen(true); + setFeedbackComment(""); setSubmitting(true); + fetch("/api/feedback", { method: "POST", body: JSON.stringify({ @@ -107,15 +235,18 @@ export const DocsFeedback = () => { }), }) .then(() => { - setFeedbackComment(""); - setSelected(newSelection); setSubmitting(false); }) - .catch(() => setSelected(null)); + .catch(() => { + setSelected(null); + setDialogOpen(false); + setSubmitting(false); + }); }; const handleFeedbackCommentSubmit = () => { - setSubmitting(true); + setCommentSubmitting(true); + fetch("/api/feedback", { method: "POST", body: JSON.stringify({ @@ -127,76 +258,154 @@ export const DocsFeedback = () => { .then(() => { setSelected("submitted"); setFeedbackComment(""); - setSubmitting(false); + setCommentSubmitting(false); }) - .catch(() => setSelected(null)); + .catch(() => { + setSelected(null); + setDialogOpen(false); + setCommentSubmitting(false); + }); }; return ( -
-    <div>
-      <h3>
-        {selected === null
-          ? "Was this page useful?"
-          : selected === "positive"
-            ? "What was most useful?"
-            : selected === "negative"
-              ? "What can we improve?"
-              : "Thanks for your feedback!"}
-      </h3>
-      {selected === null ? (
-        <div>
-          <Button onClick={() => handleFeedbackSelection("positive")}>
-            <ThumbsUp className="h-4 w-4" />
-          </Button>
-          <Button onClick={() => handleFeedbackSelection("negative")}>
-            <ThumbsDown className="h-4 w-4" />
-          </Button>
-        </div>
-      ) : selected === "positive" || selected === "negative" ? (
-        <div>