diff --git a/src/backend/base/langflow/components/embeddings/LMStudioEmbeddings.py b/src/backend/base/langflow/components/embeddings/LMStudioEmbeddings.py
new file mode 100644
index 00000000000..4b5d170e4f0
--- /dev/null
+++ b/src/backend/base/langflow/components/embeddings/LMStudioEmbeddings.py
@@ -0,0 +1,86 @@
+from typing import Any
+from urllib.parse import urljoin
+
+import httpx
+
+from langflow.base.embeddings.model import LCEmbeddingsModel
+from langflow.field_typing import Embeddings
+from langflow.inputs.inputs import DropdownInput, SecretStrInput
+from langflow.io import FloatInput, MessageTextInput
+
+
+class LMStudioEmbeddingsComponent(LCEmbeddingsModel):
+    display_name: str = "LM Studio Embeddings"
+    description: str = "Generate embeddings using LM Studio."
+    icon = "LMStudio"
+
+    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
+        if field_name == "model":
+            base_url_dict = build_config.get("base_url", {})
+            base_url_load_from_db = base_url_dict.get("load_from_db", False)
+            base_url_value = base_url_dict.get("value")
+            if base_url_load_from_db:
+                base_url_value = self.variables(base_url_value)
+            elif not base_url_value:
+                base_url_value = "http://localhost:1234/v1"
+            build_config["model"]["options"] = self.get_model(base_url_value)
+
+        return build_config
+
+    def get_model(self, base_url_value: str) -> list[str]:
+        try:
+            url = urljoin(base_url_value, "/v1/models")
+            with httpx.Client() as client:
+                response = client.get(url)
+                response.raise_for_status()
+                data = response.json()
+
+                return [model["id"] for model in data.get("data", [])]
+        except Exception as e:
+            msg = "Could not retrieve models. Please make sure the LM Studio server is running."
+            raise ValueError(msg) from e
+
+    inputs = [
+        DropdownInput(
+            name="model",
+            display_name="Model",
+            advanced=False,
+            refresh_button=True,
+        ),
+        MessageTextInput(
+            name="base_url",
+            display_name="LM Studio Base URL",
+            refresh_button=True,
+            value="http://localhost:1234/v1",
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="LM Studio API Key",
+            advanced=True,
+            value="LMSTUDIO_API_KEY",
+        ),
+        FloatInput(
+            name="temperature",
+            display_name="Model Temperature",
+            value=0.1,
+            advanced=True,
+        ),
+    ]
+
+    def build_embeddings(self) -> Embeddings:
+        try:
+            from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
+        except ImportError as e:
+            msg = "Please install langchain-nvidia-ai-endpoints to use LM Studio Embeddings."
+            raise ImportError(msg) from e
+        try:
+            output = NVIDIAEmbeddings(
+                model=self.model,
+                base_url=self.base_url,
+                temperature=self.temperature,
+                nvidia_api_key=self.api_key,
+            )  # type: ignore
+        except Exception as e:
+            msg = f"Could not connect to LM Studio API. Error: {e}"
+            raise ValueError(msg) from e
+        return output
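
Note (not part of the diff): both components discover models through LM Studio's OpenAI-compatible `GET /v1/models` endpoint. Because `"/v1/models"` starts with a slash, `urljoin` resolves it against the host root, silently discarding any base path other than `/v1`. A minimal standalone sketch of the same call, assuming an LM Studio server on the default port:

```python
from urllib.parse import urljoin

import httpx

base_url = "http://localhost:1234/v1"  # LM Studio's default local endpoint
url = urljoin(base_url, "/v1/models")  # leading slash -> resolved against the host root

with httpx.Client() as client:
    response = client.get(url)
    response.raise_for_status()
    data = response.json()

# The OpenAI-style payload looks like {"object": "list", "data": [{"id": "<model>", ...}]}.
print([model["id"] for model in data.get("data", [])])
```
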
diff --git a/src/backend/base/langflow/components/models/LMStudioModel.py b/src/backend/base/langflow/components/models/LMStudioModel.py
new file mode 100644
index 00000000000..9d028296f4a
--- /dev/null
+++ b/src/backend/base/langflow/components/models/LMStudioModel.py
@@ -0,0 +1,169 @@
+import operator
+from functools import reduce
+from typing import Any
+from urllib.parse import urljoin
+
+import httpx
+from langchain_openai import ChatOpenAI
+from pydantic.v1 import SecretStr
+
+from langflow.base.models.model import LCModelComponent
+from langflow.field_typing import LanguageModel
+from langflow.field_typing.range_spec import RangeSpec
+from langflow.inputs import (
+    BoolInput,
+    DictInput,
+    DropdownInput,
+    FloatInput,
+    IntInput,
+    SecretStrInput,
+    StrInput,
+)
+from langflow.inputs.inputs import HandleInput
+
+
+class LMStudioModelComponent(LCModelComponent):
+    display_name = "LM Studio"
+    description = "Generate text using LM Studio Local LLMs."
+    icon = "LMStudio"
+    name = "LMStudioModel"
+
+    def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):
+        if field_name == "model_name":
+            base_url_dict = build_config.get("base_url", {})
+            base_url_load_from_db = base_url_dict.get("load_from_db", False)
+            base_url_value = base_url_dict.get("value")
+            if base_url_load_from_db:
+                base_url_value = self.variables(base_url_value)
+            elif not base_url_value:
+                base_url_value = "http://localhost:1234/v1"
+            build_config["model_name"]["options"] = self.get_model(base_url_value)
+
+        return build_config
+
+    def get_model(self, base_url_value: str) -> list[str]:
+        try:
+            url = urljoin(base_url_value, "/v1/models")
+            with httpx.Client() as client:
+                response = client.get(url)
+                response.raise_for_status()
+                data = response.json()
+
+                return [model["id"] for model in data.get("data", [])]
+        except Exception as e:
+            msg = "Could not retrieve models. Please make sure the LM Studio server is running."
+            raise ValueError(msg) from e
+
+    inputs = [
+        *LCModelComponent._base_inputs,
+        IntInput(
+            name="max_tokens",
+            display_name="Max Tokens",
+            advanced=True,
+            info="The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
+            range_spec=RangeSpec(min=0, max=128000),
+        ),
+        DictInput(name="model_kwargs", display_name="Model Kwargs", advanced=True),
+        BoolInput(
+            name="json_mode",
+            display_name="JSON Mode",
+            advanced=True,
+            info="If True, the output is forced to be valid JSON even when no schema is provided.",
+        ),
+        DictInput(
+            name="output_schema",
+            is_list=True,
+            display_name="Schema",
+            advanced=True,
+            info="The schema for the output of the model. "
+            "You must include the word JSON in the prompt. "
+            "If left blank, JSON mode will be disabled.",
+        ),
+        DropdownInput(
+            name="model_name",
+            display_name="Model Name",
+            advanced=False,
+            refresh_button=True,
+        ),
+        StrInput(
+            name="base_url",
+            display_name="Base URL",
+            advanced=False,
+            info="Endpoint of the LM Studio API. Defaults to 'http://localhost:1234/v1' if not specified.",
+            value="http://localhost:1234/v1",
+        ),
+        SecretStrInput(
+            name="api_key",
+            display_name="LM Studio API Key",
+            info="The LM Studio API Key to use for LM Studio.",
+            advanced=True,
+            value="LMSTUDIO_API_KEY",
+        ),
+        FloatInput(name="temperature", display_name="Temperature", value=0.1),
+        IntInput(
+            name="seed",
+            display_name="Seed",
+            info="The seed controls the reproducibility of the job.",
+            advanced=True,
+            value=1,
+        ),
+        HandleInput(
+            name="output_parser",
+            display_name="Output Parser",
+            info="The parser to use to parse the output of the model.",
+            advanced=True,
+            input_types=["OutputParser"],
+        ),
+    ]
+
+    def build_model(self) -> LanguageModel:  # type: ignore[type-var]
+        # self.output_schema is a list of single-key dicts;
+        # merge them into one dict with a dict-union reduce (operator.ior).
+        output_schema_dict: dict[str, str] = reduce(operator.ior, self.output_schema or {}, {})
+        lmstudio_api_key = self.api_key
+        temperature = self.temperature
+        model_name: str = self.model_name
+        max_tokens = self.max_tokens
+        model_kwargs = self.model_kwargs or {}
+        base_url = self.base_url or "http://localhost:1234/v1"
+        json_mode = bool(output_schema_dict) or self.json_mode
+        seed = self.seed
+
+        api_key = SecretStr(lmstudio_api_key) if lmstudio_api_key else None
+        output = ChatOpenAI(
+            max_tokens=max_tokens or None,
+            model_kwargs=model_kwargs,
+            model=model_name,
+            base_url=base_url,
+            api_key=api_key,
+            temperature=temperature if temperature is not None else 0.1,
+            seed=seed,
+        )
+        if json_mode:
+            if output_schema_dict:
+                output = output.with_structured_output(schema=output_schema_dict, method="json_mode")  # type: ignore
+            else:
+                output = output.bind(response_format={"type": "json_object"})  # type: ignore
+
+        return output  # type: ignore
+
+    def _get_exception_message(self, e: Exception) -> str | None:
+        """
+        Get a message from an LM Studio exception.
+
+        Args:
+            e (Exception): The exception to get the message from.
+
+        Returns:
+            str | None: The message from the exception, or None if unavailable.
+        """
+
+        try:
+            from openai import BadRequestError
+        except ImportError:
+            return None
+        if isinstance(e, BadRequestError):
+            message = e.body.get("message")  # type: ignore
+            if message:
+                return message
+        return None
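
Note (not part of the diff): a minimal sketch of what `build_model` wires up when JSON mode is enabled without a schema, assuming a local LM Studio server on the default port; the model id is illustrative:

```python
from langchain_openai import ChatOpenAI
from pydantic.v1 import SecretStr

llm = ChatOpenAI(
    model="llama-3.2-1b-instruct",  # hypothetical model id loaded in LM Studio
    base_url="http://localhost:1234/v1",  # LM Studio's OpenAI-compatible endpoint
    api_key=SecretStr("LMSTUDIO_API_KEY"),  # the local server generally ignores the key
    temperature=0.1,
    seed=1,
)

# JSON mode without a schema binds the OpenAI response_format flag,
# exactly as the `else` branch in build_model does.
json_llm = llm.bind(response_format={"type": "json_object"})
print(json_llm.invoke('Return {"ok": true} as a JSON object.').content)
```
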
diff --git a/src/frontend/src/icons/LMStudio/LMStudioIcon.jsx b/src/frontend/src/icons/LMStudio/LMStudioIcon.jsx
new file mode 100644
index 00000000000..4be26ed03e9
--- /dev/null
+++ b/src/frontend/src/icons/LMStudio/LMStudioIcon.jsx
@@ -0,0 +1,137 @@
+const SvgLMStudio = (props) => (
+  <svg xmlns="http://www.w3.org/2000/svg" {...props}>{/* icon path data not captured in this diff */}</svg>
+);
+export default SvgLMStudio;
diff --git a/src/frontend/src/icons/LMStudio/index.tsx b/src/frontend/src/icons/LMStudio/index.tsx
new file mode 100644
index 00000000000..07475bfdc78
--- /dev/null
+++ b/src/frontend/src/icons/LMStudio/index.tsx
@@ -0,0 +1,9 @@
+import React, { forwardRef } from "react";
+import SvgLMStudio from "./LMStudioIcon";
+
+export const LMStudioIcon = forwardRef<
+  SVGSVGElement,
+  React.PropsWithChildren<{}>
+>((props, ref) => {
+  return <SvgLMStudio ref={ref} {...props} />;
+});
diff --git a/src/frontend/src/icons/LMStudio/lmstudio-icon.svg b/src/frontend/src/icons/LMStudio/lmstudio-icon.svg
new file mode 100644
index 00000000000..d7de9f3e33c
--- /dev/null
+++ b/src/frontend/src/icons/LMStudio/lmstudio-icon.svg
@@ -0,0 +1,13 @@
+<!-- SVG icon markup elided -->
diff --git a/src/frontend/src/utils/styleUtils.ts b/src/frontend/src/utils/styleUtils.ts
index 05a7125ec49..2c8b2e3c3a7 100644
--- a/src/frontend/src/utils/styleUtils.ts
+++ b/src/frontend/src/utils/styleUtils.ts
@@ -206,6 +206,7 @@ import { GroqIcon } from "../icons/Groq";
 import { HCDIcon } from "../icons/HCD";
 import { HuggingFaceIcon } from "../icons/HuggingFace";
 import { IFixIcon } from "../icons/IFixIt";
+import { LMStudioIcon } from "../icons/LMStudio";
 import { LangChainIcon } from "../icons/LangChain";
 import { MaritalkIcon } from "../icons/Maritalk";
 import { MetaIcon } from "../icons/Meta";
@@ -357,6 +358,7 @@
 };
 export const nodeIconsLucide: iconsType = {
+  LMStudio: LMStudioIcon,
   X: X,
   Notify: Bell,
   ListFlows: Group,