此库支持访问各种 Google 模型,包括 Gemini 系列模型及其 Nano Banana 图像生成模型。您可以通过 Google AI API(有时也称为 Generative AI API 或 AI Studio API)或通过 Google Cloud Platform 的 Vertex AI 服务访问这些模型。本文将帮助您开始使用 ChatGoogle 聊天模型。有关所有 ChatGoogle 功能和配置的详细文档,请参阅 API 参考。
import { HumanMessage, SystemMessage } from "@langchain/core/messages";

// Build the conversation: a system instruction that sets the task,
// followed by the user sentence to translate.
const messages = [
  new SystemMessage(
    "You are a helpful assistant that translates English to French. Translate the user sentence."
  ),
  new HumanMessage("I love programming."),
];

const aiMsg = await llm.invoke(messages);
// Ask the model to return log probabilities alongside each generated token.
const llmWithLogprobs = new ChatGoogle({
  model: "gemini-2.5-flash",
  // Number of top candidates to return
  logprobs: 2,
});

const resWithLogprobs = await llmWithLogprobs.invoke("Hello");

// The per-token log probabilities are surfaced in the response metadata.
console.log(resWithLogprobs.response_metadata.logprobs_result);
import { ChatGoogle } from "@langchain/google";
import { z } from "zod";

// FIX: the constructor takes a params object ({ model: ... }), not a bare
// string — matches the other examples on this page.
const llm = new ChatGoogle({ model: "gemini-2.5-flash" });

// Zod schema describing the structured result we want back; the .describe()
// annotations are passed to the model as field documentation.
const schema = z.object({
  people: z.array(
    z.object({
      name: z.string().describe("The name of the person"),
      age: z.number().describe("The age of the person"),
    })
  ),
});

const structuredLlm = llm.withStructuredOutput(schema);
const res = await structuredLlm.invoke("John is 25 and Jane is 30.");
console.log(res);
import { ChatGoogle } from "@langchain/google";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// A stub weather tool; the zod schema tells the model what arguments to
// supply when it decides to call the tool.
const weatherTool = tool(
  (input) => {
    return "It is sunny and 75 degrees.";
  },
  {
    name: "get_weather",
    description: "Get the weather for a location",
    schema: z.object({
      location: z.string(),
    }),
  }
);

// FIX: the constructor takes a params object ({ model: ... }), not a bare
// string — matches the other examples on this page.
const llm = new ChatGoogle({ model: "gemini-2.5-flash" }).bindTools([
  weatherTool,
]);

const res = await llm.invoke("What is the weather in SF?");

// Tool invocations requested by the model appear on res.tool_calls.
console.log(res.tool_calls);
import { ChatGoogle } from "@langchain/google";

// FIX: the constructor takes a params object ({ model: ... }), not a bare
// string — matches the other examples on this page.
// Binding the built-in googleSearch tool lets the model ground its answer
// in current search results.
const llm = new ChatGoogle({ model: "gemini-2.5-flash" }).bindTools([
  {
    googleSearch: {},
  },
]);

const res = await llm.invoke("Who won the latest World Series?");
console.log(res.text);
import { ChatGoogle } from "@langchain/google";

// FIX: the constructor takes a params object ({ model: ... }), not a bare
// string — matches the other examples on this page.
const llm = new ChatGoogle({ model: "gemini-2.5-pro" });

// Pass the cache name to the model: a full resource name of a
// previously-created context cache.
const res = await llm.invoke("Summarize this document", {
  cachedContent: "projects/123/locations/us-central1/cachedContents/456",
});
import { ChatGoogle } from "@langchain/google";

const llm = new ChatGoogle({
  model: "gemini-3.1-pro-preview",
  reasoningEffort: "high",
});

const res = await llm.invoke("What is the square root of 144?");

// The reasoning steps are available in the contentBlocks; print each
// "reasoning" block's thought text as it appears.
for (const block of res.contentBlocks) {
  if (block.type === "reasoning") {
    console.log("Thought:", block.reasoning);
  }
}

console.log("Answer:", res.text);
import { ChatGoogle } from "@langchain/google";
import * as fs from "fs";

const llm = new ChatGoogle({
  model: "gemini-2.5-flash-image",
  responseModalities: ["IMAGE", "TEXT"],
});

const res = await llm.invoke(
  "I would like to see a drawing of a house with the sun shining overhead. Drawn in crayon."
);

// Generated images are returned in the contentBlocks of the message
for (const [index, block] of res.contentBlocks.entries()) {
  if (block.type === "file" && block.data) {
    const base64Data = block.data;

    // Determine the correct file extension from the MIME type
    const mimeType = (block.mimeType || "image/png").split(";")[0];
    const extension = mimeType.split("/")[1] || "png";
    const filename = `generated_image_${index}.${extension}`;

    // Save the image to a file
    fs.writeFileSync(filename, Buffer.from(base64Data, "base64"));
    // FIX: the log line contained a garbled placeholder ("$(unknown)");
    // interpolate the actual filename instead.
    console.log(`Saved image to ${filename}`);
  } else if (block.type === "text") {
    console.log(block.text);
  }
}