diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/MiniMax.node.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/MiniMax.node.ts
new file mode 100644
index 0000000000000..2b2d740f208a6
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/MiniMax.node.ts
@@ -0,0 +1,12 @@
+import type { IExecuteFunctions, INodeType } from 'n8n-workflow';
+
+import { router } from './actions/router';
+import { versionDescription } from './actions/versionDescription';
+
+export class MiniMax implements INodeType {
+ description = versionDescription;
+
+ async execute(this: IExecuteFunctions) {
+ return await router.call(this);
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/index.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/index.ts
new file mode 100644
index 0000000000000..a7b4d7e3593d4
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/index.ts
@@ -0,0 +1,29 @@
+import type { INodeProperties } from 'n8n-workflow';
+
+import * as textToSpeech from './tts.operation';
+
+export { textToSpeech };
+
+export const description: INodeProperties[] = [
+ {
+ displayName: 'Operation',
+ name: 'operation',
+ type: 'options',
+ noDataExpression: true,
+ displayOptions: {
+ show: {
+ resource: ['audio'],
+ },
+ },
+ options: [
+ {
+ name: 'Text to Speech',
+ value: 'textToSpeech',
+ action: 'Convert text to speech',
+ description: 'Generate speech audio from text input',
+ },
+ ],
+ default: 'textToSpeech',
+ },
+ ...textToSpeech.description,
+];
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/tts.operation.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/tts.operation.ts
new file mode 100644
index 0000000000000..bdcc7fd18fcc0
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/audio/tts.operation.ts
@@ -0,0 +1,281 @@
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeProperties,
+} from 'n8n-workflow';
+import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
+
+import type { T2AResponse } from '../../helpers/interfaces';
+import { apiRequest } from '../../transport';
+
+const properties: INodeProperties[] = [
+ {
+ displayName: 'Model',
+ name: 'modelId',
+ type: 'options',
+ options: [
+ {
+ name: 'Speech 02 HD',
+ value: 'speech-02-hd',
+ description: 'Superior rhythm and stability with outstanding quality',
+ },
+ {
+ name: 'Speech 02 Turbo',
+ value: 'speech-02-turbo',
+ description: 'Enhanced multilingual capabilities and performance',
+ },
+ {
+ name: 'Speech 2.6 HD',
+ value: 'speech-2.6-hd',
+ description: 'HD model with outstanding prosody and cloning similarity',
+ },
+ {
+ name: 'Speech 2.6 Turbo',
+ value: 'speech-2.6-turbo',
+ description: 'Turbo model with support for 40 languages',
+ },
+ {
+ name: 'Speech 2.8 HD',
+ value: 'speech-2.8-hd',
+ description: 'Latest HD model with ultra-realistic quality and sound tags',
+ },
+ {
+ name: 'Speech 2.8 Turbo',
+ value: 'speech-2.8-turbo',
+ description: 'Latest Turbo model with seamless speed and natural flow',
+ },
+ ],
+ default: 'speech-2.8-hd',
+ description: 'The speech synthesis model to use',
+ },
+ {
+ displayName: 'Text',
+ name: 'text',
+ type: 'string',
+ typeOptions: {
+ rows: 4,
+ },
+ default: '',
+ required: true,
+ description: 'The text to convert to speech (max 10,000 characters)',
+ placeholder: 'e.g. Hello, welcome to our service!',
+ },
+ {
+ displayName: 'Voice ID',
+ name: 'voiceId',
+ type: 'string',
+ default: 'English_Graceful_Lady',
+ required: true,
+ // eslint-disable-next-line n8n-nodes-base/node-param-description-miscased-id
+ description:
+ 'Voice ID to use for speech synthesis. Browse available voices in the MiniMax documentation.',
+ placeholder: 'e.g. English_Graceful_Lady',
+ },
+ {
+ displayName: 'Download Audio',
+ name: 'downloadAudio',
+ type: 'boolean',
+ default: true,
+ description:
+ 'Whether to download the generated audio as binary data. When disabled, only the audio URL is returned.',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Audio Format',
+ name: 'audioFormat',
+ type: 'options',
+ options: [
+ { name: 'MP3', value: 'mp3' },
+ { name: 'PCM', value: 'pcm' },
+ { name: 'FLAC', value: 'flac' },
+ { name: 'WAV', value: 'wav' },
+ ],
+ default: 'mp3',
+ description: 'Output audio format. WAV is only supported in non-streaming mode.',
+ },
+ {
+ displayName: 'Emotion',
+ name: 'emotion',
+ type: 'options',
+ options: [
+ { name: 'Angry', value: 'angry' },
+ { name: 'Calm', value: 'calm' },
+ { name: 'Disgusted', value: 'disgusted' },
+ { name: 'Fearful', value: 'fearful' },
+ { name: 'Happy', value: 'happy' },
+ { name: 'Sad', value: 'sad' },
+ { name: 'Surprised', value: 'surprised' },
+ ],
+ default: 'calm',
+ description:
+ 'Emotion for synthesized speech. By default the model auto-selects the most natural emotion.',
+ },
+ {
+ displayName: 'Language Boost',
+ name: 'languageBoost',
+ type: 'options',
+ options: [
+ { name: 'Arabic', value: 'Arabic' },
+ { name: 'Auto Detect', value: 'auto' },
+ { name: 'Chinese', value: 'Chinese' },
+ { name: 'English', value: 'English' },
+ { name: 'French', value: 'French' },
+ { name: 'German', value: 'German' },
+ { name: 'Indonesian', value: 'Indonesian' },
+ { name: 'Italian', value: 'Italian' },
+ { name: 'Japanese', value: 'Japanese' },
+ { name: 'Korean', value: 'Korean' },
+ { name: 'Portuguese', value: 'Portuguese' },
+ { name: 'Russian', value: 'Russian' },
+ { name: 'Spanish', value: 'Spanish' },
+ { name: 'Thai', value: 'Thai' },
+ { name: 'Turkish', value: 'Turkish' },
+ { name: 'Vietnamese', value: 'Vietnamese' },
+ ],
+ default: 'auto',
+ description: 'Enhance recognition for a specific language',
+ },
+ {
+ displayName: 'Pitch',
+ name: 'pitch',
+ type: 'number',
+ typeOptions: {
+ minValue: -12,
+ maxValue: 12,
+ },
+ default: 0,
+ description: 'Speech pitch adjustment (-12 to 12, 0 = original pitch)',
+ },
+ {
+ displayName: 'Speed',
+ name: 'speed',
+ type: 'number',
+ typeOptions: {
+ minValue: 0.5,
+ maxValue: 2,
+ numberPrecision: 1,
+ },
+ default: 1,
+ description: 'Speech speed (0.5-2, higher = faster)',
+ },
+ {
+ displayName: 'Volume',
+ name: 'volume',
+ type: 'number',
+ typeOptions: {
+ minValue: 0.1,
+ maxValue: 10,
+ numberPrecision: 1,
+ },
+ default: 1,
+ description: 'Speech volume (0.1-10, higher = louder)',
+ },
+ ],
+ },
+];
+
+const displayOptions = {
+ show: {
+ resource: ['audio'],
+ operation: ['textToSpeech'],
+ },
+};
+
+export const description = updateDisplayOptions(displayOptions, properties);
+
+export async function execute(
+ this: IExecuteFunctions,
+ itemIndex: number,
+): Promise<INodeExecutionData[]> {
+ const model = this.getNodeParameter('modelId', itemIndex) as string;
+ const text = this.getNodeParameter('text', itemIndex) as string;
+ const voiceId = this.getNodeParameter('voiceId', itemIndex) as string;
+ const downloadAudio = this.getNodeParameter('downloadAudio', itemIndex, true) as boolean;
+ const options = this.getNodeParameter('options', itemIndex, {}) as IDataObject;
+
+ const audioFormat = (options.audioFormat as string) || 'mp3';
+
+ const body: IDataObject = {
+ model,
+ text,
+ stream: false,
+ output_format: 'url',
+ voice_setting: {
+ voice_id: voiceId,
+ speed: (options.speed as number) ?? 1,
+ vol: (options.volume as number) ?? 1,
+ pitch: (options.pitch as number) ?? 0,
+ },
+ audio_setting: {
+ format: audioFormat,
+ },
+ };
+
+ if (options.emotion) {
+ (body.voice_setting as IDataObject).emotion = options.emotion;
+ }
+
+ if (options.languageBoost) {
+ body.language_boost = options.languageBoost;
+ }
+
+ const response = (await apiRequest.call(this, 'POST', '/t2a_v2', {
+ body,
+ })) as T2AResponse;
+
+ if (response.base_resp?.status_code !== 0) {
+ throw new NodeOperationError(
+ this.getNode(),
+ `Text-to-speech failed: ${response.base_resp?.status_msg || 'Unknown error'}`,
+ );
+ }
+
+ const audioData = response.data?.audio;
+ if (!audioData) {
+ throw new NodeOperationError(this.getNode(), 'No audio data returned');
+ }
+
+ const jsonData: IDataObject = {
+ audioLength: response.extra_info?.audio_length,
+ audioFormat: response.extra_info?.audio_format,
+ audioSize: response.extra_info?.audio_size,
+ wordCount: response.extra_info?.word_count,
+ usageCharacters: response.extra_info?.usage_characters,
+ };
+
+ if (downloadAudio) {
+ const audioResponse = await this.helpers.httpRequest({
+ method: 'GET',
+ url: audioData,
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ });
+
+ const mimeType = (audioResponse.headers?.['content-type'] as string) || `audio/${audioFormat}`;
+ const binaryBuffer = Buffer.from(audioResponse.body as ArrayBuffer);
+ const fileName = `speech.${audioFormat}`;
+ const binaryData = await this.helpers.prepareBinaryData(binaryBuffer, fileName, mimeType);
+
+ return [
+ {
+ binary: { data: binaryData },
+ json: jsonData,
+ pairedItem: { item: itemIndex },
+ },
+ ];
+ }
+
+ return [
+ {
+ json: { ...jsonData, audioUrl: audioData },
+ pairedItem: { item: itemIndex },
+ },
+ ];
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/generate.operation.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/generate.operation.ts
new file mode 100644
index 0000000000000..cad602d2bbcfa
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/generate.operation.ts
@@ -0,0 +1,189 @@
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeProperties,
+} from 'n8n-workflow';
+import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
+
+import type { ImageGenerationResponse } from '../../helpers/interfaces';
+import { apiRequest } from '../../transport';
+
+const properties: INodeProperties[] = [
+ {
+ displayName: 'Model',
+ name: 'modelId',
+ type: 'options',
+ options: [
+ {
+ name: 'Image-01',
+ value: 'image-01',
+ description: 'High-quality image generation with fine-grained details',
+ },
+ ],
+ default: 'image-01',
+ description: 'The model to use for image generation',
+ },
+ {
+ displayName: 'Prompt',
+ name: 'prompt',
+ type: 'string',
+ typeOptions: {
+ rows: 4,
+ },
+ default: '',
+ required: true,
+ description: 'Text description of the image to generate (max 1500 characters)',
+ placeholder: 'e.g. A serene mountain landscape at sunset with reflections in a lake',
+ },
+ {
+ displayName: 'Aspect Ratio',
+ name: 'aspectRatio',
+ type: 'options',
+ options: [
+ { name: '1:1 (1024x1024)', value: '1:1' },
+ { name: '16:9 (1280x720)', value: '16:9' },
+ { name: '2:3 (832x1248)', value: '2:3' },
+ { name: '21:9 (1344x576)', value: '21:9' },
+ { name: '3:2 (1248x832)', value: '3:2' },
+ { name: '3:4 (864x1152)', value: '3:4' },
+ { name: '4:3 (1152x864)', value: '4:3' },
+ { name: '9:16 (720x1280)', value: '9:16' },
+ ],
+ default: '1:1',
+ description: 'Aspect ratio of the generated image',
+ },
+ {
+ displayName: 'Number of Images',
+ name: 'numberOfImages',
+ type: 'number',
+ typeOptions: {
+ minValue: 1,
+ maxValue: 9,
+ },
+ default: 1,
+ description: 'Number of images to generate per request (1-9)',
+ },
+ {
+ displayName: 'Download Image',
+ name: 'downloadImage',
+ type: 'boolean',
+ default: true,
+ description:
+ 'Whether to download the generated image as binary data. When disabled, only the image URL is returned.',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Prompt Optimizer',
+ name: 'promptOptimizer',
+ type: 'boolean',
+ default: false,
+ description: 'Whether to automatically optimize the prompt for better results',
+ },
+ {
+ displayName: 'Seed',
+ name: 'seed',
+ type: 'number',
+ default: 0,
+ description:
+ 'Random seed for reproducible outputs. Using the same seed and parameters produces the same image.',
+ },
+ ],
+ },
+];
+
+const displayOptions = {
+ show: {
+ operation: ['generate'],
+ resource: ['image'],
+ },
+};
+
+export const description = updateDisplayOptions(displayOptions, properties);
+
+export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
+ const model = this.getNodeParameter('modelId', i) as string;
+ const prompt = this.getNodeParameter('prompt', i) as string;
+ const aspectRatio = this.getNodeParameter('aspectRatio', i) as string;
+ const numberOfImages = this.getNodeParameter('numberOfImages', i, 1) as number;
+ const downloadImage = this.getNodeParameter('downloadImage', i, true) as boolean;
+ const options = this.getNodeParameter('options', i, {}) as {
+ promptOptimizer?: boolean;
+ seed?: number;
+ };
+
+ const body: IDataObject = {
+ model,
+ prompt,
+ aspect_ratio: aspectRatio,
+ n: numberOfImages,
+ response_format: 'url',
+ };
+
+ if (options.promptOptimizer !== undefined) {
+ body.prompt_optimizer = options.promptOptimizer;
+ }
+
+ if (options.seed !== undefined) {
+ body.seed = options.seed;
+ }
+
+ const response = (await apiRequest.call(this, 'POST', '/image_generation', {
+ body,
+ })) as ImageGenerationResponse;
+
+ if (response.base_resp?.status_code !== 0) {
+ throw new NodeOperationError(
+ this.getNode(),
+ `Image generation failed: ${response.base_resp?.status_msg || 'Unknown error'}`,
+ );
+ }
+
+ const imageUrls = response.data?.image_urls ?? [];
+ if (imageUrls.length === 0) {
+ throw new NodeOperationError(this.getNode(), 'No images were generated');
+ }
+
+ const results: INodeExecutionData[] = [];
+
+ for (let idx = 0; idx < imageUrls.length; idx++) {
+ const imageUrl = imageUrls[idx];
+
+ if (downloadImage) {
+ const imageResponse = await this.helpers.httpRequest({
+ method: 'GET',
+ url: imageUrl,
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ });
+
+ const contentType = (imageResponse.headers?.['content-type'] as string) || 'image/png';
+ const fileContent = Buffer.from(imageResponse.body as ArrayBuffer);
+ const ext = contentType.includes('jpeg') || contentType.includes('jpg') ? 'jpg' : 'png';
+ const binaryData = await this.helpers.prepareBinaryData(
+ fileContent,
+ `image_${idx}.${ext}`,
+ contentType,
+ );
+
+ results.push({
+ binary: { data: binaryData },
+ json: { imageUrl },
+ pairedItem: { item: i },
+ });
+ } else {
+ results.push({
+ json: { imageUrl },
+ pairedItem: { item: i },
+ });
+ }
+ }
+
+ return results;
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/index.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/index.ts
new file mode 100644
index 0000000000000..f8723deb0fcc8
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/image/index.ts
@@ -0,0 +1,29 @@
+import type { INodeProperties } from 'n8n-workflow';
+
+import * as generate from './generate.operation';
+
+export { generate };
+
+export const description: INodeProperties[] = [
+ {
+ displayName: 'Operation',
+ name: 'operation',
+ type: 'options',
+ noDataExpression: true,
+ displayOptions: {
+ show: {
+ resource: ['image'],
+ },
+ },
+ options: [
+ {
+ name: 'Generate an Image',
+ value: 'generate',
+ action: 'Generate an image',
+ description: 'Create an image from a text prompt',
+ },
+ ],
+ default: 'generate',
+ },
+ ...generate.description,
+];
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/node.type.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/node.type.ts
new file mode 100644
index 0000000000000..5bcad7033b048
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/node.type.ts
@@ -0,0 +1,10 @@
+import type { AllEntities } from 'n8n-workflow';
+
+type NodeMap = {
+ text: 'message';
+ image: 'generate';
+ video: 'textToVideo' | 'imageToVideo';
+ audio: 'textToSpeech';
+};
+
+export type MiniMaxType = AllEntities<NodeMap>;
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/router.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/router.ts
new file mode 100644
index 0000000000000..97a9c069e8f37
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/router.ts
@@ -0,0 +1,57 @@
+import { NodeOperationError, type IExecuteFunctions, type INodeExecutionData } from 'n8n-workflow';
+
+import * as audio from './audio';
+import * as image from './image';
+import type { MiniMaxType } from './node.type';
+import * as text from './text';
+import * as video from './video';
+
+export async function router(this: IExecuteFunctions) {
+ const returnData: INodeExecutionData[] = [];
+
+ const items = this.getInputData();
+ const resource = this.getNodeParameter('resource', 0);
+ const operation = this.getNodeParameter('operation', 0);
+
+ const miniMaxTypeData = {
+ resource,
+ operation,
+ } as MiniMaxType;
+
+ let execute;
+ switch (miniMaxTypeData.resource) {
+ case 'audio':
+ execute = audio[miniMaxTypeData.operation].execute;
+ break;
+ case 'image':
+ execute = image[miniMaxTypeData.operation].execute;
+ break;
+ case 'text':
+ execute = text[miniMaxTypeData.operation].execute;
+ break;
+ case 'video':
+ execute = video[miniMaxTypeData.operation].execute;
+ break;
+ default:
+ throw new NodeOperationError(this.getNode(), `The resource "${resource}" is not supported!`);
+ }
+
+ for (let i = 0; i < items.length; i++) {
+ try {
+ const responseData = await execute.call(this, i);
+ returnData.push(...responseData);
+ } catch (error) {
+ if (this.continueOnFail()) {
+ returnData.push({ json: { error: error.message }, pairedItem: { item: i } });
+ continue;
+ }
+
+ throw new NodeOperationError(this.getNode(), error, {
+ itemIndex: i,
+ description: error.description,
+ });
+ }
+ }
+
+ return [returnData];
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/index.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/index.ts
new file mode 100644
index 0000000000000..34610450939e3
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/index.ts
@@ -0,0 +1,29 @@
+import type { INodeProperties } from 'n8n-workflow';
+
+import * as message from './message.operation';
+
+export { message };
+
+export const description: INodeProperties[] = [
+ {
+ displayName: 'Operation',
+ name: 'operation',
+ type: 'options',
+ noDataExpression: true,
+ options: [
+ {
+ name: 'Message a Model',
+ value: 'message',
+ action: 'Message a model',
+ description: 'Send a message and get a response from a MiniMax model',
+ },
+ ],
+ default: 'message',
+ displayOptions: {
+ show: {
+ resource: ['text'],
+ },
+ },
+ },
+ ...message.description,
+];
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/message.operation.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/message.operation.ts
new file mode 100644
index 0000000000000..34d11fadab0cd
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/text/message.operation.ts
@@ -0,0 +1,348 @@
+import type { Tool } from '@langchain/core/tools';
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeProperties,
+} from 'n8n-workflow';
+import { accumulateTokenUsage, jsonParse, updateDisplayOptions } from 'n8n-workflow';
+import zodToJsonSchema from 'zod-to-json-schema';
+
+import { getConnectedTools } from '@utils/helpers';
+
+import type {
+ ChatCompletionResponse,
+ ChatMessage,
+ ToolCall,
+ ToolFunction,
+} from '../../helpers/interfaces';
+import { apiRequest } from '../../transport';
+
+const properties: INodeProperties[] = [
+ {
+ displayName: 'Model',
+ name: 'modelId',
+ type: 'options',
+ options: [
+ { name: 'MiniMax-M2', value: 'MiniMax-M2' },
+ { name: 'MiniMax-M2.1', value: 'MiniMax-M2.1' },
+ { name: 'MiniMax-M2.1-Highspeed', value: 'MiniMax-M2.1-highspeed' },
+ { name: 'MiniMax-M2.5', value: 'MiniMax-M2.5' },
+ { name: 'MiniMax-M2.5-Highspeed', value: 'MiniMax-M2.5-highspeed' },
+ { name: 'MiniMax-M2.7', value: 'MiniMax-M2.7' },
+ { name: 'MiniMax-M2.7-Highspeed', value: 'MiniMax-M2.7-highspeed' },
+ ],
+ default: 'MiniMax-M2.7',
+ description: 'The model to use for generating the response',
+ },
+ {
+ displayName: 'Messages',
+ name: 'messages',
+ type: 'fixedCollection',
+ typeOptions: {
+ sortable: true,
+ multipleValues: true,
+ },
+ placeholder: 'Add Message',
+ default: { values: [{ content: '', role: 'user' }] },
+ options: [
+ {
+ displayName: 'Values',
+ name: 'values',
+ values: [
+ {
+ displayName: 'Prompt',
+ name: 'content',
+ type: 'string',
+ description: 'The content of the message to be sent',
+ default: '',
+ placeholder: 'e.g. Hello, how can you help me?',
+ typeOptions: {
+ rows: 2,
+ },
+ },
+ {
+ displayName: 'Role',
+ name: 'role',
+ type: 'options',
+ description:
+ "Role in shaping the model's response, it tells the model how it should behave and interact with the user",
+ options: [
+ {
+ name: 'User',
+ value: 'user',
+ description: 'Send a message as a user and get a response from the model',
+ },
+ {
+ name: 'Assistant',
+ value: 'assistant',
+ description: 'Tell the model to adopt a specific tone or personality',
+ },
+ ],
+ default: 'user',
+ },
+ ],
+ },
+ ],
+ },
+ {
+ displayName: 'Simplify Output',
+ name: 'simplify',
+ type: 'boolean',
+ default: true,
+ description: 'Whether to return a simplified version of the response instead of the raw data',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Hide Thinking',
+ name: 'hideThinking',
+ type: 'boolean',
+ default: true,
+ description:
+ 'Whether to strip chain-of-thought reasoning from the response, returning only the final answer',
+ },
+ {
+ displayName: 'Maximum Number of Tokens',
+ name: 'maxTokens',
+ default: 1024,
+ description: 'The maximum number of tokens to generate in the completion',
+ type: 'number',
+ typeOptions: {
+ minValue: 1,
+ numberPrecision: 0,
+ },
+ },
+ {
+ displayName: 'Max Tool Calls Iterations',
+ name: 'maxToolsIterations',
+ type: 'number',
+ default: 15,
+ description:
+ 'The maximum number of tool iteration cycles the LLM will run before stopping. A single iteration can contain multiple tool calls. Set to 0 for no limit.',
+ typeOptions: {
+ minValue: 0,
+ numberPrecision: 0,
+ },
+ },
+ {
+ displayName: 'Output Randomness (Temperature)',
+ name: 'temperature',
+ default: 0.7,
+ description:
+ 'Controls the randomness of the output. Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
+ type: 'number',
+ typeOptions: {
+ minValue: 0,
+ maxValue: 1,
+ numberPrecision: 1,
+ },
+ },
+ {
+ displayName: 'Output Randomness (Top P)',
+ name: 'topP',
+ default: 0.95,
+ description: 'The maximum cumulative probability of tokens to consider when sampling',
+ type: 'number',
+ typeOptions: {
+ minValue: 0,
+ maxValue: 1,
+ numberPrecision: 2,
+ },
+ },
+ {
+ displayName: 'System Message',
+ name: 'system',
+ type: 'string',
+ default: '',
+ placeholder: 'e.g. You are a helpful assistant',
+ },
+ ],
+ },
+];
+
+const displayOptions = {
+ show: {
+ operation: ['message'],
+ resource: ['text'],
+ },
+};
+
+export const description = updateDisplayOptions(displayOptions, properties);
+
+interface MessageOptions {
+ hideThinking?: boolean;
+ maxTokens?: number;
+ system?: string;
+ temperature?: number;
+ topP?: number;
+}
+
+export async function execute(this: IExecuteFunctions, i: number): Promise<INodeExecutionData[]> {
+ const model = this.getNodeParameter('modelId', i) as string;
+ const rawMessages = this.getNodeParameter('messages.values', i, []) as Array<{
+ content: string;
+ role: string;
+ }>;
+ const simplify = this.getNodeParameter('simplify', i, true) as boolean;
+ const options = this.getNodeParameter('options', i, {}) as MessageOptions;
+
+ const hideThinking = options.hideThinking ?? true;
+
+ const messages: ChatMessage[] = [];
+
+ if (options.system) {
+ messages.push({ role: 'system', content: options.system });
+ }
+
+ for (const msg of rawMessages) {
+ messages.push({ role: msg.role as 'user' | 'assistant', content: msg.content });
+ }
+
+ const { tools, connectedTools } = await getToolDefinitions.call(this);
+
+ const body: IDataObject = {
+ model,
+ messages,
+ max_tokens: options.maxTokens ?? 1024,
+ };
+
+ if (hideThinking) {
+ body.reasoning_split = true;
+ }
+
+ if (options.temperature !== undefined) body.temperature = options.temperature;
+ if (options.topP !== undefined) body.top_p = options.topP;
+
+ if (tools.length > 0) {
+ body.tools = tools;
+ }
+
+ let response = (await apiRequest.call(this, 'POST', '/chat/completions', {
+ body,
+ })) as ChatCompletionResponse;
+
+ const captureUsage = () => {
+ const usage = response.usage;
+ if (usage) {
+ accumulateTokenUsage(this, usage.prompt_tokens, usage.completion_tokens);
+ }
+ };
+
+ captureUsage();
+
+ const maxToolsIterations = this.getNodeParameter('options.maxToolsIterations', i, 15) as number;
+ const abortSignal = this.getExecutionCancelSignal();
+ let currentIteration = 0;
+
+ while (true) {
+ if (abortSignal?.aborted) {
+ break;
+ }
+
+ const choice = response.choices?.[0];
+ if (choice?.finish_reason !== 'tool_calls' || !choice.message.tool_calls?.length) {
+ break;
+ }
+
+ if (maxToolsIterations > 0 && currentIteration >= maxToolsIterations) {
+ break;
+ }
+
+ const assistantMsg: ChatMessage = {
+ role: 'assistant',
+ content: choice.message.content ?? '',
+ tool_calls: choice.message.tool_calls,
+ };
+ if (choice.message.reasoning_content) {
+ assistantMsg.reasoning_content = choice.message.reasoning_content;
+ }
+ messages.push(assistantMsg);
+
+ await handleToolUse.call(this, choice.message.tool_calls, messages, connectedTools);
+ currentIteration++;
+
+ response = (await apiRequest.call(this, 'POST', '/chat/completions', {
+ body,
+ })) as ChatCompletionResponse;
+
+ captureUsage();
+ }
+
+ const finalMessage = response.choices?.[0]?.message;
+
+ if (simplify) {
+ const result: IDataObject = {
+ content: finalMessage?.content ?? '',
+ };
+
+ if (!hideThinking && finalMessage?.reasoning_content) {
+ result.reasoning_content = finalMessage.reasoning_content;
+ }
+
+ return [
+ {
+ json: result,
+ pairedItem: { item: i },
+ },
+ ];
+ }
+
+ return [
+ {
+ json: { ...response },
+ pairedItem: { item: i },
+ },
+ ];
+}
+
+async function getToolDefinitions(this: IExecuteFunctions) {
+ let connectedTools: Tool[] = [];
+ const nodeInputs = this.getNodeInputs();
+
+ if (nodeInputs.some((input) => input.type === 'ai_tool')) {
+ connectedTools = await getConnectedTools(this, true);
+ }
+
+ const tools: ToolFunction[] = connectedTools.map((t) => ({
+ type: 'function' as const,
+ function: {
+ name: t.name,
+ description: t.description,
+ parameters: zodToJsonSchema(t.schema) as IDataObject,
+ },
+ }));
+
+ return { tools, connectedTools };
+}
+
+async function handleToolUse(
+ this: IExecuteFunctions,
+ toolCalls: ToolCall[],
+ messages: ChatMessage[],
+ connectedTools: Tool[],
+) {
+ for (const toolCall of toolCalls) {
+ let toolResponse: unknown;
+ for (const connectedTool of connectedTools) {
+ if (connectedTool.name === toolCall.function.name) {
+ const args = jsonParse(toolCall.function.arguments);
+ toolResponse = await connectedTool.invoke(args);
+ }
+ }
+
+ messages.push({
+ role: 'tool',
+ content:
+ typeof toolResponse === 'object'
+ ? JSON.stringify(toolResponse)
+ : ((toolResponse as string) ?? ''),
+ tool_call_id: toolCall.id,
+ });
+ }
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/versionDescription.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/versionDescription.ts
new file mode 100644
index 0000000000000..246d65bbd6511
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/versionDescription.ts
@@ -0,0 +1,84 @@
+/* eslint-disable n8n-nodes-base/node-filename-against-convention */
+import { NodeConnectionTypes, type INodeTypeDescription } from 'n8n-workflow';
+
+import * as audio from './audio';
+import * as image from './image';
+import * as text from './text';
+import * as video from './video';
+
+export const versionDescription: INodeTypeDescription = {
+ displayName: 'MiniMax',
+ name: 'minimax',
+ icon: 'file:minimax.svg',
+ group: ['transform'],
+ version: 1,
+ subtitle: '={{ $parameter["operation"] + ": " + $parameter["resource"] }}',
+ description: 'Interact with MiniMax AI models',
+ defaults: {
+ name: 'MiniMax',
+ },
+ usableAsTool: true,
+ codex: {
+ alias: ['minimax', 'hailuo', 'LangChain', 'video', 'image', 'tts', 'speech'],
+ categories: ['AI'],
+ subcategories: {
+ AI: ['Agents', 'Miscellaneous', 'Root Nodes'],
+ },
+ resources: {
+ primaryDocumentation: [
+ {
+ url: 'https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.minimax/',
+ },
+ ],
+ },
+ },
+ inputs: `={{
+ (() => {
+ const resource = $parameter.resource;
+ const operation = $parameter.operation;
+ if (resource === 'text' && operation === 'message') {
+ return [{ type: 'main' }, { type: 'ai_tool', displayName: 'Tools' }];
+ }
+
+ return ['main'];
+ })()
+ }}`,
+ outputs: [NodeConnectionTypes.Main],
+ credentials: [
+ {
+ name: 'minimaxApi',
+ required: true,
+ },
+ ],
+ properties: [
+ {
+ displayName: 'Resource',
+ name: 'resource',
+ type: 'options',
+ noDataExpression: true,
+ options: [
+ {
+ name: 'Audio',
+ value: 'audio',
+ },
+ {
+ name: 'Image',
+ value: 'image',
+ },
+ {
+ name: 'Text',
+ value: 'text',
+ },
+ {
+ name: 'Video',
+ value: 'video',
+ },
+ ],
+ default: 'text',
+ },
+ ...audio.description,
+ ...image.description,
+ ...text.description,
+ ...video.description,
+ ],
+};
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.i2v.operation.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.i2v.operation.ts
new file mode 100644
index 0000000000000..ea6b44133ef34
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.i2v.operation.ts
@@ -0,0 +1,380 @@
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeProperties,
+} from 'n8n-workflow';
+import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
+
+import type { VideoGenerationResponse } from '../../helpers/interfaces';
+import { apiRequest, getVideoDownloadUrl, pollVideoTask } from '../../transport';
+
+// UI properties for the image-to-video operation. First-frame input fields
+// are toggled by `imageInputType`; optional last-frame and subject-reference
+// inputs live in the Options collection and are toggled by their own
+// *InputType selectors.
+const properties: INodeProperties[] = [
+ {
+ displayName: 'Model',
+ name: 'modelId',
+ type: 'options',
+ options: [
+ {
+ name: 'I2V-01',
+ value: 'I2V-01',
+ description: 'Standard image-to-video model',
+ },
+ {
+ name: 'I2V-01-Director',
+ value: 'I2V-01-Director',
+ description: 'Image-to-video with camera control commands',
+ },
+ {
+ name: 'I2V-01-Live',
+ // NOTE(review): lowercase 'live' is intentional here (presumably the
+ // exact MiniMax API model ID) — confirm against vendor docs.
+ value: 'I2V-01-live',
+ description: 'Image-to-video live model',
+ },
+ {
+ name: 'MiniMax-Hailuo-02',
+ value: 'MiniMax-Hailuo-02',
+ description: 'Model supporting higher resolution and longer duration',
+ },
+ {
+ name: 'MiniMax-Hailuo-2.3',
+ value: 'MiniMax-Hailuo-2.3',
+ description: 'Latest model with enhanced realism',
+ },
+ {
+ name: 'MiniMax-Hailuo-2.3-Fast',
+ value: 'MiniMax-Hailuo-2.3-Fast',
+ description: 'Faster image-to-video model for value and efficiency',
+ },
+ ],
+ default: 'MiniMax-Hailuo-2.3',
+ description: 'The model to use for video generation',
+ },
+ {
+ displayName: 'Image Input Type',
+ name: 'imageInputType',
+ type: 'options',
+ options: [
+ { name: 'URL', value: 'url' },
+ { name: 'Binary File', value: 'binary' },
+ ],
+ default: 'url',
+ description: 'How to provide the first frame image',
+ },
+ {
+ displayName: 'Image URL',
+ name: 'imageUrl',
+ type: 'string',
+ default: '',
+ required: true,
+ placeholder: 'https://example.com/image.jpg',
+ description: 'Public URL of the image to use as first frame (JPG, JPEG, PNG, WebP, <20MB)',
+ displayOptions: {
+ show: {
+ imageInputType: ['url'],
+ },
+ },
+ },
+ {
+ displayName: 'Input Data Field Name',
+ name: 'binaryPropertyName',
+ type: 'string',
+ default: 'data',
+ required: true,
+ placeholder: 'e.g. data',
+ hint: 'The name of the input field containing the binary image data',
+ typeOptions: {
+ binaryDataProperty: true,
+ },
+ displayOptions: {
+ show: {
+ imageInputType: ['binary'],
+ },
+ },
+ },
+ {
+ displayName: 'Prompt',
+ name: 'prompt',
+ type: 'string',
+ typeOptions: {
+ rows: 4,
+ },
+ default: '',
+ description:
+ 'Optional text description of the video (max 2000 characters). Camera movements can be controlled using [command] syntax.',
+ placeholder: 'e.g. The subject smiles and waves at the camera [Zoom in]',
+ },
+ {
+ displayName: 'Duration (Seconds)',
+ name: 'duration',
+ type: 'options',
+ options: [
+ { name: '6 Seconds', value: 6 },
+ { name: '10 Seconds', value: 10 },
+ ],
+ default: 6,
+ description: 'Duration of the generated video',
+ },
+ {
+ displayName: 'Resolution',
+ name: 'resolution',
+ type: 'options',
+ // NOTE(review): valid resolution/duration combinations vary per model;
+ // the API is expected to reject unsupported combinations.
+ options: [
+ { name: '512P', value: '512P' },
+ { name: '720P', value: '720P' },
+ { name: '768P', value: '768P' },
+ { name: '1080P', value: '1080P' },
+ ],
+ default: '768P',
+ description: 'Resolution of the generated video. Available options depend on the model.',
+ },
+ {
+ displayName: 'Download Video',
+ name: 'downloadVideo',
+ type: 'boolean',
+ default: true,
+ description:
+ 'Whether to download the generated video as binary data. When disabled, only the video URL is returned.',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Prompt Optimizer',
+ name: 'promptOptimizer',
+ type: 'boolean',
+ default: true,
+ description: 'Whether to automatically optimize the prompt',
+ },
+ {
+ displayName: 'Last Frame Image Input Type',
+ name: 'lastFrameInputType',
+ type: 'options',
+ options: [
+ { name: 'None', value: 'none' },
+ { name: 'URL', value: 'url' },
+ { name: 'Binary File', value: 'binary' },
+ ],
+ default: 'none',
+ description:
+ 'Provide a last frame image to generate a first-and-last-frame video. Only supported by MiniMax-Hailuo-2.3 and MiniMax-Hailuo-02.',
+ },
+ {
+ displayName: 'Last Frame Image URL',
+ name: 'lastFrameImageUrl',
+ type: 'string',
+ default: '',
+ placeholder: 'https://example.com/last-frame.jpg',
+ displayOptions: {
+ show: {
+ lastFrameInputType: ['url'],
+ },
+ },
+ },
+ {
+ displayName: 'Last Frame Data Field Name',
+ name: 'lastFrameBinaryPropertyName',
+ type: 'string',
+ default: 'lastFrame',
+ placeholder: 'e.g. lastFrame',
+ typeOptions: {
+ binaryDataProperty: true,
+ },
+ displayOptions: {
+ show: {
+ lastFrameInputType: ['binary'],
+ },
+ },
+ },
+ {
+ displayName: 'Subject Reference Input Type',
+ name: 'subjectReferenceInputType',
+ type: 'options',
+ options: [
+ { name: 'None', value: 'none' },
+ { name: 'URL', value: 'url' },
+ { name: 'Binary File', value: 'binary' },
+ ],
+ default: 'none',
+ description:
+ 'Provide a face photo for facial consistency in the generated video. Only supported by MiniMax-Hailuo-2.3.',
+ },
+ {
+ displayName: 'Subject Reference Image URL',
+ name: 'subjectReferenceImageUrl',
+ type: 'string',
+ default: '',
+ placeholder: 'https://example.com/face.jpg',
+ displayOptions: {
+ show: {
+ subjectReferenceInputType: ['url'],
+ },
+ },
+ },
+ {
+ displayName: 'Subject Reference Data Field Name',
+ name: 'subjectReferenceBinaryPropertyName',
+ type: 'string',
+ default: 'subjectReference',
+ placeholder: 'e.g. subjectReference',
+ typeOptions: {
+ binaryDataProperty: true,
+ },
+ displayOptions: {
+ show: {
+ subjectReferenceInputType: ['binary'],
+ },
+ },
+ },
+ ],
+ },
+];
+
+// Scope every property above to resource=video / operation=imageToVideo.
+const displayOptions = {
+ show: {
+ resource: ['video'],
+ operation: ['imageToVideo'],
+ },
+};
+
+export const description = updateDisplayOptions(displayOptions, properties);
+
+async function resolveImageInput(
+ executeFunctions: IExecuteFunctions,
+ itemIndex: number,
+ inputType: string,
+ urlValue: string,
+ binaryPropertyName: string,
+): Promise {
+ if (inputType === 'binary') {
+ const binaryData = executeFunctions.helpers.assertBinaryData(itemIndex, binaryPropertyName);
+ const buffer = await executeFunctions.helpers.getBinaryDataBuffer(
+ itemIndex,
+ binaryPropertyName,
+ );
+ return `data:${binaryData.mimeType};base64,${buffer.toString('base64')}`;
+ }
+ return urlValue;
+}
+
+export async function execute(
+ this: IExecuteFunctions,
+ itemIndex: number,
+): Promise {
+ const model = this.getNodeParameter('modelId', itemIndex) as string;
+ const imageInputType = this.getNodeParameter('imageInputType', itemIndex) as string;
+ const prompt = this.getNodeParameter('prompt', itemIndex, '') as string;
+ const duration = this.getNodeParameter('duration', itemIndex) as number;
+ const resolution = this.getNodeParameter('resolution', itemIndex) as string;
+ const downloadVideo = this.getNodeParameter('downloadVideo', itemIndex, true) as boolean;
+ const options = this.getNodeParameter('options', itemIndex, {}) as IDataObject;
+
+ let firstFrameImage: string;
+ if (imageInputType === 'binary') {
+ const binaryPropertyName = this.getNodeParameter('binaryPropertyName', itemIndex) as string;
+ firstFrameImage = await resolveImageInput(this, itemIndex, 'binary', '', binaryPropertyName);
+ } else {
+ const imageUrl = this.getNodeParameter('imageUrl', itemIndex) as string;
+ firstFrameImage = imageUrl;
+ }
+
+ const body: IDataObject = {
+ model,
+ first_frame_image: firstFrameImage,
+ duration,
+ resolution,
+ };
+
+ if (prompt) {
+ body.prompt = prompt;
+ }
+
+ if (options.promptOptimizer !== undefined) {
+ body.prompt_optimizer = options.promptOptimizer;
+ }
+
+ const lastFrameInputType = (options.lastFrameInputType as string) || 'none';
+ if (lastFrameInputType !== 'none') {
+ body.last_frame_image = await resolveImageInput(
+ this,
+ itemIndex,
+ lastFrameInputType,
+ (options.lastFrameImageUrl as string) || '',
+ (options.lastFrameBinaryPropertyName as string) || 'lastFrame',
+ );
+ }
+
+ const subjectRefInputType = (options.subjectReferenceInputType as string) || 'none';
+ if (subjectRefInputType !== 'none') {
+ body.subject_reference = [
+ {
+ image: await resolveImageInput(
+ this,
+ itemIndex,
+ subjectRefInputType,
+ (options.subjectReferenceImageUrl as string) || '',
+ (options.subjectReferenceBinaryPropertyName as string) || 'subjectReference',
+ ),
+ },
+ ];
+ }
+
+ const createResponse = (await apiRequest.call(this, 'POST', '/video_generation', {
+ body,
+ })) as VideoGenerationResponse;
+
+ if (createResponse.base_resp?.status_code !== 0) {
+ throw new NodeOperationError(
+ this.getNode(),
+ `Failed to create video task: ${createResponse.base_resp?.status_msg || 'Unknown error'}`,
+ );
+ }
+
+ const taskId = createResponse.task_id;
+ if (!taskId) {
+ throw new NodeOperationError(
+ this.getNode(),
+ 'No task_id returned from video generation request',
+ );
+ }
+
+ const { fileId } = await pollVideoTask.call(this, taskId);
+ const videoUrl = await getVideoDownloadUrl.call(this, fileId);
+
+ const jsonData: IDataObject = {
+ videoUrl,
+ taskId,
+ fileId,
+ };
+
+ if (downloadVideo && videoUrl) {
+ const videoResponse = await this.helpers.httpRequest({
+ method: 'GET',
+ url: videoUrl,
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ });
+
+ const contentType = (videoResponse.headers?.['content-type'] as string) || 'video/mp4';
+ const fileContent = Buffer.from(videoResponse.body as ArrayBuffer);
+ const binaryData = await this.helpers.prepareBinaryData(fileContent, 'video.mp4', contentType);
+
+ return [
+ {
+ binary: { data: binaryData },
+ json: jsonData,
+ pairedItem: { item: itemIndex },
+ },
+ ];
+ }
+
+ return [
+ {
+ json: jsonData,
+ pairedItem: { item: itemIndex },
+ },
+ ];
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.t2v.operation.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.t2v.operation.ts
new file mode 100644
index 0000000000000..58610a1085626
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/generate.t2v.operation.ts
@@ -0,0 +1,192 @@
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ INodeExecutionData,
+ INodeProperties,
+} from 'n8n-workflow';
+import { NodeOperationError, updateDisplayOptions } from 'n8n-workflow';
+
+import type { VideoGenerationResponse } from '../../helpers/interfaces';
+import { apiRequest, getVideoDownloadUrl, pollVideoTask } from '../../transport';
+
+// UI properties for the text-to-video operation.
+const properties: INodeProperties[] = [
+ {
+ displayName: 'Model',
+ name: 'modelId',
+ type: 'options',
+ options: [
+ {
+ name: 'MiniMax-Hailuo-2.3',
+ value: 'MiniMax-Hailuo-2.3',
+ description: 'Latest video generation model with enhanced realism',
+ },
+ {
+ name: 'MiniMax-Hailuo-02',
+ value: 'MiniMax-Hailuo-02',
+ description: 'Video model supporting higher resolution and longer duration',
+ },
+ {
+ name: 'T2V-01-Director',
+ value: 'T2V-01-Director',
+ description: 'Text-to-video model with camera control commands',
+ },
+ {
+ name: 'T2V-01',
+ value: 'T2V-01',
+ description: 'Standard text-to-video model',
+ },
+ ],
+ default: 'MiniMax-Hailuo-2.3',
+ description: 'The model to use for video generation',
+ },
+ {
+ displayName: 'Prompt',
+ name: 'prompt',
+ type: 'string',
+ typeOptions: {
+ rows: 4,
+ },
+ default: '',
+ required: true,
+ description:
+ 'Text description of the video (max 2000 characters). Camera movements can be controlled using [command] syntax, e.g. [Push in], [Pan left].',
+ placeholder: 'e.g. A cat playing with a ball of yarn [Static shot]',
+ },
+ {
+ displayName: 'Duration (Seconds)',
+ name: 'duration',
+ type: 'options',
+ options: [
+ { name: '6 Seconds', value: 6 },
+ { name: '10 Seconds', value: 10 },
+ ],
+ default: 6,
+ description: 'Duration of the generated video',
+ },
+ {
+ displayName: 'Resolution',
+ name: 'resolution',
+ type: 'options',
+ // NOTE(review): supported resolutions differ per model; unsupported
+ // combinations are expected to be rejected by the API.
+ options: [
+ { name: '720P', value: '720P' },
+ { name: '768P', value: '768P' },
+ { name: '1080P', value: '1080P' },
+ ],
+ default: '768P',
+ description: 'Resolution of the generated video. Available options depend on the model.',
+ },
+ {
+ displayName: 'Download Video',
+ name: 'downloadVideo',
+ type: 'boolean',
+ default: true,
+ description:
+ 'Whether to download the generated video as binary data. When disabled, only the video URL is returned.',
+ },
+ {
+ displayName: 'Options',
+ name: 'options',
+ placeholder: 'Add Option',
+ type: 'collection',
+ default: {},
+ options: [
+ {
+ displayName: 'Prompt Optimizer',
+ name: 'promptOptimizer',
+ type: 'boolean',
+ default: true,
+ description: 'Whether to automatically optimize the prompt for better results',
+ },
+ ],
+ },
+];
+
+// Scope every property above to resource=video / operation=textToVideo.
+const displayOptions = {
+ show: {
+ resource: ['video'],
+ operation: ['textToVideo'],
+ },
+};
+
+export const description = updateDisplayOptions(displayOptions, properties);
+
+export async function execute(
+ this: IExecuteFunctions,
+ itemIndex: number,
+): Promise {
+ const model = this.getNodeParameter('modelId', itemIndex) as string;
+ const prompt = this.getNodeParameter('prompt', itemIndex) as string;
+ const duration = this.getNodeParameter('duration', itemIndex) as number;
+ const resolution = this.getNodeParameter('resolution', itemIndex) as string;
+ const downloadVideo = this.getNodeParameter('downloadVideo', itemIndex, true) as boolean;
+ const options = this.getNodeParameter('options', itemIndex, {}) as {
+ promptOptimizer?: boolean;
+ };
+
+ const body: IDataObject = {
+ model,
+ prompt,
+ duration,
+ resolution,
+ };
+
+ if (options.promptOptimizer !== undefined) {
+ body.prompt_optimizer = options.promptOptimizer;
+ }
+
+ const createResponse = (await apiRequest.call(this, 'POST', '/video_generation', {
+ body,
+ })) as VideoGenerationResponse;
+
+ if (createResponse.base_resp?.status_code !== 0) {
+ throw new NodeOperationError(
+ this.getNode(),
+ `Failed to create video task: ${createResponse.base_resp?.status_msg || 'Unknown error'}`,
+ );
+ }
+
+ const taskId = createResponse.task_id;
+ if (!taskId) {
+ throw new NodeOperationError(
+ this.getNode(),
+ 'No task_id returned from video generation request',
+ );
+ }
+
+ const { fileId } = await pollVideoTask.call(this, taskId);
+ const videoUrl = await getVideoDownloadUrl.call(this, fileId);
+
+ const jsonData: IDataObject = {
+ videoUrl,
+ taskId,
+ fileId,
+ };
+
+ if (downloadVideo && videoUrl) {
+ const videoResponse = await this.helpers.httpRequest({
+ method: 'GET',
+ url: videoUrl,
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ });
+
+ const contentType = (videoResponse.headers?.['content-type'] as string) || 'video/mp4';
+ const fileContent = Buffer.from(videoResponse.body as ArrayBuffer);
+ const binaryData = await this.helpers.prepareBinaryData(fileContent, 'video.mp4', contentType);
+
+ return [
+ {
+ binary: { data: binaryData },
+ json: jsonData,
+ pairedItem: { item: itemIndex },
+ },
+ ];
+ }
+
+ return [
+ {
+ json: jsonData,
+ pairedItem: { item: itemIndex },
+ },
+ ];
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/index.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/index.ts
new file mode 100644
index 0000000000000..bebd73fe1083c
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/actions/video/index.ts
@@ -0,0 +1,38 @@
+import type { INodeProperties } from 'n8n-workflow';
+
+import * as textToVideo from './generate.t2v.operation';
+import * as imageToVideo from './generate.i2v.operation';
+
+export { textToVideo, imageToVideo };
+
+// Operation selector for the Video resource plus the per-operation fields.
+export const description: INodeProperties[] = [
+ {
+ displayName: 'Operation',
+ name: 'operation',
+ type: 'options',
+ noDataExpression: true,
+ displayOptions: {
+ show: {
+ resource: ['video'],
+ },
+ },
+ // Options are listed alphabetically by display name, matching the
+ // convention used by the Resource selector in versionDescription.
+ options: [
+ {
+ name: 'Generate Video From Image',
+ value: 'imageToVideo',
+ action: 'Generate video from image',
+ description:
+ 'Generate a video from an image, with optional last frame and subject reference',
+ },
+ {
+ name: 'Generate Video From Text',
+ value: 'textToVideo',
+ action: 'Generate video from text prompt',
+ description: 'Generate a video from a text prompt',
+ },
+ ],
+ default: 'textToVideo',
+ },
+ ...textToVideo.description,
+ ...imageToVideo.description,
+];
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/helpers/interfaces.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/helpers/interfaces.ts
new file mode 100644
index 0000000000000..0d2c180c2f3f6
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/helpers/interfaces.ts
@@ -0,0 +1,95 @@
+import type { IDataObject } from 'n8n-workflow';
+
+/** A single message in a MiniMax chat-completion conversation. */
+export interface ChatMessage {
+ role: 'system' | 'user' | 'assistant' | 'tool';
+ content: string;
+ // Set on role 'tool' messages to link the result to its originating call.
+ tool_call_id?: string;
+ // Set on assistant messages that request tool invocations.
+ tool_calls?: ToolCall[];
+ // Model "thinking" text, when the API returns reasoning separately.
+ reasoning_content?: string;
+}
+
+/** Tool definition sent to the chat-completion endpoint. */
+export interface ToolFunction {
+ type: 'function';
+ function: {
+ name: string;
+ description?: string;
+ // JSON Schema describing the tool's arguments.
+ parameters?: IDataObject;
+ };
+}
+
+/** Tool invocation requested by the model; arguments are a JSON string. */
+export interface ToolCall {
+ id: string;
+ type: 'function';
+ function: {
+ name: string;
+ arguments: string;
+ };
+}
+
+/** Response shape of the MiniMax chat-completion endpoint (OpenAI-style). */
+export interface ChatCompletionResponse {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: Array<{
+ index: number;
+ message: {
+ role: string;
+ content: string | null;
+ reasoning_content?: string | null;
+ tool_calls?: ToolCall[];
+ };
+ finish_reason: string;
+ }>;
+ usage: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+}
+
+/** Response shape of the image generation endpoint. */
+export interface ImageGenerationResponse {
+ id: string;
+ // Exactly one of image_urls / image_base64 is populated, depending on the
+ // requested response format.
+ data: {
+ image_urls?: string[];
+ image_base64?: string[];
+ };
+ metadata: {
+ success_count: number;
+ failed_count: number;
+ };
+ // status_code 0 means success.
+ base_resp: {
+ status_code: number;
+ status_msg: string;
+ };
+}
+
+/** Response of the asynchronous video generation task-creation endpoint. */
+export interface VideoGenerationResponse {
+ task_id: string;
+ base_resp: {
+ status_code: number;
+ status_msg: string;
+ };
+}
+
+/** Response shape of the text-to-audio (t2a_v2) endpoint. */
+export interface T2AResponse {
+ data: {
+ // Audio payload: URL or hex/base64 content depending on request settings.
+ audio: string;
+ status: number;
+ };
+ extra_info: {
+ // Duration in milliseconds.
+ audio_length: number;
+ audio_sample_rate: number;
+ audio_size: number;
+ bitrate: number;
+ audio_format: string;
+ audio_channel: number;
+ usage_characters: number;
+ word_count: number;
+ };
+ trace_id: string;
+ base_resp: {
+ status_code: number;
+ status_msg: string;
+ };
+}
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/minimax.svg b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/minimax.svg
new file mode 100644
index 0000000000000..f9447403a6714
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/minimax.svg
@@ -0,0 +1,10 @@
+
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/operations.test.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/operations.test.ts
new file mode 100644
index 0000000000000..dc555f4d1f713
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/operations.test.ts
@@ -0,0 +1,429 @@
+import { mock, mockDeep } from 'jest-mock-extended';
+import type { IExecuteFunctions, IBinaryData } from 'n8n-workflow';
+
+// jest.mock calls are hoisted above the imports below, so every operation
+// module under test receives these mocks instead of the real transport layer.
+jest.mock('../transport', () => ({
+ apiRequest: jest.fn(),
+ pollVideoTask: jest.fn(),
+ getVideoDownloadUrl: jest.fn(),
+}));
+
+jest.mock('@utils/helpers', () => ({
+ getConnectedTools: jest.fn().mockResolvedValue([]),
+}));
+
+jest.mock('zod-to-json-schema', () => ({
+ __esModule: true,
+ default: jest.fn(),
+}));
+
+// Keep the real n8n-workflow module but stub token accounting.
+jest.mock('n8n-workflow', () => {
+ const actual = jest.requireActual('n8n-workflow');
+ return {
+ ...actual,
+ accumulateTokenUsage: jest.fn(),
+ };
+});
+
+import { execute as textMessageExecute } from '../actions/text/message.operation';
+import { execute as imageGenerateExecute } from '../actions/image/generate.operation';
+import { execute as videoT2VExecute } from '../actions/video/generate.t2v.operation';
+import { execute as videoI2VExecute } from '../actions/video/generate.i2v.operation';
+import { execute as audioTTSExecute } from '../actions/audio/tts.operation';
+import { apiRequest, pollVideoTask, getVideoDownloadUrl } from '../transport';
+
+// Typed handles to the mocked transport functions.
+const mockApiRequest = apiRequest as jest.Mock;
+const mockPollVideoTask = pollVideoTask as jest.Mock;
+const mockGetVideoDownloadUrl = getVideoDownloadUrl as jest.Mock;
+
+describe('MiniMax Operations', () => {
+ let mockExecuteFunctions: ReturnType>;
+
+ beforeEach(() => {
+ mockExecuteFunctions = mock();
+ mockExecuteFunctions.getNodeInputs.mockReturnValue([{ type: 'main' }]);
+ mockExecuteFunctions.getExecutionCancelSignal.mockReturnValue(undefined);
+ });
+
+ afterEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe('Text: message', () => {
+ it('should send correct request body and return simplified response', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'MiniMax-M2.7',
+ 'messages.values': [{ role: 'user', content: 'Hello' }],
+ options: { temperature: 0.7 },
+ simplify: true,
+ 'options.maxToolsIterations': 15,
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ choices: [{ message: { content: 'Hi there!' }, finish_reason: 'stop' }],
+ usage: { prompt_tokens: 5, completion_tokens: 3, total_tokens: 8 },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const result = await textMessageExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
+ body: expect.objectContaining({
+ model: 'MiniMax-M2.7',
+ messages: [{ role: 'user', content: 'Hello' }],
+ reasoning_split: true,
+ }),
+ });
+ expect(result[0].json).toEqual({ content: 'Hi there!' });
+ });
+
+ it('should return full response when simplify is false', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'MiniMax-M2.7',
+ 'messages.values': [{ role: 'user', content: 'Hello' }],
+ options: {},
+ simplify: false,
+ 'options.maxToolsIterations': 15,
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ choices: [{ message: { content: 'Hi!' }, finish_reason: 'stop' }],
+ usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const result = await textMessageExecute.call(mockExecuteFunctions, 0);
+
+ expect(result[0].json).toEqual(
+ expect.objectContaining({
+ choices: expect.any(Array),
+ usage: expect.any(Object),
+ }),
+ );
+ });
+
+ it('should include system message when provided', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'MiniMax-M2.7',
+ 'messages.values': [{ role: 'user', content: 'Hello' }],
+ options: { system: 'You are a helpful assistant' },
+ simplify: true,
+ 'options.maxToolsIterations': 15,
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ choices: [{ message: { content: 'Hi!' }, finish_reason: 'stop' }],
+ usage: { prompt_tokens: 10, completion_tokens: 2, total_tokens: 12 },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ await textMessageExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/chat/completions', {
+ body: expect.objectContaining({
+ messages: expect.arrayContaining([
+ { role: 'system', content: 'You are a helpful assistant' },
+ ]),
+ }),
+ });
+ });
+ });
+
+ describe('Image: generate', () => {
+ it('should send prompt and return URL-only when downloadImage is false', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'image-01',
+ prompt: 'A sunset over mountains',
+ aspectRatio: '16:9',
+ numberOfImages: 1,
+ downloadImage: false,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ data: { image_urls: ['https://cdn.minimax.io/image.png'] },
+ metadata: { success_count: 1, failed_count: 0 },
+ base_resp: { status_code: 0, status_msg: 'success' },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const result = await imageGenerateExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/image_generation', {
+ body: expect.objectContaining({
+ model: 'image-01',
+ prompt: 'A sunset over mountains',
+ aspect_ratio: '16:9',
+ n: 1,
+ }),
+ });
+ expect(result[0].json).toEqual({ imageUrl: 'https://cdn.minimax.io/image.png' });
+ expect(result[0].binary).toBeUndefined();
+ });
+
+ it('should download image as binary when downloadImage is true', async () => {
+ const deepMock = mockDeep();
+ deepMock.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'image-01',
+ prompt: 'A sunset',
+ aspectRatio: '1:1',
+ numberOfImages: 1,
+ downloadImage: true,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ data: { image_urls: ['https://cdn.minimax.io/image.png'] },
+ metadata: { success_count: 1, failed_count: 0 },
+ base_resp: { status_code: 0, status_msg: 'success' },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const imageBuffer = Buffer.from('fake-png-data');
+ deepMock.helpers.httpRequest.mockResolvedValue({
+ body: imageBuffer,
+ headers: { 'content-type': 'image/png' },
+ });
+
+ const mockBinaryData: IBinaryData = {
+ mimeType: 'image/png',
+ fileType: 'image',
+ fileExtension: 'png',
+ data: '',
+ fileName: 'image_0.png',
+ };
+ deepMock.helpers.prepareBinaryData.mockResolvedValue(mockBinaryData);
+
+ const result = await imageGenerateExecute.call(deepMock, 0);
+
+ expect(deepMock.helpers.httpRequest).toHaveBeenCalledWith(
+ expect.objectContaining({
+ method: 'GET',
+ url: 'https://cdn.minimax.io/image.png',
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ }),
+ );
+ expect(result[0].binary).toBeDefined();
+ expect(result[0].binary!.data).toEqual(mockBinaryData);
+ });
+ });
+
+ describe('Video: textToVideo', () => {
+ it('should create task, poll until success, and return video URL', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'MiniMax-Hailuo-2.3',
+ prompt: 'A cat playing with yarn',
+ duration: 6,
+ resolution: '768P',
+ downloadVideo: false,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ mockApiRequest.mockResolvedValue({
+ task_id: 'video-task-1',
+ base_resp: { status_code: 0, status_msg: 'success' },
+ });
+
+ mockPollVideoTask.mockResolvedValue({ fileId: 'file-abc', status: 'Success' });
+ mockGetVideoDownloadUrl.mockResolvedValue('https://cdn.minimax.io/video.mp4');
+
+ const result = await videoT2VExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/video_generation', {
+ body: expect.objectContaining({
+ model: 'MiniMax-Hailuo-2.3',
+ prompt: 'A cat playing with yarn',
+ duration: 6,
+ resolution: '768P',
+ }),
+ });
+ expect(mockPollVideoTask).toHaveBeenCalledWith('video-task-1');
+ expect(result[0].json).toEqual(
+ expect.objectContaining({
+ videoUrl: 'https://cdn.minimax.io/video.mp4',
+ taskId: 'video-task-1',
+ fileId: 'file-abc',
+ }),
+ );
+ });
+ });
+
+ describe('Video: imageToVideo', () => {
+ it('should create task with image URL input and return video URL', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'MiniMax-Hailuo-2.3',
+ imageInputType: 'url',
+ imageUrl: 'https://example.com/frame.png',
+ prompt: 'A bird taking flight',
+ duration: 6,
+ resolution: '768P',
+ downloadVideo: false,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ mockApiRequest.mockResolvedValue({
+ task_id: 'i2v-task-1',
+ base_resp: { status_code: 0, status_msg: 'success' },
+ });
+
+ mockPollVideoTask.mockResolvedValue({ fileId: 'file-i2v', status: 'Success' });
+ mockGetVideoDownloadUrl.mockResolvedValue('https://cdn.minimax.io/i2v-video.mp4');
+
+ const result = await videoI2VExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/video_generation', {
+ body: expect.objectContaining({
+ model: 'MiniMax-Hailuo-2.3',
+ first_frame_image: 'https://example.com/frame.png',
+ prompt: 'A bird taking flight',
+ }),
+ });
+ expect(result[0].json).toEqual(
+ expect.objectContaining({
+ videoUrl: 'https://cdn.minimax.io/i2v-video.mp4',
+ }),
+ );
+ });
+ });
+
+ describe('Audio: textToSpeech', () => {
+ it('should send TTS request and return audio URL when downloadAudio is false', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'speech-2.8-hd',
+ text: 'Hello world',
+ voiceId: 'English_Graceful_Lady',
+ downloadAudio: false,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ data: { audio: 'https://cdn.minimax.io/speech.mp3', status: 1 },
+ extra_info: {
+ audio_length: 1500,
+ audio_format: 'mp3',
+ audio_size: 24000,
+ word_count: 2,
+ usage_characters: 11,
+ },
+ base_resp: { status_code: 0, status_msg: 'success' },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const result = await audioTTSExecute.call(mockExecuteFunctions, 0);
+
+ expect(mockApiRequest).toHaveBeenCalledWith('POST', '/t2a_v2', {
+ body: expect.objectContaining({
+ model: 'speech-2.8-hd',
+ text: 'Hello world',
+ voice_setting: expect.objectContaining({
+ voice_id: 'English_Graceful_Lady',
+ }),
+ }),
+ });
+ expect(result[0].json).toEqual(
+ expect.objectContaining({
+ audioUrl: 'https://cdn.minimax.io/speech.mp3',
+ audioLength: 1500,
+ audioFormat: 'mp3',
+ }),
+ );
+ });
+
+ it('should download audio as binary when downloadAudio is true', async () => {
+ const deepMock = mockDeep();
+ deepMock.getNodeParameter.mockImplementation(
+ (param: string, _index: number, fallback?: any) => {
+ const params: Record = {
+ modelId: 'speech-2.8-hd',
+ text: 'Hello world',
+ voiceId: 'English_Graceful_Lady',
+ downloadAudio: true,
+ options: {},
+ };
+ return params[param] ?? fallback;
+ },
+ );
+
+ const mockResponse = {
+ data: { audio: 'https://cdn.minimax.io/speech.mp3', status: 1 },
+ extra_info: {
+ audio_length: 1500,
+ audio_format: 'mp3',
+ audio_size: 24000,
+ word_count: 2,
+ usage_characters: 11,
+ },
+ base_resp: { status_code: 0, status_msg: 'success' },
+ };
+ mockApiRequest.mockResolvedValue(mockResponse);
+
+ const audioBuffer = Buffer.from('fake-audio-data');
+ deepMock.helpers.httpRequest.mockResolvedValue({
+ body: audioBuffer,
+ headers: { 'content-type': 'audio/mpeg' },
+ });
+
+ const mockBinaryData: IBinaryData = {
+ mimeType: 'audio/mpeg',
+ fileType: 'audio',
+ fileExtension: 'mp3',
+ data: '',
+ fileName: 'speech.mp3',
+ };
+ deepMock.helpers.prepareBinaryData.mockResolvedValue(mockBinaryData);
+
+ const result = await audioTTSExecute.call(deepMock, 0);
+
+ expect(deepMock.helpers.httpRequest).toHaveBeenCalledWith(
+ expect.objectContaining({
+ method: 'GET',
+ url: 'https://cdn.minimax.io/speech.mp3',
+ encoding: 'arraybuffer',
+ returnFullResponse: true,
+ }),
+ );
+ expect(result[0].binary).toBeDefined();
+ expect(result[0].binary!.data).toEqual(mockBinaryData);
+ });
+ });
+});
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/router.test.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/router.test.ts
new file mode 100644
index 0000000000000..d4be378906812
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/router.test.ts
@@ -0,0 +1,168 @@
+import { mock } from 'jest-mock-extended';
+import type { IExecuteFunctions, INodeExecutionData } from 'n8n-workflow';
+import { NodeOperationError } from 'n8n-workflow';
+
+jest.mock('../actions/text', () => ({
+ message: { execute: jest.fn() },
+}));
+
+jest.mock('../actions/image', () => ({
+ generate: { execute: jest.fn() },
+}));
+
+jest.mock('../actions/video', () => ({
+ textToVideo: { execute: jest.fn() },
+ imageToVideo: { execute: jest.fn() },
+}));
+
+jest.mock('../actions/audio', () => ({
+ textToSpeech: { execute: jest.fn() },
+}));
+
+import { router } from '../actions/router';
+import * as text from '../actions/text';
+import * as image from '../actions/image';
+import * as video from '../actions/video';
+import * as audio from '../actions/audio';
+
+describe('MiniMax Router', () => {
+ let mockExecuteFunctions: ReturnType<typeof mock<IExecuteFunctions>>;
+
+ const mockNode = {
+ id: 'test-node-id',
+ name: 'Test Node',
+ type: '@n8n/n8n-nodes-langchain.minimax',
+ typeVersion: 1,
+ position: [0, 0] as [number, number],
+ parameters: {},
+ };
+
+ beforeEach(() => {
+ mockExecuteFunctions = mock<IExecuteFunctions>();
+ mockExecuteFunctions.getNode.mockReturnValue(mockNode);
+ mockExecuteFunctions.getInputData.mockReturnValue([{ json: {} }]);
+ mockExecuteFunctions.continueOnFail.mockReturnValue(false);
+ });
+
+ afterEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('should route text/message to text.message.execute', async () => {
+ const expectedResult: INodeExecutionData = { json: { text: 'hello' }, pairedItem: 0 };
+ (text.message.execute as jest.Mock).mockResolvedValue([expectedResult]);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'text';
+ if (param === 'operation') return 'message';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(text.message.execute).toHaveBeenCalledTimes(1);
+ expect(result).toEqual([[expectedResult]]);
+ });
+
+ it('should route image/generate to image.generate.execute', async () => {
+ const expectedResult: INodeExecutionData = {
+ json: { imageUrl: 'https://example.com/img.png' },
+ pairedItem: 0,
+ };
+ (image.generate.execute as jest.Mock).mockResolvedValue([expectedResult]);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'image';
+ if (param === 'operation') return 'generate';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(image.generate.execute).toHaveBeenCalledTimes(1);
+ expect(result).toEqual([[expectedResult]]);
+ });
+
+ it('should route video/textToVideo to video.textToVideo.execute', async () => {
+ const expectedResult: INodeExecutionData = {
+ json: { videoUrl: 'https://example.com/video.mp4' },
+ pairedItem: 0,
+ };
+ (video.textToVideo.execute as jest.Mock).mockResolvedValue([expectedResult]);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'video';
+ if (param === 'operation') return 'textToVideo';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(video.textToVideo.execute).toHaveBeenCalledTimes(1);
+ expect(result).toEqual([[expectedResult]]);
+ });
+
+ it('should route video/imageToVideo to video.imageToVideo.execute', async () => {
+ const expectedResult: INodeExecutionData = {
+ json: { videoUrl: 'https://example.com/video.mp4' },
+ pairedItem: 0,
+ };
+ (video.imageToVideo.execute as jest.Mock).mockResolvedValue([expectedResult]);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'video';
+ if (param === 'operation') return 'imageToVideo';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(video.imageToVideo.execute).toHaveBeenCalledTimes(1);
+ expect(result).toEqual([[expectedResult]]);
+ });
+
+ it('should route audio/textToSpeech to audio.textToSpeech.execute', async () => {
+ const expectedResult: INodeExecutionData = {
+ json: { audioLength: 5 },
+ pairedItem: 0,
+ };
+ (audio.textToSpeech.execute as jest.Mock).mockResolvedValue([expectedResult]);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'audio';
+ if (param === 'operation') return 'textToSpeech';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(audio.textToSpeech.execute).toHaveBeenCalledTimes(1);
+ expect(result).toEqual([[expectedResult]]);
+ });
+
+ it('should throw NodeOperationError for unsupported resource', async () => {
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'unsupported';
+ if (param === 'operation') return 'test';
+ return undefined;
+ });
+
+ await expect(router.call(mockExecuteFunctions)).rejects.toThrow(NodeOperationError);
+ });
+
+ it('should return error in json when continueOnFail is enabled and operation throws', async () => {
+ (text.message.execute as jest.Mock).mockRejectedValue(new Error('API limit reached'));
+ mockExecuteFunctions.continueOnFail.mockReturnValue(true);
+ mockExecuteFunctions.getNodeParameter.mockImplementation((param: string) => {
+ if (param === 'resource') return 'text';
+ if (param === 'operation') return 'message';
+ return undefined;
+ });
+
+ const result = await router.call(mockExecuteFunctions);
+
+ expect(result).toEqual([
+ [
+ {
+ json: { error: 'API limit reached' },
+ pairedItem: { item: 0 },
+ },
+ ],
+ ]);
+ });
+});
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/transport.test.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/transport.test.ts
new file mode 100644
index 0000000000000..fb004d91b9bed
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/test/transport.test.ts
@@ -0,0 +1,162 @@
+import { mockDeep } from 'jest-mock-extended';
+import type { IExecuteFunctions } from 'n8n-workflow';
+import { NodeOperationError } from 'n8n-workflow';
+
+import { apiRequest, pollVideoTask, getVideoDownloadUrl } from '../transport';
+
+jest.mock('n8n-workflow', () => {
+ const actual = jest.requireActual('n8n-workflow');
+ return {
+ ...actual,
+ sleep: jest.fn(),
+ };
+});
+
+describe('MiniMax Transport', () => {
+ let mockExecuteFunctions: ReturnType<typeof mockDeep<IExecuteFunctions>>;
+
+ beforeEach(() => {
+ mockExecuteFunctions = mockDeep<IExecuteFunctions>();
+ mockExecuteFunctions.getCredentials.mockResolvedValue({
+ apiKey: 'test-key',
+ url: 'https://api.minimax.io/v1',
+ });
+ mockExecuteFunctions.getNode.mockReturnValue({
+ id: 'test-node-id',
+ name: 'Test Node',
+ type: '@n8n/n8n-nodes-langchain.minimax',
+ typeVersion: 1,
+ position: [0, 0],
+ parameters: {},
+ });
+ });
+
+ afterEach(() => {
+ jest.clearAllMocks();
+ });
+
+ describe('apiRequest', () => {
+ it('should call httpRequestWithAuthentication with correct URL, method, and body', async () => {
+ const mockResponse = { choices: [{ message: { content: 'hello' } }] };
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue(mockResponse);
+
+ const result = await apiRequest.call(mockExecuteFunctions, 'POST', '/chat/completions', {
+ body: { model: 'MiniMax-M2.7', messages: [] },
+ });
+
+ expect(mockExecuteFunctions.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
+ 'minimaxApi',
+ expect.objectContaining({
+ method: 'POST',
+ url: 'https://api.minimax.io/v1/chat/completions',
+ body: { model: 'MiniMax-M2.7', messages: [] },
+ json: true,
+ }),
+ );
+ expect(result).toEqual(mockResponse);
+ });
+
+ it('should pass through query string parameters', async () => {
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue({});
+
+ await apiRequest.call(mockExecuteFunctions, 'GET', '/query/video_generation', {
+ qs: { task_id: 'task-123' },
+ });
+
+ expect(mockExecuteFunctions.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
+ 'minimaxApi',
+ expect.objectContaining({
+ method: 'GET',
+ url: 'https://api.minimax.io/v1/query/video_generation',
+ qs: { task_id: 'task-123' },
+ }),
+ );
+ });
+
+ it('should resolve China region to correct base URL', async () => {
+ mockExecuteFunctions.getCredentials.mockResolvedValue({
+ apiKey: 'test-key',
+ url: 'https://api.minimaxi.com/v1',
+ });
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue({});
+
+ await apiRequest.call(mockExecuteFunctions, 'GET', '/files/retrieve');
+
+ expect(mockExecuteFunctions.helpers.httpRequestWithAuthentication).toHaveBeenCalledWith(
+ 'minimaxApi',
+ expect.objectContaining({
+ url: 'https://api.minimaxi.com/v1/files/retrieve',
+ }),
+ );
+ });
+ });
+
+ describe('pollVideoTask', () => {
+ it('should return fileId when task status is Success', async () => {
+ const succeededResponse = {
+ status: 'Success',
+ file_id: 'file-abc-123',
+ };
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue(
+ succeededResponse,
+ );
+
+ const result = await pollVideoTask.call(mockExecuteFunctions, 'task-123', 0);
+
+ expect(result).toEqual({ fileId: 'file-abc-123', status: 'Success' });
+ });
+
+ it('should throw NodeOperationError when task status is Fail', async () => {
+ const failedResponse = {
+ status: 'Fail',
+ base_resp: {
+ status_code: 'CONTENT_MODERATION',
+ status_msg: 'Content moderation failed',
+ },
+ };
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue(failedResponse);
+
+ await expect(pollVideoTask.call(mockExecuteFunctions, 'task-456', 0)).rejects.toThrow(
+ NodeOperationError,
+ );
+ await expect(pollVideoTask.call(mockExecuteFunctions, 'task-456', 0)).rejects.toThrow(
+ 'Task failed',
+ );
+ });
+
+ it('should throw timeout error when max poll attempts exceeded', async () => {
+ const pendingResponse = {
+ status: 'Processing',
+ };
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue(pendingResponse);
+
+ await expect(pollVideoTask.call(mockExecuteFunctions, 'task-timeout', 0)).rejects.toThrow(
+ /did not complete within the maximum polling time/,
+ );
+ });
+ });
+
+ describe('getVideoDownloadUrl', () => {
+ it('should return download URL from file retrieval response', async () => {
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue({
+ file: {
+ download_url: 'https://cdn.minimax.io/videos/abc.mp4',
+ },
+ });
+
+ const result = await getVideoDownloadUrl.call(mockExecuteFunctions, 'file-abc');
+
+ expect(result).toBe('https://cdn.minimax.io/videos/abc.mp4');
+ });
+
+ it('should throw NodeOperationError when download URL is missing', async () => {
+ mockExecuteFunctions.helpers.httpRequestWithAuthentication.mockResolvedValue({
+ file: {},
+ });
+
+ await expect(getVideoDownloadUrl.call(mockExecuteFunctions, 'file-missing')).rejects.toThrow(
+ NodeOperationError,
+ );
+ });
+ });
+});
diff --git a/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/transport/index.ts b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/transport/index.ts
new file mode 100644
index 0000000000000..082e3dcdce841
--- /dev/null
+++ b/packages/@n8n/nodes-langchain/nodes/vendors/MiniMax/transport/index.ts
@@ -0,0 +1,104 @@
+import type {
+ IDataObject,
+ IExecuteFunctions,
+ IHttpRequestMethods,
+ ILoadOptionsFunctions,
+} from 'n8n-workflow';
+import { NodeOperationError, sleep } from 'n8n-workflow';
+
+type RequestParameters = {
+ headers?: IDataObject;
+ body?: IDataObject;
+ qs?: IDataObject;
+ option?: IDataObject;
+};
+
+export async function apiRequest(
+ this: IExecuteFunctions | ILoadOptionsFunctions,
+ method: IHttpRequestMethods,
+ endpoint: string,
+ parameters?: RequestParameters,
+) {
+ const { body, qs, option, headers } = parameters ?? {};
+
+ const credentials = await this.getCredentials('minimaxApi');
+ const baseUrl = (credentials.url as string) ?? 'https://api.minimax.io/v1';
+ const url = `${baseUrl}${endpoint}`;
+
+ const options = {
+ headers: headers ?? {},
+ method,
+ body,
+ qs,
+ url,
+ json: true,
+ };
+
+ if (option && Object.keys(option).length !== 0) {
+ Object.assign(options, option);
+ }
+
+ return await this.helpers.httpRequestWithAuthentication.call(this, 'minimaxApi', options);
+}
+
+const VIDEO_TERMINAL_STATUSES = ['Success', 'Fail'];
+const DEFAULT_POLL_INTERVAL_MS = 15_000;
+const MAX_POLL_ATTEMPTS = 60;
+
+export async function pollVideoTask(
+ this: IExecuteFunctions,
+ taskId: string,
+ pollIntervalMs: number = DEFAULT_POLL_INTERVAL_MS,
+): Promise<{ fileId: string; status: string }> {
+ for (let attempt = 0; attempt < MAX_POLL_ATTEMPTS; attempt++) {
+ const response = await apiRequest.call(this, 'GET', '/query/video_generation', {
+ qs: { task_id: taskId },
+ });
+
+ const status = response?.status as string;
+
+ if (VIDEO_TERMINAL_STATUSES.includes(status)) {
+ if (status === 'Fail') {
+ const errorCode = response?.base_resp?.status_code || 'UNKNOWN';
+ const errorMessage = response?.base_resp?.status_msg || 'Video generation task failed';
+ throw new NodeOperationError(this.getNode(), `Task failed: [${errorCode}] ${errorMessage}`);
+ }
+
+ const fileId = response?.file_id as string;
+ if (!fileId) {
+ throw new NodeOperationError(
+ this.getNode(),
+ 'Video generation succeeded but no file_id was returned',
+ );
+ }
+
+ return { fileId, status };
+ }
+
+ await sleep(pollIntervalMs);
+ }
+
+ throw new NodeOperationError(
+ this.getNode(),
+ `Video task ${taskId} did not complete within the maximum polling time. You can query the task manually using the task ID.`,
+ );
+}
+
+export async function getVideoDownloadUrl(
+ this: IExecuteFunctions,
+ fileId: string,
+): Promise<string> {
+ const response = await apiRequest.call(this, 'GET', '/files/retrieve', {
+ qs: { file_id: fileId },
+ });
+
+ const downloadUrl = response?.file?.download_url as string;
+ if (!downloadUrl) {
+ throw new NodeOperationError(
+ this.getNode(),
+ `Failed to retrieve download URL for file ${fileId}`,
+ );
+ }
+
+ return downloadUrl;
+}
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index 8f6f86e1ffd5d..60f1335f9ba07 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -86,6 +86,7 @@
"dist/nodes/vendors/AlibabaCloud/AlibabaCloud.node.js",
"dist/nodes/vendors/Anthropic/Anthropic.node.js",
"dist/nodes/vendors/GoogleGemini/GoogleGemini.node.js",
+ "dist/nodes/vendors/MiniMax/MiniMax.node.js",
"dist/nodes/vendors/Moonshot/Moonshot.node.js",
"dist/nodes/vendors/Ollama/Ollama.node.js",
"dist/nodes/vendors/OpenAi/OpenAi.node.js",
diff --git a/packages/workflow/src/constants.ts b/packages/workflow/src/constants.ts
index 89ce7ac1a121b..32bf8693e91e0 100644
--- a/packages/workflow/src/constants.ts
+++ b/packages/workflow/src/constants.ts
@@ -119,6 +119,7 @@ export const OLLAMA_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.ollama';
export const GOOGLE_GEMINI_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.googleGemini';
export const ALIBABA_CLOUD_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.alibabaCloud';
export const MOONSHOT_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.moonshot';
+export const MINIMAX_LANGCHAIN_NODE_TYPE = '@n8n/n8n-nodes-langchain.minimax';
export const AI_VENDOR_NODE_TYPES = [
OPENAI_LANGCHAIN_NODE_TYPE,
@@ -127,6 +128,7 @@ export const AI_VENDOR_NODE_TYPES = [
GOOGLE_GEMINI_LANGCHAIN_NODE_TYPE,
ALIBABA_CLOUD_LANGCHAIN_NODE_TYPE,
MOONSHOT_LANGCHAIN_NODE_TYPE,
+ MINIMAX_LANGCHAIN_NODE_TYPE,
];
export const LANGCHAIN_LM_NODE_TYPE_PREFIX = '@n8n/n8n-nodes-langchain.lm';