API Integration
Integrate GLM-4.6V-Flash into your applications using the following code snippets (Python, Node.js, and cURL). All three call the same OpenAI-compatible chat-completions endpoint; replace YOUR_API_KEY with your actual API key before running them.
# Python example: single-turn chat completion through the
# OpenAI-compatible BigModel endpoint.
from openai import OpenAI

# Point the standard OpenAI SDK at the BigModel gateway.
api_client = OpenAI(
    base_url="https://open.bigmodel.cn/api/paas/v4",
    api_key="YOUR_API_KEY",
)

# Issue one user message and wait for the model's reply.
chat_response = api_client.chat.completions.create(
    model="glm-4-6v-flash",
    messages=[{"role": "user", "content": "Hello!"}],
)

# The assistant's text lives on the first (and only) choice.
print(chat_response.choices[0].message.content)
// Node.js example: single-turn chat completion through the
// OpenAI-compatible BigModel endpoint.
import OpenAI from "openai";

// Configure the standard OpenAI SDK against the BigModel gateway.
const bigmodelClient = new OpenAI({
  baseURL: "https://open.bigmodel.cn/api/paas/v4",
  apiKey: "YOUR_API_KEY",
});

// Issue one user message and await the model's reply.
const chatResult = await bigmodelClient.chat.completions.create({
  model: "glm-4-6v-flash",
  messages: [{ role: "user", content: "Hello!" }],
});

// The assistant's text lives on the first (and only) choice.
console.log(chatResult.choices[0].message.content);
# cURL example: POST a chat-completions request directly over HTTP,
# with no SDK. Replace YOUR_API_KEY with your actual API key.
# The Authorization header carries the key as a Bearer token; the
# JSON body names the model and the message list.
curl https://open.bigmodel.cn/api/paas/v4/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{
"model": "glm-4-6v-flash",
"messages": [{"role": "user", "content": "Hello!"}]
}'