Prerequisites
Before you start, you need:
- Python 3.9+ installed on your machine
- A LinkrAPI account with at least one active hold — sign up at linkrapi.com
- Your LinkrAPI API key (format: `lkr_xxxxxxxxxxxx`) from the Holds page
- An active Midjourney subscription connected to your hold account
- The `requests` library: `pip install requests`
- Optional for async: `pip install httpx`
No other dependencies are required. LinkrAPI uses a standard REST API — no SDKs needed.
Getting Your API Key
After signing up and creating a hold account on LinkrAPI:
- Go to the Holds page in your dashboard
- Click on your hold account to expand it
- Copy the API key displayed (starts with `lkr_`)
Store this key as an environment variable — never hardcode it in source files:
```bash
export LINKRAPI_KEY="lkr_your_api_key_here"
```
In your Python code:
python
import os

# Read the key from the environment so credentials never live in source files.
API_KEY = os.environ["LINKRAPI_KEY"]
# All v1 REST endpoints are rooted at this URL.
BASE_URL = "https://linkrapi.com/api/v1"
# Headers sent with every request: Bearer-token auth plus a JSON content type.
HEADERS = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}
Start using the Midjourney API today
Get your API key on LinkrAPI — connect your Midjourney subscription in minutes.
Get API Key →
Basic Image Generation with requests
The simplest approach: submit a prompt, poll for the result, return the image URL.
python
import time
import requests
import os

# API key comes from the environment; never hardcode it.
API_KEY = os.environ["LINKRAPI_KEY"]
# Base URL for all LinkrAPI v1 REST endpoints.
BASE_URL = "https://linkrapi.com/api/v1"
# Shared headers: Bearer auth plus JSON body type, reused by every call below.
HEADERS = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}
def generate_image(prompt: str, timeout: int = 180) -> str:
    """Submit a prompt and return the image URL when ready.

    Args:
        prompt: The Midjourney prompt, including any --flags.
        timeout: Maximum seconds to wait for the generation to finish.

    Returns:
        The URL of the finished image. (Annotation narrowed from
        ``str | None``: this function never returns None — it raises.)

    Raises:
        requests.HTTPError: If the submit or fetch request fails.
        RuntimeError: If the generation itself reports failure.
        TimeoutError: If the task does not complete within `timeout` seconds.
    """
    # Submit generation request. A per-request timeout prevents a hung
    # connection from blocking forever.
    response = requests.post(
        f"{BASE_URL}/imagine",
        headers=HEADERS,
        json={"prompt": prompt},
        timeout=30,
    )
    response.raise_for_status()
    task_id = response.json()["task_id"]
    print(f"Task submitted: {task_id}")

    # Poll for result. monotonic() is immune to wall-clock adjustments,
    # unlike time.time(), so the deadline cannot jump backwards/forwards.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        fetch = requests.get(
            f"{BASE_URL}/fetch/{task_id}",
            headers=HEADERS,
            timeout=30,
        )
        fetch.raise_for_status()
        data = fetch.json()
        status = data.get("status")
        if status == "completed":
            return data["image_url"]
        elif status == "failed":
            raise RuntimeError(f"Generation failed: {data.get('error', 'unknown')}")
        print(f"Status: {status} — waiting...")
        time.sleep(5)
    raise TimeoutError(f"Generation did not complete within {timeout}s")
if __name__ == "__main__":
    # Demo: generate one image and report its URL.
    demo_prompt = "a futuristic city at sunset, cinematic lighting, --ar 16:9 --q 2"
    image_url = generate_image(demo_prompt)
    print(f"Image ready: {image_url}")
This is suitable for scripts and background jobs. For web applications, polling inside a request handler is not recommended — use webhooks instead.
Saving Images Locally
Once you have the image URL, download and save it:
python
def save_image(image_url: str, output_path: str, timeout: int = 60) -> None:
    """Download an image from a URL and save it locally.

    Args:
        image_url: Direct URL of the image to download.
        output_path: Local file path to write the image bytes to.
        timeout: Seconds to wait for the HTTP response (new parameter,
            backward-compatible default; the original call had no timeout
            and could hang indefinitely).
    """
    # Stream in chunks so large images never sit fully in memory; the
    # context manager guarantees the connection is released even on error.
    with requests.get(image_url, stream=True, timeout=timeout) as response:
        response.raise_for_status()
        with open(output_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    print(f"Saved to {output_path}")
# Usage
# Generate an image, then persist it next to the script as output.png.
image_url = generate_image("astronaut on Mars, dramatic lighting --ar 1:1")
save_image(image_url, "output.png")
Async Generation with httpx
For applications that need to generate multiple images concurrently, use httpx with Python's asyncio:
python
import asyncio
import httpx
import os
import time

# Same configuration as the synchronous example: key from the environment,
# versioned base URL, and Bearer-auth JSON headers shared by all requests.
API_KEY = os.environ["LINKRAPI_KEY"]
BASE_URL = "https://linkrapi.com/api/v1"
HEADERS = {
    "Authorization": f"Bearer {API_KEY}",
    "Content-Type": "application/json",
}
async def submit_prompt(client: httpx.AsyncClient, prompt: str) -> str:
    """Submit a prompt and return the task_id."""
    payload = {"prompt": prompt}
    resp = await client.post(
        f"{BASE_URL}/imagine",
        headers=HEADERS,
        json=payload,
    )
    resp.raise_for_status()
    body = resp.json()
    return body["task_id"]
async def poll_task(client: httpx.AsyncClient, task_id: str, timeout: int = 180) -> str:
    """Poll until a task is complete, return the image URL.

    Args:
        client: Shared httpx client used for the fetch requests.
        task_id: Identifier returned by the /imagine endpoint.
        timeout: Maximum seconds to keep polling before giving up.

    Returns:
        The URL of the finished image.

    Raises:
        RuntimeError: If the task reports a "failed" status.
        TimeoutError: If the deadline passes without completion.
    """
    # get_running_loop() is the supported call inside a coroutine;
    # get_event_loop() is deprecated there since Python 3.10. Hoisting
    # the lookup also avoids re-resolving the loop on every iteration.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout
    while loop.time() < deadline:
        fetch = await client.get(
            f"{BASE_URL}/fetch/{task_id}",
            headers=HEADERS,
        )
        fetch.raise_for_status()
        data = fetch.json()
        if data["status"] == "completed":
            return data["image_url"]
        elif data["status"] == "failed":
            raise RuntimeError(f"Failed: {data.get('error')}")
        await asyncio.sleep(5)
    raise TimeoutError(f"Task {task_id} timed out")
async def generate_batch(prompts: list[str]) -> list[str]:
    """Generate multiple images concurrently."""
    async with httpx.AsyncClient(timeout=30.0) as client:
        # Fan out all submissions at once, then fan out the polling.
        submissions = [submit_prompt(client, prompt) for prompt in prompts]
        task_ids = await asyncio.gather(*submissions)
        print(f"Submitted {len(task_ids)} tasks: {task_ids}")

        polls = [poll_task(client, tid) for tid in task_ids]
        image_urls = await asyncio.gather(*polls)
        return list(image_urls)
if __name__ == "__main__":
    # Demo: three concurrent generations via one event loop.
    demo_prompts = [
        "a serene Japanese garden at dawn --ar 3:2",
        "abstract digital art, geometric shapes, neon colors --ar 1:1",
        "cozy coffee shop interior, warm lighting --ar 4:3",
    ]
    results = asyncio.run(generate_batch(demo_prompts))
    for index, image_url in enumerate(results, 1):
        print(f"Image {index}: {image_url}")
Note: With a single hold account, you are limited to 3 concurrent fast generations. Submitting more than 3 simultaneously will queue them — they will not fail, but concurrent benefit is capped at 3.
Webhook-Based Generation (Production Pattern)
For production web applications, polling is inefficient. Use webhooks: LinkrAPI calls your server when the image is ready.
Set Up a Webhook Endpoint
Here is a minimal FastAPI webhook receiver:
python
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import asyncio
app = FastAPI()

# Coroutines awaiting a webhook callback, keyed by task_id; the Event is
# set by the webhook handler once the payload for that task arrives.
pending_tasks: dict[str, asyncio.Event] = {}
# Raw webhook payloads, keyed by task_id, consumed by the waiting coroutine.
task_results: dict[str, dict] = {}
@app.post("/webhook/midjourney")
async def midjourney_webhook(request: Request):
    """Receive a LinkrAPI callback and wake the coroutine waiting on it."""
    payload = await request.json()
    task_id = payload.get("task_id")
    waiter = pending_tasks.get(task_id) if task_id else None
    if waiter is not None:
        task_results[task_id] = payload
        waiter.set()
    return JSONResponse({"ok": True})
Submit with Webhook URL
python
async def generate_with_webhook(
    client: httpx.AsyncClient,
    prompt: str,
    webhook_url: str,
) -> str:
    """Submit a prompt with a webhook callback and await the result.

    Args:
        client: Shared httpx client.
        prompt: The Midjourney prompt to submit.
        webhook_url: Publicly reachable URL of the webhook receiver.

    Returns:
        The finished image URL delivered via the webhook payload.

    Raises:
        TimeoutError: If no webhook arrives within 180 seconds
            (``asyncio.wait_for``; ``asyncio.TimeoutError`` on < 3.11).
    """
    response = await client.post(
        f"{BASE_URL}/imagine",
        headers=HEADERS,
        json={
            "prompt": prompt,
            "webhook_url": webhook_url,
        },
    )
    response.raise_for_status()
    task_id = response.json()["task_id"]

    # Create an event to await webhook delivery
    event = asyncio.Event()
    pending_tasks[task_id] = event
    try:
        await asyncio.wait_for(event.wait(), timeout=180)
        result = task_results.pop(task_id)
    finally:
        # Always deregister: without this, a timed-out task leaked its Event
        # and any later webhook kept feeding a stale task_results entry.
        pending_tasks.pop(task_id, None)
        task_results.pop(task_id, None)
    return result["image_url"]
This pattern allows your web server to handle other requests while waiting for image generation — the webhook fires when Midjourney finishes.
Error Handling Best Practices
Production integrations need robust error handling:
python
import time
from requests.exceptions import RequestException, HTTPError
def generate_with_retry(prompt: str, max_retries: int = 3) -> str:
    """Generate an image with exponential backoff retry.

    Args:
        prompt: The Midjourney prompt to submit.
        max_retries: Maximum number of attempts before giving up.

    Returns:
        The finished image URL.

    Raises:
        ValueError: If the prompt is rejected (HTTP 400) — never retried.
        requests.HTTPError: For unexpected client errors (401, 403, ...).
        RuntimeError: If all attempts are exhausted; chained (``from``) to
            the last underlying error so the cause is not lost.
    """
    last_error: Exception | None = None
    for attempt in range(max_retries):
        try:
            return generate_image(prompt)
        except HTTPError as e:
            status_code = e.response.status_code
            if status_code == 400:
                # Content violation — do not retry
                raise ValueError(f"Prompt rejected: {e.response.json().get('detail')}")
            if status_code != 429 and status_code < 500:
                # Unexpected client error (auth, not-found, ...) — re-raise.
                raise
            last_error = e
            if attempt == max_retries - 1:
                break  # No retry left; don't sleep pointlessly before failing.
            if status_code == 429:
                # Rate limited — back off
                wait = 2 ** attempt * 10
                print(f"Rate limited. Waiting {wait}s before retry {attempt + 1}")
            else:
                # Server error — retry
                wait = 2 ** attempt * 5
                print(f"Server error. Retrying in {wait}s (attempt {attempt + 1})")
            time.sleep(wait)
        except (RequestException, TimeoutError) as e:
            last_error = e
            if attempt == max_retries - 1:
                break
            wait = 2 ** attempt * 5
            print(f"Request error: {e}. Retrying in {wait}s")
            time.sleep(wait)
    # Chain the final failure to the last underlying exception.
    raise RuntimeError(f"Failed after {max_retries} attempts") from last_error
Working with Upscales and Actions
Midjourney generates a grid of 4 images initially. You can then upscale or vary individual images using the action endpoint:
python
def upscale_image(task_id: str, index: int) -> str:
    """Upscale one of the 4 generated images (index 1-4).

    Args:
        task_id: Task identifier of the original 4-image grid.
        index: Which grid image to upscale, 1 through 4.

    Returns:
        The URL of the upscaled image.

    Raises:
        ValueError: If index is outside 1-4 — fail fast before the API call
            rather than sending an invalid "upsample0"/"upsample5" action.
    """
    if index not in (1, 2, 3, 4):
        raise ValueError(f"index must be 1-4, got {index}")
    response = requests.post(
        f"{BASE_URL}/action",
        headers=HEADERS,
        json={
            "task_id": task_id,
            "action": f"upsample{index}",
        },
        timeout=30,
    )
    response.raise_for_status()
    action_task_id = response.json()["task_id"]
    return poll_for_result(action_task_id)  # Your polling function
Available actions: upsample1 through upsample4 (upscale), variation1 through variation4 (vary), and reroll (regenerate with same prompt).
Summary and Next Steps
You now have everything you need to integrate the Midjourney API into Python projects:
- Simple scripts: use `requests` with polling
- Concurrent generation: use `httpx` with asyncio
- Web applications: use webhooks for non-blocking generation
- Production systems: add retry logic, queue management, and proper logging
For no-code automation, see our guide on connecting Midjourney to Make, n8n, and Zapier. For a comparison of LinkrAPI with competing services, see best Midjourney API services in 2026.