
Google Search API in Python: Complete Guide with Code Examples (2026)

February 28, 2026 · 10 min read

Why Use a Google Search API in Python?

Building applications that need real-time Google search data — whether it's an AI agent, SEO tool, price monitor, or research aggregator — requires a reliable SERP API. Raw web scraping is fragile, gets blocked by CAPTCHAs, and breaks when Google changes its HTML.

This guide shows you how to get structured Google search results in Python using Searlo's API. Every example is copy-paste ready.

Prerequisites

```bash
pip install httpx  # or: pip install requests
```

Get a free API key at [dashboard.searlo.tech](https://dashboard.searlo.tech/auth) — 3,000 credits included, no credit card required.
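For anything beyond a quick test, keep the key out of your source code. A minimal sketch reading it from an environment variable (the SEARLO_API_KEY name is just a convention I'm using here, not something the API requires):

```python
import os

# Fails fast with a KeyError if the variable isn't set
API_KEY = os.environ["SEARLO_API_KEY"]
```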

Basic Web Search

```python
import httpx

API_KEY = "your_api_key_here"
BASE_URL = "https://api.searlo.tech/api/v1/search"

def google_search(query: str, num_results: int = 10) -> dict:
    """Search Google and return structured results."""
    response = httpx.get(
        BASE_URL,
        params={"q": query, "num": num_results, "gl": "us"},
        headers={"X-API-Key": API_KEY},
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json()

# Usage
results = google_search("best python frameworks 2026")
for item in results["organic"]:
    print(f"{item['position']}. {item['title']}")
    print(f"   {item['link']}")
    print(f"   {item['snippet']}")
    print()
```
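The gl parameter above controls which country edition of Google you query. A quick sketch comparing the top result across a few markets (gl is the only locale parameter used in this guide; anything beyond it would be an assumption):

```python
# Compare the top organic result across country editions via gl
for country in ["us", "gb", "de"]:
    data = httpx.get(
        BASE_URL,
        params={"q": "best serp api", "num": 3, "gl": country},
        headers={"X-API-Key": API_KEY},
        timeout=10.0,
    ).json()
    top = data["organic"][0]["title"] if data.get("organic") else "no results"
    print(f"{country}: {top}")
```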

Response Structure

The API returns clean JSON with these key fields:

```json
{
  "organic": [
    {
      "position": 1,
      "title": "Result Title",
      "link": "https://example.com",
      "snippet": "Description text...",
      "domain": "example.com"
    }
  ],
  "searchParameters": { "q": "query", "gl": "us", "num": 10 },
  "peopleAlsoAsk": ["..."],
  "relatedSearches": ["..."]
}
```
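If you prefer typed objects downstream, the organic entries map cleanly onto a dataclass. A minimal sketch using only the fields documented above (the class and helper names are my own):

```python
from dataclasses import dataclass

@dataclass
class OrganicResult:
    position: int
    title: str
    link: str
    snippet: str
    domain: str

def parse_organic(payload: dict) -> list[OrganicResult]:
    """Convert raw organic entries into typed records, ignoring extra keys."""
    fields = {"position", "title", "link", "snippet", "domain"}
    return [
        OrganicResult(**{k: v for k, v in item.items() if k in fields})
        for item in payload.get("organic", [])
    ]
```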

Async Search with httpx

For high-throughput applications, use async:

```python
import asyncio
import httpx

API_KEY = "your_api_key_here"
BASE_URL = "https://api.searlo.tech/api/v1/search"

async def async_search(client: httpx.AsyncClient, query: str) -> dict:
    response = await client.get(
        BASE_URL,
        params={"q": query, "num": 10},
        headers={"X-API-Key": API_KEY},
    )
    response.raise_for_status()  # surface HTTP errors instead of parsing an error body as results
    return response.json()

async def bulk_search(queries: list[str]) -> list[dict]:
    async with httpx.AsyncClient(timeout=15.0) as client:
        tasks = [async_search(client, q) for q in queries]
        return await asyncio.gather(*tasks)

# Search multiple queries concurrently
queries = [f"best {topic} 2026" for topic in ["laptop", "phone", "tablet"]]
results = asyncio.run(bulk_search(queries))
for r in results:
    q = r["searchParameters"]["q"]
    n = len(r.get("organic", []))
    print(f"Query: {q} -- {n} results")
```

Image Search

```python
def image_search(query: str, num: int = 10) -> list:
    response = httpx.get(
        BASE_URL,
        params={"q": query, "type": "images", "num": num},
        headers={"X-API-Key": API_KEY},
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json().get("images", [])

images = image_search("python programming")
for img in images:
    print(f"{img['title']} -- {img['imageUrl']}")
```

News Search

```python
def news_search(query: str) -> list:
    response = httpx.get(
        BASE_URL,
        params={"q": query, "type": "news", "num": 10},
        headers={"X-API-Key": API_KEY},
        timeout=10.0,
    )
    response.raise_for_status()
    return response.json().get("news", [])

news = news_search("artificial intelligence")
for article in news:
    print(f"{article['title']} -- {article['source']}")
```

Pagination

```python
def search_all_pages(query: str, max_pages: int = 5) -> list:
    """Fetch multiple pages of results."""
    all_results = []
    for page in range(1, max_pages + 1):
        response = httpx.get(
            BASE_URL,
            params={"q": query, "num": 10, "page": page},
            headers={"X-API-Key": API_KEY},
            timeout=10.0,
        )
        response.raise_for_status()
        results = response.json().get("organic", [])
        if not results:
            break  # no more pages
        all_results.extend(results)
    return all_results

# Get top 50 results
top_50 = search_all_pages("python web scraping", max_pages=5)
print(f"Got {len(top_50)} total results")
```

Error Handling

```python
import httpx
from httpx import HTTPStatusError, TimeoutException

def safe_search(query: str, retries: int = 3) -> dict | None:
    try:
        response = httpx.get(
            BASE_URL,
            params={"q": query, "num": 10},
            headers={"X-API-Key": API_KEY},
            timeout=10.0,
        )
        response.raise_for_status()
        return response.json()
    except TimeoutException:
        if retries > 0:
            print("Request timed out -- retrying...")
            return safe_search(query, retries - 1)  # bounded retry
        print("Request timed out -- giving up")
    except HTTPStatusError as e:
        if e.response.status_code == 429:
            print("Rate limited -- wait and retry")
        elif e.response.status_code == 401:
            print("Invalid API key")
        else:
            print(f"HTTP error: {e.response.status_code}")
    return None
```
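In production you usually want to actually wait on a 429 rather than just log it. A sketch with exponential backoff (the delays are arbitrary; whether Searlo sends a Retry-After header isn't documented here, so this ignores it):

```python
import time

def search_with_backoff(query: str, max_attempts: int = 4) -> dict | None:
    """Retry on rate limits and timeouts with exponentially growing delays."""
    for attempt in range(max_attempts):
        try:
            response = httpx.get(
                BASE_URL,
                params={"q": query, "num": 10},
                headers={"X-API-Key": API_KEY},
                timeout=10.0,
            )
            if response.status_code == 429:
                time.sleep(2 ** attempt)  # back off: 1s, 2s, 4s, ...
                continue
            response.raise_for_status()
            return response.json()
        except httpx.TimeoutException:
            time.sleep(2 ** attempt)
    return None
```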

TOON Format for AI/LLM Pipelines

If you're feeding search results into an LLM, use TOON format, which cuts token count by roughly 60% compared to the JSON response:

```python
def search_for_llm(query: str) -> str:
    """Get search results in TOON format (optimized for LLMs)."""
    response = httpx.get(
        BASE_URL,
        params={"q": query, "num": 5, "format": "toon"},
        headers={"X-API-Key": API_KEY},
        timeout=10.0,
    )
    response.raise_for_status()
    return response.text  # TOON is a text format, not JSON

# Feed directly to your LLM
toon_results = search_for_llm("latest AI research papers")
print(toon_results)
```
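In practice you'd splice that text into a prompt. A minimal sketch that only builds the prompt string, so it works with whichever LLM client you use (the wording is illustrative):

```python
def build_grounded_prompt(question: str) -> str:
    """Assemble a prompt that grounds the model in fresh search results."""
    context = search_for_llm(question)
    return (
        "Answer the question using only the search results below.\n\n"
        f"Search results (TOON format):\n{context}\n\n"
        f"Question: {question}"
    )

prompt = build_grounded_prompt("latest AI research papers")
# Pass `prompt` to your LLM client of choice
```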

Building a Simple Rank Tracker

Here's a practical example -- track your website's Google position for target keywords:

```python
def check_ranking(domain: str, keyword: str) -> int | None:
    """Check where a domain ranks for a keyword. Returns position or None."""
    results = google_search(keyword, num_results=100)
    for item in results.get("organic", []):
        if domain in item.get("link", ""):
            return item["position"]
    return None  # Not in top 100

# Track rankings
keywords = ["serp api", "google search api", "cheapest serp api"]
for kw in keywords:
    pos = check_ranking("searlo.tech", kw)
    status = f"Position {pos}" if pos else "Not ranked"
    print(f"{kw}: {status}")
```

Next Steps

[API Documentation](/docs) -- full parameter reference

[SERP API for AI](/serp-api-for-ai) -- TOON format, MCP, and streaming details

[Build a Rank Tracker](/tutorials/build-rank-tracker) -- complete tutorial

[LangChain Integration](/blog/add-web-search-to-langchain-agents) -- add search to AI agents

[Best SERP API 2026](/blog/best-serp-api-2026) -- full provider comparison


Ready to try Searlo?

Get 3,000 free credits. No credit card required.
