# Library

The Airia Python SDK provides access to the Library API, allowing you to retrieve the models available in the Airia marketplace. This is particularly useful for finding model IDs to use in other API calls.
## Basic Usage

### Synchronous Client

```python
from airia import AiriaClient

# Initialize the client
client = AiriaClient(api_key="your_api_key")

# Get all models
response = client.library.get_models()

print(f"Found {response.total_count} models")
for model in response.models:
    print(f"Model: {model.name} (ID: {model.id})")
    print(f"Provider: {model.provider}")
    print(f"Category: {model.category}")
    print("---")
```
### Asynchronous Client

```python
import asyncio

from airia import AiriaAsyncClient

async def main():
    # Initialize the async client
    client = AiriaAsyncClient(api_key="your_api_key")

    # Get all models
    response = await client.library.get_models()

    print(f"Found {response.total_count} models")
    for model in response.models:
        print(f"Model: {model.name} (ID: {model.id})")
        print(f"Provider: {model.provider}")
        print(f"Category: {model.category}")
        print("---")

asyncio.run(main())
```
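Because `get_models` is awaitable on the async client, independent library queries can be issued concurrently. The sketch below is an illustrative pattern rather than a separate SDK feature: it reuses the `search` and `providers` parameters documented later on this page and fans the calls out with `asyncio.gather`.

```python
import asyncio

from airia import AiriaAsyncClient

async def main():
    client = AiriaAsyncClient(api_key="your_api_key")

    # Issue two independent library queries concurrently instead of sequentially.
    gpt_models, openai_models = await asyncio.gather(
        client.library.get_models(search="gpt"),
        client.library.get_models(providers="OpenAI"),
    )

    print(f"Search 'gpt' returned {len(gpt_models.models)} models")
    print(f"Provider 'OpenAI' returned {len(openai_models.models)} models")

asyncio.run(main())
```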
## Filtering and Searching

### Search by Name or Description

```python
# Search for GPT models
response = client.library.get_models(search="gpt")

print(f"Found {len(response.models)} GPT models")
for model in response.models:
    print(f"{model.name} - {model.description}")
```
### Filter by Provider

```python
# Get only OpenAI models
response = client.library.get_models(providers="OpenAI")

print(f"Found {len(response.models)} OpenAI models")
for model in response.models:
    print(f"{model.name} - {model.provider}")
```
### Filter by Category

```python
# Get multimodal models
response = client.library.get_models(categories="Multimodal")

print(f"Found {len(response.models)} multimodal models")
for model in response.models:
    print(f"{model.name} - {model.category}")
```
### Filter by Capabilities

```python
# Get models with tool support and streaming
response = client.library.get_models(
    has_tool_support=True,
    has_stream_support=True
)

print(f"Found {len(response.models)} models with tool and stream support")
for model in response.models:
    print(f"{model.name} - Tools: {model.has_tool_support}, Stream: {model.has_stream_support}")
```
### Filter by License and Usage

```python
# Get open source models approved for commercial use
response = client.library.get_models(
    is_open_source=True,
    commercial_use=True
)

print(f"Found {len(response.models)} open source commercial models")
for model in response.models:
    print(f"{model.name} - License: {model.license_type}")
```
## Pagination and Sorting

### Pagination

```python
# Get the first page with 10 models per page
response = client.library.get_models(
    page_number=1,
    page_size=10
)

print(f"Showing {len(response.models)} models out of {response.total_count} total")
```
### Sorting

```python
# Sort by rating (highest first)
response = client.library.get_models(
    sort_by="rating",
    sort_direction="DESC",
    page_size=5
)

print("Top 5 highest rated models:")
for model in response.models:
    print(f"{model.name} - Rating: {model.rating}/5")

# Sort by name alphabetically
response = client.library.get_models(
    sort_by="name",
    sort_direction="ASC",
    page_size=10
)

print("Models sorted alphabetically:")
for model in response.models:
    print(f"{model.name}")
```
## Advanced Filtering

### Combine Multiple Filters

```python
# Find OpenAI GPT models with tool support, sorted by rating
response = client.library.get_models(
    search="gpt",
    providers="OpenAI",
    has_tool_support=True,
    sort_by="rating",
    sort_direction="DESC",
    page_size=5
)

print("Top OpenAI GPT models with tool support:")
for model in response.models:
    print(f"{model.name}")
    print(f"  Rating: {model.rating}/5")
    print(f"  Price: ${model.input_token_price}/1K input, ${model.output_token_price}/1K output")
    print(f"  Context Window: {model.context_window:,} tokens")
    print("---")
```
### Filter by Author and Industry

```python
# Get models from specific authors for specific industries
response = client.library.get_models(
    authors="Anthropic,OpenAI",
    industries="Technology,Healthcare"
)

print(f"Found {len(response.models)} models from the specified authors for the specified industries")
```
### Filter by Certifications

```python
# Get models with HIPAA certification
response = client.library.get_models(
    certifications="HIPAA",
    commercial_use=True
)

print(f"Found {len(response.models)} HIPAA-compliant commercial models")
for model in response.models:
    if model.certifications:
        print(f"{model.name} - Certifications: {', '.join(model.certifications)}")
```
## Model Information

### Detailed Model Information

```python
response = client.library.get_models(search="gpt-4", page_size=1)

if response.models:
    model = response.models[0]

    print(f"Model Name: {model.name}")
    print(f"Display Name: {model.display_name}")
    print(f"Description: {model.description}")
    print(f"Provider: {model.provider}")
    print(f"Author: {model.author}")
    print(f"Category: {model.category}")
    print(f"Type: {model.type}")
    print(f"Languages: {', '.join(model.languages)}")
    print(f"License: {model.license_type}")
    print(f"Price: {model.price}")
    print(f"Input Token Price: ${model.input_token_price}")
    print(f"Output Token Price: ${model.output_token_price}")
    print(f"Rating: {model.rating}/5")
    print(f"Downloads: {model.downloads:,}")
    print(f"Context Window: {model.context_window:,} tokens")
    print(f"Max Output Tokens: {model.max_output_tokens:,}")
    print(f"Available: {model.available}")
    print(f"Open Source: {model.is_open_source}")
    print(f"Chat Specialized: {model.chat_specialized}")
    print(f"Commercial Use: {model.commercial_use}")
    print(f"Tool Support: {model.has_tool_support}")
    print(f"Stream Support: {model.has_stream_support}")

    if model.tags:
        print(f"Tags: {', '.join(model.tags)}")
    if model.certifications:
        print(f"Certifications: {', '.join(model.certifications)}")
    if model.released_at:
        print(f"Released: {model.released_at}")
    if model.deprecated_at:
        print(f"Deprecated: {model.deprecated_at}")
    if model.license_link:
        print(f"License Link: {model.license_link}")
    if model.url:
        print(f"URL: {model.url}")
```