Gemini

Gemini(model_name: str = 'gemini/gemini-2.0-flash', skill_sources: str | None = None, system_template: str | None = None, execution_output_template: str | None = None, execution_error_template: str | None = None, api_key: str | None = None, **kwargs)

Bases: LiteLLM

Code action model class for Gemini 2 models.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `model_name` | `str` | The LiteLLM-specific name of the model. | `'gemini/gemini-2.0-flash'` |
| `skill_sources` | `str \| None` | Source code of skill modules to be included in `system_template`. | `None` |
| `system_template` | `str \| None` | Prompt template for the system instruction that guides the model to generate code actions. Must define a `{python_modules}` placeholder for the `skill_sources`. | `None` |
| `execution_output_template` | `str \| None` | A template for formatting successful code execution output. Must define an `{execution_feedback}` placeholder. | `None` |
| `execution_error_template` | `str \| None` | A template for formatting code execution errors. Must define an `{execution_feedback}` placeholder. | `None` |
| `api_key` | `str \| None` | Provider-specific API key. If not provided, it is read from the `GEMINI_API_KEY` or `GOOGLE_API_KEY` environment variable. | `None` |
| `**kwargs` | | Default completion kwargs used for request and feedback calls. These are overridden by request- and feedback-specific kwargs. | `{}` |
Source code in freeact/model/gemini/model/chat.py
def __init__(
    self,
    model_name: str = "gemini/gemini-2.0-flash",
    skill_sources: str | None = None,
    system_template: str | None = None,
    execution_output_template: str | None = None,
    execution_error_template: str | None = None,
    api_key: str | None = None,
    **kwargs,
):
    if system_template:
        system_instruction = system_template.format(
            python_modules=skill_sources or "",
        )
    else:
        system_instruction = ""

    if "thinking" in model_name.lower():
        # ------------------------------------------------------
        #  EXPERIMENTAL
        # ------------------------------------------------------
        system_instruction = system_instruction or thinking.SYSTEM_TEMPLATE.format(
            python_modules=skill_sources or "",
            python_packages=thinking.EXAMPLE_PYTHON_PACKAGES,
            rest_apis=thinking.EXAMPLE_REST_APIS,
        )
        execution_error_template = execution_error_template or thinking.EXECUTION_ERROR_TEMPLATE
        execution_output_template = execution_output_template or thinking.EXECUTION_OUTPUT_TEMPLATE
    else:
        system_instruction = system_instruction or default.SYSTEM_TEMPLATE.format(
            python_modules=skill_sources or "",
        )
        execution_error_template = execution_error_template or default.EXECUTION_ERROR_TEMPLATE
        execution_output_template = execution_output_template or default.EXECUTION_OUTPUT_TEMPLATE

    super().__init__(
        model_name=model_name,
        execution_output_template=execution_output_template,
        execution_error_template=execution_error_template,
        system_instruction=system_instruction,
        api_key=api_key or os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY"),
        **kwargs,
    )
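
A minimal usage sketch. The import path is assumed from the source location above (the package may also re-export `Gemini` at a higher level), and the custom template is hypothetical; it only illustrates the required `{python_modules}` placeholder:

```python
from freeact.model.gemini.model.chat import Gemini  # import path assumed

# Hypothetical template: it must contain the {python_modules} placeholder,
# which is filled with `skill_sources` (or "" if none are given).
CUSTOM_SYSTEM_TEMPLATE = """\
You are a coding agent that responds with Python code actions.

Available skill modules:

{python_modules}
"""

model = Gemini(
    model_name="gemini/gemini-2.0-flash",
    skill_sources="# skill module source code, if any",
    system_template=CUSTOM_SYSTEM_TEMPLATE,
    # api_key omitted: falls back to GEMINI_API_KEY or GOOGLE_API_KEY
    temperature=0.0,  # forwarded via **kwargs as a default completion kwarg
)
```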

extract_code

extract_code(response: LiteLLMResponse)

Extracts all Python code blocks from response.text and joins them by empty lines.

Source code in freeact/model/gemini/model/chat.py
def extract_code(self, response: LiteLLMResponse):
    """Extracts all Python code blocks from `response.text` and joins them by empty lines."""
    pattern = r"```(?:python|tool_code|tool)\s*(.*?)(?:\s*```|\s*$)"
    blocks = code_blocks(response.text, pattern=pattern)
    return "\n\n".join(blocks) if blocks else None

GeminiLive async

GeminiLive(model_name: str = 'gemini-2.0-flash', skill_sources: str | None = None, temperature: float = 0.0, max_tokens: int = 4096, **kwargs)

Context manager for a CodeActModel implementation based on Google's Gemini 2 live API.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `model_name` | `str` | The specific Gemini 2 model to use. | `'gemini-2.0-flash'` |
| `skill_sources` | `str \| None` | Skill module sources to include in the system instruction. | `None` |
| `temperature` | `float` | Controls randomness in the model's output (0.0 = deterministic). | `0.0` |
| `max_tokens` | `int` | Maximum number of tokens in the model's response. | `4096` |
| `**kwargs` | | Additional keyword arguments to pass to the Google Gen AI client. | `{}` |
Example
async with GeminiLive(model_name="gemini-2.0-flash", skill_sources=skill_sources) as model:
    # use model with active session to Gemini 2 live API
    agent = CodeActAgent(model=model, ...)
Source code in freeact/model/gemini/model/live.py
@asynccontextmanager
async def GeminiLive(
    model_name: str = "gemini-2.0-flash",
    skill_sources: str | None = None,
    temperature: float = 0.0,
    max_tokens: int = 4096,
    **kwargs,
):
    """
    Context manager for a `CodeActModel` implementation based on Google's Gemini 2 live API.

    Args:
        model_name: The specific Gemini 2 model to use
        skill_sources: Skill module sources to include in the system instruction.
        temperature: Controls randomness in the model's output (0.0 = deterministic)
        max_tokens: Maximum number of tokens in the model's response
        **kwargs: Additional keyword arguments to pass to the Google Gen AI client.

    Example:
        ```python
        async with GeminiLive(model_name="gemini-2.0-flash", skill_sources=skill_sources) as model:
            # use model with active session to Gemini 2 live API
            agent = CodeActAgent(model=model, ...)
        ```
    """

    client = genai.Client(http_options={"api_version": "v1alpha"}, **kwargs)
    config = {
        "tools": [],
        "generation_config": {
            "temperature": temperature,
            "max_output_tokens": max_tokens,
            "response_modalities": ["TEXT"],
            "system_instruction": SYSTEM_TEMPLATE.format(
                python_modules=skill_sources or "",
            ),
        },
    }

    async with client.aio.live.connect(model=model_name, config=config) as session:
        yield _GeminiLive(session)
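
For completeness, a runnable variant of the example above, with the import path assumed from the source location and the API key expected in the environment (the `google-genai` client reads `GOOGLE_API_KEY` by default; an `api_key` can alternatively be passed through `**kwargs` to the client):

```python
import asyncio

from freeact.model.gemini.model.live import GeminiLive  # import path assumed


async def main():
    # The live session stays open for the lifetime of the `async with`
    # block, so all agent turns reuse a single connection. Construct the
    # agent here, e.g. agent = CodeActAgent(model=model, ...) as above.
    async with GeminiLive(model_name="gemini-2.0-flash", max_tokens=4096) as model:
        print(type(model).__name__)


asyncio.run(main())
```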