code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def test_load_and_run_multi_agent_a2a_sync(agent_framework: AgentFramework) -> None:
"""Tests that an agent contacts another using A2A using the sync adapter tool.
Note that there is an issue when using Google ADK: https://github.com/google/adk-python/pull/566
"""
if agent_framework in [
# async a2a is not supported
AgentFramework.SMOLAGENTS,
# spans are not built correctly
AgentFramework.LLAMA_INDEX,
# AgentFramework.GOOGLE,
]:
pytest.skip(
"https://github.com/mozilla-ai/any-agent/issues/357 tracks fixing so these tests can be re-enabled"
)
kwargs = {}
kwargs["model_id"] = "gpt-4.1-nano"
agent_model = kwargs["model_id"]
env_check = validate_environment(kwargs["model_id"])
if not env_check["keys_in_environment"]:
pytest.skip(f"{env_check['missing_keys']} needed for {agent_framework}")
server_process = None
tool_agent_endpoint = "tool_agent_sync"
server_queue = Queue()
try:
# Start the server in a separate process
server_process = Process(
target=_run_server,
args=(
agent_framework.value,
0,
tool_agent_endpoint,
agent_model,
server_queue,
),
)
server_process.start()
test_port = server_queue.get()
server_url = f"http://localhost:{test_port}/{tool_agent_endpoint}"
wait_for_server(server_url)
logger.info(
"Setting up sync agent",
extra={"endpoint": f"http://localhost:{test_port}/{tool_agent_endpoint}"},
)
# Create main agent using sync methods
main_agent_cfg = AgentConfig(
instructions="Use the available tools to obtain additional information to answer the query.",
description="The orchestrator that can use other agents via tools using the A2A protocol (sync version).",
tools=[
a2a_tool(
f"http://localhost:{test_port}/{tool_agent_endpoint}",
http_kwargs={"timeout": 10.0},
)
],
**kwargs, # type: ignore[arg-type]
)
main_agent = AnyAgent.create(
agent_framework=agent_framework,
agent_config=main_agent_cfg,
)
agent_trace = main_agent.run(DATE_PROMPT)
_assert_valid_agent_trace(agent_trace)
_assert_contains_current_date_info(agent_trace.final_output)
_assert_has_date_agent_tool_call(agent_trace)
finally:
if server_process and server_process.is_alive():
# Send SIGTERM for graceful shutdown
server_process.terminate()
server_process.join(timeout=10)
if server_process.is_alive():
# Force kill if graceful shutdown failed
server_process.kill()
server_process.join()
|
Tests that an agent contacts another agent over A2A using the sync adapter tool.
Note that there is an issue when using Google ADK: https://github.com/google/adk-python/pull/566
|
test_load_and_run_multi_agent_a2a_sync
|
python
|
mozilla-ai/any-agent
|
tests/integration/test_load_and_run_multi_agent_a2a_tool.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/integration/test_load_and_run_multi_agent_a2a_tool.py
|
Apache-2.0
|
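The helper `_run_server` used in the entry above is not shown here. As a rough, hypothetical sketch of the pattern it appears to rely on (passing port 0 so the OS assigns a free port, which the child process then reports back through the queue so the parent can build the server URL):

# Hypothetical sketch only; the real _run_server starts the agent's A2A server.
import socket
from multiprocessing import Process, Queue

def _run_server_sketch(port: int, queue: Queue) -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", port))    # port=0 -> OS picks a free port
    queue.put(sock.getsockname()[1])  # report the actual port to the parent
    sock.listen()
    # ... hand the socket (or the port) over to the actual server here ...

if __name__ == "__main__":
    q: Queue = Queue()
    p = Process(target=_run_server_sketch, args=(0, q))
    p.start()
    print("server port:", q.get())
    p.terminate()
    p.join()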
async def test_agent_serving_and_communication(test_port):
"""This test can be refactored to remove the need for multiproc, once we have support for control of the uvicorn server."""
# Start the agent in a subprocess
proc = multiprocessing.Process(target=run_agent, args=(test_port,), daemon=True)
proc.start()
server_url = f"http://localhost:{test_port}"
await wait_for_server_async(server_url)
try:
async with httpx.AsyncClient() as httpx_client:
client = await A2AClient.get_client_from_agent_card_url(
httpx_client, server_url
)
send_message_payload = {
"message": {
"role": "user",
"parts": [{"kind": "text", "text": "how much is 10 USD in EUR?"}],
"messageId": str(uuid4()),
},
}
request = SendMessageRequest(
id=str(uuid4()), params=MessageSendParams(**send_message_payload)
)
response = await client.send_message(request)
assert response is not None
finally:
proc.kill()
proc.join()
|
This test can be refactored to remove the need for multiproc, once we have support for control of the uvicorn server.
|
test_agent_serving_and_communication
|
python
|
mozilla-ai/any-agent
|
tests/integration/test_serve_agent.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/integration/test_serve_agent.py
|
Apache-2.0
|
def test_evaluate_runs_all_evaluators(
evaluation_case: EvaluationCase, agent_trace: AgentTrace
) -> None:
"""This unit test checks that all evaluators are called when evaluating a trace."""
#### Set up the mocks for the evaluators so that we don't actually call LLMs.
mock_checkpoint_evaluate = MagicMock()
mock_qa_evaluate = MagicMock()
### Every evaluate will return the same result
eval_result = [
EvaluationResult(
criteria="test criteria", passed=True, reason="test passed", points=1
)
]
mock_checkpoint_evaluate.return_value = eval_result
mock_qa_evaluate.return_value = eval_result[0]
with (
patch(
"any_agent.evaluation.evaluate.evaluate_checkpoints",
mock_checkpoint_evaluate,
),
patch("any_agent.evaluation.evaluate.evaluate_final_output", mock_qa_evaluate),
):
evaluate(
evaluation_case=evaluation_case,
trace=agent_trace,
)
assert mock_checkpoint_evaluate.call_count == 1
assert mock_qa_evaluate.call_count == 1
|
This unit test checks that all evaluators are called when evaluating a trace.
|
test_evaluate_runs_all_evaluators
|
python
|
mozilla-ai/any-agent
|
tests/unit/evaluation/test_evaluate.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/evaluation/test_evaluate.py
|
Apache-2.0
|
def test_evaluate_when_no_final_output(
evaluation_case: EvaluationCase, agent_trace: AgentTrace
) -> None:
"""This unit test checks that the hypothesis and qa evaluators are not called when there is no final output."""
#### Set up the mocks for the evaluators so that we don't actually call LLMs.
mock_checkpoint_evaluate = MagicMock()
mock_hypothesis_evaluate = MagicMock()
mock_qa_evaluate = MagicMock()
agent_trace.final_output = None
### Every evaluate will return the same result
eval_result = [
EvaluationResult(
criteria="test criteria", passed=True, reason="test passed", points=1
)
]
for evaluate_fn in [
mock_checkpoint_evaluate,
mock_hypothesis_evaluate,
mock_qa_evaluate,
]:
evaluate_fn.return_value = eval_result
with (
patch(
"any_agent.evaluation.evaluate.evaluate_checkpoints",
mock_checkpoint_evaluate,
),
patch("any_agent.evaluation.evaluate.evaluate_final_output", mock_qa_evaluate),
):
evaluate(
evaluation_case=evaluation_case,
trace=agent_trace,
)
assert mock_checkpoint_evaluate.call_count == 1
assert mock_hypothesis_evaluate.call_count == 0
assert mock_qa_evaluate.call_count == 0
|
This unit test checks that the hypothesis and qa evaluators are not called when there is no final output.
|
test_evaluate_when_no_final_output
|
python
|
mozilla-ai/any-agent
|
tests/unit/evaluation/test_evaluate.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/evaluation/test_evaluate.py
|
Apache-2.0
|
def test_trace_evaluation_result_score_calculation(agent_trace: AgentTrace) -> None:
"""Test that the score property of TraceEvaluationResult correctly calculates the ratio of passed points to total points."""
# Create evaluation results with different point values and pass status
checkpoint_results = [
EvaluationResult(
criteria="Criterion 1", passed=True, reason="Passed", points=2
),
EvaluationResult(
criteria="Criterion 2", passed=False, reason="Failed", points=3
),
]
ground_truth_result = EvaluationResult(
criteria="Direct 1", passed=True, reason="Passed", points=3
)
# Create a TraceEvaluationResult instance
evaluation_result = TraceEvaluationResult(
trace=agent_trace,
checkpoint_results=checkpoint_results,
ground_truth_result=ground_truth_result,
)
expected_score = 5 / 8
# Check that the score property returns the correct value
assert evaluation_result.score == expected_score, (
f"Expected score {expected_score}, got {evaluation_result.score}"
)
# Test case with no points (should raise ValueError)
zero_point_result = TraceEvaluationResult(trace=agent_trace, checkpoint_results=[])
with pytest.raises(ValueError, match="Total points is 0, cannot calculate score."):
zero_point_result.score # noqa: B018
|
Test that the score property of TraceEvaluationResult correctly calculates the ratio of passed points to total points.
|
test_trace_evaluation_result_score_calculation
|
python
|
mozilla-ai/any-agent
|
tests/unit/evaluation/test_evaluate.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/evaluation/test_evaluate.py
|
Apache-2.0
|
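For reference, the expected 5/8 in the test above follows directly from the point bookkeeping: only passed results contribute their points to the numerator, while every result (checkpoint and ground-truth) contributes to the denominator. A minimal sketch of that arithmetic, not the library code:

# Minimal sketch of the score arithmetic checked above.
results = [
    {"passed": True,  "points": 2},  # checkpoint 1
    {"passed": False, "points": 3},  # checkpoint 2
    {"passed": True,  "points": 3},  # ground-truth result
]
passed_points = sum(r["points"] for r in results if r["passed"])  # 5
total_points = sum(r["points"] for r in results)                  # 8
assert passed_points / total_points == 5 / 8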
def create_agent_with_model_args(framework: AgentFramework) -> AnyAgent:
"""Helper function to create an agent with test model arguments"""
return AnyAgent.create(
framework,
AgentConfig(
model_id="gpt-4o",
model_args={
"temperature": TEST_TEMPERATURE,
"frequency_penalty": TEST_PENALTY,
},
),
)
|
Helper function to create an agent with test model arguments
|
create_agent_with_model_args
|
python
|
mozilla-ai/any-agent
|
tests/unit/frameworks/test_any_agent.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/frameworks/test_any_agent.py
|
Apache-2.0
|
def test_get_agent_card_with_explicit_skills(agent_framework: AgentFramework) -> None:
"""Test that when skills are explicitly provided in A2AServingConfig, they are used instead of inferring from tools."""
agent = MagicMock()
agent.config = AgentConfig(model_id="foo", description="test agent")
agent.framework = agent_framework
# Give the agent some tools that would normally be used to infer skills
agent._tools = [WRAPPERS[agent_framework](search_web)]
# Create explicit skills that are different from what would be inferred
explicit_skills = [
AgentSkill(
id="custom-skill-1",
name="custom_function_1",
description="This is a custom skill that does something amazing",
tags=["custom", "test"],
),
AgentSkill(
id="custom-skill-2",
name="custom_function_2",
description="Another custom skill for testing purposes",
tags=["custom", "demo"],
),
]
serving_config = A2AServingConfig(skills=explicit_skills)
agent_card = _get_agent_card(agent, serving_config)
# Verify basic agent card properties
assert agent_card.name == "any_agent"
assert agent_card.description == "test agent"
# Verify that the explicit skills are used (not inferred from tools)
assert len(agent_card.skills) == 2
assert agent_card.skills[0].id == "custom-skill-1"
assert agent_card.skills[1].id == "custom-skill-2"
# Verify that the skills are NOT the ones that would be inferred from search_web tool
skill_names = [skill.name for skill in agent_card.skills]
assert (
"search_web" not in skill_names
) # This would be present if skills were inferred from tools
|
Test that when skills are explicitly provided in A2AServingConfig, they are used instead of inferring from tools.
|
test_get_agent_card_with_explicit_skills
|
python
|
mozilla-ai/any-agent
|
tests/unit/serving/test_agent_card.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/serving/test_agent_card.py
|
Apache-2.0
|
def test_bad_functions(agent_framework: AgentFramework) -> None:
"""Test the verify_callable function with various bad functions."""
# Test missing return type
def missing_return_type(foo: str): # type: ignore[no-untyped-def]
"""Docstring for foo."""
return foo
with pytest.raises(ValueError, match="return type"):
asyncio.run(_wrap_tools([missing_return_type], agent_framework))
# Test missing docstring
def missing_docstring(foo: str) -> str:
return foo
with pytest.raises(ValueError, match="docstring"):
asyncio.run(_wrap_tools([missing_docstring], agent_framework))
# Test missing parameter type
def missing_param_type(foo) -> str: # type: ignore[no-untyped-def]
"""Docstring for foo."""
return foo # type: ignore[no-any-return]
with pytest.raises(ValueError, match="typed arguments"):
asyncio.run(_wrap_tools([missing_param_type], agent_framework))
# Good function should not raise an error
def good_function(foo: str) -> str:
"""Docstring for foo.
Args:
foo: The foo argument.
Returns:
The foo result.
"""
return foo
asyncio.run(_wrap_tools([good_function], agent_framework))
|
Test the verify_callable function with various bad functions.
|
test_bad_functions
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/test_unit_wrappers.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/test_unit_wrappers.py
|
Apache-2.0
|
async def test_agno_client_session_timeout_passed():
"""Test that client_session_timeout_seconds parameter is properly passed to AgnoMCPTools (STDIO only)."""
custom_timeout = 15
stdio_params = MCPStdio(
command="echo",
args=["test"],
client_session_timeout_seconds=custom_timeout,
)
sse_params = MCPSse(
url="http://localhost:8000",
client_session_timeout_seconds=custom_timeout,
)
# STDIO
server = _get_mcp_server(stdio_params, AgentFramework.AGNO)
with patch("any_agent.tools.mcp.frameworks.agno.AgnoMCPTools") as mock_agno_tools:
mock_tools_instance = AsyncMock()
mock_tools_instance.__aenter__ = AsyncMock(return_value=mock_tools_instance)
mock_tools_instance.__aexit__ = AsyncMock(return_value=None)
mock_agno_tools.return_value = mock_tools_instance
await server._setup_tools()
mock_agno_tools.assert_called_once()
call_args = mock_agno_tools.call_args
assert call_args.kwargs["timeout_seconds"] == custom_timeout
# SSE (check that timeout is passed to ClientSession)
server = _get_mcp_server(sse_params, AgentFramework.AGNO)
with (
patch("any_agent.tools.mcp.frameworks.agno.sse_client") as sse_client_patch,
patch("any_agent.tools.mcp.frameworks.agno.ClientSession") as mock_session,
patch("any_agent.tools.mcp.frameworks.agno.AgnoMCPTools") as mock_agno_tools,
):
mock_sse_client = AsyncMock()
mock_sse_client.__aenter__ = AsyncMock(return_value=(MagicMock(), MagicMock()))
mock_sse_client.__aexit__ = AsyncMock(return_value=None)
sse_client_patch.return_value = mock_sse_client
mock_session_instance = AsyncMock()
mock_session_instance.__aenter__ = AsyncMock(return_value=mock_session_instance)
mock_session_instance.__aexit__ = AsyncMock(return_value=None)
mock_session_instance.initialize = AsyncMock()
mock_session.return_value = mock_session_instance
mock_tools_instance = AsyncMock()
mock_tools_instance.__aenter__ = AsyncMock(return_value=mock_tools_instance)
mock_tools_instance.__aexit__ = AsyncMock(return_value=None)
mock_agno_tools.return_value = mock_tools_instance
await server._setup_tools()
mock_session.assert_called_once()
# Check that the timeout was passed as read_timeout_seconds
call_args = mock_session.call_args
assert call_args.kwargs["read_timeout_seconds"] == timedelta(
seconds=custom_timeout
)
|
Test that client_session_timeout_seconds parameter is properly passed to AgnoMCPTools (STDIO only).
|
test_agno_client_session_timeout_passed
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/mcp/test_unit_agno_mcp.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/mcp/test_unit_agno_mcp.py
|
Apache-2.0
|
async def test_langchain_client_session_timeout_passed():
"""Test that client_session_timeout_seconds parameter is properly passed to LangChain ClientSession (STDIO and SSE)."""
custom_timeout = 15.0
stdio_params = MCPStdio(
command="echo",
args=["test"],
client_session_timeout_seconds=custom_timeout,
)
sse_params = MCPSse(
url="http://localhost:8000",
client_session_timeout_seconds=custom_timeout,
)
# STDIO
server = _get_mcp_server(stdio_params, AgentFramework.LANGCHAIN)
with patch(
"any_agent.tools.mcp.frameworks.langchain.ClientSession"
) as mock_session:
mock_session_instance = AsyncMock()
mock_session_instance.__aenter__ = AsyncMock(return_value=mock_session_instance)
mock_session_instance.__aexit__ = AsyncMock(return_value=None)
mock_session_instance.initialize = AsyncMock()
mock_session.return_value = mock_session_instance
with patch(
"any_agent.tools.mcp.frameworks.langchain.load_mcp_tools"
) as mock_load_tools:
mock_load_tools.return_value = []
with patch(
"any_agent.tools.mcp.frameworks.langchain.stdio_client"
) as mock_client:
mock_client.return_value.__aenter__ = AsyncMock(
return_value=(MagicMock(), MagicMock())
)
mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
await server._setup_tools()
mock_session.assert_called_once()
call_args = mock_session.call_args
assert (
call_args.kwargs["read_timeout_seconds"].total_seconds()
== custom_timeout
)
# SSE
server = _get_mcp_server(sse_params, AgentFramework.LANGCHAIN)
with patch(
"any_agent.tools.mcp.frameworks.langchain.ClientSession"
) as mock_session:
mock_session_instance = AsyncMock()
mock_session_instance.__aenter__ = AsyncMock(return_value=mock_session_instance)
mock_session_instance.__aexit__ = AsyncMock(return_value=None)
mock_session_instance.initialize = AsyncMock()
mock_session.return_value = mock_session_instance
with patch(
"any_agent.tools.mcp.frameworks.langchain.load_mcp_tools"
) as mock_load_tools:
mock_load_tools.return_value = []
with patch(
"any_agent.tools.mcp.frameworks.langchain.sse_client"
) as mock_client:
mock_client.return_value.__aenter__ = AsyncMock(
return_value=(MagicMock(), MagicMock())
)
mock_client.return_value.__aexit__ = AsyncMock(return_value=None)
await server._setup_tools()
mock_session.assert_called_once()
call_args = mock_session.call_args
assert (
call_args.kwargs["read_timeout_seconds"].total_seconds()
== custom_timeout
)
|
Test that client_session_timeout_seconds parameter is properly passed to LangChain ClientSession (STDIO and SSE).
|
test_langchain_client_session_timeout_passed
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/mcp/test_unit_langchain_mcp.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/mcp/test_unit_langchain_mcp.py
|
Apache-2.0
|
async def test_llamaindex_client_session_timeout_passed():
"""Test that client_session_timeout_seconds parameter is properly passed to LlamaIndex BasicMCPClient (STDIO only)."""
custom_timeout = 15.0
stdio_params = MCPStdio(
command="echo",
args=["test"],
client_session_timeout_seconds=custom_timeout,
)
sse_params = MCPSse(
url="http://localhost:8000",
client_session_timeout_seconds=custom_timeout,
)
# STDIO
server = _get_mcp_server(stdio_params, AgentFramework.LLAMA_INDEX)
with patch(
"any_agent.tools.mcp.frameworks.llama_index.LlamaIndexMCPClient"
) as mock_client:
mock_client_instance = MagicMock()
mock_client.return_value = mock_client_instance
with patch(
"any_agent.tools.mcp.frameworks.llama_index.LlamaIndexMcpToolSpec"
) as mock_spec:
mock_spec_instance = MagicMock()
mock_spec_instance.to_tool_list_async = AsyncMock(return_value=[])
mock_spec.return_value = mock_spec_instance
await server._setup_tools()
mock_client.assert_called_once()
call_args = mock_client.call_args
assert call_args.kwargs["timeout"] == custom_timeout
# SSE (check that timeout is passed to LlamaIndexMCPClient)
server = _get_mcp_server(sse_params, AgentFramework.LLAMA_INDEX)
with patch(
"any_agent.tools.mcp.frameworks.llama_index.LlamaIndexMCPClient"
) as mock_client:
mock_client_instance = MagicMock()
mock_client.return_value = mock_client_instance
with patch(
"any_agent.tools.mcp.frameworks.llama_index.LlamaIndexMcpToolSpec"
) as mock_spec:
mock_spec_instance = MagicMock()
mock_spec_instance.to_tool_list_async = AsyncMock(return_value=[])
mock_spec.return_value = mock_spec_instance
await server._setup_tools()
mock_client.assert_called_once()
call_args = mock_client.call_args
assert call_args.kwargs["timeout"] == custom_timeout
|
Test that client_session_timeout_seconds parameter is properly passed to LlamaIndex BasicMCPClient (STDIO only).
|
test_llamaindex_client_session_timeout_passed
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/mcp/test_unit_llama_index_mcp.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/mcp/test_unit_llama_index_mcp.py
|
Apache-2.0
|
def test_openai_mcpsse(
mcp_sse_params_no_tools: MCPSse,
) -> None:
"""This is a test kept for legacy purposes."""
agent_config = AgentConfig(model_id="gpt-4o", tools=[mcp_sse_params_no_tools])
agent = AnyAgent.create("openai", agent_config)
servers = agent._mcp_servers
assert servers
server, *_ = agent._mcp_servers
assert server.mcp_tool == mcp_sse_params_no_tools
|
This is a test kept for legacy purposes.
|
test_openai_mcpsse
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/mcp/test_unit_openai_mcp.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/mcp/test_unit_openai_mcp.py
|
Apache-2.0
|
def test_openai_client_session_timeout_passed():
"""Test that client_session_timeout_seconds parameter is properly passed to OpenAI MCPServerStdio and MCPServerSse."""
custom_timeout = 15.0
stdio_params = MCPStdio(
command="echo",
args=["test"],
client_session_timeout_seconds=custom_timeout,
)
sse_params = MCPSse(
url="http://localhost:8000",
client_session_timeout_seconds=custom_timeout,
)
# STDIO
server = _get_mcp_server(stdio_params, AgentFramework.OPENAI)
with patch(
"any_agent.tools.mcp.frameworks.openai.OpenAIInternalMCPServerStdio"
) as mock_stdio:
mock_server_instance = AsyncMock()
mock_server_instance.__aenter__ = AsyncMock(return_value=mock_server_instance)
mock_server_instance.__aexit__ = AsyncMock(return_value=None)
mock_server_instance.list_tools = AsyncMock(return_value=[])
mock_stdio.return_value = mock_server_instance
import asyncio
asyncio.run(server._setup_tools())
mock_stdio.assert_called_once()
call_args = mock_stdio.call_args
assert call_args.kwargs["client_session_timeout_seconds"] == custom_timeout
# SSE
server = _get_mcp_server(sse_params, AgentFramework.OPENAI)
with patch(
"any_agent.tools.mcp.frameworks.openai.OpenAIInternalMCPServerSse"
) as mock_sse:
mock_server_instance = AsyncMock()
mock_server_instance.__aenter__ = AsyncMock(return_value=mock_server_instance)
mock_server_instance.__aexit__ = AsyncMock(return_value=None)
mock_server_instance.list_tools = AsyncMock(return_value=[])
mock_sse.return_value = mock_server_instance
asyncio.run(server._setup_tools())
mock_sse.assert_called_once()
call_args = mock_sse.call_args
assert call_args.kwargs["client_session_timeout_seconds"] == custom_timeout
|
Test that client_session_timeout_seconds parameter is properly passed to OpenAI MCPServerStdio and MCPServerSse.
|
test_openai_client_session_timeout_passed
|
python
|
mozilla-ai/any-agent
|
tests/unit/tools/mcp/test_unit_openai_mcp.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tools/mcp/test_unit_openai_mcp.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input([Message(role="user")], span)
span.set_attribute.assert_called_with(
"gen_ai.input.messages", '[{"role": "user", "content": null}]'
)
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_agno_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_agno_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(Message(role="assistant"), span)
span.set_attributes.assert_called_once_with(
{"gen_ai.usage.input_tokens": 0, "gen_ai.usage.output_tokens": 0}
)
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_agno_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_agno_instrumentation.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input(LlmRequest(), span)
span.set_attribute.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_google_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_google_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(LlmResponse(), span)
span.set_attributes.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_google_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_google_instrumentation.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input([[BaseMessage(content="foo", type="human")]], span)
span.set_attribute.assert_called_with(
"gen_ai.input.messages", '[{"role": "user", "content": "foo"}]'
)
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_langchain_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_langchain_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(LLMResult(generations=[[Generation(text="")]]), span)
span.set_attributes.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_langchain_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_langchain_instrumentation.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input([ChatMessage()], span)
span.set_attribute.assert_called_with(
"gen_ai.input.messages", '[{"role": "user", "content": "No content"}]'
)
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_llama_index_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_llama_index_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(
AgentOutput(
response=ChatMessage(), tool_calls=[], raw=None, current_agent_name="foo"
),
span,
)
span.set_attributes.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_llama_index_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_llama_index_instrumentation.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input(GenerationSpanData(), span)
span.set_attribute.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_openai_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_openai_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(GenerationSpanData(), span)
span.set_attributes.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_openai_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_openai_instrumentation.py
|
Apache-2.0
|
def test_set_llm_input_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_input([], span)
span.set_attribute.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_input_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_smolagents_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_smolagents_instrumentation.py
|
Apache-2.0
|
def test_set_llm_output_missing_fields() -> None:
"""It should not fail when missing fields."""
span = MagicMock()
_set_llm_output(ChatMessage("assistant"), span)
span.set_attributes.assert_not_called()
|
It should not fail when missing fields.
|
test_set_llm_output_missing_fields
|
python
|
mozilla-ai/any-agent
|
tests/unit/tracing/instrumentation/test_unit_smolagents_instrumentation.py
|
https://github.com/mozilla-ai/any-agent/blob/master/tests/unit/tracing/instrumentation/test_unit_smolagents_instrumentation.py
|
Apache-2.0
|
def experiment_setup(models: Sequence[ModelInfo], c_ref: int, batch_size: int):
"""Return information related to training `models` under compute budget `c_ref`."""
data = []
for m in models:
c_self_per_token = m.self_attn_flops()
d_iso = c_ref / c_self_per_token
s_iso = num_training_steps(
num_tokens=d_iso,
num_latents=m.num_latents,
batch_size=batch_size,
)
c_self_approx = m.self_attn_flops_approx() * d_iso
c_self = c_self_per_token * d_iso
c_cross = m.cross_attn_flops() * d_iso
c = c_self + c_cross
n_self = m.num_self_attn_params()
n_cross = m.num_cross_attn_params()
n = n_self + n_cross
data.append([m.num_channels, m.num_layers, s_iso, d_iso, n, n_cross, n_self, c, c_cross, c_self, c_self_approx])
df = pd.DataFrame(
data,
columns=[
"num_channels",
"num_layers",
"num_steps",
"$D_{iso}$",
"$N$",
"$N_{cross}$",
"$N_{self}$",
"$C$",
"$C_{cross}$",
"$C_{self}$",
r"$\hat{C}_{self}$",
],
)
df.index += 1
format_spec = ["{:}", "{:}", "{:}", "{:.2e}", "{:.2e}", "{:.2e}", "{:.2e}", "{:.2e}", "{:.2e}", "{:.2e}", "{:.2e}"]
return df.style.format(dict(zip(df.columns, format_spec)))
|
Return information related to training `models` under compute budget `c_ref`.
|
experiment_setup
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/article.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/article.py
|
Apache-2.0
|
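The iso-compute bookkeeping above can be illustrated with made-up numbers (the per-token FLOP estimates below are stand-ins, not values from the repository's ModelInfo): the token budget D_iso is the reference compute divided by the per-token self-attention cost, and total compute then splits into a self-attention and a cross-attention part.

# Toy numbers only; the real per-token costs come from ModelInfo in the repo.
c_ref = 1.0e18             # reference compute budget (FLOPs)
c_self_per_token = 2.0e9   # assumed self-attention FLOPs per latent token
c_cross_per_token = 1.0e8  # assumed cross-attention FLOPs per latent token

d_iso = c_ref / c_self_per_token   # tokens trainable under the budget
c_self = c_self_per_token * d_iso  # == c_ref by construction
c_cross = c_cross_per_token * d_iso
c_total = c_self + c_cross

print(f"D_iso = {d_iso:.2e} tokens, C = {c_total:.2e} FLOPs "
      f"({c_cross / c_total:.1%} from cross-attention)")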
def experiment_ratios(models: Sequence[ModelInfo]):
"""Return compute- and parameter-related ratios, independent of compute budget."""
data = []
for m in models:
c_self_approx_per_token = m.self_attn_flops_approx()
c_self_per_token = m.self_attn_flops()
c_cross_per_token = m.cross_attn_flops()
c_self_approx_ratio = c_self_per_token / c_self_approx_per_token
c_cross_contrib = c_cross_per_token / (c_cross_per_token + c_self_per_token)
n_self = m.num_self_attn_params()
n_cross = m.num_cross_attn_params()
n_cross_contrib = n_cross / (n_cross + n_self)
data.append([n_cross_contrib, c_cross_contrib, c_self_approx_ratio])
df = pd.DataFrame(
data, columns=[r"$N_{cross} \over N$", r"$C_{cross} \over C$", r"${C_{self}} \over {\hat{C}_{self}}$"]
)
df.index += 1
format_spec = ["{:.4f}", "{:.4f}", "{:.4f}"]
return df.style.format(dict(zip(df.columns, format_spec)))
|
Return compute- and parameter-related ratios, independent of compute budget.
|
experiment_ratios
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/article.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/article.py
|
Apache-2.0
|
def self_attn(self, num_channels, num_layers):
"""Self-attention FLOPs per latent token.
Equivalent to a decoder-only transformer.
:param num_channels: model dimension
:param num_layers: number of self attention layers incl hybrid layer
"""
embed = self._input_embed(num_channels)
attn_all = self._self_attn_layer(num_channels) * num_layers
mlp_all = self._mlp_layer(num_channels) * num_layers
logits = self._final_logits(num_channels)
forward = embed + attn_all + mlp_all + logits
forward_backward = forward * 3
return forward_backward
|
Self-attention FLOPs per latent token.
Equivalent to a decoder-only transformer.
:param num_channels: model dimension
:param num_layers: number of self attention layers incl hybrid layer
|
self_attn
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
def cross_attn(self, num_channels, prefix_dropout=0.5):
"""Prefix cross-attention FLOPS per latent token.
Perceiver AR extra compute compared to a decoder-only transformer.
:param num_channels: model dimension
:param prefix_dropout: dropout probability of prefix positions
"""
prefix_latent_ratio = self.num_prefix / self.num_latents
# contribution from prefix embedding (per latent token)
embed_prefix = self._input_embed(num_channels) * prefix_latent_ratio
# contribution from prefix attention (per latent token)
attn_prefix = self._cross_attn_layer(num_channels) * prefix_latent_ratio * (1.0 - prefix_dropout)
forward = embed_prefix + attn_prefix
forward_backward = int(forward) * 3
return forward_backward
|
Prefix cross-attention FLOPs per latent token.
Perceiver AR extra compute compared to a decoder-only transformer.
:param num_channels: model dimension
:param prefix_dropout: dropout probability of prefix positions
|
cross_attn
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
def _self_attn_layer(self, num_channels):
"""Self-attention FLOPs per latent token per layer."""
qkv = 6 * num_channels**2
attn = 2 * num_channels * self.num_latents
out = 2 * num_channels**2
return qkv + attn + out
|
Self-attention FLOPs per latent token per layer.
|
_self_attn_layer
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
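Plugging illustrative values into the per-layer estimate above (6c² for the Q/K/V projections, 2·c·num_latents for attention against the latent positions, 2c² for the output projection, all per latent token per layer); the numbers are arbitrary, not a configuration from the repository:

# Arbitrary example values, not a repo configuration.
num_channels = 512
num_latents = 1024

qkv = 6 * num_channels**2              # Q, K, V projections
attn = 2 * num_channels * num_latents  # attention against the latent positions
out = 2 * num_channels**2              # output projection
print(qkv + attn + out)                # 3_145_728 FLOPs per latent token per layer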
def _cross_attn_layer(self, num_channels):
"""Cross-attention FLOPs per prefix token per layer."""
kv = 4 * num_channels**2
attn = 2 * num_channels * self.num_latents
return kv + attn
|
Cross-attention FLOPs per prefix token per layer.
|
_cross_attn_layer
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
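The same kind of substitution works for the per-prefix-token cross-attention layer above (4c² for the key/value projections of a prefix token, presumably because queries come from the latents, plus 2·c·num_latents for attention); again, illustrative numbers only:

# Arbitrary example values, not a repo configuration.
num_channels = 512
num_latents = 1024

kv = 4 * num_channels**2               # K, V projections for a prefix token
attn = 2 * num_channels * num_latents  # attention against the latent positions
print(kv + attn)                       # 2_097_152 FLOPs per prefix token per layer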
def __init__(self, num_channels: int, num_layers: int, compute_estimator: ComputeEstimator):
"""...
:param num_channels: model dimension.
:param num_layers: number of self attention layers incl hybrid layer.
"""
self.num_channels = num_channels
self.num_layers = num_layers
self.compute_estimator = compute_estimator
|
...
:param num_channels: model dimension.
:param num_layers: number of self attention layers incl hybrid layer.
|
__init__
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
def num_self_attn_params(self):
"""Parameter count of self-attention part.
Equivalent to a decoder-only transformer.
"""
return num_self_attn_params(
num_channels=self.num_channels,
num_layers=self.num_layers,
num_latents=self.num_latents,
num_prefix=self.num_prefix,
vocab_size=self.vocab_size,
)
|
Parameter count of self-attention part.
Equivalent to a decoder-only transformer.
|
num_self_attn_params
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
def num_cross_attn_params(self):
"""Parameter count of cross-attention part.."""
# parameters for prefix position embedding
return num_cross_attn_params(self.num_channels, self.num_prefix)
|
Parameter count of cross-attention part.
|
num_cross_attn_params
|
python
|
krasserm/perceiver-io
|
examples/scaling/clm/scaling/flops.py
|
https://github.com/krasserm/perceiver-io/blob/master/examples/scaling/clm/scaling/flops.py
|
Apache-2.0
|
def encode_midi_files(files: List[Path], num_workers: int) -> List[np.ndarray]:
"""Encode a list of midi files using multiple cpu workers."""
with Pool(processes=num_workers) as pool:
res = list(tqdm(pool.imap(_encode_midi_file, files), total=len(files)))
return [r for r in res if r is not None]
|
Encode a list of midi files using multiple cpu workers.
|
encode_midi_files
|
python
|
krasserm/perceiver-io
|
perceiver/data/audio/midi_processor.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/audio/midi_processor.py
|
Apache-2.0
|
def __init__(
self,
dataset_dir: str,
max_seq_len: int,
min_seq_len: Optional[int] = None,
padding_side: str = "left",
batch_size: int = 16,
num_workers: int = 1,
preproc_workers: Optional[int] = None,
pin_memory: bool = True,
):
"""Base class for data preprocessing and loading across different audio data sources using MIDI as the
source data format.
:param dataset_dir: Directory for storing the preprocessed dataset.
:param max_seq_len: Maximum sequence length generated by this data module.
:param min_seq_len: Minimum sequence length generated by this data module. If set the length of each sequence
will be randomly chosen from the interval [min_seq_len, max_seq_len].
:param padding_side: Padding side for sequences that are shorter than the configured max_seq_len. Can be set to
"left" or "right".
:param batch_size: Batch size of loaded training data.
:param num_workers: Number of data loading processes.
:param preproc_workers: Number of preprocessing processes. If not defined, defaults to `num_workers`.
"""
super().__init__()
if min_seq_len is not None and not (0 < min_seq_len < max_seq_len):
raise ValueError(
"Invalid data configuration supplied. "
"Parameter 'min_seq_len' must adhere to 0 < min_seq_len < max_seq_len."
)
self.save_hyperparameters()
self._collator = SymbolicAudioCollator(
max_seq_len=self.hparams.max_seq_len + 1,
pad_token=self._PAD_INPUT_ID,
padding_side=self.hparams.padding_side,
)
self._ds_train = None
self._ds_valid = None
|
Base class for data preprocessing and loading across different audio data sources using MIDI as the
source data format.
:param dataset_dir: Directory for storing the preprocessed dataset.
:param max_seq_len: Maximum sequence length generated by this data module.
:param min_seq_len: Minimum sequence length generated by this data module. If set the length of each sequence
will be randomly chosen from the interval [min_seq_len, max_seq_len].
:param padding_side: Padding side for sequences that are shorter than the configured max_seq_len. Can be set to
"left" or "right".
:param batch_size: Batch size of loaded training data.
:param num_workers: Number of data loading processes.
:param preproc_workers: Number of preprocessing processes. If not defined, defaults to `num_workers`.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/data/audio/symbolic.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/audio/symbolic.py
|
Apache-2.0
|
def mask_words(self, examples):
"""A modified version of whole word masking as described in https://huggingface.co/course/chapter7/3.
The implementation in the linked document replaces words, randomly selected with `wwm_probability`, with mask
tokens (one or more per word). The implementation here, however, only replaces 80% of selected words with mask
tokens and replaces 10% with random words and leaves 10% unchanged.
"""
for example in examples:
self.mask_words_1(example)
return examples
|
A modified version of whole word masking as described in https://huggingface.co/course/chapter7/3.
The implementation in the linked document replaces words, randomly selected with `wwm_probability`, with mask
tokens (one or more per word). The implementation here, however, only replaces 80% of selected words with mask
tokens and replaces 10% with random words and leaves 10% unchanged.
|
mask_words
|
python
|
krasserm/perceiver-io
|
perceiver/data/text/collator.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/text/collator.py
|
Apache-2.0
|
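The 80/10/10 split described in the docstring above is the standard BERT-style corruption applied at whole-word granularity; `mask_words_1` itself is not shown in this entry, so the following is only a schematic re-statement of the rule for one already-selected word (token ids, mask id and vocabulary size are placeholders):

# Schematic only: applies the 80/10/10 rule to one selected word's token positions.
import random

def corrupt_selected_word(input_ids, word_token_positions, mask_token_id, vocab_size):
    r = random.random()
    for pos in word_token_positions:
        if r < 0.8:        # 80%: replace every token of the word with the mask token
            input_ids[pos] = mask_token_id
        elif r < 0.9:      # 10%: replace with a random token
            input_ids[pos] = random.randrange(vocab_size)
        # remaining 10%: leave the word unchanged

ids = [5, 17, 42, 99]
corrupt_selected_word(ids, word_token_positions=[1, 2], mask_token_id=103, vocab_size=30000)
print(ids)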
def __init__(
self,
dataset_dir: str,
tokenizer: str,
max_seq_len: int,
task: Task = Task.mlm,
mask_prob: float = 0.15,
mask_words: bool = True,
static_masking: bool = False,
add_special_tokens: bool = False,
add_eos_token: bool = False,
padding_side: Optional[str] = None,
random_train_shift: bool = False,
random_valid_shift: bool = False,
random_train_truncation: bool = False,
random_valid_truncation: bool = False,
random_min_seq_len: int = 16,
preproc_batch_size: int = 1000,
preproc_workers: Optional[int] = None,
batch_size: int = 64,
valid_batch_size: Optional[int] = None,
num_workers: int = 3,
pin_memory: bool = True,
):
"""Base class for consistent data preprocessing and loading across different text data sources.
:param dataset_dir: Directory for storing the preprocessed dataset.
:param tokenizer: Reference to a Hugging Face fast tokenizer (or the `deepmind/language-perceiver` tokenizer).
:param max_seq_len: Maximum sequence length generated by this data module.
:param task: The task for which this data module is used. Data are preprocessed and loaded in a task-specific
way.
:param mask_prob: Masking probability. Ignored if task is not `Task.mlm`.
:param mask_words: Whether to mask words or individual tokens. Ignored if task is not `Task.mlm`.
:param static_masking: Whether to mask at preprocessing time (static) or at data loading time (dynamic). Ignored
if task is not `Task.mlm`.
:param add_special_tokens: Whether to add special tokens to tokenized text.
:param add_eos_token: Whether to append an EOS token to each example.
:param padding_side: If `None`, uses the pre-configured `padding_side` of the tokenizer. Can be overridden by
setting to "left" or "right".
:param random_train_truncation: Randomly truncates sequences in the training set to length
`randint(random_min_seq_len, max_seq_len + 1)`.
:param random_valid_truncation: Randomly truncates sequences in the validation set to length
`randint(random_min_seq_len, max_seq_len + 1)`.
:param random_min_seq_len: Minimum sequence length when using `random_train_truncation` or
`random_valid_truncation`.
:param preproc_batch_size: Preprocessing batch size.
:param preproc_workers: Number of preprocessing processes. If not defined, defaults to `num_workers`.
:param batch_size: Batch size of loaded training data.
:param valid_batch_size: Batch size of loaded validation data. If `None` defaults to `batch_size`
:param num_workers: Number of data loading processes.
"""
super().__init__()
self.save_hyperparameters()
if self.hparams.static_masking and not self.hparams.mask_words:
raise ValueError("static_masking=true is only supported for mask_words=true")
self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.tokenizer, verbose=False)
if self.hparams.padding_side is not None:
self.tokenizer.padding_side = self.hparams.padding_side
# PerceiverTokenizer needs special support for generating word_ids as it is not a fast tokenizer
self.perceiver_tokenizer_configured = self.hparams.tokenizer in [
"krasserm/perceiver-io-mlm",
"deepmind/language-perceiver",
]
if self.perceiver_tokenizer_configured:
self.perceiver_tokenizer_util = PerceiverTokenizerUtil(self.tokenizer)
if self.hparams.task == Task.mlm and not self.hparams.static_masking:
if self.hparams.mask_words:
self.collator = WordMaskingCollator(tokenizer=self.tokenizer, mask_prob=self.hparams.mask_prob)
else:
self.collator = TokenMaskingCollator(tokenizer=self.tokenizer, mask_prob=self.hparams.mask_prob)
else:
self.collator = DefaultCollator(tokenizer=self.tokenizer, max_seq_len=self.hparams.max_seq_len)
self.ds_train = None
self.ds_valid = None
|
Base class for consistent data preprocessing and loading across different text data sources.
:param dataset_dir: Directory for storing the preprocessed dataset.
:param tokenizer: Reference to a Hugging Face fast tokenizer (or the `deepmind/language-perceiver` tokenizer).
:param max_seq_len: Maximum sequence length generated by this data module.
:param task: The task for which this data module is used. Data are preprocessed and loaded in a task-specific
way.
:param mask_prob: Masking probability. Ignored if task is not `Task.mlm`.
:param mask_words: Whether to mask words or individual tokens. Ignored if task is not `Task.mlm`.
:param static_masking: Whether to mask at preprocessing time (static) or at data loading time (dynamic). Ignored
if task is not `Task.mlm`.
:param add_special_tokens: Whether to add special tokens to tokenized text.
:param add_eos_token: Whether to append an EOS token to each example.
:param padding_side: If `None`, uses the pre-configured `padding_side` of the tokenizer. Can be overridden by
setting to "left" or "right".
:param random_train_truncation: Randomly truncates sequences in the training set to length
`randint(random_min_seq_len, max_seq_len + 1)`.
:param random_valid_truncation: Randomly truncates sequences in the validation set to length
`randint(random_min_seq_len, max_seq_len + 1)`.
:param random_min_seq_len: Minimum sequence length when using `random_train_truncation` or
`random_valid_truncation`.
:param preproc_batch_size: Preprocessing batch size.
:param preproc_workers: Number of preprocessing processes. If not defined, defaults to `num_workers`.
:param batch_size: Batch size of loaded training data.
:param valid_batch_size: Batch size of loaded validation data. If `None` defaults to `batch_size`
:param num_workers: Number of data loading processes.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/data/text/common.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/text/common.py
|
Apache-2.0
|
def word_ids(self, token_ids):
"""Creates word ids from `token_ids`.
Word boundaries are defined using whitespace boundaries. Whitespaces preceding a word have the same word id as
the actual word following these whitespaces. Special tokens are assigned a `None` word id. Consecutive words do
not necessarily have consecutive word ids. This method only guarantees that distinct words have distinct word
ids. This is sufficient for `WordMaskingCollator` to function properly.
"""
word_ids = []
curr_id = 0
special_mask = self.tokenizer.get_special_tokens_mask(token_ids, already_has_special_tokens=True)
regular_token = True
for i, token_id in enumerate(token_ids):
if special_mask[i]:
word_ids.append(None)
curr_id += 1
elif token_id in self.whitespace_ids:
if regular_token:
regular_token = False
curr_id += 1
word_ids.append(curr_id)
else:
regular_token = True
word_ids.append(curr_id)
return word_ids
|
Creates word ids from `token_ids`.
Word boundaries are defined using whitespace boundaries. Whitespaces preceding a word have the same word id as
the actual word following these whitespaces. Special tokens are assigned a `None` word id. Consecutive words do
not necessarily have consecutive word ids. This method only guarantees that distinct words have distinct word
ids. This is sufficient for `WordMaskingCollator` to function properly.
|
word_ids
|
python
|
krasserm/perceiver-io
|
perceiver/data/text/utils.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/text/utils.py
|
Apache-2.0
|
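A self-contained toy version of the same id-assignment logic makes the behaviour easier to see; the whitespace ids and the special-token mask below are hard-coded stand-ins rather than values from the Perceiver tokenizer:

# Toy stand-in for the word_ids logic above, with hard-coded inputs.
def toy_word_ids(token_ids, whitespace_ids, special_mask):
    word_ids, curr_id, regular_token = [], 0, True
    for i, token_id in enumerate(token_ids):
        if special_mask[i]:
            word_ids.append(None)
            curr_id += 1
        elif token_id in whitespace_ids:
            if regular_token:
                regular_token = False
                curr_id += 1
            word_ids.append(curr_id)
        else:
            regular_token = True
            word_ids.append(curr_id)
    return word_ids

# tokens: [CLS] 'f' 'o' 'o' ' ' 'b' 'a' 'r' [SEP]
print(toy_word_ids(
    token_ids=[1, 10, 11, 11, 99, 12, 13, 14, 2],
    whitespace_ids={99},
    special_mask=[1, 0, 0, 0, 0, 0, 0, 0, 1],
))  # [None, 1, 1, 1, 2, 2, 2, 2, None]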
def _extract_image_patches(self, x: torch.Tensor, kernel: int, stride: int = 1, dilation: int = 1):
"""Equivalent to the implementation of https://www.tensorflow.org/api_docs/python/tf/image/extract_patches
using "SAME" padding.
From: https://discuss.pytorch.org/t/tf-extract-image-patches-in-pytorch/43837/9
"""
b = x.shape[0]
x = self._pad(x, kernel, stride, dilation)
# extract patches
patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride)
# re-order patch dimensions
patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous()
# stack patches along second dimension
return patches.view(b, -1, patches.shape[-2], patches.shape[-1])
|
Equivalent to the implementation of https://www.tensorflow.org/api_docs/python/tf/image/extract_patches
using "SAME" padding.
From: https://discuss.pytorch.org/t/tf-extract-image-patches-in-pytorch/43837/9
|
_extract_image_patches
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
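The "27" that appears in the optical-flow preprocessing docstrings below comes from this patch extraction: a 3x3 neighbourhood of a 3-channel image gives 3·3·3 = 27 values per pixel. A self-contained shape check under that assumption, using an explicit SAME pad instead of the class's `_pad` helper:

# Shape check: 3x3 neighbourhoods of a 3-channel image -> 27 channels per pixel.
import torch
import torch.nn.functional as F

b, c, h, w, kernel, stride = 1, 3, 8, 8, 3, 1
x = torch.randn(b, c, h, w)
x = F.pad(x, (1, 1, 1, 1))                                       # SAME padding for kernel=3, stride=1
patches = x.unfold(2, kernel, stride).unfold(3, kernel, stride)  # (b, c, 8, 8, 3, 3)
patches = patches.permute(0, 4, 5, 1, 2, 3).contiguous()
patches = patches.view(b, -1, patches.shape[-2], patches.shape[-1])
print(patches.shape)                                             # torch.Size([1, 27, 8, 8])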
def _pad(x: torch.Tensor, kernel: int, stride: int = 1, dilation: int = 1) -> torch.Tensor:
"""Applies a pad to the input using "SAME" strategy."""
*_, h, w = x.shape
h2 = math.ceil(h / stride)
w2 = math.ceil(w / stride)
pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
pad_col = (w2 - 1) * stride + (kernel - 1) * dilation + 1 - w
return F.pad(x, (pad_col // 2, pad_col - pad_col // 2, pad_row // 2, pad_row - pad_row // 2))
|
Applies a pad to the input using "SAME" strategy.
|
_pad
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
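A quick numeric check of the SAME-padding arithmetic above, with arbitrarily chosen sizes: for h = w = 5, kernel = 3, stride = 2 and dilation = 1, h2 = ceil(5/2) = 3 and pad_row = (3-1)·2 + (3-1)·1 + 1 - 5 = 2, split as one pixel on each side.

import math

h, kernel, stride, dilation = 5, 3, 2, 1
h2 = math.ceil(h / stride)
pad_row = (h2 - 1) * stride + (kernel - 1) * dilation + 1 - h
print(pad_row, (pad_row // 2, pad_row - pad_row // 2))  # 2 (1, 1)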
def _compute_patch_grid_indices(self, img_shape: Tuple[int, ...]) -> List[Tuple[int, int]]:
"""From https://github.com/deepmind/deepmind-research/blob/master/perceiver/colabs/optical_flow.ipynb."""
ys = list(range(0, img_shape[0], self.patch_size[0] - self.patch_min_overlap))
xs = list(range(0, img_shape[1], self.patch_size[1] - self.patch_min_overlap))
ys[-1] = img_shape[0] - self.patch_size[0]
xs[-1] = img_shape[1] - self.patch_size[1]
return list(itertools.product(ys, xs))
|
From https://github.com/deepmind/deepmind-research/blob/master/perceiver/colabs/optical_flow.ipynb.
|
_compute_patch_grid_indices
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
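The grid construction above tiles the image with a stride of `patch_size - patch_min_overlap` and then snaps the last row/column origin so the final patch ends exactly at the image border. With made-up sizes (not the repository's defaults):

import itertools

img_shape = (120, 200)  # (height, width), made-up
patch_size = (64, 64)   # made-up
patch_min_overlap = 16

ys = list(range(0, img_shape[0], patch_size[0] - patch_min_overlap))  # [0, 48, 96]
xs = list(range(0, img_shape[1], patch_size[1] - patch_min_overlap))  # [0, 48, 96, 144, 192]
ys[-1] = img_shape[0] - patch_size[0]                                 # last origin snapped to 56
xs[-1] = img_shape[1] - patch_size[1]                                 # last origin snapped to 136
print(list(itertools.product(ys, xs)))                                # 15 (y, x) patch origins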
def preprocess(
self, image_pair: Union[Tuple[np.ndarray, np.ndarray], Tuple[torch.Tensor, torch.Tensor]]
) -> torch.Tensor:
"""Creates the input features for the model for a pair of images.
The input images are stacked and split into image patches of size `patch_size`. For each pixel of each
individual patch, 3x3 patches are extracted and stacked in the channel dimension.
Output shape: torch.Size(nr_patches, 2, 27, patch_size[0], patch_size[1])
"""
grid_indices = self._compute_patch_grid_indices(image_pair[0].shape)
return self._preprocess(image_pair, grid_indices)
|
Creates the input features for the model for a pair of images.
The input images are stacked and split into image patches of size `patch_size`. For each pixel of each
individual patch, 3x3 patches are extracted and stacked in the channel dimension.
Output shape: torch.Size(nr_patches, 2, 27, patch_size[0], patch_size[1])
|
preprocess
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
def preprocess_batch(
self,
image_pairs: Union[List[Tuple[np.ndarray, np.ndarray]], List[Tuple[torch.Tensor, torch.Tensor]]],
) -> torch.Tensor:
"""Creates the input features for the model for a batch of image pairs.
For each image pair the images are stacked and split into image patches of size `patch_size`. For each pixel
of each individual patch, 3x3 patches are extracted and stacked in the channel dimension.
Output shape: torch.Size(batch_size, nr_patches, 2, 27, patch_size[0], patch_size[1])
"""
grid_indices = self._compute_patch_grid_indices(image_pairs[0][0].shape)
return self._preprocess_batch(image_pairs, grid_indices)
|
Creates the input features for the model for a batch of image pairs.
For each image pair the images are stacked and split into image patches of size `patch_size`. For each pixel
of each individual patch, 3x3 patches are extracted and stacked in the channel dimension.
Output shape: torch.Size(batch_size, nr_patches, 2, 27, patch_size[0], patch_size[1])
|
preprocess_batch
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
def postprocess(self, predictions: torch.Tensor, img_shape: Tuple[int, ...]) -> torch.Tensor:
"""Combines optical flow predictions for individual image patches into a single prediction per image pair.
Predictions can be supplied for a single image pair or a batch of image pairs, hence the supported input shapes
are:
* (nr_patches, patch_size[0], patch_size[1], 2) and
* (batch_size, nr_patches, patch_size[0], patch_size[1], 2).
Returns combined predictions for each supplied image pair.
Output shape: (batch_size, height, width, 2)
"""
flow_batch = []
height = img_shape[0]
width = img_shape[1]
grid_indices = self._compute_patch_grid_indices(img_shape)
prediction_batch = predictions.unsqueeze(0).cpu() if predictions.dim() == 4 else predictions.cpu()
b, p, *_ = prediction_batch.shape
if p != len(grid_indices):
raise ValueError(
f"Number of patches in the input does not match the number of calculated patches based "
f"on the supplied image size (nr_patches='{p}', calculated={len(grid_indices)})."
)
for prediction in prediction_batch:
flow = torch.zeros(1, height, width, 2).type(torch.float32)
flow_weights = torch.zeros(1, height, width, 1).type(torch.float32)
for flow_patch, (y, x) in zip(prediction, grid_indices):
flow_patch = flow_patch * self.flow_scale_factor
weights_y, weights_x = torch.meshgrid(
torch.arange(self.patch_size[0]), torch.arange(self.patch_size[1]), indexing="ij"
)
weights_x = torch.minimum(torch.add(weights_x, 1), self.patch_size[1] - weights_x)
weights_y = torch.minimum(torch.add(weights_y, 1), self.patch_size[0] - weights_y)
weights = rearrange(torch.minimum(weights_x, weights_y), "h w -> 1 h w 1")
pad = (0, 0, x, width - x - self.patch_size[1], y, height - y - self.patch_size[0], 0, 0)
flow += F.pad(flow_patch * weights, pad, "constant", 0)
flow_weights += F.pad(weights, pad, "constant", 0)
flow /= flow_weights
flow_batch.append(flow)
return torch.concat(flow_batch, dim=0)
|
Combines optical flow predictions for individual image patches into a single prediction per image pair.
Predictions can be supplied for a single image pair or a batch of image pairs, hence the supported input shapes
are:
* (nr_patches, patch_size[0], patch_size[1], 2) and
* (batch_size, nr_patches, patch_size[0], patch_size[1], 2).
Returns combined predictions for each supplied image pair.
Output shape: (batch_size, height, width, 2)
|
postprocess
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
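The per-patch blending weights used in `postprocess` above form a "tent" profile: each pixel is weighted by its distance (plus one) to the nearest patch border, so overlapping patches are cross-faded rather than hard-switched. A one-dimensional illustration of that profile, with an arbitrary patch length:

import torch

patch_len = 6  # arbitrary
pos = torch.arange(patch_len)
weights = torch.minimum(pos + 1, patch_len - pos)
print(weights.tolist())  # [1, 2, 3, 3, 2, 1]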
def process(
self,
model,
image_pairs: Union[List[Tuple[np.ndarray, np.ndarray]], List[Tuple[torch.Tensor, torch.Tensor]]],
batch_size: int,
) -> torch.Tensor:
"""Combines preprocessing, inference and postprocessing steps for the optical flow.
The input features for model are created by stacking each image pair in the channel dimension and splitting the
result into image patches of size `patch_size`. For each pixel in each individual patch, 3x3 patches
are extracted and stacked in the channel dimension.
The input is processed using the supplied optical flow model and the optical flow predictions per image pair
are returned.
Output shape: (batch_size, height, width, 2)
"""
image_shape = image_pairs[0][0].shape
grid_indices = self._compute_patch_grid_indices(image_shape)
predictions = []
with torch.no_grad():
for i in tqdm(range(0, len(image_pairs), batch_size)):
input_features_batch = self._preprocess_batch(image_pairs[i : i + batch_size], grid_indices)
input_features_batch = rearrange(input_features_batch, "b p t c h w -> (b p) t c h w")
for j in range(0, input_features_batch.shape[0], batch_size):
input_features_micro_batch = input_features_batch[j : (j + batch_size)]
pred = model(input_features_micro_batch)
predictions.append(pred.cpu().detach())
flow_predictions = torch.concat(predictions, dim=0)
flow_predictions = rearrange(flow_predictions, "(b p) h w c -> b p h w c", b=len(image_pairs))
return self.postprocess(flow_predictions, image_shape)
|
Combines preprocessing, inference and postprocessing steps for the optical flow.
The input features for model are created by stacking each image pair in the channel dimension and splitting the
result into image patches of size `patch_size`. For each pixel in each individual patch, 3x3 patches
are extracted and stacked in the channel dimension.
The input is processed using the supplied optical flow model and the optical flow predictions per image pair
are returned.
Output shape: (batch_size, height, width, 2)
|
process
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
def render_optical_flow(flow: np.ndarray) -> np.ndarray:
"""Renders optical flow predictions produced by an optical flow model."""
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang / np.pi / 2 * 180
hsv[..., 1] = np.clip(mag * 255 / 24, 0, 255)
hsv[..., 2] = 255
return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
|
Renders optical flow predictions produced by an optical flow model.
|
render_optical_flow
|
python
|
krasserm/perceiver-io
|
perceiver/data/vision/optical_flow.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/data/vision/optical_flow.py
|
Apache-2.0
|
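A minimal usage sketch for `render_optical_flow`; the import path is inferred from the file location above and the random flow field is purely illustrative.
import numpy as np
# Hypothetical import path inferred from perceiver/data/vision/optical_flow.py.
from perceiver.data.vision.optical_flow import render_optical_flow
# Dummy (height, width, 2) flow field; in practice this comes from the optical flow model.
flow = np.random.uniform(-5.0, 5.0, size=(64, 64, 2)).astype(np.float32)
rgb = render_optical_flow(flow)
print(rgb.shape, rgb.dtype)  # (64, 64, 3) uint8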
def __init__(self, num_input_channels: int, *args, **kwargs):
"""Transforms and position-encodes task-specific input to generic encoder input.
:param num_input_channels: Number of channels of the generic encoder input produced by this adapter.
"""
super().__init__()
self._num_input_channels = num_input_channels
|
Transforms and position-encodes task-specific input to generic encoder input.
:param num_input_channels: Number of channels of the generic encoder input produced by this adapter.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/adapter.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/adapter.py
|
Apache-2.0
|
def __init__(self, rotated_channels_per_head: int, *args, **kwargs):
"""An input adapter mixin that additionally generates a frequency position encoding for input sequence
`x`."""
super().__init__(*args, **kwargs)
self.frq_pos_encoding = FrequencyPositionEncoding(dim=rotated_channels_per_head)
|
An input adapter mixin that additionally generates a frequency position encoding for input sequence
`x`.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/adapter.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/adapter.py
|
Apache-2.0
|
def generate(
self,
inputs: Optional[torch.Tensor] = None,
input_ids: Optional[torch.Tensor] = None,
num_latents: int = 1,
**kwargs,
):
"""Augments `GenerationMixin.generate` to support a `num_latents` argument.
This argument determines the initial number of latent positions assigned to the end of a prompt. During
generation, first, the number of latent positions grows until `self.backend_model.max_latents` is reached, then
the prefix length grows until `self.backend_model.max_prefix_len` is reached.
If the sequence reaches `self.backend_model.max_seq_len`, the left-most prefix token is discarded so that a new
latent position becomes available for generating the next token.
:param num_latents: Initial number of latent positions assigned to the end of the input.
"""
if input_ids is not None:
seq_len = input_ids.shape[1]
elif inputs is not None:
seq_len = inputs.shape[1]
else:
raise ValueError("Either inputs or input_ids must be defined")
if not 0 < seq_len <= self.backend_model.max_seq_len:
raise ValueError(f"Input sequence length out of valid range [1..{self.backend_model.max_seq_len}]")
if not 0 < num_latents <= self.backend_model.max_latents:
raise ValueError(f"num_latents={num_latents} out of valid range [1..{self.backend_model.max_latents}]")
else:
num_latents = min(seq_len, num_latents)
prefix_len = seq_len - num_latents
if prefix_len > self.backend_model.max_prefix_len:
num_latents_min = num_latents + prefix_len - self.backend_model.max_prefix_len
raise ValueError(
f"For given sequence of length={seq_len}, num_latents must "
f"be in range [{num_latents_min}..{self.backend_model.max_latents}]"
)
return super().generate(inputs=inputs, input_ids=input_ids, prefix_len=prefix_len, **kwargs)
|
Augments `GenerationMixin.generate` to support a `num_latents` argument.
This argument determines the initial number of latent positions assigned to the end of a prompt. During
generation, first, the number of latent positions grows until `self.backend_model.max_latents` is reached, then
the prefix length grows until `self.backend_model.max_prefix_len` is reached.
If the sequence reaches `self.backend_model.max_seq_len`, the left-most prefix token is discarded so that a new
latent position becomes available for generating the next token.
:param num_latents: Initial number of latent positions assigned to the end of the input.
|
generate
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/huggingface.py
|
Apache-2.0
|
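A short worked example of the prefix/latent split described in the `generate` docstring above; the limits are illustrative stand-ins for `backend_model.max_latents` and `backend_model.max_prefix_len`.
# Illustrative limits; in the model these come from self.backend_model.
max_latents, max_prefix_len = 512, 1536
seq_len, num_latents = 10, 4             # prompt length and requested latent positions
num_latents = min(seq_len, num_latents)  # 4
prefix_len = seq_len - num_latents       # 6 prompt tokens form the prefix, 4 become latents
assert 0 < num_latents <= max_latents and prefix_len <= max_prefix_len
print(prefix_len)  # 6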
def __init__(
self,
num_heads: int,
num_q_input_channels: int,
num_kv_input_channels: int,
num_qk_channels: Optional[int] = None,
num_v_channels: Optional[int] = None,
num_output_channels: Optional[int] = None,
max_heads_parallel: Optional[int] = None,
causal_attention: bool = False,
dropout: float = 0.0,
qkv_bias: bool = True,
out_bias: bool = True,
):
"""Multi-head attention as specified in https://arxiv.org/abs/2107.14795 Appendix E plus support for rotary
position embeddings (https://arxiv.org/abs/2104.09864) and causal attention. Causal attention requires
queries and keys to be right-aligned, if they have different length.
:param num_heads: Number of attention heads.
:param num_q_input_channels: Number of query input channels.
:param num_kv_input_channels: Number of key/value input channels.
:param num_qk_channels: Number of query and key channels. Default is `num_q_input_channels`.
:param num_v_channels: Number of value channels. Default is `num_qk_channels`.
:param num_output_channels: Number of output channels. Default is `num_q_input_channels`
:param max_heads_parallel: Maximum number of heads to be processed in parallel. Default is `num_heads`.
:param causal_attention: Whether to apply a causal attention mask. Default is `False`.
:param dropout: Dropout probability for attention matrix values. Default is `0.0`
:param qkv_bias: Whether to use a bias term for query, key and value projections. Default is `True`.
:param out_bias: Whether to use a bias term for output projection. Default is `True`.
"""
super().__init__()
if num_qk_channels is None:
num_qk_channels = num_q_input_channels
if num_v_channels is None:
num_v_channels = num_qk_channels
if num_output_channels is None:
num_output_channels = num_q_input_channels
if num_qk_channels % num_heads != 0:
raise ValueError("num_qk_channels must be divisible by num_heads")
if num_v_channels % num_heads != 0:
raise ValueError("num_v_channels must be divisible by num_heads")
num_qk_channels_per_head = num_qk_channels // num_heads
self.dp_scale = num_qk_channels_per_head**-0.5
self.num_heads = num_heads
self.num_qk_channels = num_qk_channels
self.num_v_channels = num_v_channels
self.causal_attention = causal_attention
if max_heads_parallel is None:
self.max_heads_parallel = num_heads
else:
self.max_heads_parallel = max_heads_parallel
self.q_proj = nn.Linear(num_q_input_channels, num_qk_channels, bias=qkv_bias)
self.k_proj = nn.Linear(num_kv_input_channels, num_qk_channels, bias=qkv_bias)
self.v_proj = nn.Linear(num_kv_input_channels, num_v_channels, bias=qkv_bias)
self.o_proj = nn.Linear(num_v_channels, num_output_channels, bias=out_bias)
self.dropout = nn.Dropout(dropout)
|
Multi-head attention as specified in https://arxiv.org/abs/2107.14795 Appendix E plus support for rotary
position embeddings (https://arxiv.org/abs/2104.09864) and causal attention. Causal attention requires
queries and keys to be right-aligned, if they have different length.
:param num_heads: Number of attention heads.
:param num_q_input_channels: Number of query input channels.
:param num_kv_input_channels: Number of key/value input channels.
:param num_qk_channels: Number of query and key channels. Default is `num_q_input_channels`.
:param num_v_channels: Number of value channels. Default is `num_qk_channels`.
:param num_output_channels: Number of output channels. Default is `num_q_input_channels`
:param max_heads_parallel: Maximum number of heads to be processed in parallel. Default is `num_heads`.
:param causal_attention: Whether to apply a causal attention mask. Default is `False`.
:param dropout: Dropout probability for attention matrix values. Default is `0.0`
:param qkv_bias: Whether to use a bias term for query, key and value projections. Default is `True`.
:param out_bias: Whether to use a bias term for output projection. Default is `True`.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def forward(
self,
x_q: torch.Tensor,
x_kv: torch.Tensor,
pad_mask: Optional[torch.Tensor] = None,
rot_pos_emb_q: Optional[RotaryPositionEmbedding] = None,
rot_pos_emb_k: Optional[RotaryPositionEmbedding] = None,
kv_cache: Optional[KVCache] = None,
):
"""...
:param x_q: Query input of shape (B, N, D) where B is the batch size, N the query sequence length and D the
number of query input channels (= `num_q_input_channels`)
:param x_kv: Key/value input of shape (B, L, C) where B is the batch size, L the key/value sequence length and C
the number of key/value input channels (= `num_kv_input_channels`)
:param pad_mask: Boolean key padding mask. `True` values indicate padding tokens.
:param rot_pos_emb_q: Applies a rotary position embedding to query i.e. if defined, rotates the query.
:param rot_pos_emb_k: Applies a rotary position embedding to key i.e. if defined, rotates the key.
:param kv_cache: cache with past keys and values.
:return: attention result of shape (B, N, F) where B is the batch size, N the query sequence length and F the
number of output channels (= `num_output_channels`)
"""
q = self.q_proj(x_q)
k = self.k_proj(x_kv)
v = self.v_proj(x_kv)
if kv_cache is not None:
k_cache, v_cache = kv_cache
k = torch.cat([k_cache, k], dim=1)
v = torch.cat([v_cache, v], dim=1)
kv_cache = (k, v)
q, k, v = (rearrange(x, "b n (h c) -> b h n c", h=self.num_heads) for x in [q, k, v])
q = q * self.dp_scale
if rot_pos_emb_q is not None:
q = rot_pos_emb_q.rotate(q)
if rot_pos_emb_k is not None:
k = rot_pos_emb_k.rotate(k)
if pad_mask is not None:
pad_mask = rearrange(pad_mask, "b j -> b 1 1 j")
if self.causal_attention:
i = q.shape[2]
j = k.shape[2]
# If q and k have different length, causal masking only works if they are right-aligned.
causal_mask = torch.ones((i, j), device=x_q.device, dtype=torch.bool).triu(j - i + 1)
o_chunks = []
# Only process a given maximum number of heads in
# parallel, using several iterations, if necessary.
for q_chunk, k_chunk, v_chunk in zip(
q.split(self.max_heads_parallel, dim=1),
k.split(self.max_heads_parallel, dim=1),
v.split(self.max_heads_parallel, dim=1),
):
attn = torch.einsum("b h i c, b h j c -> b h i j", q_chunk, k_chunk)
attn_max_neg = -torch.finfo(attn.dtype).max
if pad_mask is not None:
attn.masked_fill_(pad_mask, attn_max_neg)
if self.causal_attention:
attn.masked_fill_(causal_mask, attn_max_neg)
attn = attn.softmax(dim=-1)
attn = self.dropout(attn)
o_chunk = torch.einsum("b h i j, b h j c -> b h i c", attn, v_chunk)
o_chunks.append(o_chunk)
o = torch.cat(o_chunks, dim=1)
o = rearrange(o, "b h n c -> b n (h c)", h=self.num_heads)
o = self.o_proj(o)
return ModuleOutput(last_hidden_state=o, kv_cache=kv_cache)
|
...
:param x_q: Query input of shape (B, N, D) where B is the batch size, N the query sequence length and D the
number of query input channels (= `num_q_input_channels`)
:param x_kv: Key/value input of shape (B, L, C) where B is the batch size, L the key/value sequence length and C
the number of key/value input channels (= `num_kv_input_channels`)
:param pad_mask: Boolean key padding mask. `True` values indicate padding tokens.
:param rot_pos_emb_q: Applies a rotary position embedding to query i.e. if defined, rotates the query.
:param rot_pos_emb_k: Applies a rotary position embedding to key i.e. if defined, rotates the key.
:param kv_cache: cache with past keys and values.
:return: attention result of shape (B, N, F) where B is the batch size, N the query sequence length and F the
number of output channels (= `num_output_channels`)
|
forward
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
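A minimal usage sketch of `MultiHeadAttention` based on the constructor and forward signatures above; the import path is inferred from the file location and may differ.
import torch
# Hypothetical import path inferred from perceiver/model/core/modules.py.
from perceiver.model.core.modules import MultiHeadAttention
attn = MultiHeadAttention(num_heads=4, num_q_input_channels=64, num_kv_input_channels=32)
x_q = torch.randn(2, 10, 64)   # (B, N, D) query input
x_kv = torch.randn(2, 20, 32)  # (B, L, C) key/value input
out = attn(x_q, x_kv)
print(out.last_hidden_state.shape)  # torch.Size([2, 10, 64]); num_output_channels defaults to D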
def __init__(
self,
num_heads: int,
num_q_input_channels: int,
num_kv_input_channels: int,
num_qk_channels: Optional[int] = None,
num_v_channels: Optional[int] = None,
max_heads_parallel: Optional[int] = None,
causal_attention: bool = False,
dropout: float = 0.0,
qkv_bias: bool = True,
out_bias: bool = True,
):
"""Pre-layer-norm cross-attention (see `MultiHeadAttention` for attention details)."""
super().__init__()
self.q_norm = nn.LayerNorm(num_q_input_channels)
self.kv_norm = nn.LayerNorm(num_kv_input_channels)
self.attention = MultiHeadAttention(
num_heads=num_heads,
num_q_input_channels=num_q_input_channels,
num_kv_input_channels=num_kv_input_channels,
num_qk_channels=num_qk_channels,
num_v_channels=num_v_channels,
max_heads_parallel=max_heads_parallel,
causal_attention=causal_attention,
dropout=dropout,
qkv_bias=qkv_bias,
out_bias=out_bias,
)
|
Pre-layer-norm cross-attention (see `MultiHeadAttention` for attention details).
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def forward(
self,
x_q: torch.Tensor,
x_kv: Optional[torch.Tensor] = None,
x_kv_prefix: Optional[torch.Tensor] = None,
pad_mask: Optional[torch.Tensor] = None,
rot_pos_emb_q: Optional[RotaryPositionEmbedding] = None,
rot_pos_emb_k: Optional[RotaryPositionEmbedding] = None,
kv_cache: Optional[KVCache] = None,
):
"""Pre-layer-norm cross-attention of query input `x_q` to key/value input (`x_kv` or `x_kv_prefix`).
If `x_kv_prefix` is defined, the entire key/value input is a concatenation of `x_kv_prefix` and `x_q` along
the sequence dimension. In this case, the query attends to itself at the end of the key/value sequence (use
case: Perceiver AR). If `x_kv_prefix` is not defined, `x_kv` is the entire key/value input.
"""
x_q = self.q_norm(x_q)
if x_kv is None:
x_kv_prefix = self.kv_norm(x_kv_prefix)
x_kv = torch.cat([x_kv_prefix, x_q], dim=1)
else:
x_kv = self.kv_norm(x_kv)
return self.attention(
x_q, x_kv, pad_mask=pad_mask, rot_pos_emb_q=rot_pos_emb_q, rot_pos_emb_k=rot_pos_emb_k, kv_cache=kv_cache
)
|
Pre-layer-norm cross-attention of query input `x_q` to key/value input (`x_kv` or `x_kv_prefix`).
If `x_kv_prefix` is defined, the entire key/value input is a concatenation of `x_kv_prefix` and `x_q` along
the sequence dimension. In this case, the query attends to itself at the end of the key/value sequence (use
case: Perceiver AR). If `x_kv_prefix` is not defined, `x_kv` is the entire key/value input.
|
forward
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def __init__(
self,
num_heads: int,
num_channels: int,
num_qk_channels: Optional[int] = None,
num_v_channels: Optional[int] = None,
max_heads_parallel: Optional[int] = None,
causal_attention: bool = False,
dropout: float = 0.0,
qkv_bias: bool = True,
out_bias: bool = True,
):
"""Pre-layer norm self-attention (see `MultiHeadAttention` and for attention details)."""
super().__init__()
self.norm = nn.LayerNorm(num_channels)
self.attention = MultiHeadAttention(
num_heads=num_heads,
num_q_input_channels=num_channels,
num_kv_input_channels=num_channels,
num_qk_channels=num_qk_channels,
num_v_channels=num_v_channels,
max_heads_parallel=max_heads_parallel,
causal_attention=causal_attention,
dropout=dropout,
qkv_bias=qkv_bias,
out_bias=out_bias,
)
|
Pre-layer-norm self-attention (see `MultiHeadAttention` for attention details).
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def forward(
self,
x: torch.Tensor,
pad_mask: Optional[torch.Tensor] = None,
rot_pos_emb: Optional[RotaryPositionEmbedding] = None,
kv_cache: Optional[KVCache] = None,
):
"""Pre-layer-norm self-attention of input `x`."""
x = self.norm(x)
return self.attention(
x,
x,
pad_mask=pad_mask,
rot_pos_emb_q=rot_pos_emb,
rot_pos_emb_k=rot_pos_emb,
kv_cache=kv_cache,
)
|
Pre-layer-norm self-attention of input `x`.
|
forward
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def __init__(
self,
input_adapter: InputAdapter,
num_latents: int,
num_latent_channels: int,
num_cross_attention_heads: int = 4,
num_cross_attention_qk_channels: Optional[int] = None,
num_cross_attention_v_channels: Optional[int] = None,
num_cross_attention_layers: int = 1,
first_cross_attention_layer_shared: bool = False,
cross_attention_widening_factor: int = 1,
num_self_attention_heads: int = 4,
num_self_attention_qk_channels: Optional[int] = None,
num_self_attention_v_channels: Optional[int] = None,
num_self_attention_layers_per_block: int = 6,
num_self_attention_blocks: int = 1,
first_self_attention_block_shared: bool = True,
self_attention_widening_factor: int = 1,
dropout: float = 0.0,
residual_dropout: float = 0.0,
init_scale: float = 0.02,
activation_checkpointing: bool = False,
activation_offloading: bool = False,
):
"""Generic Perceiver IO encoder.
:param input_adapter: Transforms and position-encodes task-specific input to generic encoder input of shape (B,
M, C) where B is the batch size, M the input sequence length and C the number of key/value input
channels. C is determined by the `num_input_channels` property of the `input_adapter`.
:param num_latents: Number of latent variables (N).
:param num_latent_channels: Number of latent channels (D).
:param num_cross_attention_heads: Number of cross-attention heads.
:param num_cross_attention_qk_channels: Number of query and key channels for cross-attention
(see`MultiHeadAttention.num_qk_channels` for details).
:param num_cross_attention_v_channels: Number of value channels for cross-attention (see
`MultiHeadAttention.num_v_channels` for details).
:param num_cross_attention_layers: Number of cross-attention layers (alternating with self-attention blocks).
:param first_cross_attention_layer_shared: Whether the first cross-attention layer should share its weights with
subsequent cross-attention layers (if any).
:param num_self_attention_heads: Number of self-attention heads.
:param num_self_attention_qk_channels: Number of query and key channels for self-attention (see
`MultiHeadAttention.num_qk_channels` for details).
:param num_self_attention_v_channels: Number of value channels for self-attention
(see `MultiHeadAttention.num_v_channels` for details).
:param num_self_attention_layers_per_block: Number of self-attention layers per self-attention block.
:param num_self_attention_blocks: Number of self-attention blocks, with weights shared between corresponding
self-attention layers.
:param first_self_attention_block_shared: Whether the first self-attention block should share its weights with
subsequent self-attention blocks (if any).
:param dropout: Dropout probability for self- and cross-attention layers.
:param residual_dropout: Dropout probability for residual connections.
:param init_scale: Standard deviation for random normal initialization of parameters.
:param activation_checkpointing: If True, implements an activation checkpoint for each self-attention layer and
each cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
"""
super().__init__()
self.latent_provider = TrainableQueryProvider(num_latents, num_latent_channels, init_scale=init_scale)
self.input_adapter = input_adapter
if num_cross_attention_layers <= 0:
raise ValueError("num_cross_attention_layers must be > 0")
if num_self_attention_blocks <= 0:
raise ValueError("num_self_attention_blocks must be > 0")
if num_cross_attention_layers > num_self_attention_blocks:
raise ValueError("num_cross_attention_layers must be <= num_self_attention_blocks")
self.num_cross_attention_layers = num_cross_attention_layers
self.num_self_attention_blocks = num_self_attention_blocks
self.first_cross_attention_layer_shared = first_cross_attention_layer_shared
self.first_self_attention_block_shared = first_self_attention_block_shared
def cross_attn():
layer = CrossAttentionLayer(
num_heads=num_cross_attention_heads,
num_q_input_channels=num_latent_channels,
num_kv_input_channels=input_adapter.num_input_channels,
num_qk_channels=num_cross_attention_qk_channels,
num_v_channels=num_cross_attention_v_channels,
widening_factor=cross_attention_widening_factor,
dropout=dropout,
residual_dropout=residual_dropout,
)
return (
activation_checkpoint_wrapper(layer, offload_to_cpu=activation_offloading)
if activation_checkpointing else layer
)
def self_attn():
return SelfAttentionBlock(
num_layers=num_self_attention_layers_per_block,
num_heads=num_self_attention_heads,
num_channels=num_latent_channels,
num_qk_channels=num_self_attention_qk_channels,
num_v_channels=num_self_attention_v_channels,
widening_factor=self_attention_widening_factor,
dropout=dropout,
residual_dropout=residual_dropout,
activation_checkpointing=activation_checkpointing,
activation_offloading=activation_offloading,
)
self.cross_attn_1 = cross_attn()
self.self_attn_1 = self_attn()
if self.extra_cross_attention_layer:
self.cross_attn_n = cross_attn()
if self.extra_self_attention_block:
self.self_attn_n = self_attn()
self._init_parameters(init_scale)
|
Generic Perceiver IO encoder.
:param input_adapter: Transforms and position-encodes task-specific input to generic encoder input of shape (B,
M, C) where B is the batch size, M the input sequence length and C the number of key/value input
channels. C is determined by the `num_input_channels` property of the `input_adapter`.
:param num_latents: Number of latent variables (N).
:param num_latent_channels: Number of latent channels (D).
:param num_cross_attention_heads: Number of cross-attention heads.
:param num_cross_attention_qk_channels: Number of query and key channels for cross-attention
(see`MultiHeadAttention.num_qk_channels` for details).
:param num_cross_attention_v_channels: Number of value channels for cross-attention (see
`MultiHeadAttention.num_v_channels` for details).
:param num_cross_attention_layers: Number of cross-attention layers (alternating with self-attention blocks).
:param first_cross_attention_layer_shared: Whether the first cross-attention layer should share its weights with
subsequent cross-attention layers (if any).
:param num_self_attention_heads: Number of self-attention heads.
:param num_self_attention_qk_channels: Number of query and key channels for self-attention (see
`MultiHeadAttention.num_qk_channels` for details).
:param num_self_attention_v_channels: Number of value channels for self-attention
(see `MultiHeadAttention.num_v_channels` for details).
:param num_self_attention_layers_per_block: Number of self-attention layers per self-attention block.
:param num_self_attention_blocks: Number of self-attention blocks, with weights shared between corresponding
self-attention layers.
:param first_self_attention_block_shared: Whether the first self-attention block should share its weights with
subsequent self-attention blocks (if any).
:param dropout: Dropout probability for self- and cross-attention layers.
:param residual_dropout: Dropout probability for residual connections.
:param init_scale: Standard deviation for random normal initialization of parameters.
:param activation_checkpointing: If True, implements an activation checkpoint for each self-attention layer and
each cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def __init__(
self,
output_adapter: OutputAdapter,
output_query_provider: QueryProvider,
num_latent_channels: int,
num_cross_attention_heads: int = 4,
num_cross_attention_qk_channels: Optional[int] = None,
num_cross_attention_v_channels: Optional[int] = None,
cross_attention_widening_factor: int = 1,
cross_attention_residual: bool = True,
dropout: float = 0.0,
init_scale: float = 0.02,
activation_checkpointing: bool = False,
activation_offloading: bool = False,
):
"""Generic Perceiver IO decoder.
:param output_adapter: Transforms generic decoder cross-attention output of shape (B, O, F) to task-specific
output. B is the batch size, O the output sequence length and F the number of cross-attention output
channels.
:param output_query_provider: Provides the decoder's output query. Abstracts over output query details e.g. can
be a learned query, a deterministic function of the model's input, etc. Configured by `PerceiverIO`
subclasses.
:param num_latent_channels: Number of latent channels of the Perceiver IO encoder output.
:param num_cross_attention_heads: Number of cross-attention heads.
:param num_cross_attention_qk_channels: Number of query and key channels for cross-attention (see
`MultiHeadAttention.num_qk_channels` for details).
:param num_cross_attention_v_channels: Number of value channels for cross-attention
(see `MultiHeadAttention.num_v_channels` for details).
:param dropout: Dropout probability for cross-attention layer.
:param init_scale: Standard deviation for random normal initialization of parameters.
:param activation_checkpointing: If True, implements an activation checkpoint for the decoder's
cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
"""
super().__init__()
self.output_query_provider = output_query_provider
self.output_adapter = output_adapter
cross_attn = CrossAttentionLayer(
num_heads=num_cross_attention_heads,
num_q_input_channels=output_query_provider.num_query_channels,
num_kv_input_channels=num_latent_channels,
num_qk_channels=num_cross_attention_qk_channels,
num_v_channels=num_cross_attention_v_channels,
widening_factor=cross_attention_widening_factor,
attention_residual=cross_attention_residual,
dropout=dropout,
)
if activation_checkpointing:
cross_attn = activation_checkpoint_wrapper(cross_attn, offload_to_cpu=activation_offloading)
self.cross_attn = cross_attn
self._init_parameters(init_scale)
|
Generic Perceiver IO decoder.
:param output_adapter: Transforms generic decoder cross-attention output of shape (B, O, F) to task-specific
output. B is the batch size, O the output sequence length and F the number of cross-attention output
channels.
:param output_query_provider: Provides the decoder's output query. Abstracts over output query details e.g. can
be a learned query, a deterministic function of the model's input, etc. Configured by `PerceiverIO`
subclasses.
:param num_latent_channels: Number of latent channels of the Perceiver IO encoder output.
:param num_cross_attention_heads: Number of cross-attention heads.
:param num_cross_attention_qk_channels: Number of query and key channels for cross-attention (see
`MultiHeadAttention.num_qk_channels` for details).
:param num_cross_attention_v_channels: Number of value channels for cross-attention
(see `MultiHeadAttention.num_v_channels` for details).
:param dropout: Dropout probability for cross-attention layer.
:param init_scale: Standard deviation for random normal initialization of parameters.
:param activation_checkpointing: If True, implements an activation checkpoint for the decoder's
cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def __init__(
self,
input_adapter: RotarySupport,
num_heads: int = 8,
max_heads_parallel: Optional[int] = None,
num_self_attention_layers: int = 6,
num_self_attention_rotary_layers: int = 1,
self_attention_widening_factor: int = 4,
cross_attention_widening_factor: int = 4,
cross_attention_dropout: float = 0.5,
post_attention_dropout: float = 0.0,
residual_dropout: float = 0.0,
activation_checkpointing: bool = False,
activation_offloading: bool = False,
):
"""Implementation of Perceiver AR (https://arxiv.org/abs/2202.07765).
:param input_adapter: Transforms an input sequence to generic Perceiver AR input. An input adapter may choose to
add (absolute) position information to transformed inputs while `PerceiverAR` additionally computes a
rotary position embedding (i.e. relative position information) for queries and keys. To support the
computation of rotary position embeddings, concrete input adapters need to mixin `RotarySupport`.
:param num_heads: Number of cross- and self-attention heads.
:param max_heads_parallel: Maximum number of cross-attention heads to be processed in parallel.
Default is `num_heads`.
:param num_self_attention_layers: Number of self-attention layers.
:param cross_attention_dropout: Probability of dropping positions in the prefix sequence.
:param post_attention_dropout: Probability of dropping cross- and self-attention scores (same as `dropout` in
Perceiver IO encoder and decoder).
:param residual_dropout: Probability of dropping residual connections.
:param activation_checkpointing: If True, implements an activation checkpoint for each self-attention layer and
cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
"""
super().__init__()
def cross_attn():
layer = CrossAttentionLayer(
num_heads=num_heads,
num_q_input_channels=input_adapter.num_input_channels,
num_kv_input_channels=input_adapter.num_input_channels,
max_heads_parallel=max_heads_parallel,
causal_attention=True,
widening_factor=cross_attention_widening_factor,
dropout=post_attention_dropout,
residual_dropout=residual_dropout,
qkv_bias=False,
out_bias=True,
mlp_bias=False,
)
return (
activation_checkpoint_wrapper(layer, offload_to_cpu=activation_offloading)
if activation_checkpointing else layer
)
def self_attn():
return SelfAttentionBlock(
num_layers=num_self_attention_layers,
num_heads=num_heads,
num_channels=input_adapter.num_input_channels,
causal_attention=True,
widening_factor=self_attention_widening_factor,
dropout=post_attention_dropout,
residual_dropout=residual_dropout,
num_rotary_layers=num_self_attention_rotary_layers,
activation_checkpointing=activation_checkpointing,
activation_offloading=activation_offloading,
qkv_bias=False,
out_bias=False,
mlp_bias=False,
)
self.input_adapter = input_adapter
self.cross_attention_dropout = cross_attention_dropout
self.cross_attention = cross_attn()
self.self_attention = self_attn()
|
Implementation of Perceiver AR (https://arxiv.org/abs/2202.07765).
:param input_adapter: Transforms an input sequence to generic Perceiver AR input. An input adapter may choose to
add (absolute) position information to transformed inputs while `PerceiverAR` additionally computes a
rotary position embedding (i.e. relative position information) for queries and keys. To support the
computation of rotary position embeddings, concrete input adapters need to mixin `RotarySupport`.
:param num_heads: Number of cross- and self-attention heads.
:param max_heads_parallel: Maximum number of cross-attention heads to be processed in parallel.
Default is `num_heads`.
:param num_self_attention_layers: Number of self-attention layers.
:param cross_attention_dropout: Probability of dropping positions in the prefix sequence.
:param post_attention_dropout: Probability of dropping cross- and self-attention scores (same as `dropout` in
Perceiver IO encoder and decoder).
:param residual_dropout: Probability of dropping residual connections.
:param activation_checkpointing: If True, implements an activation checkpoint for each self-attention layer and
cross-attention layer.
:param activation_offloading: If True, offloads checkpointed activations to CPU.
|
__init__
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/modules.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/modules.py
|
Apache-2.0
|
def _positions(self, v_min=-1.0, v_max=1.0):
"""Create evenly spaced position coordinates for self.spatial_shape with values in [v_min, v_max].
:param v_min: minimum coordinate value per dimension.
:param v_max: maximum coordinate value per dimension.
:return: position coordinates tensor of shape (*shape, len(shape)).
"""
coords = [torch.linspace(v_min, v_max, steps=s) for s in self.input_shape]
return torch.stack(torch.meshgrid(*coords), dim=len(self.input_shape))
|
Create evenly spaced position coordinates for self.input_shape with values in [v_min, v_max].
:param v_min: minimum coordinate value per dimension.
:param v_max: maximum coordinate value per dimension.
:return: position coordinates tensor of shape (*shape, len(shape)).
|
_positions
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/position.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/position.py
|
Apache-2.0
|
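A standalone sketch of the coordinate grid built by `_positions`, here for a hypothetical 2x3 input shape.
import torch
shape = (2, 3)
coords = [torch.linspace(-1.0, 1.0, steps=s) for s in shape]
positions = torch.stack(torch.meshgrid(*coords, indexing="ij"), dim=len(shape))
print(positions.shape)  # torch.Size([2, 3, 2]); the last dim holds per-dimension coordinates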
def _position_encodings(
self, p: torch.Tensor, max_frequencies: Optional[Tuple[int, ...]] = None, include_positions: bool = True
) -> torch.Tensor:
"""Fourier-encode positions p using self.num_bands frequency bands.
:param p: positions of shape (*d, c) where c = len(d).
:param max_frequencies: maximum frequency for each dimension (1-tuple for sequences, 2-tuple for images, ...).
If `None` values are derived from shape of p.
:param include_positions: whether to include input positions p in returned encodings tensor.
:returns: position encodings tensor of shape (*d, c * (2 * num_bands + include_positions)).
"""
encodings = []
if max_frequencies is None:
max_frequencies = p.shape[:-1]
frequencies = [
torch.linspace(1.0, max_freq / 2.0, self.num_frequency_bands, device=p.device)
for max_freq in max_frequencies
]
frequency_grids = []
for i, frequencies_i in enumerate(frequencies):
frequency_grids.append(p[..., i : i + 1] * frequencies_i[None, ...])
if include_positions:
encodings.append(p)
encodings.extend([torch.sin(math.pi * frequency_grid) for frequency_grid in frequency_grids])
encodings.extend([torch.cos(math.pi * frequency_grid) for frequency_grid in frequency_grids])
return torch.cat(encodings, dim=-1)
|
Fourier-encode positions p using self.num_frequency_bands frequency bands.
:param p: positions of shape (*d, c) where c = len(d).
:param max_frequencies: maximum frequency for each dimension (1-tuple for sequences, 2-tuple for images, ...).
If `None`, values are derived from the shape of p.
:param include_positions: whether to include input positions p in returned encodings tensor.
:returns: position encodings tensor of shape (*d, c * (2 * num_frequency_bands + include_positions)).
|
_position_encodings
|
python
|
krasserm/perceiver-io
|
perceiver/model/core/position.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/core/position.py
|
Apache-2.0
|
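A standalone sketch of the Fourier feature construction described above, for a 1-D sequence of length 8 with 4 frequency bands, giving 1 * (2 * 4 + 1) = 9 channels per position.
import math
import torch
p = torch.linspace(-1.0, 1.0, steps=8).unsqueeze(-1)  # positions of shape (8, 1)
frequencies = torch.linspace(1.0, 8 / 2.0, steps=4)   # max frequency derived from the length
grid = p * frequencies[None, :]                        # (8, 4)
enc = torch.cat([p, torch.sin(math.pi * grid), torch.cos(math.pi * grid)], dim=-1)
print(enc.shape)  # torch.Size([8, 9])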
def convert_checkpoint(save_dir, ckpt_url, tokenizer_name, id2label=None, label2id=None, **kwargs):
"""Convert a `LitTextClassifier` checkpoint to a persistent `PerceiverTextClassifier`."""
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, verbose=False)
tokenizer.save_pretrained(save_dir, **kwargs)
model = PerceiverTextClassifier.from_checkpoint(ckpt_url)
model.config.tokenizer_class = tokenizer.__class__.__name__
if id2label is not None:
model.config.id2label = id2label
if label2id is not None:
model.config.label2id = label2id
model.save_pretrained(save_dir, **kwargs)
|
Convert a `LitTextClassifier` checkpoint to a persistent `PerceiverTextClassifier`.
|
convert_checkpoint
|
python
|
krasserm/perceiver-io
|
perceiver/model/text/classifier/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/text/classifier/huggingface.py
|
Apache-2.0
|
def convert_checkpoint(save_dir, ckpt_url, tokenizer_name, **kwargs):
"""Convert a `LitCausalLanguageModel` checkpoint to a persistent `PerceiverCausalLanguageModel`."""
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, padding_side="left", verbose=False)
tokenizer.save_pretrained(save_dir, **kwargs)
model = PerceiverCausalLanguageModel.from_checkpoint(ckpt_url)
model.config.tokenizer_class = tokenizer.__class__.__name__
model.save_pretrained(save_dir, **kwargs)
|
Convert a `LitCausalLanguageModel` checkpoint to a persistent `PerceiverCausalLanguageModel`.
|
convert_checkpoint
|
python
|
krasserm/perceiver-io
|
perceiver/model/text/clm/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/text/clm/huggingface.py
|
Apache-2.0
|
def convert_checkpoint(save_dir, ckpt_url, tokenizer_name, **kwargs):
"""Convert a `LitMaskedLanguageModel` checkpoint to a persistent `PerceiverMaskedLanguageModel`."""
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, verbose=False)
tokenizer.save_pretrained(save_dir, **kwargs)
model = PerceiverMaskedLanguageModel.from_checkpoint(ckpt_url)
model.config.tokenizer_class = tokenizer.__class__.__name__
model.save_pretrained(save_dir, **kwargs)
|
Convert a `LitMaskedLanguageModel` checkpoint to a persistent `PerceiverMaskedLanguageModel`.
|
convert_checkpoint
|
python
|
krasserm/perceiver-io
|
perceiver/model/text/mlm/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/text/mlm/huggingface.py
|
Apache-2.0
|
def convert_model(save_dir, source_repo_id="deepmind/language-perceiver", **kwargs):
"""Convert a Hugging Face `PerceiverForMaskedLM` to a persistent `PerceiverMaskedLanguageModel`."""
src_model = transformers.PerceiverForMaskedLM.from_pretrained(source_repo_id)
tgt_config = PerceiverMaskedLanguageModelConfig(convert_config(src_model.config))
tgt_model = PerceiverMaskedLanguageModel(tgt_config)
copy_text_encoder_params(src_model.perceiver, tgt_model.backend_model.encoder)
copy_text_decoder_params(src_model, tgt_model.backend_model.decoder)
src_tokenizer = AutoTokenizer.from_pretrained(source_repo_id, verbose=False)
src_tokenizer.save_pretrained(save_dir, **kwargs)
tgt_model.config.tokenizer_class = src_tokenizer.__class__.__name__
tgt_model.save_pretrained(save_dir, **kwargs)
|
Convert a Hugging Face `PerceiverForMaskedLM` to a persistent `PerceiverMaskedLanguageModel`.
|
convert_model
|
python
|
krasserm/perceiver-io
|
perceiver/model/text/mlm/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/text/mlm/huggingface.py
|
Apache-2.0
|
def convert_config(config: transformers.PerceiverConfig) -> MaskedLanguageModelConfig:
"""Convert a Hugging Face `PerceiverConfig` to a `PerceiverMaskedLanguageModelConfig`."""
assert config.hidden_act == "gelu"
assert config.tie_word_embeddings
encoder_config = TextEncoderConfig(
vocab_size=config.vocab_size,
max_seq_len=config.max_position_embeddings,
num_input_channels=config.d_model,
num_cross_attention_qk_channels=config.qk_channels,
num_cross_attention_v_channels=config.v_channels,
num_cross_attention_heads=config.num_cross_attention_heads,
num_self_attention_qk_channels=config.qk_channels,
num_self_attention_v_channels=config.v_channels,
num_self_attention_heads=config.num_self_attention_heads,
num_self_attention_layers_per_block=config.num_self_attends_per_block,
num_self_attention_blocks=config.num_blocks,
cross_attention_widening_factor=config.cross_attention_widening_factor,
self_attention_widening_factor=config.self_attention_widening_factor,
dropout=config.attention_probs_dropout_prob,
init_scale=config.initializer_range,
)
decoder_config = TextDecoderConfig(
vocab_size=config.vocab_size,
max_seq_len=config.max_position_embeddings,
num_cross_attention_qk_channels=config.qk_channels,
num_cross_attention_v_channels=config.d_model,
num_cross_attention_heads=config.num_cross_attention_heads,
cross_attention_widening_factor=config.cross_attention_widening_factor,
cross_attention_residual=False,
dropout=config.attention_probs_dropout_prob,
init_scale=config.initializer_range,
)
return MaskedLanguageModelConfig(
encoder_config,
decoder_config,
num_latents=config.num_latents,
num_latent_channels=config.d_latents,
)
|
Convert a Hugging Face `PerceiverConfig` to a `PerceiverMaskedLanguageModelConfig`.
|
convert_config
|
python
|
krasserm/perceiver-io
|
perceiver/model/text/mlm/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/text/mlm/huggingface.py
|
Apache-2.0
|
def convert_checkpoint(save_dir, ckpt_url, image_processor, id2label=None, label2id=None, **kwargs):
"""Convert a `LitImageClassifier` checkpoint to a persistent `PerceiverImageClassifier`."""
image_processor.save_pretrained(save_dir, **kwargs)
model = PerceiverImageClassifier.from_checkpoint(ckpt_url)
if id2label is not None:
model.config.id2label = id2label
if label2id is not None:
model.config.label2id = label2id
model.save_pretrained(save_dir, **kwargs)
|
Convert a `LitImageClassifier` checkpoint to a persistent `PerceiverImageClassifier`.
|
convert_checkpoint
|
python
|
krasserm/perceiver-io
|
perceiver/model/vision/image_classifier/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/vision/image_classifier/huggingface.py
|
Apache-2.0
|
def convert_config(config: transformers.PerceiverConfig) -> ImageClassifierConfig:
"""Convert a Hugging Face `PerceiverConfig` to a `PerceiverImageClassifierConfig`."""
assert config.hidden_act == "gelu"
encoder_config = ImageEncoderConfig(
image_shape=(224, 224, 3),
num_frequency_bands=64,
num_cross_attention_heads=config.num_cross_attention_heads,
num_self_attention_heads=config.num_self_attention_heads,
num_self_attention_layers_per_block=config.num_self_attends_per_block,
num_self_attention_blocks=config.num_blocks,
dropout=config.attention_probs_dropout_prob,
init_scale=config.initializer_range,
)
decoder_config = ClassificationDecoderConfig(
num_classes=config.num_labels,
num_output_query_channels=config.d_latents,
num_cross_attention_heads=config.num_cross_attention_heads,
cross_attention_residual=True,
dropout=config.attention_probs_dropout_prob,
init_scale=config.initializer_range,
)
return ImageClassifierConfig(
encoder_config,
decoder_config,
num_latents=config.num_latents,
num_latent_channels=config.d_latents,
)
|
Convert a Hugging Face `PerceiverConfig` to a `PerceiverImageClassifierConfig`.
|
convert_config
|
python
|
krasserm/perceiver-io
|
perceiver/model/vision/image_classifier/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/vision/image_classifier/huggingface.py
|
Apache-2.0
|
def convert_model(save_dir, source_repo_id="deepmind/vision-perceiver-fourier", **kwargs):
"""Convert a Hugging Face `PerceiverForImageClassificationFourier` to a persistent
`PerceiverImageClassifier`."""
src_model = transformers.PerceiverForImageClassificationFourier.from_pretrained(source_repo_id)
tgt_config = PerceiverImageClassifierConfig(
convert_config(src_model.config), id2label=src_model.config.id2label, label2id=src_model.config.label2id
)
tgt_model = PerceiverImageClassifier(tgt_config)
copy_image_encoder_params(src_model.perceiver, tgt_model.backend_model.encoder)
copy_classification_decoder_params(src_model.perceiver, tgt_model.backend_model.decoder)
tgt_model.save_pretrained(save_dir, **kwargs)
src_tokenizer = PerceiverImageClassifierInputProcessor.from_pretrained(source_repo_id, channels_last=True)
src_tokenizer.save_pretrained(save_dir, **kwargs)
|
Convert a Hugging Face `PerceiverForImageClassificationFourier` to a persistent
`PerceiverImageClassifier`.
|
convert_model
|
python
|
krasserm/perceiver-io
|
perceiver/model/vision/image_classifier/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/vision/image_classifier/huggingface.py
|
Apache-2.0
|
def convert_model(save_dir, source_repo_id="deepmind/optical-flow-perceiver", **kwargs):
"""Convert a Hugging Face `PerceiverForOpticalFlow` to a persistent `OpticalFlowPerceiver`."""
src_model = transformers.PerceiverForOpticalFlow.from_pretrained(source_repo_id)
tgt_config = OpticalFlowPerceiverConfig(convert_config(src_model.config))
tgt_model = OpticalFlowPerceiver(tgt_config)
copy_flow_encoder_params(src_model.perceiver, tgt_model.backend_model.encoder)
copy_flow_decoder_params(src_model.perceiver, tgt_model.backend_model.decoder)
tgt_model.save_pretrained(save_dir, **kwargs)
|
Convert a Hugging Face `PerceiverForOpticalFlow` to a persistent `OpticalFlowPerceiver`.
|
convert_model
|
python
|
krasserm/perceiver-io
|
perceiver/model/vision/optical_flow/huggingface.py
|
https://github.com/krasserm/perceiver-io/blob/master/perceiver/model/vision/optical_flow/huggingface.py
|
Apache-2.0
|
def is_datetime_naive(dt):
"""
This method returns True if the datetime is naive, else returns False.
"""
if dt.tzinfo is None:
return True
else:
return False
|
This method returns True if the datetime is naive, else returns False.
|
is_datetime_naive
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def _move_datetime(dt, direction, delta):
"""
Move datetime by the given delta in the given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
|
Move datetime by the given delta in the given direction
|
_move_datetime
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
|
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
|
move_datetime_month
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
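An illustration of the month shift above using `dateutil.relativedelta` directly; 'next' adds the delta and 'last' subtracts it (see `_move_datetime`), and relativedelta clamps the day when the target month is shorter.
from datetime import datetime
from dateutil.relativedelta import relativedelta
dt = datetime(2015, 1, 31)
print(dt + relativedelta(months=+1))  # 2015-02-28 00:00:00 ('next' direction)
print(dt - relativedelta(months=+1))  # 2014-12-31 00:00:00 ('last' direction)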
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta)
|
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
|
move_datetime_week
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta)
|
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
|
move_datetime_year
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def datetime_timezone(tz):
"""
Given a timezone, this method returns a localized datetime object.
"""
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime
|
Given a timezone, this method returns a localized datetime object.
|
datetime_timezone
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def localize(dt, tz):
"""
Given a naive datetime object this method will return a localized
datetime object
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt)
|
Given a naive datetime object this method will return a localized
datetime object
|
localize
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def normalize(dt, tz):
"""
Given an object with a timezone, return a datetime object
normalized to the proper timezone.
This means it takes the given localized datetime and returns it
normalized to match the specified timezone.
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt
|
Given an object with a timezone, return a datetime object
normalized to the proper timezone.
This means it takes the given localized datetime and returns it
normalized to match the specified timezone.
|
normalize
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
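A small sketch of the `localize`/`normalize` pair using pytz directly, since both helpers above are thin wrappers around it; the timezone names are illustrative.
from datetime import datetime
import pytz
naive = datetime(2015, 1, 1, 12, 0)
aware_utc = pytz.timezone('UTC').localize(naive)               # what localize(naive, 'UTC') does
in_pacific = pytz.timezone('US/Pacific').normalize(aware_utc)  # what normalize(aware_utc, 'US/Pacific') does
print(in_pacific.isoformat())  # 2015-01-01T04:00:00-08:00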
def __getattr__(self, name):
"""
Implement __getattr__ to dispatch to the `_shift_date` method when the
requested attribute does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
raise AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
|
Implement __getattr__ to dispatch to the `_shift_date` method when the
requested attribute does not exist
|
__getattr__
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` (one of _VALID_SHIFT_DIRECTIONS) by some
unit in _VALID_SHIFT_UNITS, repeating the shift by some multiple
defined by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
|
Shift datetime in `direction` (one of _VALID_SHIFT_DIRECTIONS) by some
unit in _VALID_SHIFT_UNITS, repeating the shift by some multiple
defined by args[0] if it exists
|
_shift_date
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
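A hedged usage sketch of the dynamic shift API implemented by `__getattr__` and `_shift_date` above; attribute names are composed as `<direction>_<unit>`, and the example assumes 'day' and 'tuesday' are among the valid shift units.
from datetime import datetime
from delorean import Delorean
d = Delorean(datetime=datetime(2015, 1, 1), timezone='UTC')
print(d.next_day(2))     # dispatched via __getattr__ to _shift_date('next', 'day', 2)
print(d.last_tuesday())  # named weekdays are handled by move_datetime_namedday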
def truncate(self, s):
"""
Truncate the Delorean object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
|
Truncate the Delorean object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
|
truncate
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def shift(self, timezone):
"""
Shifts the Delorean object from its current timezone to the specified timezone,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
|
Shifts the Delorean object from its current timezone to the specified timezone,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
|
shift
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
|
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
|
epoch
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
|
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
|
humanize
|
python
|
myusuf3/delorean
|
delorean/dates.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/dates.py
|
MIT
|
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
"""
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffset` as its timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 -0400')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-240))
"""
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do
|
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffset` as its timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 -0400')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-240))
|
parse
|
python
|
myusuf3/delorean
|
delorean/interface.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/interface.py
|
MIT
|
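A few hedged usage examples of parse that restate the behaviors documented above; the expected values in the comments follow the doctests rather than independent verification.

from delorean import parse

# Ambiguous 3-integer dates: dayfirst picks day vs. month once yearfirst is disabled.
d_day_first = parse('01/05/09', yearfirst=False, dayfirst=True)     # 2009-05-01
d_month_first = parse('01/05/09', yearfirst=False, dayfirst=False)  # 2009-01-05

# A fixed offset in the string is kept unless an explicit timezone overrides it.
d_offset = parse('2015-01-01 00:01:02 -0800')                       # pytz.FixedOffset(-480)
d_tz = parse('2015-01-01 00:01:02 -0800', timezone='US/Pacific')    # offset ignored, US/Pacific used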
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of Delorean objects that apply to the
settings passed in.
"""
# check to see if the datetimes passed in are naive; if so, process them
# with the given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
|
This will create a list of Delorean objects that apply to the
settings passed in.
|
stops
|
python
|
myusuf3/delorean
|
delorean/interface.py
|
https://github.com/myusuf3/delorean/blob/master/delorean/interface.py
|
MIT
|
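A minimal consumption sketch for the stops generator, mirroring the MONTHLY usage in the test further below; the frequency constant is re-exported by delorean, and the start value is illustrative.

from datetime import datetime
import delorean

# First three monthly stops from a naive start datetime, localized to UTC by stops().
days = list(delorean.interface.stops(delorean.MONTHLY, count=3,
                                     start=datetime(2015, 1, 1)))
assert len(days) == 3
assert days[0] == delorean.Delorean(datetime=datetime(2015, 1, 1), timezone='UTC')
assert days[1] == delorean.Delorean(datetime=datetime(2015, 2, 1), timezone='UTC')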
def test_timezone_delorean_to_datetime_to_delorean_non_utc(self):
"""Test if when you create Delorean object from Delorean's datetime
it still behaves the same
"""
d1 = delorean.Delorean(timezone='America/Chicago')
d2 = delorean.Delorean(d1.datetime)
# these deloreans should be the same
self.assertEqual(d1.next_day(1), d2.next_day(1))
self.assertEqual(d2.last_week(), d2.last_week())
self.assertEqual(d1.timezone, d2.timezone)
self.assertEqual(d1, d2)
|
Test that when you create a Delorean object from another Delorean's datetime
it still behaves the same
|
test_timezone_delorean_to_datetime_to_delorean_non_utc
|
python
|
myusuf3/delorean
|
tests/delorean_tests.py
|
https://github.com/myusuf3/delorean/blob/master/tests/delorean_tests.py
|
MIT
|
def test_stops_bymonth(self):
"""Test if create stops, checks bymonth, bymonthday, count
and start parameters work properly
"""
days = list(delorean.interface.stops(
delorean.MONTHLY,
bymonth=(1, 4, 7, 10),
bymonthday=15,
count=4,
start=datetime(datetime.now().year, 1, 1))
)
year = datetime.now().year
day = 15
dt1 = datetime(year, 1, day)
dt4 = datetime(year, 4, day)
dt7 = datetime(year, 7, day)
dt10 = datetime(year, 10, day)
self.assertTrue(len(days) == 4)
dl1 = delorean.Delorean(datetime=dt1, timezone='UTC')
self.assertEqual(days[0], dl1)
dl4 = delorean.Delorean(datetime=dt4, timezone='UTC')
self.assertEqual(days[1], dl4)
dl7 = delorean.Delorean(datetime=dt7, timezone='UTC')
self.assertEqual(days[2], dl7)
dl10 = delorean.Delorean(datetime=dt10, timezone='UTC')
self.assertEqual(days[3], dl10)
|
Test that creating stops with the bymonth, bymonthday, count
and start parameters works properly
|
test_stops_bymonth
|
python
|
myusuf3/delorean
|
tests/delorean_tests.py
|
https://github.com/myusuf3/delorean/blob/master/tests/delorean_tests.py
|
MIT
|
def create_table():
"""
Creates the 'images' table in the SQLite database if it doesn't exist.
"""
with connection:
connection.execute('''
CREATE TABLE IF NOT EXISTS images (
id INTEGER PRIMARY KEY,
filename TEXT NOT NULL,
file_path TEXT NOT NULL,
file_date TEXT NOT NULL,
file_md5 TEXT NOT NULL,
embeddings BLOB
)
''')
connection.execute('CREATE INDEX IF NOT EXISTS idx_filename ON images (filename)')
connection.execute('CREATE INDEX IF NOT EXISTS idx_file_path ON images (file_path)')
logger.info("Table 'images' ensured to exist.")
|
Creates the 'images' table in the SQLite database if it doesn't exist.
|
create_table
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
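A standalone sketch of the same schema, using an in-memory SQLite connection instead of the module-level connection the script relies on; the row values are placeholders.

import sqlite3

connection = sqlite3.connect(':memory:')
with connection:
    connection.execute('''
        CREATE TABLE IF NOT EXISTS images (
            id INTEGER PRIMARY KEY,
            filename TEXT NOT NULL,
            file_path TEXT NOT NULL,
            file_date TEXT NOT NULL,
            file_md5 TEXT NOT NULL,
            embeddings BLOB
        )
    ''')
    connection.execute(
        "INSERT INTO images (filename, file_path, file_date, file_md5) VALUES (?, ?, ?, ?)",
        ('cat.jpg', '/photos/cat.jpg', 'Thu Jan  1 00:00:00 2015', 'placeholder-md5'))
print(connection.execute("SELECT filename, file_md5 FROM images").fetchall())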
def file_generator(directory):
"""
Generates file paths for all files in the specified directory and its subdirectories.
:param directory: The directory path to search for files.
:return: A generator yielding file paths.
"""
logger.debug(f"Generating file paths for directory: {directory}")
for root, _, files in os.walk(directory):
for file in files:
yield os.path.join(root, file)
|
Generates file paths for all files in the specified directory and its subdirectories.
:param directory: The directory path to search for files.
:return: A generator yielding file paths.
|
file_generator
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
def hydrate_cache(directory, cache_file_path):
"""
Loads or generates a cache of file paths for the specified directory.
:param directory: The directory path to search for files.
:param cache_file_path: The path to the cache file.
:return: A list of cached file paths.
"""
logger.info(f"Hydrating cache for {directory} using {cache_file_path}...")
if os.path.exists(cache_file_path):
try:
with open(cache_file_path, 'rb') as f:
cached_files = msgpack.load(f)
logger.info(f"Loaded cached files from {cache_file_path}")
if len(cached_files) == 0:
logger.warning(f"Cache file {cache_file_path} is empty. Regenerating cache...")
cached_files = list(file_generator(directory))
with open(cache_file_path, 'wb') as f:
msgpack.dump(cached_files, f)
logger.info(f"Regenerated cache with {len(cached_files)} files and dumped to {cache_file_path}")
except (msgpack.UnpackException, IOError) as e:
logger.error(f"Error loading cache file {cache_file_path}: {e}. Regenerating cache...")
cached_files = list(file_generator(directory))
with open(cache_file_path, 'wb') as f:
msgpack.dump(cached_files, f)
logger.info(f"Regenerated cache with {len(cached_files)} files and dumped to {cache_file_path}")
else:
logger.info(f"Cache file not found at {cache_file_path}. Creating cache dirlist for {directory}...")
cached_files = list(file_generator(directory))
try:
with open(cache_file_path, 'wb') as f:
msgpack.dump(cached_files, f)
logger.info(f"Created cache with {len(cached_files)} files and dumped to {cache_file_path}")
except IOError as e:
logger.error(f"Error creating cache file {cache_file_path}: {e}. Proceeding without cache.")
return cached_files
|
Loads or generates a cache of file paths for the specified directory.
:param directory: The directory path to search for files.
:param cache_file_path: The path to the cache file.
:return: A list of cached file paths.
|
hydrate_cache
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
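A minimal round-trip of the msgpack cache format that hydrate_cache reads and writes; the file name is hypothetical, and str round-tripping assumes msgpack >= 1.0.

import msgpack

cached_files = ['/photos/a.jpg', '/photos/b.jpg']  # hypothetical file list
with open('filelist.msgpack', 'wb') as f:
    msgpack.dump(cached_files, f)
with open('filelist.msgpack', 'rb') as f:
    restored = msgpack.load(f)
assert restored == cached_files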
def update_db(image):
"""
Updates the database with the image embeddings.
:param image: A dictionary containing image information.
"""
try:
embeddings_blob = sqlite3.Binary(msgpack.dumps(image.get('embeddings', [])))
with sqlite3.connect(SQLITE_DB_FILEPATH) as conn:
conn.execute("UPDATE images SET embeddings = ? WHERE filename = ?",
(embeddings_blob, image['filename']))
logger.debug(f"Database updated successfully for image: {image['filename']}")
except sqlite3.Error as e:
logger.error(f"Database update failed for image: {image['filename']}. Error: {e}")
|
Updates the database with the image embeddings.
:param image: A dictionary containing image information.
|
update_db
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
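A sketch of reading an embeddings blob back out of the table, mirroring the msgpack + sqlite3.Binary encoding that update_db writes; the database path and filename are placeholders.

import sqlite3
import msgpack

with sqlite3.connect('images.db') as conn:  # placeholder path
    row = conn.execute(
        "SELECT embeddings FROM images WHERE filename = ?", ('cat.jpg',)).fetchone()
embeddings = msgpack.loads(row[0]) if row and row[0] else []
print(f"{len(embeddings)}-dimensional embedding" if embeddings else "no embedding stored")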
def process_image(file_path):
"""
Processes an image file by extracting metadata and inserting it into the database.
:param file_path: The path to the image file.
"""
file = os.path.basename(file_path)
file_date = time.ctime(os.path.getmtime(file_path))
with open(file_path, 'rb') as f:
file_content = f.read()
file_md5 = hashlib.md5(file_content).hexdigest()
conn = None
try:
conn = sqlite3.connect(SQLITE_DB_FILEPATH)
with conn:
cursor = conn.cursor()
cursor.execute('''
SELECT EXISTS(SELECT 1 FROM images WHERE filename=? AND file_path=? LIMIT 1)
''', (file, file_path))
result = cursor.fetchone()
file_exists = result[0] if result else False
if not file_exists:
cursor.execute('''
INSERT INTO images (filename, file_path, file_date, file_md5)
VALUES (?, ?, ?, ?)
''', (file, file_path, file_date, file_md5))
logger.debug(f'Inserted {file} with metadata into the database.')
else:
logger.debug(f'File {file} already exists in the database. Skipping insertion.')
except sqlite3.Error as e:
logger.error(f'Error processing image {file}: {e}')
finally:
if conn:
conn.close()
|
Processes an image file by extracting metadata and inserting it into the database.
:param file_path: The path to the image file.
|
process_image
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
def process_embeddings(photo):
"""
Processes image embeddings by uploading them to the embedding server and updating the database.
:param photo: A dictionary containing photo information.
"""
logger.debug(f"Processing photo: {photo['filename']}")
if photo['embeddings']:
logger.debug(f"Photo {photo['filename']} already has embeddings. Skipping.")
return
try:
start_time = time.time()
imemb = clip.image_encoder(photo['file_path'])
photo['embeddings'] = imemb
update_db(photo)
end_time = time.time()
logger.debug(f"Processed embeddings for {photo['filename']} in {end_time - start_time:.5f} seconds")
except Exception as e:
logger.error(f"Error generating embeddings for {photo['filename']}: {e}")
|
Processes image embeddings by uploading them to the embedding server and updating the database.
:param photo: A dictionary containing photo information.
|
process_embeddings
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
def main():
"""
Main function to process images and embeddings.
"""
cache_start_time = time.time()
cached_files = hydrate_cache(SOURCE_IMAGE_DIRECTORY, FILELIST_CACHE_FILEPATH)
cache_end_time = time.time()
logger.info(f"Cache operation took {cache_end_time - cache_start_time:.2f} seconds")
logger.info(f"Directory has {len(cached_files)} files: {SOURCE_IMAGE_DIRECTORY}")
create_table()
with ThreadPoolExecutor() as executor:
futures = []
for file_path in cached_files:
if file_path.lower().endswith('.jpg'):
future = executor.submit(process_image, file_path)
futures.append(future)
for future in futures:
future.result()
with connection:
cursor = connection.cursor()
cursor.execute("SELECT filename, file_path, file_date, file_md5, embeddings FROM images")
photos = [{'filename': row[0], 'file_path': row[1], 'file_date': row[2], 'file_md5': row[3], 'embeddings': msgpack.loads(row[4]) if row[4] else []} for row in cursor.fetchall()]
# for photo in photos:
# photo['embeddings'] = msgpack.loads(photo['embeddings']) if photo['embeddings'] else []
num_photos = len(photos)
logger.info(f"Loaded {len(photos)} photos from database")
# can't use ThreadPoolExecutor here because of MLX memory constraints
start_time = time.time()
photo_ite = 0
for photo in photos:
process_embeddings(photo)
photo_ite += 1
if log_level != 'DEBUG':
if photo_ite % 100 == 0:
logger.info(f"Processed {photo_ite}/{num_photos} photos")
end_time = time.time()
logger.info(f"Generated embeddings for {len(photos)} photos in {end_time - start_time:.2f} seconds")
connection.close()
logger.info("Database connection pool closed.")
logger.info(f"Initializing Chrome DB: {CHROMA_COLLECTION_NAME}")
client = chromadb.PersistentClient(path=CHROMA_DB_PATH)
collection = client.get_or_create_collection(name=CHROMA_COLLECTION_NAME)
logger.info(f"Generated embeddings for {len(photos)} photos")
start_time = time.time()
photo_ite = 0
for photo in photos:
# Skip processing if the photo does not have embeddings
if not photo['embeddings']:
logger.debug(f"[{photo_ite}/{num_photos}] Photo {photo['filename']} has no embeddings. Skipping addition to Chroma.")
continue
try:
# Add the photo's embeddings to the Chroma collection
item = collection.get(ids=[photo['filename']])
if item['ids'] != []:
continue
collection.add(
embeddings=[photo["embeddings"]],
documents=[photo['filename']],
ids=[photo['filename']]
)
logger.debug(f"[{photo_ite}/{num_photos}] Added embedding to Chroma for {photo['filename']}")
photo_ite += 1
if log_level != 'DEBUG':
if photo_ite % 100 == 0:
logger.info(f"Processed {photo_ite}/{num_photos} photos")
except Exception as e:
# Log an error if the addition to Chroma fails
logger.error(f"[{photo_ite}/{num_photos}] Failed to add embedding to Chroma for {photo['filename']}: {e}")
end_time = time.time()
logger.info(f"Inserted embeddings {len(photos)} photos into Chroma in {end_time - start_time:.2f} seconds")
|
Main function to process images and embeddings.
|
main
|
python
|
harperreed/photo-similarity-search
|
generate_embeddings.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/generate_embeddings.py
|
MIT
|
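A sketch of querying the Chroma collection that main populates, assuming chromadb is installed; the path, collection name and query embedding are placeholders, and the embedding dimension must match the CLIP model used above.

import chromadb

client = chromadb.PersistentClient(path='chroma_db')         # placeholder path
collection = client.get_or_create_collection(name='photos')  # placeholder name
query_embedding = [0.0] * 512                                 # placeholder query vector
results = collection.query(query_embeddings=[query_embedding], n_results=5)
for photo_id, distance in zip(results['ids'][0], results['distances'][0]):
    print(photo_id, distance)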
def serve_image(filename):
"""
Serve a resized image directly from the filesystem outside of the static directory.
"""
# Construct the full file path. Be careful with security implications.
# Ensure that you validate `filename` to prevent directory traversal attacks.
filepath = os.path.join(SOURCE_IMAGE_DIRECTORY, filename)
if not os.path.exists(filepath):
# You can return a default image or a 404 error if the file does not exist.
return "Image not found", 404
# Check the file size
file_size = os.path.getsize(filepath)
if file_size > 1 * 1024 * 1024: # File size is greater than 1 megabyte
with Image.open(filepath) as img:
# Resize the image to half the original size
img.thumbnail((img.width // 2, img.height // 2))
img = ImageOps.exif_transpose(img)
# Save the resized image to a BytesIO object
img_io = BytesIO()
img.save(img_io, 'JPEG', quality=85)
img_io.seek(0)
return send_file(img_io, mimetype='image/jpeg')
return send_file(filepath)
|
Serve a resized image directly from the filesystem outside of the static directory.
|
serve_image
|
python
|
harperreed/photo-similarity-search
|
start_web.py
|
https://github.com/harperreed/photo-similarity-search/blob/master/start_web.py
|
MIT
|
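A sketch of wiring a view like serve_image into a Flask app, including the directory-traversal guard the comment above warns about; the route, directory and size threshold are assumptions rather than values taken from start_web.py.

import os
from io import BytesIO

from flask import Flask, send_file
from PIL import Image, ImageOps

app = Flask(__name__)
SOURCE_IMAGE_DIRECTORY = '/photos'  # assumed location

@app.route('/image/<path:filename>')
def serve_image(filename):
    # Resolve the path and reject anything that escapes the image directory.
    filepath = os.path.abspath(os.path.join(SOURCE_IMAGE_DIRECTORY, filename))
    if not filepath.startswith(os.path.abspath(SOURCE_IMAGE_DIRECTORY) + os.sep):
        return "Invalid path", 400
    if not os.path.exists(filepath):
        return "Image not found", 404
    if os.path.getsize(filepath) > 1 * 1024 * 1024:
        with Image.open(filepath) as img:
            # Halve oversized images and honour EXIF orientation before sending.
            img.thumbnail((img.width // 2, img.height // 2))
            img = ImageOps.exif_transpose(img)
            img_io = BytesIO()
            img.save(img_io, 'JPEG', quality=85)
            img_io.seek(0)
            return send_file(img_io, mimetype='image/jpeg')
    return send_file(filepath)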
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
|
Load a config file and merge it into the default options.
|
cfg_from_file
|
python
|
sshaoshuai/PointRCNN
|
lib/config.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/config.py
|
MIT
|
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]), type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
|
Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
|
_merge_a_into_b
|
python
|
sshaoshuai/PointRCNN
|
lib/config.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/config.py
|
MIT
|
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(type(value), type(d[subkey]))
d[subkey] = value
|
Set config keys via list (e.g., from command line).
|
cfg_from_list
|
python
|
sshaoshuai/PointRCNN
|
lib/config.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/config.py
|
MIT
|
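A minimal sketch of the dotted-key override mechanism cfg_from_list implements, using a standalone easydict config instead of PointRCNN's module-level __C; the key names and values are illustrative.

from ast import literal_eval
from easydict import EasyDict as edict

cfg = edict({'TRAIN': edict({'LR': 0.002, 'EPOCHS': 50}), 'USE_GPU': True})

def set_cfg_from_list(cfg, cfg_list):
    # Same alternating key/value scheme as cfg_from_list above.
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        d = cfg
        *parents, leaf = k.split('.')
        for sub in parents:
            d = d[sub]
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            value = v  # keep plain strings as-is
        assert type(value) == type(d[leaf]), 'type mismatch for %s' % k
        d[leaf] = value

set_cfg_from_list(cfg, ['TRAIN.LR', '0.01', 'USE_GPU', 'False'])
print(cfg.TRAIN.LR, cfg.USE_GPU)  # 0.01 False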
def preprocess_rpn_training_data(self):
"""
Discard samples that don't contain any of the current classes, since they will not be used for training.
Valid sample_ids are stored in self.sample_id_list
"""
self.logger.info('Loading %s samples from %s ...' % (self.mode, self.label_dir))
for idx in range(0, self.num_sample):
sample_id = int(self.image_idx_list[idx])
obj_list = self.filtrate_objects(self.get_label(sample_id))
if len(obj_list) == 0:
# self.logger.info('No gt classes: %06d' % sample_id)
continue
self.sample_id_list.append(sample_id)
self.logger.info('Done: filter %s results: %d / %d\n' % (self.mode, len(self.sample_id_list),
len(self.image_idx_list)))
|
Discard samples that don't contain any of the current classes, since they will not be used for training.
Valid sample_ids are stored in self.sample_id_list
|
preprocess_rpn_training_data
|
python
|
sshaoshuai/PointRCNN
|
lib/datasets/kitti_rcnn_dataset.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/datasets/kitti_rcnn_dataset.py
|
MIT
|