date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---
2024-01-10 | Abhishek-1211/langchain | libs~langchain~tests~integration_tests~chat_models~test_vertexai.py | """Test Vertex AI API wrapper.
In order to run this test, you need to install the Vertex AI SDK:
pip install google-cloud-aiplatform>=1.35.0
Your end-user credentials are used to make the calls (make sure you've run
`gcloud auth login` first).
"""
from typing import Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import LLMResult
from langchain.chat_models import ChatVertexAI
from langchain.chat_models.vertexai import _parse_chat_history, _parse_examples
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_instantiation(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
assert model._llm_type == "vertexai"
assert model.model_name == model.client._model_id
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
message = HumanMessage(content="Hello")
response = model([message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
# mark xfail because Vertex API randomly doesn't respect
# the n/candidate_count parameter
@pytest.mark.xfail
@pytest.mark.scheduled
def test_candidates() -> None:
model = ChatVertexAI(model_name="chat-bison@001", temperature=0.3, n=2)
message = HumanMessage(content="Hello")
response = model.generate(messages=[[message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 1
assert len(response.generations[0]) == 2
@pytest.mark.scheduled
async def test_vertexai_agenerate() -> None:
model = ChatVertexAI(temperature=0)
message = HumanMessage(content="Hello")
response = await model.agenerate([[message]])
assert isinstance(response, LLMResult)
assert isinstance(response.generations[0][0].message, AIMessage) # type: ignore
sync_response = model.generate([[message]])
assert response.generations[0][0] == sync_response.generations[0][0]
@pytest.mark.scheduled
async def test_vertexai_stream() -> None:
model = ChatVertexAI(temperature=0)
message = HumanMessage(content="Hello")
sync_response = model.stream([message])
for chunk in sync_response:
assert isinstance(chunk, AIMessageChunk)
@pytest.mark.scheduled
def test_vertexai_single_call_with_context() -> None:
model = ChatVertexAI()
raw_context = (
"My name is Ned. You are my personal assistant. My favorite movies "
"are Lord of the Rings and Hobbit."
)
question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_vertexai_single_call_with_examples() -> None:
model = ChatVertexAI()
raw_context = "My name is Ned. You are my personal assistant."
question = "2+2"
text_question, text_answer = "4+4", "8"
inp = HumanMessage(content=text_question)
output = AIMessage(content=text_answer)
context = SystemMessage(content=raw_context)
message = HumanMessage(content=question)
response = model([context, message], examples=[inp, output])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
@pytest.mark.parametrize("model_name", [None, "codechat-bison", "chat-bison"])
def test_vertexai_single_call_with_history(model_name: str) -> None:
if model_name:
model = ChatVertexAI(model_name=model_name)
else:
model = ChatVertexAI()
text_question1, text_answer1 = "How much is 2+2?", "4"
text_question2 = "How much is 3+3?"
message1 = HumanMessage(content=text_question1)
message2 = AIMessage(content=text_answer1)
message3 = HumanMessage(content=text_question2)
response = model([message1, message2, message3])
assert isinstance(response, AIMessage)
assert isinstance(response.content, str)
def test_parse_chat_history_correct() -> None:
from vertexai.language_models import ChatMessage
text_context = (
"My name is Ned. You are my personal assistant. My "
"favorite movies are Lord of the Rings and Hobbit."
)
context = SystemMessage(content=text_context)
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
history = _parse_chat_history([context, question, answer, question, answer])
assert history.context == context.content
assert len(history.history) == 4
assert history.history == [
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
ChatMessage(content=text_question, author="user"),
ChatMessage(content=text_answer, author="bot"),
]
def test_vertexai_single_call_fails_no_message() -> None:
chat = ChatVertexAI()
with pytest.raises(ValueError) as exc_info:
_ = chat([])
assert (
str(exc_info.value)
== "You should provide at least one message to start the chat!"
)
@pytest.mark.parametrize("stop", [None, "stop1"])
def test_vertexai_args_passed(stop: Optional[str]) -> None:
response_text = "Goodbye"
user_prompt = "Hello"
prompt_params = {
"max_output_tokens": 1,
"temperature": 10000.0,
"top_k": 10,
"top_p": 0.5,
}
# Mock the library to ensure the args are passed correctly
with patch(
"vertexai.language_models._language_models.ChatModel.start_chat"
) as start_chat:
mock_response = MagicMock()
mock_response.candidates = [Mock(text=response_text)]
mock_chat = MagicMock()
start_chat.return_value = mock_chat
mock_send_message = MagicMock(return_value=mock_response)
mock_chat.send_message = mock_send_message
model = ChatVertexAI(**prompt_params)
message = HumanMessage(content=user_prompt)
if stop:
response = model([message], stop=[stop])
else:
response = model([message])
assert response.content == response_text
mock_send_message.assert_called_once_with(user_prompt, candidate_count=1)
expected_stop_sequence = [stop] if stop else None
start_chat.assert_called_once_with(
context=None,
message_history=[],
**prompt_params,
stop_sequences=expected_stop_sequence,
)
def test_parse_examples_correct() -> None:
from vertexai.language_models import InputOutputTextPair
text_question = (
"Hello, could you recommend a good movie for me to watch this evening, please?"
)
question = HumanMessage(content=text_question)
text_answer = (
"Sure, You might enjoy The Lord of the Rings: The Fellowship of the Ring "
"(2001): This is the first movie in the Lord of the Rings trilogy."
)
answer = AIMessage(content=text_answer)
examples = _parse_examples([question, answer, question, answer])
assert len(examples) == 2
assert examples == [
InputOutputTextPair(input_text=text_question, output_text=text_answer),
InputOutputTextPair(input_text=text_question, output_text=text_answer),
]
def test_parse_examples_fails_wrong_sequence() -> None:
with pytest.raises(ValueError) as exc_info:
_ = _parse_examples([AIMessage(content="a")])
print(str(exc_info.value))
assert (
str(exc_info.value)
== "Expect examples to have an even amount of messages, got 1."
)
| [
"{'max_output_tokens': 1, 'temperature': 10000.0, 'top_k': 10, 'top_p': 0.5}",
"a",
"My name is Ned. You are my personal assistant.",
"2+2",
"Hello",
"How much is 3+3?"
] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~llms~tongyi.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from langchain_core.outputs import Generation, LLMResult
from langchain_core.pydantic_v1 import Field, root_validator
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Tongyi) -> Callable[[Any], Any]:
min_seconds = 1
max_seconds = 4
# Wait 2^x * 1 second between each retry, starting with
# 1 second, then up to 4 seconds, then 4 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(HTTPError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _generate_with_retry(**_kwargs: Any) -> Any:
resp = llm.client.call(**_kwargs)
if resp.status_code == 200:
return resp
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return _generate_with_retry(**kwargs)
def stream_generate_with_retry(llm: Tongyi, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _stream_generate_with_retry(**_kwargs: Any) -> Any:
stream_resps = []
resps = llm.client.call(**_kwargs)
for resp in resps:
if resp.status_code == 200:
stream_resps.append(resp)
elif resp.status_code in [400, 401]:
raise ValueError(
f"status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}"
)
else:
raise HTTPError(
f"HTTP error occurred: status_code: {resp.status_code} \n "
f"code: {resp.code} \n message: {resp.message}",
response=resp,
)
return stream_resps
return _stream_generate_with_retry(**kwargs)
class Tongyi(LLM):
"""Tongyi Qwen large language models.
To use, you should have the ``dashscope`` python package installed, and the
environment variable ``DASHSCOPE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Tongyi
tongyi = Tongyi()
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"dashscope_api_key": "DASHSCOPE_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return False
client: Any #: :meta private:
model_name: str = "qwen-plus-v1"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
top_p: float = 0.8
"""Total probability mass of tokens to consider at each step."""
dashscope_api_key: Optional[str] = None
"""Dashscope api key provide by alicloud."""
n: int = 1
"""How many completions to generate for each prompt."""
streaming: bool = False
"""Whether to stream the results or not."""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "tongyi"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
get_from_dict_or_env(values, "dashscope_api_key", "DASHSCOPE_API_KEY")
try:
import dashscope
except ImportError:
raise ImportError(
"Could not import dashscope python package. "
"Please install it with `pip install dashscope`."
)
try:
values["client"] = dashscope.Generation
except AttributeError:
raise ValueError(
"`dashscope` has no `Generation` attribute, this is likely "
"due to an old version of the dashscope package. Try upgrading it "
"with `pip install --upgrade dashscope`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params = {
"top_p": self.top_p,
}
return {**normal_params, **self.model_kwargs}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Tongyi's generate endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = tongyi("Tell me a joke.")
"""
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
return completion["output"]["text"]
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
generations = []
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
**kwargs,
}
if self.streaming:
if len(prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
params["stream"] = True
temp = ""
for stream_resp in stream_generate_with_retry(
self, prompt=prompts[0], **params
):
if run_manager:
stream_resp_text = stream_resp["output"]["text"]
stream_resp_text = stream_resp_text.replace(temp, "")
# Alibaba Cloud's streaming interface returns cumulative output: each chunk
# also contains the output of the previous round (as of September 20, 2023;
# future updates to the Alibaba Cloud API may change this behavior)
run_manager.on_llm_new_token(stream_resp_text)
# Streaming output is surfaced primarily through the callback's
# "on_llm_new_token" method.
temp = stream_resp["output"]["text"]
generations.append(
[
Generation(
text=stream_resp["output"]["text"],
generation_info=dict(
finish_reason=stream_resp["output"]["finish_reason"],
),
)
]
)
generations.reverse()
# In the official OpenAI implementation, the "generations" argument passed to
# LLMResult is a nested (1x1) list, even in non-streaming mode.
# Because Alibaba Cloud's streaming responses include the output of the
# previous round in each return (as of September 20, 2023; future API updates
# may change this), the last chunk already contains the full text, so
# reversing the "generations" list is sufficient here. This is the smallest
# change to the existing code, though it uses slightly more memory.
else:
for prompt in prompts:
completion = generate_with_retry(
self,
prompt=prompt,
**params,
)
generations.append(
[
Generation(
text=completion["output"]["text"],
generation_info=dict(
finish_reason=completion["output"]["finish_reason"],
),
)
]
)
return LLMResult(generations=generations)
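# Illustrative sketch (not part of the original module) of the delta computation
# used in the streaming branch above, assuming each streamed response carries the
# cumulative output text:
#
#     previous = ""
#     for resp in [{"output": {"text": "Hello"}}, {"output": {"text": "Hello world"}}]:
#         delta = resp["output"]["text"].replace(previous, "")
#         previous = resp["output"]["text"]
#         # deltas emitted: "Hello", then " world"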
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~tests~integration_tests~embeddings~test_azure_openai.py | """Test openai embeddings."""
import os
from typing import Any
import numpy as np
import pytest
from langchain.embeddings import AzureOpenAIEmbeddings
OPENAI_API_VERSION = os.environ.get("AZURE_OPENAI_API_VERSION", "")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "")
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "")
DEPLOYMENT_NAME = os.environ.get(
"AZURE_OPENAI_DEPLOYMENT_NAME",
os.environ.get("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME", ""),
)
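# These integration tests read the Azure OpenAI settings above from the
# environment; a minimal sketch of providing them from Python (placeholder
# values, not real settings):
#
#     import os
#     os.environ["AZURE_OPENAI_API_VERSION"] = "<api-version>"
#     os.environ["AZURE_OPENAI_API_BASE"] = "https://<your-resource>.openai.azure.com/"
#     os.environ["AZURE_OPENAI_API_KEY"] = "<your-key>"
#     os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "<your-embeddings-deployment>"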
def _get_embeddings(**kwargs: Any) -> AzureOpenAIEmbeddings:
return AzureOpenAIEmbeddings(
azure_deployment=DEPLOYMENT_NAME,
api_version=OPENAI_API_VERSION,
openai_api_base=OPENAI_API_BASE,
openai_api_key=OPENAI_API_KEY,
**kwargs,
)
@pytest.mark.scheduled
def test_azure_openai_embedding_documents() -> None:
"""Test openai embeddings."""
documents = ["foo bar"]
embedding = _get_embeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
assert embedding.chunk_size == 2
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_documents_chunk_size() -> None:
"""Test openai embeddings."""
documents = ["foo bar"] * 20
embedding = _get_embeddings()
embedding.embedding_ctx_length = 8191
output = embedding.embed_documents(documents)
# Max 16 chunks per batch on Azure OpenAI embeddings
assert embedding.chunk_size == 16
assert len(output) == 20
assert all([len(out) == 1536 for out in output])
@pytest.mark.scheduled
async def test_azure_openai_embedding_documents_async_multiple() -> None:
"""Test openai embeddings."""
documents = ["foo bar", "bar foo", "foo"]
embedding = _get_embeddings(chunk_size=2)
embedding.embedding_ctx_length = 8191
output = await embedding.aembed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 1536
assert len(output[1]) == 1536
assert len(output[2]) == 1536
@pytest.mark.scheduled
def test_azure_openai_embedding_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = _get_embeddings()
output = embedding.embed_query(document)
assert len(output) == 1536
@pytest.mark.scheduled
async def test_azure_openai_embedding_async_query() -> None:
"""Test openai embeddings."""
document = "foo bar"
embedding = _get_embeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1536
@pytest.mark.skip(reason="Unblock scheduled testing. TODO: fix.")
def test_azure_openai_embedding_with_empty_string() -> None:
"""Test openai embeddings with empty string."""
import openai
document = ["", "abc"]
embedding = _get_embeddings()
output = embedding.embed_documents(document)
assert len(output) == 2
assert len(output[0]) == 1536
expected_output = openai.Embedding.create(input="", model="text-embedding-ada-002")[
"data"
][0]["embedding"]
assert np.allclose(output[0], expected_output)
assert len(output[1]) == 1536
@pytest.mark.scheduled
def test_embed_documents_normalized() -> None:
output = _get_embeddings().embed_documents(["foo walked to the market"])
assert np.isclose(np.linalg.norm(output[0]), 1.0)
@pytest.mark.scheduled
def test_embed_query_normalized() -> None:
output = _get_embeddings().embed_query("foo walked to the market")
assert np.isclose(np.linalg.norm(output), 1.0)
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~llms~fireworks.py | import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Optional, Union
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.utils.env import get_from_dict_or_env
def _stream_response_to_generation_chunk(
stream_response: Any,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
return GenerationChunk(
text=stream_response.choices[0].text,
generation_info=dict(
finish_reason=stream_response.choices[0].finish_reason,
logprobs=stream_response.choices[0].logprobs,
),
)
class Fireworks(BaseLLM):
"""Fireworks models."""
model: str = "accounts/fireworks/models/llama-v2-7b-chat"
model_kwargs: dict = Field(
default_factory=lambda: {
"temperature": 0.7,
"max_tokens": 512,
"top_p": 1,
}.copy()
)
fireworks_api_key: Optional[SecretStr] = None
max_retries: int = 20
batch_size: int = 20
use_retry: bool = True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"fireworks_api_key": "FIREWORKS_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "llms", "fireworks"]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
"Could not import fireworks-ai python package. "
"Please install it with `pip install fireworks-ai`."
) from e
fireworks_api_key = convert_to_secret_str(
get_from_dict_or_env(values, "fireworks_api_key", "FIREWORKS_API_KEY")
)
fireworks.client.api_key = fireworks_api_key.get_secret_value()
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fireworks"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Fireworks endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
"""
params = {
"model": self.model,
**self.model_kwargs,
}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = completion_with_retry_batching(
self,
self.use_retry,
prompt=_prompts,
run_manager=run_manager,
stop=stop,
**params,
)
choices.extend(response)
return self.create_llm_result(choices, prompts)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Fireworks endpoint async with k unique prompts."""
params = {
"model": self.model,
**self.model_kwargs,
}
sub_prompts = self.get_batch_prompts(prompts)
choices = []
for _prompts in sub_prompts:
response = await acompletion_with_retry_batching(
self,
self.use_retry,
prompt=_prompts,
run_manager=run_manager,
stop=stop,
**params,
)
choices.extend(response)
return self.create_llm_result(choices, prompts)
def get_batch_prompts(
self,
prompts: List[str],
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
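# For example (illustrative, assuming the default batch_size of 20): a list of
# 45 prompts would be split into sub-lists of lengths 20, 20 and 5.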
def create_llm_result(self, choices: Any, prompts: List[str]) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i : (i + 1)]
generations.append(
[
Generation(
text=choice.__dict__["choices"][0].text,
)
for choice in sub_choices
]
)
llm_output = {"model": self.model}
return LLMResult(generations=generations, llm_output=llm_output)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
for stream_resp in completion_with_retry(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {
"model": self.model,
"prompt": prompt,
"stream": True,
**self.model_kwargs,
}
async for stream_resp in await acompletion_with_retry_streaming(
self, self.use_retry, run_manager=run_manager, stop=stop, **params
):
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
def conditional_decorator(
condition: bool, decorator: Callable[[Any], Any]
) -> Callable[[Any], Any]:
def actual_decorator(func: Callable[[Any], Any]) -> Callable[[Any], Any]:
if condition:
return decorator(func)
return func
return actual_decorator
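# Illustrative sketch (not part of the original module): conditional_decorator
# applies ``decorator`` only when ``condition`` is truthy, e.g.
#
#     @conditional_decorator(use_retry, retry_decorator)
#     def _call(**kwargs):
#         ...
#
# is equivalent to ``retry_decorator(_call)`` when use_retry is True, and to the
# undecorated ``_call`` otherwise.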
def completion_with_retry(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.create(
**kwargs,
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return await fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def completion_with_retry_batching(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
prompt = kwargs["prompt"]
del kwargs["prompt"]
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
def _completion_with_retry(prompt: str) -> Any:
return fireworks.client.Completion.create(**kwargs, prompt=prompt)
def batch_sync_run() -> List:
with ThreadPoolExecutor() as executor:
results = list(executor.map(_completion_with_retry, prompt))
return results
return batch_sync_run()
async def acompletion_with_retry_batching(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
prompt = kwargs["prompt"]
del kwargs["prompt"]
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(prompt: str) -> Any:
return await fireworks.client.Completion.acreate(**kwargs, prompt=prompt)
def run_coroutine_in_new_loop(
coroutine_func: Any, *args: Dict, **kwargs: Dict
) -> Any:
new_loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(new_loop)
return new_loop.run_until_complete(coroutine_func(*args, **kwargs))
finally:
new_loop.close()
async def batch_sync_run() -> List:
with ThreadPoolExecutor() as executor:
results = list(
executor.map(
run_coroutine_in_new_loop,
[_completion_with_retry] * len(prompt),
prompt,
)
)
return results
return await batch_sync_run()
async def acompletion_with_retry_streaming(
llm: Fireworks,
use_retry: bool,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call for streaming."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@conditional_decorator(use_retry, retry_decorator)
async def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.Completion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def _create_retry_decorator(
llm: Fireworks,
*,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [
fireworks.client.error.RateLimitError,
fireworks.client.error.InternalServerError,
fireworks.client.error.BadGatewayError,
fireworks.client.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
| [] |
2024-01-10 | Abhishek-1211/langchain | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> datetime:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
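# Minimal usage sketch (illustrative; the service URL environment variable name
# is an assumption, not part of this module):
#
#     service_url = os.environ["TIMESCALE_SERVICE_URL"]
#     load_ts_git_dataset(service_url, collection_name="timescale_commits", num_records=500)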
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~chat_models~ollama.py | import json
from typing import Any, Iterator, List, Optional
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.ollama import _OllamaCommon
def _stream_response_to_chat_generation_chunk(
stream_response: str,
) -> ChatGenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return ChatGenerationChunk(
message=AIMessageChunk(content=parsed_response.get("response", "")),
generation_info=generation_info,
)
class ChatOllama(BaseChatModel, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain.chat_models import ChatOllama
ollama = ChatOllama(model="llama2")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "ollama-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
def _format_message_as_text(self, message: BaseMessage) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"[INST] {message.content} [/INST]"
elif isinstance(message, AIMessage):
message_text = f"{message.content}"
elif isinstance(message, SystemMessage):
message_text = f"<<SYS>> {message.content} <</SYS>>"
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def _format_messages_as_text(self, messages: List[BaseMessage]) -> str:
return "\n".join(
[self._format_message_as_text(message) for message in messages]
)
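# For example (illustrative), [SystemMessage(content="You are helpful"),
# HumanMessage(content="Hi")] is rendered by the two helpers above as:
#
#     <<SYS>> You are helpful <</SYS>>
#     [INST] Hi [/INST]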
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to Ollama's generate endpoint.
Args:
messages: The list of base messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
Chat generations from the model
Example:
.. code-block:: python
response = ollama([
HumanMessage(content="Tell me about the history of AI")
])
"""
prompt = self._format_messages_as_text(messages)
final_chunk = super()._stream_with_aggregation(
prompt, stop=stop, run_manager=run_manager, verbose=self.verbose, **kwargs
)
chat_generation = ChatGeneration(
message=AIMessage(content=final_chunk.text),
generation_info=final_chunk.generation_info,
)
return ChatResult(generations=[chat_generation])
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
prompt = self._format_messages_as_text(messages)
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_chat_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~llms~anyscale.py | """Wrapper around Anyscale Endpoint"""
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
cast,
)
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.utils import convert_to_secret_str
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.openai import (
BaseOpenAI,
acompletion_with_retry,
completion_with_retry,
)
from langchain.utils import get_from_dict_or_env
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
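# Worked example (illustrative): with keys={"total_tokens"} and two successive
# responses whose usage reports total_tokens of 5 and then 7, token_usage ends
# up as {"total_tokens": 12}.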
def create_llm_result(
choices: Any, prompts: List[str], token_usage: Dict[str, int], model_name: str
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
choice = choices[i]
generations.append(
[
Generation(
text=choice["message"]["content"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
]
)
llm_output = {"token_usage": token_usage, "model_name": model_name}
return LLMResult(generations=generations, llm_output=llm_output)
class Anyscale(BaseOpenAI):
"""Anyscale large language models.
To use, you should have the environment variables ``ANYSCALE_API_BASE`` and
``ANYSCALE_API_KEY`` set with your Anyscale Endpoint and API key, or pass them
as named parameters to the constructor.
Example:
.. code-block:: python
from langchain.llms import Anyscale
anyscalellm = Anyscale(anyscale_api_base="ANYSCALE_API_BASE",
anyscale_api_key="ANYSCALE_API_KEY",
model_name="meta-llama/Llama-2-7b-chat-hf")
# To leverage Ray for parallel processing
@ray.remote(num_cpus=1)
def send_query(llm, text):
resp = llm(text)
return resp
futures = [send_query.remote(anyscalellm, text) for text in texts]
results = ray.get(futures)
"""
"""Key word arguments to pass to the model."""
anyscale_api_base: Optional[str] = None
anyscale_api_key: Optional[SecretStr] = None
prefix_messages: List = Field(default_factory=list)
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_base"] = get_from_dict_or_env(
values, "anyscale_api_base", "ANYSCALE_API_BASE"
)
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "anyscale_api_key", "ANYSCALE_API_KEY")
)
try:
import openai
## Always create ChatComplete client, replacing the legacy Complete client
values["client"] = openai.ChatCompletion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_name": self.model_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {
"api_key": cast(SecretStr, self.anyscale_api_key).get_secret_value(),
"api_base": self.anyscale_api_base,
}
return {**openai_creds, **{"model": self.model_name}, **super()._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "Anyscale LLM"
def _get_chat_messages(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"Anyscale currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = self._invocation_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for Chat api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
choices = []
token_usage: Dict[str, int] = {}
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for prompt in prompts:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"message": {"content": generation.text},
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
choices = []
token_usage: Dict[str, int] = {}
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
for prompt in prompts:
messages = self.prefix_messages + [{"role": "user", "content": prompt}]
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"message": {"content": generation.text},
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
messages, params = self._get_chat_messages([prompt], stop)
params = {**params, **kwargs}
response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return create_llm_result(choices, prompts, token_usage, self.model_name)
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~chat_models~volcengine_maas.py | from __future__ import annotations
from typing import Any, Dict, Iterator, List, Mapping, Optional, cast
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.llms.volcengine_maas import VolcEngineMaasBase
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {"role": "function", "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
return message_dict
def convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
content = _dict.get("choice", {}).get("message", {}).get("content", "")
return AIMessage(content=content)
class VolcEngineMaasChat(BaseChatModel, VolcEngineMaasBase):
"""volc engine maas hosts a plethora of models.
You can utilize these models through this class.
To use, you should have the ``volcengine`` python package installed and set
the access key and secret key either via environment variables or by passing
them directly to this class.
The access key and secret key are required parameters; see
https://www.volcengine.com/docs/6291/65568 for help obtaining them.
The two methods are as follows:
* Environment Variable
Set the environment variables 'VOLC_ACCESSKEY' and 'VOLC_SECRETKEY' with your
access key and secret key.
* Pass Directly to Class
Example:
.. code-block:: python
from langchain.chat_models import VolcEngineMaasChat
model = VolcEngineMaasChat(model="skylark-lite-public",
volc_engine_maas_ak="your_ak",
volc_engine_maas_sk="your_sk")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "volc-engine-maas-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return False
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
model_req = {
"model": {
"name": self.model,
}
}
if self.model_version is not None:
model_req["model"]["version"] = self.model_version
return {
**model_req,
"messages": [_convert_message_to_dict(message) for message in messages],
"parameters": {**self._default_params, **kwargs},
}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.stream_chat(params):
if res:
msg = convert_dict_to_message(res)
yield ChatGenerationChunk(message=AIMessageChunk(content=msg.content))
if run_manager:
run_manager.on_llm_new_token(cast(str, msg.content))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
params = self._convert_prompt_msg_params(messages, **kwargs)
res = self.client.chat(params)
msg = convert_dict_to_message(res)
completion = cast(str, msg.content)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~chat_models~bedrock.py | from typing import Any, Dict, Iterator, List, Optional
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.pydantic_v1 import Extra
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.anthropic import convert_messages_to_prompt_anthropic
from langchain.chat_models.base import BaseChatModel
from langchain.chat_models.meta import convert_messages_to_prompt_llama
from langchain.llms.bedrock import BedrockBase
from langchain.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
class ChatPromptAdapter:
"""Adapter class to prepare the inputs from Langchain to prompt format
that Chat model expects.
"""
@classmethod
def convert_messages_to_prompt(
cls, provider: str, messages: List[BaseMessage]
) -> str:
if provider == "anthropic":
prompt = convert_messages_to_prompt_anthropic(messages=messages)
elif provider == "meta":
prompt = convert_messages_to_prompt_llama(messages=messages)
else:
raise NotImplementedError(
f"Provider {provider} model does not support chat."
)
return prompt
class BedrockChat(BaseChatModel, BedrockBase):
"""A chat model that uses the Bedrock API."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "amazon_bedrock_chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "bedrock"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.region_name:
attributes["region_name"] = self.region_name
return attributes
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
for chunk in self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
params: Dict[str, Any] = {**kwargs}
if stop:
params["stop_sequences"] = stop
completion = self._prepare_input_and_invoke(
prompt=prompt, stop=stop, run_manager=run_manager, **params
)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~utils~aiter.py | from langchain_core.utils.aiter import NoLock, Tee, py_anext
__all__ = ["py_anext", "NoLock", "Tee"]
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~llms~vllm.py | from typing import Any, Dict, List, Optional
from langchain_core.outputs import Generation, LLMResult
from langchain_core.pydantic_v1 import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.llms.openai import BaseOpenAI
from langchain.utils.openai import is_openai_v1
class VLLM(BaseLLM):
"""VLLM language model."""
model: str = ""
"""The name or path of a HuggingFace Transformers model."""
tensor_parallel_size: Optional[int] = 1
"""The number of GPUs to use for distributed execution with tensor parallelism."""
trust_remote_code: Optional[bool] = False
"""Trust remote code (e.g., from HuggingFace) when downloading the model
and tokenizer."""
n: int = 1
"""Number of output sequences to return for the given prompt."""
best_of: Optional[int] = None
"""Number of output sequences that are generated from the prompt."""
presence_penalty: float = 0.0
"""Float that penalizes new tokens based on whether they appear in the
generated text so far"""
frequency_penalty: float = 0.0
"""Float that penalizes new tokens based on their frequency in the
generated text so far"""
temperature: float = 1.0
"""Float that controls the randomness of the sampling."""
top_p: float = 1.0
"""Float that controls the cumulative probability of the top tokens to consider."""
top_k: int = -1
"""Integer that controls the number of top tokens to consider."""
use_beam_search: bool = False
"""Whether to use beam search instead of sampling."""
stop: Optional[List[str]] = None
"""List of strings that stop the generation when they are generated."""
ignore_eos: bool = False
"""Whether to ignore the EOS token and continue generating tokens after
the EOS token is generated."""
max_new_tokens: int = 512
"""Maximum number of tokens to generate per output sequence."""
logprobs: Optional[int] = None
"""Number of log probabilities to return per output token."""
dtype: str = "auto"
"""The data type for the model weights and activations."""
download_dir: Optional[str] = None
"""Directory to download and load the weights. (Default to the default
cache dir of huggingface)"""
vllm_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""
client: Any #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
values["client"] = VLLModel(
model=values["model"],
tensor_parallel_size=values["tensor_parallel_size"],
trust_remote_code=values["trust_remote_code"],
dtype=values["dtype"],
download_dir=values["download_dir"],
**values["vllm_kwargs"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling vllm."""
return {
"n": self.n,
"best_of": self.best_of,
"max_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"temperature": self.temperature,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
"stop": self.stop,
"ignore_eos": self.ignore_eos,
"use_beam_search": self.use_beam_search,
"logprobs": self.logprobs,
}
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
from vllm import SamplingParams
# build sampling parameters
params = {**self._default_params, **kwargs, "stop": stop}
sampling_params = SamplingParams(**params)
# call the model
outputs = self.client.generate(prompts, sampling_params)
generations = []
for output in outputs:
text = output.outputs[0].text
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm"
class VLLMOpenAI(BaseOpenAI):
"""vLLM OpenAI-compatible API client"""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
params: Dict[str, Any] = {
"model": self.model_name,
**self._default_params,
"logit_bias": None,
}
if not is_openai_v1():
params.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
}
)
return params
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "vllm-openai"
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain_core.memory import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
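# Illustrative usage sketch (the wrapped ``memory`` object is an assumption):
#
#     readonly_memory = ReadOnlySharedMemory(memory=some_existing_memory)
#     # chains given ``readonly_memory`` can read the shared history, while
#     # their save_context/clear calls become no-ops.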
| [] |
2024-01-10 | Abhishek-1211/langchain | libs~core~langchain_core~runnables~history.py | from __future__ import annotations
import asyncio
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.load import load
from langchain_core.pydantic_v1 import BaseModel, create_model
from langchain_core.runnables.base import Runnable, RunnableBindingBase, RunnableLambda
from langchain_core.runnables.passthrough import RunnablePassthrough
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
get_unique_config_specs,
)
if TYPE_CHECKING:
from langchain_core.messages import BaseMessage
from langchain_core.runnables.config import RunnableConfig
from langchain_core.tracers.schemas import Run
MessagesOrDictWithMessages = Union[Sequence["BaseMessage"], Dict[str, Any]]
GetSessionHistoryCallable = Callable[..., BaseChatMessageHistory]
class RunnableWithMessageHistory(RunnableBindingBase):
"""A runnable that manages chat message history for another runnable.
Base runnable must have inputs and outputs that can be converted to a list of
BaseMessages.
RunnableWithMessageHistory must always be called with a config that contains session_id, e.g.:
``{"configurable": {"session_id": "<SESSION_ID>"}}``
Example (dict input):
.. code-block:: python
from typing import Optional
from langchain.chat_models import ChatAnthropic
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
prompt = ChatPromptTemplate.from_messages([
("system", "You're an assistant who's good at {ability}"),
MessagesPlaceholder(variable_name="history"),
("human", "{question}"),
])
chain = prompt | ChatAnthropic(model="claude-2")
chain_with_history = RunnableWithMessageHistory(
chain,
RedisChatMessageHistory,
input_messages_key="question",
history_messages_key="history",
)
chain_with_history.invoke(
{"ability": "math", "question": "What does cosine mean?"},
config={"configurable": {"session_id": "foo"}}
)
# -> "Cosine is ..."
chain_with_history.invoke(
{"ability": "math", "question": "What's its inverse"},
config={"configurable": {"session_id": "foo"}}
)
# -> "The inverse of cosine is called arccosine ..."
""" # noqa: E501
get_session_history: GetSessionHistoryCallable
input_messages_key: Optional[str] = None
output_messages_key: Optional[str] = None
history_messages_key: Optional[str] = None
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "runnable"]
def __init__(
self,
runnable: Runnable[
MessagesOrDictWithMessages,
Union[str, BaseMessage, MessagesOrDictWithMessages],
],
get_session_history: GetSessionHistoryCallable,
*,
input_messages_key: Optional[str] = None,
output_messages_key: Optional[str] = None,
history_messages_key: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Initialize RunnableWithMessageHistory.
Args:
runnable: The base Runnable to be wrapped.
Must take as input one of:
- A sequence of BaseMessages
- A dict with one key for all messages
- A dict with one key for the current input string/message(s) and
a separate key for historical messages. If the input key points
to a string, it will be treated as a HumanMessage in history.
Must return as output one of:
- A string which can be treated as an AIMessage
- A BaseMessage or sequence of BaseMessages
- A dict with a key for a BaseMessage or sequence of BaseMessages
get_session_history: Function that returns a new BaseChatMessageHistory
given a session id. Should take a single
positional argument `session_id` which is a string and a named argument
`user_id` which can be a string or None. e.g.:
```python
def get_session_history(
session_id: str,
*,
user_id: Optional[str]=None
) -> BaseChatMessageHistory:
...
```
input_messages_key: Must be specified if the base runnable accepts a dict
as input.
output_messages_key: Must be specified if the base runnable returns a dict
as output.
history_messages_key: Must be specified if the base runnable accepts a dict
as input and expects a separate key for historical messages.
**kwargs: Arbitrary additional kwargs to pass to parent class
``RunnableBindingBase`` init.
""" # noqa: E501
history_chain: Runnable = RunnableLambda(
self._enter_history, self._aenter_history
).with_config(run_name="load_history")
messages_key = history_messages_key or input_messages_key
if messages_key:
history_chain = RunnablePassthrough.assign(
**{messages_key: history_chain}
).with_config(run_name="insert_history")
bound = (
history_chain | runnable.with_listeners(on_end=self._exit_history)
).with_config(run_name="RunnableWithMessageHistory")
super().__init__(
get_session_history=get_session_history,
input_messages_key=input_messages_key,
output_messages_key=output_messages_key,
bound=bound,
history_messages_key=history_messages_key,
**kwargs,
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return get_unique_config_specs(
super().config_specs
+ [
ConfigurableFieldSpec(
id="session_id",
annotation=str,
name="Session ID",
description="Unique identifier for a session.",
default="",
is_shared=True,
),
]
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
super_schema = super().get_input_schema(config)
if super_schema.__custom_root_type__ is not None:
from langchain_core.messages import BaseMessage
fields: Dict = {}
if self.input_messages_key and self.history_messages_key:
fields[self.input_messages_key] = (
Union[str, BaseMessage, Sequence[BaseMessage]],
...,
)
elif self.input_messages_key:
fields[self.input_messages_key] = (Sequence[BaseMessage], ...)
else:
fields["__root__"] = (Sequence[BaseMessage], ...)
return create_model( # type: ignore[call-overload]
"RunnableWithChatHistoryInput",
**fields,
)
else:
return super_schema
def _get_input_messages(
self, input_val: Union[str, BaseMessage, Sequence[BaseMessage]]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(input_val, str):
from langchain_core.messages import HumanMessage
return [HumanMessage(content=input_val)]
elif isinstance(input_val, BaseMessage):
return [input_val]
elif isinstance(input_val, (list, tuple)):
return list(input_val)
else:
raise ValueError(
f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
f"Got {input_val}."
)
def _get_output_messages(
self, output_val: Union[str, BaseMessage, Sequence[BaseMessage], dict]
) -> List[BaseMessage]:
from langchain_core.messages import BaseMessage
if isinstance(output_val, dict):
output_val = output_val[self.output_messages_key or "output"]
if isinstance(output_val, str):
from langchain_core.messages import AIMessage
return [AIMessage(content=output_val)]
elif isinstance(output_val, BaseMessage):
return [output_val]
elif isinstance(output_val, (list, tuple)):
return list(output_val)
else:
            raise ValueError(
                f"Expected str, BaseMessage, List[BaseMessage], or Tuple[BaseMessage]. "
                f"Got {output_val}."
            )
def _enter_history(self, input: Any, config: RunnableConfig) -> List[BaseMessage]:
hist = config["configurable"]["message_history"]
# return only historic messages
if self.history_messages_key:
return hist.messages.copy()
# return all messages
else:
input_val = (
input if not self.input_messages_key else input[self.input_messages_key]
)
return hist.messages.copy() + self._get_input_messages(input_val)
async def _aenter_history(
self, input: Dict[str, Any], config: RunnableConfig
) -> List[BaseMessage]:
return await asyncio.get_running_loop().run_in_executor(
None, self._enter_history, input, config
)
def _exit_history(self, run: Run, config: RunnableConfig) -> None:
hist = config["configurable"]["message_history"]
# Get the input messages
inputs = load(run.inputs)
input_val = inputs[self.input_messages_key or "input"]
input_messages = self._get_input_messages(input_val)
# Get the output messages
output_val = load(run.outputs)
output_messages = self._get_output_messages(output_val)
for m in input_messages + output_messages:
hist.add_message(m)
def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
config = super()._merge_configs(*configs)
# extract session_id
if "session_id" not in config.get("configurable", {}):
example_input = {self.input_messages_key: "foo"}
example_config = {"configurable": {"session_id": "123"}}
raise ValueError(
"session_id is required."
" Pass it in as part of the config argument to .invoke() or .stream()"
f"\neg. chain.invoke({example_input}, {example_config})"
)
# attach message_history
session_id = config["configurable"]["session_id"]
config["configurable"]["message_history"] = self.get_session_history(session_id)
return config
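# Example session-history factory (a sketch; assumes ChatMessageHistory from langchain.memory):
#     store = {}
#     def get_session_history(session_id: str) -> BaseChatMessageHistory:
#         if session_id not in store:
#             store[session_id] = ChatMessageHistory()
#         return store[session_id]
#     with_history = RunnableWithMessageHistory(chain, get_session_history,
#                                               input_messages_key="question",
#                                               history_messages_key="history")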
| [] |
2024-01-10 | xuyifan-0731/STEPS_benchmark | src~agents~api_agents~claude_agents.py | import anthropic
from src.agent import Agent
import os
import json
import sys
import time
import re
import math
import random
import datetime
import argparse
import requests
from typing import List, Callable
import dataclasses
from copy import deepcopy
class Claude(Agent):
def __init__(self, api_args=None, **config):
if not api_args:
api_args = {}
api_args = deepcopy(api_args)
self.key = api_args.pop("key", None) or os.getenv('Claude_API_KEY')
api_args["model"] = api_args.pop("model", None)
if not self.key:
raise ValueError("Claude API KEY is required, please assign api_args.key or set OPENAI_API_KEY environment variable.")
if not api_args["model"]:
raise ValueError("Claude model is required, please assign api_args.model.")
self.api_args = api_args
if not self.api_args.get("stop_sequences"):
self.api_args["stop_sequences"] = [anthropic.HUMAN_PROMPT]
        super().__init__(**config)
def inference(self, history: List[dict]) -> str:
prompt = ""
for message in history:
if message["role"] == "user":
prompt += anthropic.HUMAN_PROMPT + message["content"]
else:
prompt += anthropic.AI_PROMPT + message["content"]
prompt += anthropic.AI_PROMPT
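        # For history=[{"role": "user", "content": "Hi"}] the assembled prompt is roughly
        # "\n\nHuman:Hi\n\nAssistant:" (HUMAN_PROMPT / AI_PROMPT are constants from the anthropic SDK).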
c = anthropic.Client(self.key)
resp = c.completion(
prompt=prompt,
**self.api_args
)
return resp | [
"content"
] |
2024-01-10 | MinhChaosBoDoiQua/GPTClone | myapp.py | #2122024_Nguyen Tien Minh
# Midterm assignment for Specialized Seminar III
from flask import Flask, render_template, request
import os
import openai
app = Flask(__name__)
openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment instead of hardcoding a secret
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
return response.choices[0].message["content"]
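# Example (illustrative): get_completion("Say hello in French") might return "Bonjour" -- the
# exact output depends on the model.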
@app.route("/")
def home():
return render_template("index.html")
@app.route("/get")
def get_bot_response():
userText = request.args.get('msg')
response = get_completion(userText)
return response
if __name__ == "__main__":
app.run() | [] |
2024-01-10 | sgugger/transformers | tests~test_tokenization_fast.py | import logging
import unittest
from collections import namedtuple
from itertools import takewhile
from transformers import (
BertTokenizer,
BertTokenizerFast,
DistilBertTokenizer,
GPT2Tokenizer,
GPT2TokenizerFast,
OpenAIGPTTokenizer,
PreTrainedTokenizer,
RobertaTokenizer,
TransfoXLTokenizer,
is_torch_available,
)
from transformers.testing_utils import require_torch
from transformers.tokenization_distilbert import DistilBertTokenizerFast
from transformers.tokenization_openai import OpenAIGPTTokenizerFast
from transformers.tokenization_roberta import RobertaTokenizerFast
from transformers.tokenization_transfo_xl import TransfoXLTokenizerFast
logger = logging.getLogger(__name__)
NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"]
Tokenizer = namedtuple("Tokenizer", ["name", "rust_cls", "python_cls", "vocab_key", "filter", "kwargs"])
def filter_non_english(_: Tokenizer, pretrained_name: str):
""" Filter all the model for non-english language """
return not any([lang in pretrained_name for lang in NON_ENGLISH_TAGS])
def filter_roberta_detectors(_: Tokenizer, pretrained_name: str):
return "detector" not in pretrained_name
class CommonFastTokenizerTest(unittest.TestCase):
TOKENIZERS_CLASSES = frozenset([])
def setUp(self) -> None:
with open("tests/fixtures/sample_text.txt", encoding="utf-8") as f_data:
self._data = f_data.read().replace("\n\n", "\n").strip()
def test_all_tokenizers(self):
for tok_case in self.TOKENIZERS_CLASSES:
for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():
                # Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
                # information available in Tokenizer (name, rust class, python class, vocab key name)
if tok_case.filter is None or (
tok_case.filter is not None and tok_case.filter(tok_case, pretrained_name)
):
kwargs = dict(t for t in tok_case.kwargs) if tok_case.kwargs else {}
with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, **kwargs)
self.fast_align_python(tokenizer_r, tokenizer_p, tok_case, pretrained_name)
self.fast_only(tokenizer_r)
def test_pretokenized_tokenizers(self):
for tok_case in self.TOKENIZERS_CLASSES:
for pretrained_name in tok_case.python_cls.pretrained_vocab_files_map[tok_case.vocab_key].keys():
                # Tokenizer.filter makes it possible to filter which Tokenizer to test based on all the
                # information available in Tokenizer (name, rust class, python class, vocab key name)
if tok_case.filter is None or (
tok_case.filter is not None and tok_case.filter(tok_case, pretrained_name)
):
with self.subTest("{} ({})".format(tok_case.name, pretrained_name)):
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, add_prefix_space=True)
tokenizer_p = tok_case.python_cls.from_pretrained(pretrained_name, add_prefix_space=True)
self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)
def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
# Check is_fast is set correctly
self.assertFalse(tokenizer_p.is_fast)
self.assertTrue(tokenizer_r.is_fast)
# Check that Rust and Python align
self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
self.assert_max_length_equal(tokenizer_r, tokenizer_p)
self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
self.assert_padding(tokenizer_r, tokenizer_p)
self.assert_create_token_type_ids(tokenizer_r, tokenizer_p)
self.assert_prepare_for_model(tokenizer_r, tokenizer_p)
# TODO: enable for v3.0.0
# self.assert_empty_output_no_special_tokens(tokenizer_r, tokenizer_p)
def fast_only(self, tokenizer_r):
# Ensure None raise an error
self.assertRaises(ValueError, tokenizer_r.tokenize, None)
self.assertRaises(ValueError, tokenizer_r.encode, None)
self.assertRaises(ValueError, tokenizer_r.encode_plus, None)
self.assertRaises(ValueError, tokenizer_r.batch_encode_plus, None)
self.assert_add_tokens(tokenizer_r)
self.assert_offsets_mapping(tokenizer_r)
self.assert_add_special_tokens(tokenizer_r)
self.assert_alignement_methods(tokenizer_r)
self.assert_batch_encode_dynamic_overflowing(tokenizer_r)
def assert_alignement_methods(self, tokenizer_r):
words = ["Wonderful", "no", "inspiration", "example", "with", "subtoken"]
text = " ".join(words)
batch_size = 3
encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
batch_encoding = tokenizer_r.batch_encode_plus([text] * batch_size, add_special_tokens=False)
num_tokens = len(encoding["input_ids"])
last_word_index = len(words) - 1
last_token_index = num_tokens - 1
last_batch_index = batch_size - 1
last_char_index = len(text) - 1
# words, tokens
self.assertEqual(len(encoding.words(0)), num_tokens)
self.assertEqual(max(encoding.words(0)), last_word_index)
self.assertEqual(min(encoding.words(0)), 0)
self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
self.assertEqual(len(encoding.tokens(0)), num_tokens)
# Assert token_to_word
self.assertEqual(encoding.token_to_word(0), 0)
self.assertEqual(encoding.token_to_word(0, 0), 0)
self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
# Assert word_to_tokens
self.assertEqual(encoding.word_to_tokens(0).start, 0)
self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
self.assertEqual(encoding.word_to_tokens(last_word_index).end, last_token_index + 1)
self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, last_token_index + 1)
self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, last_token_index + 1)
# Assert token_to_chars
self.assertEqual(encoding.token_to_chars(0).start, 0)
self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
self.assertEqual(encoding.token_to_chars(last_token_index).end, last_char_index + 1)
self.assertEqual(encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, last_char_index + 1)
# Assert char_to_token
self.assertEqual(encoding.char_to_token(0), 0)
self.assertEqual(encoding.char_to_token(0, 0), 0)
self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
# Assert char_to_word
self.assertEqual(encoding.char_to_word(0), 0)
self.assertEqual(encoding.char_to_word(0, 0), 0)
self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
# Assert word_to_chars
self.assertEqual(encoding.word_to_chars(0).start, 0)
self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
self.assertEqual(encoding.word_to_chars(last_word_index).end, last_char_index + 1)
self.assertEqual(encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, last_char_index + 1)
self.assertEqual(batch_encoding.word_to_chars(last_batch_index, last_word_index).end, last_char_index + 1)
def assert_tokenization_python_rust_equals(self, tokenizer_r, tokenizer_p):
# Ensure basic input match
input_p = tokenizer_p.encode_plus(self._data)
input_r = tokenizer_r.encode_plus(self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
# Ensure truncation match
input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key])
# Ensure truncation with stride match
input_p = tokenizer_p.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
input_r = tokenizer_r.encode_plus(
self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True
)
for key in filter(lambda x: x in ["input_ids", "token_type_ids", "attention_mask"], input_p.keys()):
self.assertSequenceEqual(input_p[key], input_r[key][0])
def assert_num_special_tokens_to_add_equal(self, tokenizer_r, tokenizer_p):
# Check we have the same number of added_tokens for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.num_special_tokens_to_add(False), tokenizer_p.num_special_tokens_to_add(False))
self.assertEqual(tokenizer_r.num_special_tokens_to_add(True), tokenizer_p.num_special_tokens_to_add(True))
def assert_max_length_equal(self, tokenizer_r, tokenizer_p):
# Check we have the correct max_length for both pair and non-pair inputs.
self.assertEqual(tokenizer_r.max_len_single_sentence, tokenizer_p.max_len_single_sentence)
self.assertEqual(tokenizer_r.max_len_sentences_pair, tokenizer_p.max_len_sentences_pair)
def assert_special_tokens_map_equal(self, tokenizer_r, tokenizer_p):
# Assert the set of special tokens match.
self.assertSequenceEqual(
tokenizer_p.special_tokens_map.items(), tokenizer_r.special_tokens_map.items(),
)
def assert_add_tokens(self, tokenizer_r):
vocab_size = tokenizer_r.vocab_size
self.assertEqual(tokenizer_r.add_tokens(""), 0)
self.assertEqual(tokenizer_r.add_tokens("testoken"), 1)
self.assertEqual(tokenizer_r.add_tokens(["testoken1", "testtoken2"]), 2)
self.assertEqual(len(tokenizer_r), vocab_size + 3)
self.assertEqual(tokenizer_r.add_special_tokens({}), 0)
self.assertEqual(tokenizer_r.add_special_tokens({"bos_token": "[BOS]", "eos_token": "[EOS]"}), 2)
self.assertRaises(
AssertionError, tokenizer_r.add_special_tokens, {"additional_special_tokens": "<testtoken1>"}
)
self.assertEqual(tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken2>"]}), 1)
self.assertEqual(
tokenizer_r.add_special_tokens({"additional_special_tokens": ["<testtoken3>", "<testtoken4>"]}), 2
)
self.assertEqual(len(tokenizer_r), vocab_size + 8)
def assert_offsets_mapping(self, tokenizer_r):
text = "Wonderful no inspiration example with subtoken"
pair = "Along with an awesome pair"
# No pair
tokens_with_offsets = tokenizer_r.encode_plus(
text, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(False)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
        # Assert there are only added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
# Pairs
tokens_with_offsets = tokenizer_r.encode_plus(
text, pair, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True
)
added_tokens = tokenizer_r.num_special_tokens_to_add(True)
offsets = tokens_with_offsets["offset_mapping"]
# Assert there is the same number of tokens and offsets
self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
        # Assert there are only added_tokens special tokens
self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
def assert_batch_encode_dynamic_overflowing(self, tokenizer: PreTrainedTokenizer):
"""
When calling batch_encode with multiple sequence it can returns different number of
overflowing encoding for each sequence:
[
Sequence 1: [Encoding 1, Encoding 2],
Sequence 2: [Encoding 1],
Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
]
This needs to be padded so that it can represented as a tensor
"""
returned_tensor = "pt" if is_torch_available() else "tf"
if not tokenizer.pad_token or tokenizer.pad_token_id < 0:
return
tokens = tokenizer.encode_plus(
"HuggingFace is solving NLP one commit at a time",
max_length=6,
padding=True,
truncation=True,
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
# Mono sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
# Multi sample
tokens = tokenizer.batch_encode_plus(
["HuggingFace is solving NLP one commit at a time", "Very tiny input"],
max_length=6,
padding=True,
truncation="only_first",
return_tensors=returned_tensor,
return_overflowing_tokens=True,
)
for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
self.assertEqual(len(tokens[key].shape), 2)
self.assertEqual(tokens[key].shape[-1], 6)
def assert_pretokenized_inputs(self, tokenizer_r, tokenizer_p):
# Input string
pretokenized_input_simple = "This is a sample input".split()
pretokenized_input_pair = "This is a sample pair".split()
# Test encode for pretokenized inputs
output_r = tokenizer_r.encode(pretokenized_input_simple, is_pretokenized=True)
output_p = tokenizer_p.encode(pretokenized_input_simple, is_pretokenized=True)
self.assertEqual(output_p, output_r)
kwargs = {
"is_pretokenized": True,
"return_token_type_ids": True,
"return_attention_mask": True,
"return_overflowing_tokens": False,
"return_special_tokens_mask": True,
"return_offsets_mapping": False, # Not implemented in python tokenizers
}
batch_kwargs = {
"is_pretokenized": True,
"return_token_type_ids": True,
"return_attention_mask": True, # we have an 's' here
"return_overflowing_tokens": False,
"return_special_tokens_mask": True, # we have an 's' here
"return_offsets_mapping": False, # Not implemented in python tokenizers
}
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test encode for pretokenized inputs pairs
output_r = tokenizer_r.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
output_p = tokenizer_p.encode(pretokenized_input_simple, pretokenized_input_pair, is_pretokenized=True)
self.assertEqual(output_p, output_r)
# Test encode_plus for pretokenized inputs
output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
# Test batch_encode_plus for pretokenized inputs
input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [
pretokenized_input_simple + pretokenized_input_pair,
pretokenized_input_pair,
]
output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
for key in output_p.keys():
self.assertEqual(output_p[key], output_r[key])
def assert_create_token_type_ids(self, tokenizer_r, tokenizer_p):
input_simple = [1, 2, 3]
input_pair = [1, 2, 3]
# Generate output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.create_token_type_ids_from_sequences(input_simple, input_pair)
output_p = tokenizer_p.create_token_type_ids_from_sequences(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def assert_build_inputs_with_special_tokens(self, tokenizer_r, tokenizer_p):
# Input string
input_simple = tokenizer_p.tokenize("This is a sample input")
input_pair = tokenizer_p.tokenize("This is a sample pair")
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
# Input tokens id
input_simple = tokenizer_p.encode("This is a sample input")
input_pair = tokenizer_p.encode("This is a sample pair")
# Generate output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
self.assertEqual(output_p, output_r)
# Generate pair output
output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
self.assertEqual(output_p, output_r)
def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
def assert_padded_input_match(input_r: list, input_p: list, max_length: int):
# Ensure we match max_length
self.assertEqual(len(input_r), max_length)
self.assertEqual(len(input_p), max_length)
# Ensure the number of padded tokens is the same
padded_tokens_r = list(takewhile(lambda i: i == tokenizer_r.pad_token_id, reversed(input_r)))
padded_tokens_p = list(takewhile(lambda i: i == tokenizer_p.pad_token_id, reversed(input_p)))
self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)
def assert_batch_padded_input_match(input_r: dict, input_p: dict, max_length: int):
for i_r in input_r.values():
self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(
len(i_r[1]), max_length
)
self.assertEqual(len(i_r), 2), self.assertEqual(len(i_r[0]), max_length), self.assertEqual(
len(i_r[1]), max_length
)
for i_r, i_p in zip(input_r["input_ids"], input_p["input_ids"]):
assert_padded_input_match(i_r, i_p, max_length)
for i_r, i_p in zip(input_r["attention_mask"], input_p["attention_mask"]):
self.assertSequenceEqual(i_r, i_p)
# Encode - Simple input
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, pad_to_max_length=True)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode("This is a simple input", max_length=max_length, padding="max_length")
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", padding="longest")
input_p = tokenizer_p.encode("This is a simple input", padding=True)
assert_padded_input_match(input_r, input_p, len(input_r))
# Encode - Pair input
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
assert_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.encode("This is a simple input", "This is a pair", padding=True)
input_p = tokenizer_p.encode("This is a simple input", "This is a pair", padding="longest")
assert_padded_input_match(input_r, input_p, len(input_r))
# Encode_plus - Simple input
input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, pad_to_max_length=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus("This is a simple input", max_length=max_length, padding="max_length")
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", padding=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, pad_to_max_length=True
)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
"This is a simple input", "This is a pair", max_length=max_length, padding="max_length"
)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus("This is a simple input", "This is a pair", padding="longest")
input_p = tokenizer_p.encode_plus("This is a simple input", "This is a pair", padding=True)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, pad_to_max_length=True
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="max_length",
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], max_length=max_length, padding=True,
)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
input_r = tokenizer_r.batch_encode_plus(
["This is a simple input 1", "This is a simple input 2"], padding="longest"
)
input_p = tokenizer_p.batch_encode_plus(["This is a simple input 1", "This is a simple input 2"], padding=True)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Batch_encode_plus - Pair input
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
max_length=max_length,
truncation=True,
padding="max_length",
)
assert_batch_padded_input_match(input_r, input_p, max_length)
input_r = tokenizer_r.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
],
padding="longest",
)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus("This is a input 1")
input_p = tokenizer_r.pad(input_p)
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]))
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus("This is a input 1")
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus("This is a input 1")
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length)
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p)
assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]))
# Using pad after tokenization
input_r = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(
["This is a input 1", "This is a much longer input whilch should be padded"]
)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
assert_batch_padded_input_match(input_r, input_p, max_length)
def assert_save_pretrained(self, tokenizer_r, tokenizer_p):
# Checks it save with the same files
self.assertSequenceEqual(tokenizer_r.save_vocabulary("."), tokenizer_p.save_vocabulary("."))
# Checks everything loads correctly in the same way
tokenizer_rp, tokenizer_pp = tokenizer_r.from_pretrained("."), tokenizer_p.from_pretrained(".")
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(
sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
)
tokens_p = tokenizer_p.encode_plus(
sentence, add_special_tokens=True, return_attention_mask=False, return_token_type_ids=True
)
for key in tokens_p.keys():
self.assertEqual(tokens_r[key], tokens_p[key])
self.assertEqual(sum(tokens_r["token_type_ids"]), 0)
self.assertEqual(sum(tokens_p["token_type_ids"]), 0)
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, tokens_p)
def assert_add_special_tokens(self, tokenizer_r):
simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
# pair_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=True)
for text in ["", " "]:
# tokenize()
no_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.tokenize(text, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode()
no_special_tokens = tokenizer_r.encode(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode(text, add_special_tokens=True)
self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
# encode_plus()
no_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=False)
with_special_tokens = tokenizer_r.encode_plus(text, add_special_tokens=True)
for key in no_special_tokens.keys():
self.assertEqual(
len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add
)
# # batch_encode_plus
no_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=False)
with_special_tokens = tokenizer_r.batch_encode_plus([text, text], add_special_tokens=True)
for key in no_special_tokens.keys():
for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
def assert_prepare_for_model(self, tokenizer_r, tokenizer_p):
string_sequence = "Asserting that both tokenizers are equal"
python_output = tokenizer_p.prepare_for_model(tokenizer_p.encode(string_sequence))
rust_output = tokenizer_r.prepare_for_model(tokenizer_r.encode(string_sequence))
self.assertEqual(python_output, rust_output)
class WordPieceFastTokenizerTest(CommonFastTokenizerTest):
"""
Override all the specific methods to test WordPiece behavior
"""
TOKENIZERS_CLASSES = frozenset(
[
Tokenizer("Bert", BertTokenizerFast, BertTokenizer, "vocab_file", filter_non_english, None),
Tokenizer(
"DistilBert", DistilBertTokenizerFast, DistilBertTokenizer, "vocab_file", filter_non_english, None
),
]
)
def fast_only(self, tokenizer_r):
super().fast_only(tokenizer_r)
self.assert_offsets_with_special_characters(tokenizer_r)
def assert_add_special_tokens(self, tokenizer_r):
super().assert_add_special_tokens(tokenizer_r)
def assert_offsets_with_special_characters(self, tokenizer_r):
sentence = "A, naïve [MASK] AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
expected_results = [
((0, 1), "A"),
((1, 2), ","),
((3, 8), "naive"), # BERT normalizes this away
# Append MASK here after lower-casing
((16, 21), "Allen"),
((22, 24), "##NL"),
((24, 25), "##P"),
((26, 34), "sentence"),
((35, 36), "."),
]
# Check if the tokenizer is uncased
if tokenizer_r.init_kwargs.get("do_lower_case"):
expected_results = [(offset, token.lower()) for (offset, token) in expected_results]
# Append the special tokens
expected_results.insert(3, ((9, 15), "[MASK]"))
expected_results.insert(0, (None, "[CLS]"))
expected_results.append((None, "[SEP]"))
self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
# self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
class RobertaFastTokenizerTest(CommonFastTokenizerTest):
TOKENIZERS_CLASSES = frozenset(
[
Tokenizer(
"Roberta",
RobertaTokenizerFast,
RobertaTokenizer,
"vocab_file",
filter_roberta_detectors,
(("cls_token", "<s>"),),
)
]
)
def assert_embeded_special_tokens(self, tokenizer_r, tokenizer_p):
sentence = "A, <mask> AllenNLP sentence."
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
# token_type_ids should put 0 everywhere
self.assertEquals(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
# attention_mask should put 1 everywhere, so sum over length should be 1
        self.assertEqual(
sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
)
tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
self.assertSequenceEqual(tokens_r, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
self.assertSequenceEqual(tokens_p, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
class NoPaddingTokenFastTokenizerMatchingTest(CommonFastTokenizerTest):
TOKENIZERS_CLASSES = [
Tokenizer("OpenAI GPT", OpenAIGPTTokenizerFast, OpenAIGPTTokenizer, "vocab_file", None, None),
Tokenizer("GPT2", GPT2TokenizerFast, GPT2Tokenizer, "vocab_file", None, [("add_prefix_space", True)]),
]
def fast_align_python(self, tokenizer_r, tokenizer_p, tok_case, pretrained_name):
# Check is_fast is set correctly
self.assertFalse(tokenizer_p.is_fast)
self.assertTrue(tokenizer_r.is_fast)
# Check that Rust and Python align
self.assert_tokenization_python_rust_equals(tokenizer_r, tokenizer_p)
self.assert_num_special_tokens_to_add_equal(tokenizer_r, tokenizer_p)
self.assert_max_length_equal(tokenizer_r, tokenizer_p)
self.assert_special_tokens_map_equal(tokenizer_r, tokenizer_p)
self.assert_embeded_special_tokens(tokenizer_r, tokenizer_p)
self.assert_padding(tokenizer_r, tokenizer_p)
# Specific for
kwargs = {}
if tok_case.kwargs is not None:
kwargs = dict(tok_case.kwargs)
tokenizer_r = tok_case.rust_cls.from_pretrained(pretrained_name, **kwargs)
self.assert_pretokenized_inputs(tokenizer_r, tokenizer_p)
def assert_padding(self, tokenizer_r, tokenizer_p, max_length=15):
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(
ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
)
class TransfoXLFastTokenizerTest(NoPaddingTokenFastTokenizerMatchingTest):
TOKENIZERS_CLASSES = frozenset(
[Tokenizer("TransfoXL", TransfoXLTokenizerFast, TransfoXLTokenizer, "pretrained_vocab_file", None, None)]
)
@require_torch
def test_all_tokenizers(self):
super().test_all_tokenizers()
@require_torch
def test_pretokenized_tokenizers(self):
super().test_pretokenized_tokenizers()
| [] |
2024-01-10 | gaurangdave/Aletheia | utils~common_utils.py | from tqdm.auto import tqdm
from joblib import dump, load
import pickle
import plotly.io as io
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
import pyLDAvis.gensim_models as gensimvis
import pyLDAvis.sklearn
import pyLDAvis
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.ldamodel import LdaModel
from gensim.models import CoherenceModel
from gensim.models.phrases import Phraser
from gensim.models import Phrases
from gensim.utils import simple_preprocess
from gensim import corpora, models
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import spacy
from pprint import pprint
import pandas as pd
# pd.set_option('display.max_rows', 500)
# pd.set_option('display.max_columns', 500)
# pd.set_option('display.width', 1000)
import re
import string
from bs4 import BeautifulSoup
import nltk
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
nltk.download('stopwords')
# Plotting tools
# loading library
nlp = spacy.load('en_core_web_md')
# nlp = spacy.load('en_core_web_trf')
nlp.add_pipe('merge_entities')
# nlp.add_pipe("merge_noun_chunks")
tqdm.pandas(desc="processing")
# Utility Functions for Text Cleaning
def sent_to_words(sentences):
for sentence in tqdm(sentences):
yield (simple_preprocess(str(sentence), deacc=True))
# function to clean html tags from text
def clean_html(html):
# parse html content
soup = BeautifulSoup(html, "html.parser")
for data in soup(['style', 'script', 'code', 'a']):
# Remove tags
data.decompose()
# return data by retrieving the tag content
return ' '.join(soup.stripped_strings)
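# Example: clean_html("<p>Hello <a href='x'>link</a> world</p>") returns "Hello world"
# (style/script/code/a tags are removed entirely before the remaining text is joined).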
# function to convert text to lowercase
def lower_case(text):
return text.lower()
# function to remove line breaks
def remove_line_breaks(text):
return re.sub(r'\n', '', text)
# function to remove punctuation
def remove_punctuation(text):
return text.translate(str.maketrans('', '', string.punctuation))
# function to remove numbers
def remove_numbers(text):
return re.sub(r'\d+', '', text)
# function to remove extra spaces
def remove_extra_spaces(text):
text = text.replace(u'\xa0', u' ')
return text
# return re.sub(' +', ' ', text)
# function to remove stopwords
def remove_stopwords(texts, stop_words = []):
preprocess_text = simple_preprocess(str(texts), deacc=True)
word_list = [word for word in preprocess_text if word not in stop_words]
return " ".join(word_list)
# return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
# helper function to create pos tags
def create_pos_tag(str_sent):
return nlp(str_sent)
# function for text lemmatization using spac
##'ADJ', 'VERB'
def lemmatization(texts, allowed_postags=['PROPN', 'NOUN'], stop_words=[]):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in tqdm(texts):
doc = nlp(" ".join(sent))
texts_out.append(
[token.lemma_ for token in doc if (token.pos_ in allowed_postags and token.is_stop == False and token.text not in stop_words)])
return texts_out
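# Example (illustrative; exact output depends on the loaded spaCy model): with the default
# noun-only postags, lemmatization([["the", "striped", "bats", "were", "hanging"]]) keeps only
# the noun lemma, e.g. [["bat"]].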
def tokenization(texts, allowed_postags=['PROPN', 'NOUN'], stop_words=[]):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in tqdm(texts):
doc = nlp(" ".join(sent))
texts_out.append(
["_".join(token.text.split(" ")) for token in doc if (token.pos_ in allowed_postags and token.is_stop == False and token.text not in stop_words)])
return texts_out
def lemmatization_without_pos(texts):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append(
[token.lemma_ for token in doc])
return texts_out
def simple_tokenization(texts):
"""https://spacy.io/api/annotation"""
return [nlp(text) for text in tqdm(texts)]
def make_bigrams(texts, bigram_mod):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts, bigram_mod, trigram_mod):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
# helper function to create pos tags distribution
def create_pos_tags_distribution(docs=[]):
token_distribution = {}
is_alpha = 0
is_stop = 0
for doc in tqdm(docs):
for token in doc:
token_distribution[token.pos_] = token_distribution.get(
token.pos_, 0) + 1
if (token.is_alpha):
is_alpha += 1
if (token.is_stop):
is_stop += 1
return token_distribution, is_alpha, is_stop
# function to create n-grams from noun chunks
def create_noun_chunk_ngrams(docs):
n_gram_docs = []
for doc in docs:
doc_text = doc.text
for chunk in doc.noun_chunks:
chunk_n_gram = "_".join(chunk.text.split(" "))
doc_text = doc_text.replace(chunk.text, chunk_n_gram)
n_gram_docs.append(doc_text.split(" "))
return n_gram_docs
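# Example (illustrative): in a doc containing "natural language processing is fun", the noun
# chunk "natural language processing" is rewritten as the single token "natural_language_processing".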
def lemmatization_noun_chunks(texts):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if (
("_" in token.text) or # if the token is a noun chunk allow that
# if the token is a noun or proper noun allow that
(token.pos_ in ['NOUN', 'PROPN']
and token.is_alpha and token.is_stop == False)
)])
return texts_out | [] |
2024-01-10 | gaurangdave/Aletheia | utils~sklearn_utils.py | import numpy as np
from tqdm.auto import tqdm
from joblib import dump, load
import pickle
import plotly.io as io
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
import pyLDAvis.gensim_models as gensimvis
import pyLDAvis.sklearn
import pyLDAvis
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.ldamodel import LdaModel
from gensim.models import CoherenceModel
from gensim.models.phrases import Phraser
from gensim.models import Phrases
from gensim.utils import simple_preprocess
from gensim import corpora, models
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import spacy
from pprint import pprint
import pandas as pd
import re
import string
from bs4 import BeautifulSoup
import nltk
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
nltk.download('stopwords')
# Styling
def color_green(val):
color = 'green' if val > .1 else 'black'
return 'color: {col}'.format(col=color)
def make_bold(val):
weight = 700 if val > .1 else 400
return 'font-weight: {weight}'.format(weight=weight)
def print_sklearn_sparcity(data_vectorized):
# Materialize the sparse data
data_dense = data_vectorized.todense()
# Compute Sparsicity = Percentage of Non-Zero cells
print("Sparsicity: ", ((data_dense > 0).sum()/data_dense.size)*100, "%")
def create_sklearn_dominent_topic_dataframe(lda_model, data_vectorized):
lda_output = lda_model.transform(data_vectorized)
# column names
topicnames = ["Topic" + str(i) for i in range(lda_model.n_components)]
# index names
docnames = ["Doc" + str(i) for i in range(len(data))]
df_document_topic = pd.DataFrame(
np.round(lda_output, 2), columns=topicnames, index=docnames)
# Get dominant topic for each document
dominant_topic = np.argmax(df_document_topic.values, axis=1)
df_document_topic['dominant_topic'] = dominant_topic
return df_document_topic
def print_sklearn_dominant_topics(lda_model, data_vectorized):
df_document_topic = create_sklearn_dominent_topic_dataframe(
lda_model, data_vectorized)
# Apply Style
df_document_topics = df_document_topic.head(
15).style.applymap(color_green).applymap(make_bold)
return df_document_topics
def print_sklearn_topic_distribution(lda_model, data_vectorized):
df_document_topic = create_sklearn_dominent_topic_dataframe(
lda_model, data_vectorized)
df_topic_distribution = df_document_topic['dominant_topic'].value_counts(
).reset_index(name="Num Documents").rename(columns={'index': 'Topic'})
# df_topic_distribution.columns = ["Topic Num", "Num Documents"]
return df_topic_distribution
# Show top n keywords for each topic
def show_sklearn_topics(vectorizer, lda_model, n_words=20):
keywords = np.array(vectorizer.get_feature_names_out())
topic_keywords = []
for topic_weights in lda_model.components_:
top_keyword_locs = (-topic_weights).argsort()[:n_words]
topic_keywords.append(keywords.take(top_keyword_locs))
return topic_keywords
def format_sklearn_topics(topic_keywords):
df_topic_keywords = pd.DataFrame(topic_keywords)
df_topic_keywords.columns = [
'Word '+str(i) for i in range(df_topic_keywords.shape[1])]
df_topic_keywords.index = [
'Topic '+str(i) for i in range(df_topic_keywords.shape[0])]
return df_topic_keywords
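# Usage sketch (illustrative): chain the two helpers above to get a keywords-per-topic table:
#     topic_keywords = show_sklearn_topics(vectorizer, lda_model, n_words=10)
#     df_topic_keywords = format_sklearn_topics(topic_keywords)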
def analyze_sklearn_lda_model(lda_model, data_vectorized):
# Log Likelyhood: Higher the better
print("Log Likelihood: ", lda_model.score(data_vectorized))
# Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
print("Perplexity: ", lda_model.perplexity(data_vectorized))
# helper function to visualize lda model
def visualize_sklearn_lda_model(lda_model, data_vectorized, vectorizer, mds='tsne'):
pyLDAvis.enable_notebook()
panel2 = pyLDAvis.sklearn.prepare(
lda_model, data_vectorized, vectorizer, mds=mds)
return panel2
| [] |
2024-01-10 | gaurangdave/Aletheia | utils~gensim_utils.py | from tqdm.auto import tqdm
from joblib import dump, load
import pickle
import plotly.io as io
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
import pyLDAvis.gensim_models as gensimvis
import pyLDAvis.sklearn
import pyLDAvis
from gensim.models.ldamulticore import LdaMulticore
from gensim.models.ldamodel import LdaModel
from gensim.models import CoherenceModel
from gensim.models.phrases import Phraser
from gensim.models import Phrases
from gensim.utils import simple_preprocess
from gensim import corpora, models
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import spacy
from pprint import pprint
import pandas as pd
import re
import string
from bs4 import BeautifulSoup
import nltk
from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
nltk.download('stopwords')
# Plotting tools
# loading library
# function to compute optimal parameters for LDA model
def compute_coherence_values(corpus, id2word, texts, num_topics, passes, chunk_sizes=[200], iterations=[100]):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
params = []
for num_topic in tqdm(num_topics):
# for chunk_size in tqdm(chunk_sizes):
for num_passes in tqdm(passes):
for iteration in tqdm(iterations):
model = LdaModel(corpus=corpus,
id2word=id2word,
num_topics=num_topic,
random_state=100,
update_every=1,
# chunksize=chunk_size,
passes=num_passes,
iterations=iteration,
per_word_topics=True)
model_list.append(model)
# Compute Perplexity
perplexity = model.log_perplexity(corpus)
# Compute Coherence Score
coherence_model_lda = CoherenceModel(
model=model, texts=texts, dictionary=id2word, coherence='c_v')
cv_coherence = coherence_model_lda.get_coherence()
coherence_model_umass = CoherenceModel(
model=model, texts=texts, dictionary=id2word, coherence='u_mass')
umass_coherence = coherence_model_umass.get_coherence()
coherence_values.append({
"perplexity": perplexity,
"cv_coherence": cv_coherence,
"umass_coherence": umass_coherence,
})
params.append({'num_topics': num_topic, 'chunk_size': "chunk_size",
'passes': num_passes, 'iterations': iteration})
return model_list, coherence_values, params
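# A hedged usage sketch (not in the original file): running the grid search above on a small
# tokenized corpus. `texts` is assumed to be a list of token lists.
#
#   id2word = corpora.Dictionary(texts)
#   corpus = [id2word.doc2bow(doc) for doc in texts]
#   models, scores, params = compute_coherence_values(
#       corpus, id2word, texts, num_topics=[4, 6, 8], passes=[10], iterations=[100])
#   best = max(range(len(scores)), key=lambda i: scores[i]["cv_coherence"])
#   print(params[best], scores[best])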
def analyze_gensim_lda_model(lda_model, corpus, id2word, texts, num_topics, passes, chunk_sizes=[200]):
# Compute Perplexity
# a measure of how good the model is. lower the better.
print('\nPerplexity: ', lda_model.log_perplexity(corpus))
# Compute Coherence Score
coherence_model_lda = CoherenceModel(
model=lda_model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# helper functions to visualize LDA model
def visualize_gensim_lda_model(lda_model, corpus, id2word, filename="gensim_lda.html"):
# Visualize the topics
pyLDAvis.enable_notebook()
vis = gensimvis.prepare(lda_model, corpus, id2word)
    pyLDAvis.save_html(vis, filename)  # PreparedData has no .save(); save_html writes the page to disk
| [] |
2024-01-10 | kdliaokueida/Deep_Reinforcement_Learning | p3_collab-compet~replay_buffer.py | #code from openai
#https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import numpy as np
import random
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
        Parameters
        ---------
        capacity: int
            Total size of the array - must be a power of two.
        operation: lambda obj, obj -> obj
            an operation for combining elements (e.g. sum, max);
            must form a mathematical group together with the set of
            possible values for array elements.
        neutral_element: obj
            neutral element for the operation above, e.g. float('-inf')
            for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequences
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
        if array values are probabilities, this function
        allows sampling indexes according to the discrete
        probability efficiently.
        Parameters
        ----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
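    # A hedged usage sketch (not part of the original file): drawing an index with probability
    # proportional to its stored value via `find_prefixsum_idx`. The values below are dummies.
    #
    #   tree = SumSegmentTree(4)
    #   for i, p in enumerate([1.0, 2.0, 3.0, 4.0]):
    #       tree[i] = p
    #   mass = random.random() * tree.sum()   # uniform in [0, 10.0)
    #   idx = tree.find_prefixsum_idx(mass)   # idx == 3 with probability 4/10, idx == 0 with 1/10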
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def push(self, all_states, states, actions, rewards, all_next_states, next_states, dones):
data = (all_states, states, actions, rewards, all_next_states, next_states, dones)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
    def _encode_sample(self, idxes):
        # each stored entry is the 7-tuple saved in `push`:
        # (all_states, states, actions, rewards, all_next_states, next_states, dones)
        all_obs, obses_t, actions, rewards, all_obs_next, obses_tp1, dones = [], [], [], [], [], [], []
        for i in idxes:
            data = self._storage[i]
            all_states, obs_t, action, reward, all_next_states, obs_tp1, done = data
            all_obs.append(np.array(all_states, copy=False))
            obses_t.append(np.array(obs_t, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            all_obs_next.append(np.array(all_next_states, copy=False))
            obses_tp1.append(np.array(obs_tp1, copy=False))
            dones.append(done)
        return (np.array(all_obs), np.array(obses_t), np.array(actions), np.array(rewards),
                np.array(all_obs_next), np.array(obses_tp1), np.array(dones))
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def push(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).push(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
                indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
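# A hedged usage sketch (not part of the original file): the typical prioritized-replay loop.
# The transition fields follow this buffer's `push` signature; all values are placeholders.
#
#   buffer = PrioritizedReplayBuffer(size=2 ** 16, alpha=0.6)
#   buffer.push(all_states, states, actions, rewards, all_next_states, next_states, dones)
#   *transitions, weights, idxes = buffer.sample(batch_size=128, beta=0.4)
#   # ...compute per-sample TD errors from `transitions`, then refresh priorities:
#   buffer.update_priorities(idxes, np.abs(td_errors) + 1e-6)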
| [] |
2024-01-10 | autodistill/autodistill-gpt-4v | autodistill_gpt_4v~gpt4v_model.py | import base64
import os
from dataclasses import dataclass
import numpy as np
import supervision as sv
from autodistill.detection import CaptionOntology, DetectionBaseModel
from openai import OpenAI
HOME = os.path.expanduser("~")
@dataclass
class GPT4V(DetectionBaseModel):
ontology: CaptionOntology
def __init__(self, ontology: CaptionOntology, api_key, prompt: str = None):
self.client = OpenAI(api_key=api_key)
self.ontology = ontology
self.prompt = prompt
def set_of_marks(self, input, masked_input, classes, masks) -> sv.Detections:
if classes is None:
classes = {k:k for k in self.ontology.prompts()}
payload = [
{
"role": "user",
"content": [
{
"type": "text",
"text": f"Attached is an image and a set of selections for this image. Please return the number of any selections for a {', '.join(classes)}. Return each class on a new line like Banana: 1, 2, 3 [new line] Apple: 5, 7, 8."
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,"
+ base64.b64encode(open(input, "rb").read()).decode(
"utf-8"
),
}
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,"
+ base64.b64encode(open(masked_input, "rb").read()).decode(
"utf-8"
),
}
},
],
}
]
print(payload[0]["content"][0])
response = self.client.chat.completions.create(
model="gpt-4-vision-preview",
messages=payload,
max_tokens=300,
)
print(response)
if "none" not in self.ontology.prompts():
self.ontology.prompts().append("none")
class_map = {}
for line in response.choices[0].message.content.split("\n"):
if ":" in line:
class_name = line.split(":")[0].strip().lower()
if class_name not in self.ontology.prompts():
continue
if class_name not in class_map:
class_map[class_name] = []
try:
int(line.split(":")[1].strip().lower())
except:
continue
class_map[class_name].append(int(line.split(":")[1].strip().lower()))
# get ids from class_map
all_ids = [item for sublist in class_map.values() for item in sublist]
print(all_ids)
# change class id for each mask
for idx, _ in enumerate(masks.xyxy):
for class_name, mask_ids in class_map.items():
for mask_id in mask_ids:
if mask_id == idx:
masks.class_id[idx] = self.ontology.prompts().index(class_name)
break
new_masks = []
new_class_id = []
new_xyxy = []
for idx, item in enumerate(masks.class_id):
if item in all_ids:
new_masks.append(masks.mask[idx])
new_class_id.append(masks.class_id[idx])
new_xyxy.append(masks.xyxy[idx])
masks.confidence = np.array([1] * len(masks.class_id))
return sv.Detections(
xyxy=np.array(new_xyxy),
mask=np.array(new_masks),
class_id=np.array(new_class_id),
confidence=np.array([1] * len(new_masks))
)
def predict(self, input, classes: list = None) -> sv.Classifications:
if classes is None:
classes = {k:k for k in self.ontology.prompts()}
payload = [
{
"role": "user",
"content": [
{
"type": "text",
"text": f"What is in the image? Return the class of the object in the image. Here are the classes: {', '.join(classes)}. You can only return one class from that list." if self.prompt is None else self.prompt
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,"
+ base64.b64encode(open(input, "rb").read()).decode(
"utf-8"
),
}
},
],
}
]
response = self.client.chat.completions.create(
model="gpt-4-vision-preview",
messages=payload,
max_tokens=300,
)
if "none" not in self.ontology.prompts():
self.ontology.prompts().append("none")
if self.prompt:
class_ids = []
for line in response.choices[0].message.content.split("\n"):
if ":" in line:
class_name = line.split(":")[1].strip().lower()
print(class_name)
if class_name not in self.ontology.prompts():
class_ids.append(self.ontology.prompts().index("none"))
else:
class_ids.append(self.ontology.prompts().index(class_name))
print(line)
confidence = [1] * len(class_ids)
        else:
            result = response.choices[0].message.content.lower()
            if result not in self.ontology.prompts():
                # fall back to the "none" class when the answer is not in the ontology
                class_ids = [self.ontology.prompts().index("none")]
                confidence = [0]
            else:
                class_ids = [self.ontology.prompts().index(result)]
                confidence = [1]
return sv.Classifications(
class_id=np.array(class_ids),
confidence=np.array(confidence),
)
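# A hedged usage sketch (not shipped with the original module): classifying a local image with
# the GPT-4V base model. The ontology mapping, API key, and file path are made up.
#
#   base_model = GPT4V(
#       ontology=CaptionOntology({"a photo of a banana": "banana",
#                                 "a photo of an apple": "apple"}),
#       api_key="sk-...",
#   )
#   results = base_model.predict("./fruit.jpg")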
| [
", ",
"data:image/jpeg;base64,"
] |
2024-01-10 | zl172463468/langchain-ChatGLM | chatglm_llm.py | from langchain.llms.base import LLM
from typing import Optional, List
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoTokenizer, AutoModel
import torch
DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
DEVICE_ID = "0" if torch.cuda.is_available() else None
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
class ChatGLM(LLM):
max_token: int = 10000
temperature: float = 0.01
top_p = 0.9
history = []
tokenizer: object = None
model: object = None
history_len: int = 10
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatGLM"
def _call(self,
prompt: str,
stop: Optional[List[str]] = None) -> str:
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history[-self.history_len:] if self.history_len>0 else [],
max_length=self.max_token,
temperature=self.temperature,
)
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history+[[None, response]]
return response
def load_model(self, model_name_or_path: str = "THUDM/chatglm-6b"):
self.tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
trust_remote_code=True
)
if torch.cuda.is_available():
self.model = (
AutoModel.from_pretrained(
model_name_or_path,
trust_remote_code=True)
.half()
.cuda()
)
elif torch.backends.mps.is_available():
self.model = (
AutoModel.from_pretrained(
model_name_or_path,
trust_remote_code=True)
.float()
.to('mps')
)
else:
self.model = (
AutoModel.from_pretrained(
model_name_or_path,
trust_remote_code=True)
.float()
)
self.model = self.model.eval()
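# A hedged usage sketch (not part of the original file): loading the weights and asking a
# single question. The model id is the same default used by load_model above.
#
#   llm = ChatGLM()
#   llm.load_model("THUDM/chatglm-6b")
#   print(llm("Hello, please introduce yourself."))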
| [] |
2024-01-10 | zl172463468/langchain-ChatGLM | knowledge_based_chatglm.py | from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import UnstructuredFileLoader
from chatglm_llm import ChatGLM
import sentence_transformers
import torch
import os
import readline
# Global Parameters
EMBEDDING_MODEL = "text2vec"
VECTOR_SEARCH_TOP_K = 6
LLM_MODEL = "chatglm-6b"
LLM_HISTORY_LEN = 3
DEVICE = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
# Show reply with source text from input document
REPLY_WITH_SOURCE = True
embedding_model_dict = {
"ernie-tiny": "nghuyong/ernie-3.0-nano-zh",
"ernie-base": "nghuyong/ernie-3.0-base-zh",
"text2vec": "GanymedeNil/text2vec-large-chinese",
}
llm_model_dict = {
"chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe",
"chatglm-6b-int4": "THUDM/chatglm-6b-int4",
"chatglm-6b": "THUDM/chatglm-6b",
}
def init_cfg(LLM_MODEL, EMBEDDING_MODEL, LLM_HISTORY_LEN, V_SEARCH_TOP_K=6):
global chatglm, embeddings, VECTOR_SEARCH_TOP_K
VECTOR_SEARCH_TOP_K = V_SEARCH_TOP_K
chatglm = ChatGLM()
chatglm.load_model(model_name_or_path=llm_model_dict[LLM_MODEL])
chatglm.history_len = LLM_HISTORY_LEN
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[EMBEDDING_MODEL],)
embeddings.client = sentence_transformers.SentenceTransformer(embeddings.model_name,
device=DEVICE)
def init_knowledge_vector_store(filepath:str):
if not os.path.exists(filepath):
print("路径不存在")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
print(f"{file} 已成功加载")
except:
print(f"{file} 未能成功加载")
return None
elif os.path.isdir(filepath):
docs = []
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
try:
loader = UnstructuredFileLoader(fullfilepath, mode="elements")
docs += loader.load()
print(f"{file} 已成功加载")
except:
print(f"{file} 未能成功加载")
vector_store = FAISS.from_documents(docs, embeddings)
return vector_store
def get_knowledge_based_answer(query, vector_store, chat_history=[]):
global chatglm, embeddings
prompt_template = """基于以下已知信息,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"]
)
chatglm.history = chat_history
knowledge_chain = RetrievalQA.from_llm(
llm=chatglm,
retriever=vector_store.as_retriever(search_kwargs={"k": VECTOR_SEARCH_TOP_K}),
prompt=prompt
)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}"
)
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
chatglm.history[-1][0] = query
return result, chatglm.history
if __name__ == "__main__":
init_cfg(LLM_MODEL, EMBEDDING_MODEL, LLM_HISTORY_LEN)
vector_store = None
while not vector_store:
filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
vector_store = init_knowledge_vector_store(filepath)
history = []
while True:
query = input("Input your question 请输入问题:")
resp, history = get_knowledge_based_answer(query=query,
vector_store=vector_store,
chat_history=history)
if REPLY_WITH_SOURCE:
print(resp)
else:
print(resp["result"])
| [
"基于以下已知信息,简洁和专业的来回答用户的问题。\n如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\",不允许在答案中添加编造成分,答案请使用中文。\n\n已知内容:\n{context}\n\n问题:\n{question}",
"没有提供足够的相关信息",
"{page_content}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | lucifercarnicero/python | yoquese.py | def limpiar (frase):
    frase = frase.lower()
    doc = nlp(frase)
    # keep only real word tokens: drop whitespace and punctuation
    lista_limpia = [token.text for token in doc if not token.is_space and not token.is_punct]
    return lista_limpia
import gensim
from gensim.parsing.preprocessing import remove_stopwords, strip_punctuation, preprocess_string
from gensim import corpora
from gensim.models import LsiModel
from gensim.models.coherencemodel import CoherenceModel
from pprint import pprint
from matplotlib import pyplot as plt
import pyLDAvis
import pyLDAvis.gensim
import spacy
# Assumption: the small Spanish spaCy pipeline is installed (python -m spacy download es_core_news_sm);
# `nlp` is required by limpiar() above but was missing from the original script.
nlp = spacy.load("es_core_news_sm")
mensajes=["Me encanta la tortilla de patata con cebolla", "No puedo creer que haya gente que tome la tortilla con cebolla", "sincebollista hasta la muerte, abajo la cebolla", "La cebolla no se toca","ojalá desaparezcan del mundo todos los cebollistas", "yo por mi cebolla mato","cada vez que veo una tortilla de patata con cebolla, me dan una puñalada en el corazon","la tortilla con chorizo es la mejor","cada vez que le echan cebolla a una tortilla, muere un gatito","no me gusta la tortilla","la tortilla mejor con cebolla"]
def ejecutarLSA(mensajes, min_topics, max_topics):
mensajes_preparados=[limpiar(mensaje) for mensaje in mensajes]
    # build a dictionary that assigns an id to every word in the corpus
    dic=corpora.Dictionary(mensajes_preparados)
    # count how many times each word appears and store it in a variable called corpus
    corpus=[dic.doc2bow(text) for text in mensajes_preparados]
    models=[]
    coherences=[]
    for num_topics in range (min_topics,max_topics-1):
        # build an LSI model from the corpus and the word dictionary
        lsa=LsiModel(corpus,num_topics=num_topics,id2word=dic)
        # the coherence model measures how similar the sentences are by turning them into vectors
        coherence_model_lsa=CoherenceModel(model=lsa, texts=mensajes_preparados,dictionary=dic,coherence='c_v')
coherence_lsa=coherence_model_lsa.get_coherence()
models.append(lsa)
coherences.append(coherence_lsa)
return(dic,coherences,models)
# we obtain a coherence level for each number of topics
def plot_graph(min_topics,max_topics,coherences,path):
    x=range(min_topics,max_topics-1)
    plt.plot(x,coherences)
    plt.xlabel("Number of topics")
    plt.ylabel("Coherence")
    plt.legend(["coherence values"], loc='best')
    plt.savefig(path)  # path is where the generated figure is saved
(dic_lsa, coherencias_lsa, modelos_lsa)=ejecutarLSA(mensajes,2,10)
plot_graph(2,10,coherencias_lsa,"lucia.png") | [] |
2024-01-10 | stefan-it/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
  howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
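# A hedged usage sketch (not part of the original script): loading this dataset through the
# `datasets` library once it is available under the name "openwebtext".
#
#   from datasets import load_dataset
#   ds = load_dataset("openwebtext", split="train")
#   print(ds[0]["text"][:200])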
| [] |
2024-01-10 | YanJiaHuan/AI_Tutor | lib~chat_paper.py | import numpy as np
import os
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
from get_paper_from_pdf import Paper
import re
def validateTitle_2(title):
    # Sanitize illegal path characters in the paper title
    rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
    new_title = re.sub(rstr, "_", title) # replace them with underscores
return new_title
# print(validateTitle_2('Regret Bounds for Risk-Sensitive.md'))
# Define the Reader class
class Reader:
    # Initializer: set the reader's attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
                 sort=arxiv.SortCriterion.SubmittedDate, user_name='default', args=None):
        self.user_name = user_name # reader's name
        self.key_word = key_word # keywords the reader is interested in
        self.query = query # search query entered by the reader
        self.sort = sort # sort order chosen by the reader
        self.language = 'English'
        self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
        # Create a ConfigParser object
        self.config = configparser.ConfigParser()
        # Read the config file
        self.config.read('./lib/apikey.ini')
        # Get the value of the API-key entry
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
        # A paper counts as a target only if every filter keyword appears in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("筛选后剩下的论文数量:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
        # Sanitize illegal path characters in the paper title
        rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title) # replace them with underscores
return new_title
def download_pdf(self, filter_results):
        # Create the download folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ',
'').replace(
':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
        # Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str + '.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
                # Download finished, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
        Upload the image to Gitee
        :return: the image URL
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name + '-' + date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
        # Change this to your own Gitee account, repo name, and folder name:
url = f'https://gitee.com/api/v5/repos/' + self.config.get('Gitee', 'owner') + '/' + self.config.get('Gitee',
'repo') + '/contents/' + self.config.get(
'Gitee', 'path') + '/' + path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/" + self.config.get('Gitee', 'owner') + '/' + self.config.get(
'Gitee', 'repo') + '/contents/' + self.config.get('Gitee', 'path') + '/' + path
return image_url
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
            # Step 1: summarize using the title, abstract, and introduction.
text = ''
text += 'Title:' + paper.title
text += 'Url:' + paper.url
            text += 'Abstract:' + paper.abs
text += 'Paper_info:' + paper.section_text_dict['paper_info']
# intro
text += list(paper.section_text_dict.values())[0]
chat_summary_text = self.chat_summary(text=text)
htmls.append('## Paper:' + str(paper_index + 1))
htmls.append('\n\n\n')
htmls.append(chat_summary_text)
            # Image info is not important and often triggers errors, so this block is commented out.
            # # TODO: insert the paper's largest image into the markdown file; this could be made smarter:
# first_image, ext = paper.get_image_path()
# if first_image is None or self.gitee_key == '':
# pass
# else:
# image_title = self.validateTitle(paper.title)
# image_url = self.upload_gitee(image_path=first_image, image_name=image_title, ext=ext)
# htmls.append("\n\n")
# htmls.append("")
# htmls.append("\n\n")
            # Step 2: summarize the methods:
            # TODO: some papers title the methods section after the algorithm name, so simple keyword matching misses it; a better scheme is needed later.
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
text = summary_text + "\n\n<Methods>:\n\n" + method_text
chat_method_text = self.chat_method(text=text)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n" * 4)
            # Step 3: summarize the whole paper and give a score:
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
chat_conclusion_text = self.chat_conclusion(text=text)
htmls.append(chat_conclusion_text)
htmls.append("\n" * 4)
            # # Combine everything into a single file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path, self.validateTitle(paper.title) + "." + self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
conclusion_prompt_token = 650
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
            # ChatGPT system role
{"role": "assistant",
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:" + clip_text},
            # background knowledge; see the OpenReview review process for reference
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.your total feedback should be no more than 20 words
""".format(self.language, self.language)},
]
# print('#' * 20)
# print(messages)
# print('#' * 20)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
            # prompts should be written in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
# print("conclusion_result:\n", result)
# print("prompt_token_used:", response.usage.prompt_tokens,
# "completion_token_used:", response.usage.completion_tokens,
# "total_token_used:", response.usage.total_tokens)
# print("response_time:", response.response_ms / 1000.0, 's')
return result
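    # A hedged worked example (not in the original code) of the character-level clipping used
    # above: with max_token_num = 4096 and conclusion_prompt_token = 650, a text of 8000 tokens
    # and 30000 characters keeps roughly the first int(30000 * (4096 - 650) / 8000) = 12922
    # characters, which keeps the prompt inside the model's context window.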
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
method_prompt_token = 650
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - method_prompt_token) / text_token)
clip_text = text[:clip_text_index]
# print(clip_text)
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
            # ChatGPT system role
{"role": "assistant",
"content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions." + clip_text},
            # background knowledge
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are. your total feedback should be no more than 20 words
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
# print('#' * 20)
# print(messages)
# print('#' * 20)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
# print("method_result:\n", result)
# print("prompt_token_used:", response.usage.prompt_tokens,
# "completion_token_used:", response.usage.completion_tokens,
# "total_token_used:", response.usage.total_tokens)
# print("response_time:", response.response_ms / 1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
summary_prompt_token = 1000
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
{"role": "assistant",
"content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: " + clip_text},
{"role": "user", "content": """
You need to answer the following questions:
1. Title of Presentation
2. Section 1: Introduction
- Brief overview of topic
- Key points to be covered
3. Section 2: Background Information
- Historical context
- Key figures and events
- Relevant theories
4. Section 3: Main Points
- Point 1
- Point 2
- Point 3
5. Section 4: Case Study or Example
- Real-world example
- Analyze how topic applies to case study
6. Section 5: Conclusion
- Summary of key points
- Takeaways for the audience
- Future directions for research or practice
7. References
- List of sources cited in presentation
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms / 1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
        # The markdown module's convert method could turn the text into HTML:
        # html = markdown.markdown(text)
        # Open a file in write (or append) mode
        with open(file_name, mode, encoding="utf-8") as f:
            # Write the content to the file
            f.write(text)
    # Print the reader's info
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def chat_terminology(self, text):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
conclusion_prompt_token = 650
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - conclusion_prompt_token) / text_token)
clip_text = text[:clip_text_index]
messages = [
{"role": "system",
"content": "You are a reviewer in the field of [" + self.key_word + "] and you need to critically review this article"},
            # ChatGPT system role
{"role": "assistant",
"content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to answer the following questions:" + clip_text},
            # background knowledge; see the OpenReview review process for reference
{"role": "user", "content": """
List 10 terminology in this article and use your own knowledge or content from thia article to explain them.
""".format(self.language, self.language)},
]
print('#'*20)
print(messages)
print('#' * 20)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
            # prompts should be written in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print(result)
return result
def content_extractor(md_name):
with open(
md_name,
'r', encoding='utf-8') as file:
text = file.read()
# Create patterns for the sections you want to extract
# summary_pattern = re.compile(r"6\. Summary:(.*?)(?=\n\n\n\n)", re.DOTALL)
# methods_pattern = re.compile(r"7\. Methods:(.*?)(?=\n\n\n\n)", re.DOTALL)
# conclusion_pattern = re.compile(r"8\. Conclusion:(.*?)(?=\n\n\n\n)", re.DOTALL)
# summary_pattern = re.compile(r'\d+\.summary(.*?)\d+\.(?:[^summary]|summary(?!$))+', re.DOTALL)
# methods_pattern = re.compile(r'\d+\.methods(.*?)\d+\.(?:[^methods]|methods(?!$))+', re.DOTALL)
# conclusion_pattern = re.compile(r'\d+\.conclusion(.*?)\d+\.(?:[^conclusion]|conclusion(?!$))+', re.DOTALL)
summary_pattern = r'\d+\.summary(.*?)\d+\.(?:[^summary]|summary(?!$))+'
summary_match = re.search(summary_pattern, text, flags=re.DOTALL)
methods_pattern = r'\d+\.methods(.*?)\d+\.(?:[^methods]|methods(?!$))+'
methods_match = re.search(methods_pattern, text, flags=re.DOTALL)
conclusion_pattern = r'\d+\.conclusion(.*?)\d+\.(?:[^conclusion]|conclusion(?!$))+'
conclusion_match = re.search(conclusion_pattern, text, flags=re.DOTALL)
if summary_match:
summary = summary_match.group(1).strip()
else:
summary = 'Re error '
if methods_match:
methods = methods_match.group(1).strip()
else:
methods = 'Re error '
if conclusion_match:
conclusion = conclusion_match.group(1).strip()
else:
conclusion = 'Re error '
paper_info = {
"GPTsummary": summary,
"GPTmethods": methods,
"GPTconclusion": conclusion
}
return paper_info
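# A hedged usage sketch (not in the original file): extracting the GPT-generated sections from
# a previously exported markdown file. The file path is hypothetical.
#
#   info = content_extractor("./export/Some_Paper_Title.md")
#   print(info["GPTsummary"][:200])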
# def main(args):
# # 创建一个Reader对象,并调用show_info方法
# # if args.sort == 'Relevance':
# # sort = arxiv.SortCriterion.Relevance
# # elif args.sort == 'LastUpdatedDate':
# # sort = arxiv.SortCriterion.LastUpdatedDate
# # else:
# # sort = arxiv.SortCriterion.Relevance
#
# if args.pdf_path:
# reader1 = Reader(key_word=args.key_word,
# query=args.query,
# filter_keys=args.filter_keys,
# sort=args.sort,
# args=args
# )
# reader1.show_info()
# #开始判断是路径还是文件:
# paper_list = []
# if args.pdf_path.endswith(".pdf"):
# paper_list.append(Paper(path=args.pdf_path))
# else:
# for root, dirs, files in os.walk(args.pdf_path):
# print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
# for filename in files:
# # 如果找到PDF文件,则将其复制到目标文件夹中
# if filename.endswith(".pdf"):
# paper_list.append(Paper(path=os.path.join(root, filename)))
# print("------------------paper_num: {}------------------".format(len(paper_list)))
# [print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
# # output = reader1.summary_with_chat(paper_list=paper_list)
# for paper in paper_list:
# print(paper.title)
# with open(f"./data/{validateTitle_2(paper.title)}.json", "w") as write_file:
# paper.section_text_dict.update(content_extractor(f"./export/{validateTitle_2(paper.title)}.md"))
# json.dump(paper.section_text_dict, write_file, indent=4)
# else:
# reader1 = Reader(key_word=args.key_word,
# query=args.query,
# filter_keys=args.filter_keys,
# sort=args.sort,
# args=args
# )
# reader1.show_info()
# filter_results = reader1.filter_arxiv(max_results=args.max_results)
# paper_list = reader1.download_pdf(filter_results)
# reader1.summary_with_chat(paper_list=paper_list)
# # for paper in paper_list:
# # with open(f"./data/{paper.title}.json", "w") as write_file:
# # paper.section_text_dict.update(content_extractor(f"./export/{validateTitle_2(paper.title)}.md"))
# # json.dump(paper.section_text_dict, write_file, indent=4)
def main(args):
    # Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
        # Decide whether the path is a directory or a single PDF file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) # 当前目录路径
for filename in files:
# 如果找到PDF文件,则将其复制到目标文件夹中
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
def main_2(args):
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=args.sort,
args=args
)
reader1.show_info()
paper_content = Paper(args.pdf_path)
terminology = reader1.chat_terminology(paper_content)
print(terminology)
else:
print("not find pdf_path")
# type in terminal:
# def main():
# path_file = './pdf_files/reinforcement learning-2023-03-23-11'
# print('start')
# paper_list = []
# for root, dirs, files in os.walk(path_file):
# for filename in files:
# if filename.endswith(".pdf"):
# paper_list.append(Paper(path=(root+'/'+filename)))
# print(len(paper_list))
# for paper in paper_list:
# print(paper.title)
# paper_name = validateTitle_2(paper.title)
# with open(f"./data/{paper_name}.json", "w") as write_file:
# # paper.section_text_dict.update(content_extractor(f"./export/{paper_name}.md"))
# json.dump(paper.section_text_dict, write_file, indent=4)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default=r'/Users/yan/Desktop/Project/ChatPaper/pdf_files/reinforcement learning-2023-03-23-11', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='reinforcement learning',
help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning',
help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default=' ',
help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
parser.add_argument("--max_results", type=int, default=100, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False,
help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md',
help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
parser.add_argument("--language", type=str, default='en', help="The other output lauguage is English, is en")
args = parser.parse_args()
main(args)
##################
#type "python ./lib/chat_paper.py " in your terminal, the code will download 100 paper to local,and do parser and GPTsummary afterwards | [
" \n List 10 terminology in this article and use your own knowledge or content from thia article to explain them.\n \n ",
" \n You need to answer the following questions:\n 1. Title of Presentation \n\n 2. Section 1: Introduction \n\n - Brief overview of topic \n \n - Key points to be covered \n\n \n\n 3. Section 2: Background Information \n\n - Historical context \n \n - Key figures and events \n \n - Relevant theories \n\n \n\n 4. Section 3: Main Points \n \n - Point 1 \n \n - Point 2 \n \n - Point 3 \n\n \n\n 5. Section 4: Case Study or Example \n \n - Real-world example \n \n - Analyze how topic applies to case study \n\n \n\n 6. Section 5: Conclusion \n \n - Summary of key points \n \n - Takeaways for the audience \n \n - Future directions for research or practice \n\n \n \n 7. References \n \n - List of sources cited in presentation \n\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.your total feedback should be no more than 20 words \n ",
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are. your total feedback should be no more than 20 words\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n\n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to answer the following questions:PLACEHOLDER",
"650",
"] and you need to critically review this article",
"This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions.PLACEHOLDER",
"] who is good at summarizing papers using concise statements",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
"This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"You are a reviewer in the field of [",
"1000"
] |
2024-01-10 | YanJiaHuan/AI_Tutor | test~pdf_parser.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
from get_paper_from_pdf import Paper
# Define the Reader class
class Reader:
    # Initializer: set the reader's attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
                 sort=arxiv.SortCriterion.SubmittedDate, user_name='default', args=None):
        self.user_name = user_name # reader's name
        self.key_word = key_word # keywords the reader is interested in
        self.query = query # search query entered by the reader
        self.sort = sort # sort order chosen by the reader
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
        self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
        # Create a ConfigParser object
        self.config = configparser.ConfigParser()
        # Read the config file
        self.config.read('./test/apikey.ini')
        # Get the value of the API-key entry
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("cl100k_base")
# self.encoding = tiktoken.encoding_for_model("gpt-4")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
        # A paper counts as a target only if every filter keyword appears in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("筛选后剩下的论文数量:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
        # Sanitize illegal path characters in the paper title
        rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
        new_title = re.sub(rstr, "_", title) # replace them with underscores
return new_title
def download_pdf(self, filter_results):
        # Create the download folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ', '').replace(':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
        # Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str+'.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
                # Download finished, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
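# The tenacity decorator above retries the arxiv download up to 5 times with exponential
# backoff (waits of roughly 4-10 seconds) and re-raises the last exception, so persistent
# failures still surface in download_pdf's except block.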
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
Upload the image to Gitee.
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name+ '-' +date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
# Change this to your own Gitee account, repository name and folder name:
url = f'https://gitee.com/api/v5/repos/'+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/'+path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/"+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/' + path
return image_url
def summary_with_chat(self, paper_list):
# htmls = []
data_list=[]
for paper_index, paper in enumerate(paper_list):
data_dic = {}
# print(paper_index, paper.abstract)
# Step 1: summarize using the title, abstract and introduction.
# data_dic['Abstract']=paper.abs
# data_dic['Introduction']=paper.section_text_dict['Introduction']
data_dic['paper_info']=paper.section_text_dict['paper_info']
data_list.append(data_dic)
with open("./test/mydata.json", "w") as out_file:
json.dump(data_list, out_file)
# Print out the reader's information
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def main(args):
# Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
# Decide whether the input is a directory or a single file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) # current directory path
for filename in files:
# If a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default='./test/demo.pdf', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: nlp', help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='nlp', help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='nlp', help="the filter key words; every word must appear in the abstract for a paper to be selected")
parser.add_argument("--max_results", type=int, default=2000, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="LastUpdatedDate", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False, help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="export file format; md works best when saving images, otherwise txt keeps the layout clean")
parser.add_argument("--language", type=str, default='en', help="output language; use en for English")
args = parser.parse_args()
import time
start_time = time.time()
main(args=args)
print("summary time:", time.time() - start_time)
| [] |
2024-01-10 | YanJiaHuan/AI_Tutor | test~chat_paper.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
from get_paper_from_pdf import Paper
# Define the Reader class
class Reader:
# Initializer: set the reader's attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
sort=arxiv.SortCriterion.SubmittedDate, user_name='default', args=None):
self.user_name = user_name # reader's name
self.key_word = key_word # keywords the reader is interested in
self.query = query # search query entered by the reader
self.sort = sort # sort criterion chosen by the reader
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the config file
self.config.read('./test/apikey.ini')
# Get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("cl100k_base")
# self.encoding = tiktoken.encoding_for_model("gpt-4")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
# A paper counts as a target only if every filter keyword is found in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("Number of papers remaining after filtering:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
# Sanitize the messy characters in the paper title so it can be used as a file path
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
new_title = re.sub(rstr, "_", title) # replace them with underscores
return new_title
def download_pdf(self, filter_results):
# Create the output folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ', '').replace(':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
# Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str+'.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
# Download complete, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
Upload the image to Gitee.
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name+ '-' +date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
# Change this to your own Gitee account, repository name and folder name:
url = f'https://gitee.com/api/v5/repos/'+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/'+path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/"+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/' + path
return image_url
def summary_with_chat(self, mydata, paper_list):
# htmls = []
data_dic = {}
for paper_index, paper in enumerate(paper_list):
# print(paper_index, paper.abstract)
# Step 1: summarize using the title, abstract and introduction.
text = ''
text += 'Title:' + paper.title
text += '|Abstract:' + paper.section_text_dict['Abstract']
text += '|Introduction:' + paper.section_text_dict['Introduction']
text += '|The first page info:' + paper.section_text_dict['paper_info']
# intro
text += list(paper.section_text_dict.values())[0]
# chat_summary_text = ""
try:
mydata = self.chat_summary(text=text)
except Exception as e:
print("summary_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
offset = int(str(e)[current_tokens_index:current_tokens_index+4])
summary_prompt_token = offset+1000+150
mydata = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
# htmls.append('## Paper:' + str(paper_index+1))
# htmls.append('\n\n\n')
# htmls.append(chat_summary_text)
# # Step 2: summarize the methods:
# # TODO: some papers name their methods section after the algorithm, so simple keyword filtering is unreliable; a better scheme is needed later.
# method_key = ''
# for parse_key in paper.section_text_dict.keys():
# if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
# method_key = parse_key
# break
# if method_key != '':
# text = ''
# method_text = ''
# summary_text = ''
# summary_text += "<Summary>" + chat_summary_text
# # methods
# method_text += paper.section_text_dict[method_key]
# text = summary_text + "\n\n<Method>:\n\n" + method_text
# chat_method_text = ""
# try:
# chat_method_text = self.chat_method(text=text)
# except Exception as e:
# print("method_error:", e)
# if "maximum context" in str(e):
# current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
# offset = int(str(e)[current_tokens_index:current_tokens_index+4])
# method_prompt_token = offset+800+150
# chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
# # htmls.append(chat_method_text)
# else:
# chat_method_text = ''
# # htmls.append("\n"*4)
# # Step 3: summarize the whole paper and give it a score:
# conclusion_key = ''
# for parse_key in paper.section_text_dict.keys():
# if 'conclu' in parse_key.lower():
# conclusion_key = parse_key
# break
# text = ''
# conclusion_text = ''
# summary_text = ''
# summary_text += "<Summary>" + chat_summary_text + "\n <Method>:\n" + chat_method_text
# if conclusion_key != '':
# # conclusion
# conclusion_text += paper.section_text_dict[conclusion_key]
# text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
# else:
# text = summary_text
# chat_conclusion_text = ""
# try:
# chat_conclusion_text = self.chat_conclusion(text=text)
# except Exception as e:
# print("conclusion_error:", e)
# if "maximum context" in str(e):
# current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
# offset = int(str(e)[current_tokens_index:current_tokens_index+4])
# conclusion_prompt_token = offset+800+150
# chat_conclusion_text = self.chat_conclusion(text=text, conclusion_prompt_token=conclusion_prompt_token)
# # htmls.append(chat_conclusion_text)
# # htmls.append("\n"*4)
# summary_text += "\n <Conclusion>:\n" + chat_conclusion_text
# print("===========================================================================")
# print(summary_text)
# try:
# chat_ppt_text = self.chat_ppt(text=summary_text)
# except Exception as e:
# print("ppt_error:", e)
# if "maximum context" in str(e):
# current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
# offset = int(str(e)[current_tokens_index:current_tokens_index+4])
# ppt_prompt_token = offset+1000+150
# chat_ppt_text = self.chat_ppt(text=summary_text, ppt_prompt_token=ppt_prompt_token)
# htmls.append(chat_ppt_text)
# # # Combine everything into a single file and save it.
# date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
# try:
# export_path = os.path.join(self.root_path, 'export')
# os.makedirs(export_path)
# except:
# pass
# mode = 'w' if paper_index == 0 else 'a'
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title[:80])+"."+self.file_format)
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# htmls = []
with open("./test/mydata.json", "w") as out_file:
json.dump(mydata, out_file)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text, conclusion_prompt_token = 800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
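# Note: with this reset condition the key at the last index of chat_api_list is never used
# when more than one key is configured; a modulo rotation such as
# `self.cur_api = (self.cur_api + 1) % len(self.chat_api_list)` would cycle through every key.
# The same pattern is repeated in chat_method and chat_summary.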
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-conclusion_prompt_token)/text_token)
clip_text = text[:clip_text_index]
messages=[
{"role": "system", "content": "You are a reviewer in the field of ["+self.key_word+"] and you need to critically review this article"}, # ChatGPT role
{"role": "assistant", "content": "This is the <Summary> and <Conclusion> part of an English literature, where <Summary> you have already summarized, but <Conclusion> part, I need your help to summarize the following questions:"+clip_text}, # background knowledge; see the OpenReview review process for reference
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# The prompt is written in English to use fewer tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text, method_prompt_token = 800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-method_prompt_token)/text_token)
clip_text = text[:clip_text_index]
messages=[
{"role": "system", "content": "You are a researcher in the field of ["+self.key_word+"] who is good at summarizing papers using concise statements"}, # ChatGPT role
{"role": "assistant", "content": "This is the <Summary> and <Method> part of an English document, where <Summary> you have summarized, but the <Method> part, I need your help to read and summarize the following questions."+clip_text}, # background knowledge
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token = 1100):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-summary_prompt_token)/text_token)
clip_text = text[:clip_text_index]
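# The clip above is a character-level heuristic: the character budget is scaled by
# (allowed tokens / measured tokens) so that the clipped text plus the prompt should fit the
# 4096-token context window; it approximates, rather than exactly enforces, the token limit.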
contents=["You are a researcher in the field of ["+self.key_word+"] who is good at summarizing papers using concise statements",
"This is the title, abstract, introduction and the first page info of an English document seperated by |: "+clip_text+" I need your help to read and summarize the following questions: ",
"Mark the title of the paper \n Follow the format of the output that follows: Title: xxx\n\n ",
"List all the authors' names \n Follow the format of the output that follows: Authors: xxx\n\n ",
"Mark the first author's affiliation \n Follow the format of the output that follows: Affiliation: xxx\n\n ",
"Mark the keywords of this article \n Follow the format of the output that follows: Keywords: xxx\n\n ",
"Link to the paper, Github code link (if available, fill in Github:None if not) \n Follow the format of the output that follows: Urls: xxx or xxx , xxx \n\n ",
"""Summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals? \n Follow the format of the output that follows:
Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n """,
"Make sure the statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. "
]
msg_list=[]
for i in range(2,5):
msg_dict={}
messages=[
{"role": "system", "content": contents[0]},
{"role": "assistant", "content": contents[1]},
{"role": "user", "content":contents[i]+contents[8]},
]
# print(messages)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
# print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
# print(result)
msg_dict["instruction"]=contents[i]+contents[8]
msg_dict["input"]= contents[0]+contents[1]
msg_dict["output"]=result
msg_list.append(msg_dict)
# print(msg_list)
return msg_list
# def chat_ppt(self, text, ppt_prompt_token = 1100):
# openai.api_key = self.chat_api_list[self.cur_api]
# self.cur_api += 1
# self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
# text_token = len(self.encoding.encode(text))
# clip_text_index = int(len(text)*(self.max_token_num-ppt_prompt_token)/text_token)
# clip_text = text[:clip_text_index]
# messages=[
# {"role": "system", "content": "You are a tutor in the field of ["+self.key_word+"] who is good at creating powerpoint slides from research papers."},
# {"role": "assistant", "content": "This is the <Summary>, <Method> and <Conclusion> part of an English document where all you have summarized, I need your help to read and summarize the following questions: "+clip_text},
# {"role": "user", "content": """
# Follow the format of the output that follows:
# # Title of Presentation: xxx\n
# # Section 1: Introduction
# - Brief overview of the topic: xxx\n
# - Key points to be covered: xxx\n
# # Section 2: Background Information
# - Historical context: xxx\n
# - Key figures and events: xxx\n
# - Relevant theories: xxx\n
# # Section 3: Main Points
# - What dataset did this paper use? xxx\n
# - What is the process of the proposed method? xxx\n
# - What is the performance of the proposed method? Please note down its performance metrics. xxx\n
# - What are the baseline models and their performances? Please note down these baseline methods. xxx\n
# # Section 4: Case Study or Example
# - Real-world example: xxx\n
# - Analyze how this topic applies to case study: xxx\n
# # Section 5: Conclusion
# - Summary of key points: xxx\n
# - Takeaways for the audience: xxx\n
# - Future directions for research or practice: xxx\n
# # References
# - List of sources cited in the slides: xxx\n
# Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
# """.format(self.language, self.language, self.language)},
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# )
# result = ''
# for choice in response.choices:
# result += choice.message.content
# print("summary_result:\n", result)
# print("prompt_token_used:", response.usage.prompt_tokens,
# "completion_token_used:", response.usage.completion_tokens,
# "total_token_used:", response.usage.total_tokens)
# print("response_time:", response.response_ms/1000.0, 's')
# return result
def export_to_markdown(self, text, file_name, mode='w'):
# The markdown module could be used to convert the text to HTML format:
# html = markdown.markdown(text)
# Open the file with the requested mode
with open(file_name, mode, encoding="utf-8") as f:
# Write the content to the file
f.write(text)
# Print out the reader's information
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def main(args):
# Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
# Decide whether the input is a directory or a single file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) # current directory path
for filename in files:
# If a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
mydata=[]
reader1.summary_with_chat(mydata,paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(mydata=[], paper_list=paper_list) # pass an empty list; summary_with_chat requires a mydata argument
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pdf_path", type=str, default='./test/demo.pdf', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: ChatGPT robot', help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning', help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot', help="the filter key words; every word must appear in the abstract for a paper to be selected")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False, help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="export file format; md works best when saving images, otherwise txt keeps the layout clean")
parser.add_argument("--language", type=str, default='en', help="output language; use en for English")
args = parser.parse_args()
import time
start_time = time.time()
main(args=args)
print("summary time:", time.time() - start_time)
| [
"This is the <Summary> and <Method> part of an English document, where <Summary> you have summarized, but the <Method> part, I need your help to read and summarize the following questions.PLACEHOLDER",
"This is the <Summary> and <Conclusion> part of an English literature, where <Summary> you have already summarized, but <Conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] and you need to critically review this article",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] who is good at summarizing papers using concise statements",
"You are a researcher in the field of [",
"You are a reviewer in the field of ["
] |
2024-01-10 | YanJiaHuan/AI_Tutor | test~get_instruct.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
# Define the Reader class
class Reader:
# Initializer: set the reader's attributes
def __init__(self, key_word, root_path='./'):
self.key_word = key_word
self.root_path = root_path
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the config file
self.config.read('./test/apikey.ini')
# Get the value for a given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("cl100k_base")
# self.encoding = tiktoken.encoding_for_model("gpt-4")
def summary_with_chat(self, paper_info):
try:
output = self.chat_summary(text=paper_info)
except Exception as e:
print("summary_error:", e)
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
offset = int(str(e)[current_tokens_index:current_tokens_index+4])
summary_prompt_token = offset+1000+150
output = self.chat_summary(text=paper_info, summary_prompt_token=summary_prompt_token)
return output
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token = 1100):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list) - 1 else self.cur_api
summary_prompt_token = 1000
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text) * (self.max_token_num - summary_prompt_token) / text_token)
clip_text = text[:clip_text_index]
# print('=======================================================================================================')
# print(clip_text)
# print('=======================================================================================================')
messages = [
{"role": "system",
"content": "You are a researcher in the field of [" + self.key_word + "] who is good at summarizing papers using concise statements"},
{"role": "assistant",
"content": "This is the first page of a paper including title, author, link, abstract and introduction. I need your help to read and summarize the following questions: " + clip_text},
{"role": "user", "content": """
You need to answer the following questions:
1. Mark the title of the given paper
2. List all the authors' names
3. Mark the keywords of this paper and give the definitions of each keyword
4. Summarize the given introduction to generate the research background of this paper
5. List all the research methodologies proposed by this paper and summarize their details
6. Give a conclusion about this paper's major achievements and breakthroughs
Follow the format of the output that follows:
||1||xxx\n
||2||xxx\n
||3||xxx\n
||4||xxx\n
||5||xxx\n
||6||xxx\n
Make sure the statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
"""},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
# print("summary_result:\n", result)
# print("prompt_token_used:", response.usage.prompt_tokens,
# "completion_token_used:", response.usage.completion_tokens,
# "total_token_used:", response.usage.total_tokens)
# print("response_time:", response.response_ms / 1000.0, 's')
return result
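# The prompt asks the model to mark each answer with "||1||" ... "||6||" delimiters; main()
# below relies on these markers to extract the individual answers with a regular expression.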
# Print out the reader's information
def show_info(self):
print(f"Key word: {self.key_word}")
def main():
reader = Reader(key_word='natural language processing')
reader.show_info()
instruction = ["Mark the title of the given paper.","List all the authors' names.","Mark the keywords of this paper and give their definitions.","Summarize the given introduction to generate the research background of this paper.","List all the research methodologies proposed by this paper and summarize their details.","Give a conclusion about this paper's major achievements and breakthroughs."]
with open('./test/mydata.json', 'r') as f:
papers = json.load(f)
instruct_list=[]
from tqdm import tqdm
for paper in tqdm(papers[1261:], desc="Prepare the instruction", unit="paper"):
# for paper in tqdm(papers, desc="Prepare the instruction", unit="paper"):
chatgpt_output=reader.summary_with_chat(paper_info=paper['paper_info'])
for n in range(6):
if n != 5:
pattern = r"\|\|{i}\|\|((.|\n)*)\|\|{j}".format(i=n+1,j=n+2)
else:
pattern = r"\|\|{i}\|\|((.|\n)*)".format(i=n+1)
result = re.search(pattern, chatgpt_output)
if result:
extracted_str = result.group(1)
else:
continue
instruct = {
"instruction": instruction[n],
"input": paper['paper_info'],
"output": extracted_str,
}
instruct_list.append(instruct)
# print(instruct_list)
break
# with open("./test/myinstruct.json", "w") as out_file:
# json.dump(instruct_list, out_file)
if __name__ == '__main__':
import time
start_time = time.time()
main()
print("summary time:", time.time() - start_time)
| [
" \n You need to answer the following questions:\n 1. Mark the title of the given paper\n 2. List all the authors' names\n 3. Mark the keywords of this paper and give the definitions of each keyword\n 4. Summarize the given introduction to generate the research background of this paper\n 5. List all the research methodologies proposed by this paper and summarize their details\n 6. Give a conclusion about this paper's major achievements and breakthroughs\n \n Follow the format of the output that follows:\n ||1||xxx\n\n ||2||xxx\n\n ||3||xxx\n\n ||4||xxx\n\n ||5||xxx\n\n ||6||xxx\n\n Make sure the statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
"] who is good at summarizing papers using concise statements",
"This is the first page of a paper including title, author, link, abstract and introduction. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"1000"
] |
2024-01-10 | little51/FastChat | fastchat~serve~api_provider.py | """Call API providers."""
import os
import random
import time
import requests
from fastchat.utils import build_logger
# bard_api_stream_iter below uses requests and WORKER_API_TIMEOUT; the timeout constant is
# assumed to come from fastchat.constants, where upstream FastChat defines it.
from fastchat.constants import WORKER_API_TIMEOUT
logger = build_logger("gradio_web_server", "gradio_web_server.log")
def openai_api_stream_iter(model_name, messages, temperature, top_p, max_new_tokens):
import openai
# Make requests
gen_params = {
"model": model_name,
"prompt": messages,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = openai.ChatCompletion.create(
model=model_name, messages=messages, temperature=temperature, stream=True
)
text = ""
for chunk in res:
text += chunk["choices"][0]["delta"].get("content", "")
data = {
"text": text,
"error_code": 0,
}
yield data
def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):
import anthropic
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = c.completion_stream(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_new_tokens,
temperature=temperature,
top_p=top_p,
model=model_name,
stream=True,
)
for chunk in res:
data = {
"text": chunk["completion"],
"error_code": 0,
}
yield data
def bard_api_stream_iter(state):
# TODO: we will use the official PaLM 2 API sooner or later,
# and we will update this function accordingly. So here we just hard code the
# Bard worker address. It is going to be deprecated anyway.
# Make requests
gen_params = {
"model": "bard",
"prompt": state.messages,
}
logger.info(f"==== request ====\n{gen_params}")
response = requests.post(
"http://localhost:18900/chat",
json={
"content": state.messages[-2][-1],
"state": state.session_state,
},
stream=False,
timeout=WORKER_API_TIMEOUT,
)
resp_json = response.json()
state.session_state = resp_json["state"]
content = resp_json["content"]
# The Bard Web API does not support streaming yet. Here we have to simulate
# the streaming behavior by adding some time.sleep().
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(1, 5)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
def init_palm_chat(model_name):
import vertexai # pip3 install google-cloud-aiplatform
from vertexai.preview.language_models import ChatModel
project_id = os.environ["GCP_PROJECT_ID"]
location = "us-central1"
vertexai.init(project=project_id, location=location)
chat_model = ChatModel.from_pretrained(model_name)
chat = chat_model.start_chat(examples=[])
return chat
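# Note: init_palm_chat assumes the GCP_PROJECT_ID environment variable is set and that
# Application Default Credentials with Vertex AI access are available in the runtime environment.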
def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):
parameters = {
"temperature": temperature,
"top_p": top_p,
"max_output_tokens": max_new_tokens,
}
gen_params = {
"model": "bard",
"prompt": message,
}
gen_params.update(parameters)
logger.info(f"==== request ====\n{gen_params}")
response = chat.send_message(message, **parameters)
content = response.text
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(1, 5)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
| [] |
2024-01-10 | microsoft/LLaVA-Med | llava~eval~eval_gpt_review_visual.py | import argparse
import json
import os
import openai
import tqdm
import ray
import time
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
while True:
try:
response = openai.ChatCompletion.create(
model='gpt-4',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(1)
print('success!')
return response['choices'][0]['message']['content']
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
parser.add_argument('-c', '--context')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
ray.init()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
review_file = open(f'{args.output}', 'w')
context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
image_to_context = {context['image']: context for context in context_list}
js_list = []
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
inst = image_to_context[ques['image']]
cap_str = '\n'.join(inst['captions'])
box_str = '\n'.join([f'{instance["category"]}: {instance["bbox"]}' for instance in inst['instances']])
category = json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
assert False, f"Visual QA category not found in rule file: {category}."
prompt = rule['prompt']
role = rule['role']
content = (f'[Context]\n{cap_str}\n\n{box_str}\n\n'
f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
js_list.append({
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1.get('answer_id', ans1['question_id']),
'answer2_id': ans2.get('answer_id', ans2['answer_id']),
'category': category})
idx += 1
handles.append(get_eval.remote(content, args.max_tokens))
# To avoid the rate limit set by OpenAI
time.sleep(1)
reviews = ray.get(handles)
for idx, review in enumerate(reviews):
scores = parse_score(review)
js_list[idx]['content'] = review
js_list[idx]['tuple'] = scores
review_file.write(json.dumps(js_list[idx]) + '\n')
review_file.close()
| [
"You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | microsoft/LLaVA-Med | llava~eval~qa_baseline_gpt4_translate.py | """Generate answers with GPT-3.5"""
# Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import argparse
import json
import os
import time
import concurrent.futures
import openai
import tqdm
import shortuuid
import os, json
import requests
from azure.identity import ManagedIdentityCredential, DefaultAzureCredential, AzureCliCredential
MODEL = 'gpt-4'
MODEL_ID = 'gpt-4:20230527'
# MODEL = 'gpt-3.5-turbo'
# MODEL_ID = 'gpt-3.5-turbo:20230327'
def update_openai_api():
openai.api_key = os.environ.get('API_KEY', 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
api_type = os.environ.get('API_TYPE', 'azure')
model = os.environ.get('MODEL', 'gpt-4')
engine = os.environ.get('ENGINE', 'gpt-35-turbo')
if api_type == 'azure':
openai.api_type = 'azure'
openai.api_base = os.environ.get('API_BASE', '')
openai.api_version = os.environ.get('API_VERSION', '2023-03-15-preview')
return {'engine': engine}
else:
global GPT_arch
GPT_arch = "GPT-4"
return {'model': model}
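# update_openai_api configures the module-level settings of the pre-1.0 openai-python client
# (api_type/api_base/api_version for Azure) and returns the kwargs that select either an Azure
# deployment ("engine") or an OpenAI model ("model") for openai.ChatCompletion.create.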
def get_answer(openai_engine_kwargs, question_id: int, question: str, max_tokens: int):
ans = {
'answer_id': shortuuid.uuid(),
'question_id': question_id,
'model_id': MODEL_ID,
}
reach_valid_answer = False
while not reach_valid_answer:
try:
messages=[{
'role': 'system',
'content': 'You are a helpful assistant.'
}, {
'role': 'user',
'content': question,
}]
SCOPE = "https://ml.azure.com"
AZURE_ENDPOINT_URL_PATTERN = "https://{}/v1/engines/davinci/chat/completions"
model = "text-alpha-002" # this doesn't do anything...
deployment = "aims1.eastus.inference.ml.azure.com"
url = AZURE_ENDPOINT_URL_PATTERN.format(deployment, model)
credential = AzureCliCredential()
token = credential.get_token(SCOPE).token
headers = {
"Authorization": f"Bearer {token}",
"azureml-model-deployment": "gpt4-v2", # gpt4-v2 gpt4
"Openai-Internal-AllowChatCompletion": "true",
"Openai-Internal-AllowedSpecialTokens": "1",
"Openai-Internal-AllowedOutputSpecialTokens": "1",
"Openai-Internal-HarmonyVersion": "harmony_v4.0_no_system_message_content_type",
}
# {"messages": [{"name":"history","role":"user","content":"<|im_start|> tell me about the Emperor Ashoka"}]
request_data = {"messages": messages, "max_tokens":max_tokens, "n": 1}
# request_data = {"messages": messages, "max_tokens":max_tokens, "temperature":temperature, "n": 1}
response = requests.post(url, json=request_data, headers=headers)
print(response.json())
content = response.json()['choices'][0]['message']['content']
content = content.strip()
ans['text'] = content
# response = openai.ChatCompletion.create(
# **openai_engine_kwargs,
# messages=[{
# 'role': 'system',
# 'content': 'You are a helpful assistant.'
# }, {
# 'role': 'user',
# 'content': question,
# }],
# max_tokens=max_tokens,
# # stop=["\n", "<|endoftext|>"]
# )
# ans['text'] = response['choices'][0]['message']['content']
reach_valid_answer = True
return ans
except Exception as e:
print('[ERROR]', e)
ans['text'] = '#ERROR#'
time.sleep(20)
return ans
def load_jsonl(path):
data=[]
with open(path, 'r', encoding='utf-8') as reader:
for line in reader:
data.append(json.loads(line))
return data
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT answer generation.')
parser.add_argument('-q', '--input')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=2048, help='maximum number of tokens produced in the output')
args = parser.parse_args()
pred = load_jsonl(args.input)
# pred = pred[:3]
openai_engine_kwargs = update_openai_api()
print(openai_engine_kwargs)
answers = []
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
futures = []
for idx, p in enumerate(pred):
qid, question = p['question_id'], p['text']
question = f'Translate the following text into Chinese. Do not be verbose. \n The original sentence is: {question} \n 对应的中文翻译是: \n'
future = executor.submit(get_answer, openai_engine_kwargs, idx, question, args.max_tokens)
futures.append(future)
# import pdb; pdb.set_trace()
for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
answers.append(future.result())
answers.sort(key=lambda x: x['question_id'])
output_list = []
for idx, p in enumerate(pred):
for a in answers:
if idx == a['question_id']:
p['text']= a['text']
output_list.append(p)
# import pdb; pdb.set_trace()
with open(os.path.expanduser(args.output), 'w', encoding='utf-8') as f:
table = [json.dumps(out, indent=0, ensure_ascii=False) for out in output_list]
f.write('\n'.join(table))
| [
"You are a helpful assistant.",
"Translate the following text into Chinese. Do not be verbose. \n The original sentence is: question98cbcae0-4cef-4f6a-89f7-0bf3ed2b263d \n 对应的中文翻译是: \n"
] |
2024-01-10 | microsoft/LLaVA-Med | llava~eval~eval_multimodal_chat_gpt_score.py | import sys
import json
import argparse
from pprint import pprint
from copy import deepcopy
from collections import defaultdict
sys.path.append("llava")
from openai_api import call_async
class LLMEvalPromptGenerator:
instruct_prompt = """We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with caption describing the same image.
Please rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.
Please first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."""
role = 'Assistant'
@staticmethod
def conv_to_str(fig_label, fig_caption, fig_inline_mention, question, ans1, ans2):
return (f'[Context]\n'
f'Figure Caption:\n{fig_label}: {fig_caption}\n\n'
f'Figure Context:\n\t- {fig_inline_mention}\n\n'
f'[Question]\n{question}\n\n'
f'[{LLMEvalPromptGenerator.role} 1]\n{ans1}\n\n[End of {LLMEvalPromptGenerator.role} 1]\n\n'
f'[{LLMEvalPromptGenerator.role} 2]\n{ans2}\n\n[End of {LLMEvalPromptGenerator.role} 2]\n\n'
f'[System]\n{LLMEvalPromptGenerator.instruct_prompt}\n\n')
@staticmethod
def compare_messages_gen(sample):
messages = [
{"role": "system", "content": """'You are a helpful and precise assistant for checking the quality of the answer."""},
]
messages.append({"role": "user", "content": LLMEvalPromptGenerator.conv_to_str(sample['fig_label'], sample['fig_caption'], sample['in_text_mention'], sample['question'], sample['ans1'], sample['ans2'])})
return messages
class ChatEvaluation:
# Calculate precision, recall, F1 overall and for each domain.
@staticmethod
def get_domain(x):
for domain in ['chest_xray', 'mri', 'histology', 'gross', 'ct_scan']:
in_domain = x['domain'][domain]
if in_domain:
return domain
@staticmethod
def get_avg(x):
return sum([float(y) for y in x])/len(x)
@staticmethod
def eval(samples):
predictions = [(x['question_id'], x['type'], ChatEvaluation.get_domain(x), x['result'].split('\n')[0].split(' ')) for x in samples]
score_type_dict = defaultdict(lambda: defaultdict(list))
for q_id, q_type, domain, (a1_score, a2_score) in predictions:
score_type_dict[q_type][1].append(a1_score)
score_type_dict[q_type][2].append(a2_score)
score_type_dict['all'][1].append(a1_score)
score_type_dict['all'][2].append(a2_score)
score_type_dict[domain][1].append(a1_score)
score_type_dict[domain][2].append(a2_score)
result = defaultdict(dict)
for q_type, score_dict in score_type_dict.items():
result[q_type]['gpt4_score'] = ChatEvaluation.get_avg(score_dict[1])
result[q_type]['pred_score'] = ChatEvaluation.get_avg(score_dict[2])
result[q_type]['pred_relative_score'] = ChatEvaluation.get_avg([float(s2)/float(s1) for s1, s2 in zip(score_dict[1], score_dict[2])])*100
result[q_type]['data_size'] = len(score_dict[1])
# print results
pprint(result)
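# pred_relative_score reports the candidate answer's score as a percentage of the GPT-4
# reference answer's score, averaged per question, so 100 means parity with the GPT-4 answers.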
def main(args):
# Load input data
answer_data = []
with open(args.input_path) as f:
for line in f:
answer_data.append(json.loads(line))
question_data = []
with open(args.question_input_path) as f:
for line in f:
question_data.append(json.loads(line))
# Merge question and answer input data
samples = []
for question, answer in zip(question_data, answer_data):
sample = deepcopy(question)
question['question'] = sample['text'][:-8]
question['ans1'] = sample.pop('gpt4_answer')
question['ans2'] = answer['text']
samples.append(question)
samples_question_ids = set(x['question_id'] for x in samples)
# Generate GPT-4 evaluation of indivdual answers between model answer and GPT-4 answer
results = []
BATCH_SIZE = 3
for i in range(30):
result_question_ids = set(result['question_id'] for result in results)
batch = []
counter = 0
for sample in samples:
if sample['question_id'] in result_question_ids:
continue
batch.append(sample)
if len(batch)>=BATCH_SIZE:
async_results = call_async(batch, lambda x: LLMEvalPromptGenerator.compare_messages_gen(x))
results.extend(async_results)
print(f"Result Size: {len(results)}")
batch = []
async_results = call_async(batch, lambda x: LLMEvalPromptGenerator.compare_messages_gen(x))
results.extend(async_results)
print(f"Result Size: {len(results)}")
# Print number of questions and results
print(f'all samples: {len(samples_question_ids)}')
print(f'ran samples: {len(result_question_ids)}')
print(f'to be run samples: {len(samples_question_ids-result_question_ids)}')
# Write GPT-4 evaluation outputs to output_path
with open(args.output_path, 'w') as f:
for line in results:
f.write(json.dumps(line)+'\n')
# Perform Evaluation for all results
ChatEvaluation().eval(results)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--question_input_path', type=str, default='data/eval/llava_med_eval_qa50_qa.jsonl')
parser.add_argument('--input_path', type=str, default='dbfs:/mnt/hanoverdev/scratch/clwon/llava/test/answers/test50/2023-05-10_med-pretrain-364m-v1-1epoch.jsonl')
parser.add_argument('--output_path', type=str, default='data/eval/llava_med_eval_qa50_qa_ans.jsonl')
args = parser.parse_args()
main(args)
| [
"in_text_mention",
"fig_caption",
"question",
"We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with caption describing the same image.\n Please rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\n Please first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space. In the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment.",
"ans1",
"ans2",
"'You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | microsoft/LLaVA-Med | llava~instruct~instruct_generate.py | import sys
import time
import json
import argparse
import asyncio
import itertools
from pprint import pprint
import instruct_few_shot_examples
sys.path.append("llava")
from openai_api import call_async
conv_to_str = lambda conv: "\n\n".join([("User: " if x["from"] == "human" else "Assistant: ") + x["value"] for x in conv])
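# conv_to_str flattens a list of conversation turns into plain text, prefixing each turn with
# "User: " or "Assistant: " based on its "from" field; it is used to render the few-shot examples below.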
class PromptGenerator:
@staticmethod
def few_shot_messages_gen(query_context, use_inline_mentions=True):
messages = [
{"role": "system", "content": """You are an AI assistant specialized in biomedical topics.
You are provided with a text description (Figure Caption) of a figure image from a biomedical research paper. In some cases, you may have additional text (Figure Context) that mentions the image. Unfortunately, you don't have access to the actual image.
Your task is to generate a conversation between a person (User) inquiring about the image and you (Assistant) responding to their questions. The conversation should proceed as though both the User and Assistant are viewing the image, while not referring to the text information (Figure Caption and Figure Context).
Below are requirements for generating the questions and answers in the conversation:
- Avoid quoting or referring to specific facts, terms, abbreviations, dates, numbers, or names, as these may reveal the conversation is based on the text information, rather than the image itself. Focus on the visual aspects of the image that can be inferred without the text information.
- Do not use phrases like "mentioned", "caption", "context" in the conversation. Instead, refer to the information as being "in the image."
- Ensure that questions are diverse and cover a range of visual aspects of the image.
- The conversation should include at least 2-3 turns of questions and answers about the visual aspects of the image.
- Answer responsibly, avoiding overconfidence, and do not provide medical advice or diagnostic information. Encourage the user to consult a healthcare professional for advice.
"""},
]
for ex in instruct_few_shot_examples.fs:
messages += [
{"role": "user", "content": PromptGenerator.context_gen(ex, use_inline_mentions)},
{"role": "assistant", "content": conv_to_str(ex["conversations"])},
]
messages.append({"role": "user", "content": query_context})
return messages
@staticmethod
def context_gen(sample, use_inline_mentions=True):
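        # Build the "Figure Caption" block, plus an optional "Figure Context" block from any in-text mentions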
ctx = []
if use_inline_mentions and sample["in_text_mention"]:
for sent in sample["in_text_mention"]:
if isinstance(sent, dict):
sent = sent["tokens"]
ctx.append(sent)
ret = f"Figure Caption:\n{sample['fig_label']}: {sample['fig_caption']}"
if len(ctx):
ret += "\n\nFigure Context:\n\t- {ctx}".format(ctx="\n\t- ".join(ctx))
return ret
@staticmethod
def wrap_gen_message(sample, use_inline_mentions=False):
text = PromptGenerator.context_gen(sample, use_inline_mentions=use_inline_mentions)
context = PromptGenerator.few_shot_messages_gen(text, use_inline_mentions=use_inline_mentions)
return context
def main(args):
with open(args.input_path) as f:
domain_dict = json.load(f)
results = []
for i in range(3):
print(f'round {i}')
result_pair_ids = set(result['pair_id'] for result in results)
batch = []
counter = 0
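        # Interleave samples across domains (zip_longest) so each batch mixes domains, stopping at max_size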
for cycle_idx, samples in enumerate(itertools.zip_longest(*domain_dict.values())):
if counter>=args.max_size:
break
for domain_idx, sample in enumerate(samples):
if not sample:
continue
counter+=1
if counter>=args.max_size:
break
if sample['pair_id'] in result_pair_ids:
continue
batch.append(sample)
if len(batch)>=args.batch_size:
async_results = call_async(batch, lambda x: PromptGenerator.wrap_gen_message(x, use_inline_mentions=args.use_inline_mentions))
results.extend(async_results)
print(f"Result Size: {len(results)}")
batch = []
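        # Flush any remaining samples in the final, possibly partial, batch for this round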
async_results = call_async(batch, lambda x: PromptGenerator.wrap_gen_message(x, use_inline_mentions=args.use_inline_mentions))
results.extend(async_results)
print(f"Result Size: {len(results)}")
with open(args.output_path, 'w') as f:
for line in results:
f.write(json.dumps(line)+'\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str, default='data/instruct/llava_med_instruct_fig_captions.json')
parser.add_argument('--output_path', type=str, default='data/instruct/llava_med_instruct_fig_captions_gen.json')
parser.add_argument('--use_inline_mentions', type=bool, default=False)
parser.add_argument('--batch_size', type=int, default=3)
parser.add_argument('--max_size', type=int, default=60000)
args = parser.parse_args()
main(args)
| [
"conversations",
"You are an AI assistant specialized in biomedical topics.\n\n You are provided with a text description (Figure Caption) of a figure image from a biomedical research paper. In some cases, you may have additional text (Figure Context) that mentions the image. Unfortunately, you don't have access to the actual image.\n\n Your task is to generate a conversation between a person (User) inquiring about the image and you (Assistant) responding to their questions. The conversation should proceed as though both the User and Assistant are viewing the image, while not referring to the text information (Figure Caption and Figure Context). \n\n Below are requirements for generating the questions and answers in the conversation:\n - Avoid quoting or referring to specific facts, terms, abbreviations, dates, numbers, or names, as these may reveal the conversation is based on the text information, rather than the image itself. Focus on the visual aspects of the image that can be inferred without the text information.\n - Do not use phrases like \"mentioned\", \"caption\", \"context\" in the conversation. Instead, refer to the information as being \"in the image.\"\n - Ensure that questions are diverse and cover a range of visual aspects of the image.\n - The conversation should include at least 2-3 turns of questions and answers about the visual aspects of the image.\n - Answer responsibly, avoiding overconfidence, and do not provide medical advice or diagnostic information. Encourage the user to consult a healthcare professional for advice.\n "
] |
2024-01-10 | microsoft/LLaVA-Med | llava~eval~eval_gpt_review.py | import argparse
import json
import os
import openai
import tqdm
import ray
import time
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
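    # Retry indefinitely: rate-limit errors retry immediately, other errors are logged and retried after a short sleep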
while True:
try:
response = openai.ChatCompletion.create(
model='gpt-4',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(1)
print('success!')
return response['choices'][0]['message']['content']
def parse_score(review):
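    # The first line of the review is expected to hold the two scores, e.g. "8 7" (commas tolerated); otherwise return [-1, -1]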
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
# parser.add_argument('-a', '--answer')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
ray.init()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
review_file = open(f'{args.output}', 'w')
js_list = []
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
# if idx == 1:
# break
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
category = json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
rule = rule_dict['default']
prompt = rule['prompt']
role = rule['role']
content = (f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
js_list.append({
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1['answer_id'],
'answer2_id': ans2['answer_id'],
'category': category})
idx += 1
handles.append(get_eval.remote(content, args.max_tokens))
# To avoid the rate limit set by OpenAI
time.sleep(1)
reviews = ray.get(handles)
for idx, review in enumerate(reviews):
scores = parse_score(review)
js_list[idx]['content'] = review
js_list[idx]['tuple'] = scores
review_file.write(json.dumps(js_list[idx]) + '\n')
review_file.close()
| [
"You are a helpful and precise assistant for checking the quality of the answer."
] |
2024-01-10 | erenyasarkurt/OpenAI-AWS-Lambda-Layer | lambda_function.py | import openai
import json
import datetime
def query_completion(prompt: str, engine: str = 'text-davinci-003', temperature: float = 0.5, max_tokens: int = 1500, top_p: float = 1.0, frequency_penalty: float = 0.5, presence_penalty: float = 0.2) -> object:
"""
Function for querying GPT-3.
"""
estimated_prompt_tokens = int(len(prompt.split()) * 1.6)
estimated_answer_tokens = 2049 - estimated_prompt_tokens
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temperature,
max_tokens=min(4096-estimated_prompt_tokens, max_tokens),
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty
)
return response
def lambda_handler(event, context):
'''Provide an event that contains the following keys:
- prompt: text of an open ai prompt
'''
openai.api_key = "YOUR_KEY_HERE"
print("Init:")
print(datetime.datetime.now())
print("Event:")
print(event)
body = json.loads(event['body'])
prompt = body['prompt']
max_tokens = 1500
response = query_completion(prompt)
response_text = response['choices'][0]['text'].strip()
response = {
"statusCode": 200,
"headers": {},
"body": response_text
}
return response | [] |
2024-01-10 | THUDM/ChatGLM3 | tools_using_demo~openai_api_demo.py | import json
from openai import OpenAI
from colorama import init, Fore
from loguru import logger
from tool_register import get_tools, dispatch_tool
init(autoreset=True)
client = OpenAI(
base_url="http://127.0.0.1:8000/v1",
api_key = "xxx"
)
functions = get_tools()
def run_conversation(query: str, stream=False, functions=None, max_retry=5):
params = dict(model="chatglm3", messages=[{"role": "user", "content": query}], stream=stream)
if functions:
params["functions"] = functions
response = client.chat.completions.create(**params)
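    # Keep resolving function calls and re-querying until the model returns a final answer or retries are exhausted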
for _ in range(max_retry):
if not stream:
if response.choices[0].message.function_call:
function_call = response.choices[0].message.function_call
logger.info(f"Function Call Response: {function_call.model_dump()}")
function_args = json.loads(function_call.arguments)
tool_response = dispatch_tool(function_call.name, function_args)
logger.info(f"Tool Call Response: {tool_response}")
params["messages"].append(response.choices[0].message)
params["messages"].append(
{
"role": "function",
"name": function_call.name,
"content": tool_response, # 调用函数返回结果
}
)
else:
reply = response.choices[0].message.content
logger.info(f"Final Reply: \n{reply}")
return
else:
output = ""
for chunk in response:
content = chunk.choices[0].delta.content or ""
print(Fore.BLUE + content, end="", flush=True)
output += content
if chunk.choices[0].finish_reason == "stop":
return
elif chunk.choices[0].finish_reason == "function_call":
print("\n")
function_call = chunk.choices[0].delta.function_call
logger.info(f"Function Call Response: {function_call.model_dump()}")
function_args = json.loads(function_call.arguments)
tool_response = dispatch_tool(function_call.name, function_args)
logger.info(f"Tool Call Response: {tool_response}")
params["messages"].append(
{
"role": "assistant",
"content": output
}
)
params["messages"].append(
{
"role": "function",
"name": function_call.name,
"content": tool_response,
}
)
break
response = client.chat.completions.create(**params)
if __name__ == "__main__":
query = "你是谁"
run_conversation(query, stream=True)
logger.info("\n=========== next conversation ===========")
query = "帮我查询北京的天气怎么样"
run_conversation(query, functions=functions, stream=True)
| [] |
2024-01-10 | THUDM/ChatGLM3 | langchain_demo~tools~Weather.py | import os
import requests
from typing import Type, Any
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class WeatherInput(BaseModel):
location: str = Field(description="the location need to check the weather")
class Weather(BaseTool):
name = "weather"
description = "Use for searching weather at a specific location"
args_schema: Type[BaseModel] = WeatherInput
def __init__(self):
super().__init__()
def _run(self, location: str) -> dict[str, Any]:
api_key = os.environ["SENIVERSE_KEY"]
url = f"https://api.seniverse.com/v3/weather/now.json?key={api_key}&location={location}&language=zh-Hans&unit=c"
response = requests.get(url)
if response.status_code == 200:
data = response.json()
weather = {
"temperature": data["results"][0]["now"]["temperature"],
"description": data["results"][0]["now"]["text"],
}
return weather
else:
raise Exception(
f"Failed to retrieve weather: {response.status_code}")
| [
"Use for searching weather at a specific location"
] |
2024-01-10 | THUDM/ChatGLM3 | langchain_demo~tools~Calculator.py | import abc
import math  # required: _run eval()s expressions that reference math.sqrt / math.log
from typing import Type
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class CalculatorInput(BaseModel):
calculation: str = Field(description="calculation to perform")
class Calculator(BaseTool, abc.ABC):
name = "Calculator"
description = "Useful for when you need to calculate math problems"
args_schema: Type[BaseModel] = CalculatorInput
def __init__(self):
super().__init__()
def _run(self, calculation: str) -> str:
calculation = calculation.replace("^", "**")
if "sqrt" in calculation:
calculation = calculation.replace("sqrt", "math.sqrt")
elif "log" in calculation:
calculation = calculation.replace("log", "math.log")
return eval(calculation)
| [
"Useful for when you need to calculate math problems"
] |
2024-01-10 | THUDM/ChatGLM3 | langchain_demo~tools~DistanceConversion.py | import abc
from typing import Type
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class DistanceConversionInput(BaseModel):
distance: float = Field(description="The numerical value of the distance to convert")
unit: str = Field(description="The current unit of the distance (m, km, or feet)")
to_unit: str = Field(description="The target unit to convert the distance into (m, km, or feet)")
class DistanceConverter(BaseTool, abc.ABC):
name = "DistanceConverter"
description = "Converts distance between meters, kilometers, and feet"
args_schema: Type[BaseModel] = DistanceConversionInput
def __init__(self):
super().__init__()
def _run(self, distance: float, unit: str, to_unit: str) -> str:
unit_conversions = {
"m_to_km": 0.001,
"km_to_m": 1000,
"feet_to_m": 0.3048,
"m_to_feet": 3.28084,
"km_to_feet": 3280.84,
"feet_to_km": 0.0003048
}
if unit == to_unit:
return f"{distance} {unit} is equal to {distance} {to_unit}"
if unit == "km":
distance *= unit_conversions["km_to_m"]
elif unit == "feet":
distance *= unit_conversions["feet_to_m"]
if to_unit == "km":
converted_distance = distance * unit_conversions["m_to_km"]
elif to_unit == "feet":
converted_distance = distance * unit_conversions["m_to_feet"]
else:
converted_distance = distance # already in meters if this block is reached
return f"{distance} {unit} is equal to {converted_distance} {to_unit}"
| [
"Converts distance between meters, kilometers, and feet"
] |
2024-01-10 | THUDM/ChatGLM3 | langchain_demo~ChatGLM3.py | import ast
import json
from langchain.llms.base import LLM
from transformers import AutoTokenizer, AutoModel, AutoConfig
from typing import List, Optional
class ChatGLM3(LLM):
max_token: int = 8192
do_sample: bool = True
temperature: float = 0.8
top_p = 0.8
tokenizer: object = None
model: object = None
history: List = []
has_search: bool = False
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatGLM3"
def load_model(self, model_name_or_path=None):
model_config = AutoConfig.from_pretrained(
model_name_or_path,
trust_remote_code=True
)
self.tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
trust_remote_code=True
)
self.model = AutoModel.from_pretrained(
model_name_or_path, config=model_config, trust_remote_code=True, device_map="auto").eval()
def _tool_history(self, prompt: str):
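        # Parse the LangChain structured-chat prompt to recover the tool specifications and the latest user query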
ans = []
tool_prompts = prompt.split(
"You have access to the following tools:\n\n")[1].split("\n\nUse a json blob")[0].split("\n")
tools_json = []
for tool_desc in tool_prompts:
name = tool_desc.split(":")[0]
description = tool_desc.split(":")[1].split(", args:")[0].strip()
parameters_str = tool_desc.split("args:")[1].strip()
parameters_dict = ast.literal_eval(parameters_str)
# Extracting only the 'description' and 'type' for each parameter
params_cleaned = {}
for param, details in parameters_dict.items():
params_cleaned[param] = {'description': details['description'], 'type': details['type']}
tools_json.append({
"name": name,
"description": description,
"parameters": params_cleaned
})
ans.append({
"role": "system",
"content": "Answer the following questions as best as you can. You have access to the following tools:",
"tools": tools_json
})
query = f"""{prompt.split("Human: ")[-1].strip()}"""
return ans, query
def _extract_observation(self, prompt: str):
return_json = prompt.split("Observation: ")[-1].split("\nThought:")[0]
self.history.append({
"role": "observation",
"content": return_json
})
return
def _extract_tool(self):
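        # A non-empty metadata field on the last turn means the model requested a tool via tool_call(...)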
if len(self.history[-1]["metadata"]) > 0:
metadata = self.history[-1]["metadata"]
content = self.history[-1]["content"]
lines = content.split('\n')
for line in lines:
if 'tool_call(' in line and ')' in line and self.has_search is False:
                    # extract the argument string inside the parentheses
params_str = line.split('tool_call(')[-1].split(')')[0]
                    # parse the key=value parameter pairs
params_pairs = [param.split("=") for param in params_str.split(",") if "=" in param]
params = {pair[0].strip(): pair[1].strip().strip("'\"") for pair in params_pairs}
action_json = {
"action": metadata,
"action_input": params
}
self.has_search = True
print("*****Action*****")
print(action_json)
print("*****Answer*****")
return f"""
Action:
```
{json.dumps(action_json, ensure_ascii=False)}
```"""
final_answer_json = {
"action": "Final Answer",
"action_input": self.history[-1]["content"]
}
self.has_search = False
return f"""
Action:
```
{json.dumps(final_answer_json, ensure_ascii=False)}
```"""
def _call(self, prompt: str, history: List = [], stop: Optional[List[str]] = ["<|user|>"]):
if not self.has_search:
self.history, query = self._tool_history(prompt)
else:
self._extract_observation(prompt)
query = ""
_, self.history = self.model.chat(
self.tokenizer,
query,
history=self.history,
do_sample=self.do_sample,
max_length=self.max_token,
temperature=self.temperature,
)
response = self._extract_tool()
history.append((prompt, response))
return response
| [
"Answer the following questions as best as you can. You have access to the following tools:",
"\n",
"You have access to the following tools:\n\n",
"\n\nUse a json blob"
] |
2024-01-10 | FabienRoger/Countergen | countergen~countergen~augmentation~llmd_augmenter.py | # From Fryer, 2022 https://aclanthology.org/2022.woah-1.20.pdf
# Adapted to be usable with InstructGPT
#%%
from typing import Dict, Optional, Tuple
import countergen
from countergen.tools.api_utils import ApiConfig
import openai
from attrs import define
from countergen.tools.utils import estimate_paraphrase_length
from countergen.types import Augmenter, Category, Input
import countergen.config
DEFAULT_AUGMENTERS = {
"gender": {
"male": "Rewrite it to be about a man/about men.",
"female": "Rewrite it to be about a woman/about women.",
},
}
DEFAULT_PROMPT = """0: Here is some text: {When the doctor asked Linda to take the medicine, he smiled and gave her a lollipop.}. Rewrite it to be more scary.
1: {When the doctor told Linda to take the medicine, there had been a malicious gleam in her eye that Linda didn’t like at all.}
0: Here is some text: {they asked loudly, over the sound of the train.}. Rewrite it to be more intense.
1: {they yelled aggressively, over the clanging of the train.}
0: Here is some text: {When Mohammed left the theatre, it was already dark out}. Rewrite it to be more about the movie itself.
1: {The movie was longer than Mohammed had expected, and despite the excellent ratings he was a bit disappointed when he left the theatre.}
0: Here is some text: {next to the path}. Rewrite it to be about France.
1: {next to la Siene}
0: Here is some text: {The man stood outside the grocery store, ringing the bell.}. Rewrite it to be about clowns.
1: {The man stood outside the circus, holding a bunch of balloons.}
0: Here is some text: {the bell ringing}. Rewrite it to be more flowery.
1: {the peales of the jangling bell}
0: Here is some text: {against the tree}. Rewrite it to include the word “snow”.
1: {against the snow-covered bark of the tree}’
0: Here is some text: {__input__}. __instruction__
1: {"""
@define
class LlmdAugmenter(Augmenter):
"""Augmenter that does word substituion between its two categories.
"words" are defined by the word_regex expression.
From Fryer 2022, https://aclanthology.org/2022.woah-1.20.pdf"""
categories_instructions: Dict[Category, str]
prompt_template: str = DEFAULT_PROMPT
engine: str = "text-davinci-003"
apiconfig: Optional[ApiConfig] = None
@classmethod
def from_default(cls, name: str) -> "LlmdAugmenter":
"""Load one of the defaults datasets from "DEFAULT_AUGMENTERS"."""
if name not in DEFAULT_AUGMENTERS:
raise ValueError(f"{name} not a valid default augmenter. Choose one in {set(DEFAULT_AUGMENTERS.keys())}")
return LlmdAugmenter(DEFAULT_AUGMENTERS[name])
@property
def categories(self) -> Tuple[Category, ...]:
return tuple(self.categories_instructions.keys())
def transform(self, inp: Input, to: Category) -> Input:
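        # Fill the few-shot template with the input text and the target category's rewrite instruction, then complete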
apiconfig = self.apiconfig or countergen.config.apiconfig
instruction = self.categories_instructions[to]
prompt = self.prompt_template.replace("__input__", inp).replace("__instruction__", instruction)
completion = openai.Completion.create(
engine=self.engine,
prompt=prompt,
max_tokens=estimate_paraphrase_length(inp),
temperature=1,
top_p=0.7, # LLM-D has top_k=40, but not available
stream=False,
**apiconfig.get_config(),
)["choices"][0]["text"]
return completion.split("}")[0]
| [
"__instruction__",
"__input__",
"0: Here is some text: {When the doctor asked Linda to take the medicine, he smiled and gave her a lollipop.}. Rewrite it to be more scary.\n1: {When the doctor told Linda to take the medicine, there had been a malicious gleam in her eye that Linda didn’t like at all.}\n0: Here is some text: {they asked loudly, over the sound of the train.}. Rewrite it to be more intense.\n1: {they yelled aggressively, over the clanging of the train.}\n0: Here is some text: {When Mohammed left the theatre, it was already dark out}. Rewrite it to be more about the movie itself.\n1: {The movie was longer than Mohammed had expected, and despite the excellent ratings he was a bit disappointed when he left the theatre.}\n0: Here is some text: {next to the path}. Rewrite it to be about France.\n1: {next to la Siene}\n0: Here is some text: {The man stood outside the grocery store, ringing the bell.}. Rewrite it to be about clowns.\n1: {The man stood outside the circus, holding a bunch of balloons.}\n0: Here is some text: {the bell ringing}. Rewrite it to be more flowery.\n1: {the peales of the jangling bell}\n0: Here is some text: {against the tree}. Rewrite it to include the word “snow”.\n1: {against the snow-covered bark of the tree}’\n0: Here is some text: {__input__}. __instruction__\n1: {"
] |
2024-01-10 | dylanholmes/griptape | griptape~drivers~prompt~cohere_prompt_driver.py | import cohere
from attr import define, field, Factory
from griptape.artifacts import TextArtifact
from griptape.drivers import BasePromptDriver
from griptape.tokenizers import CohereTokenizer
from griptape.utils import PromptStack
@define
class CoherePromptDriver(BasePromptDriver):
"""
Attributes:
api_key: Cohere API key.
model: Cohere model name. Defaults to `xlarge`.
client: Custom `cohere.Client`.
tokenizer: Custom `CohereTokenizer`.
"""
api_key: str = field(kw_only=True)
model: str = field(default=CohereTokenizer.DEFAULT_MODEL, kw_only=True)
client: cohere.Client = field(
default=Factory(lambda self: cohere.Client(self.api_key), takes_self=True), kw_only=True
)
tokenizer: CohereTokenizer = field(
default=Factory(lambda self: CohereTokenizer(model=self.model, client=self.client), takes_self=True),
kw_only=True
)
def try_run(self, prompt_stack: PromptStack) -> TextArtifact:
prompt = self.prompt_stack_to_string(prompt_stack)
result = self.client.generate(
prompt=prompt,
model=self.model,
temperature=self.temperature,
end_sequences=self.tokenizer.stop_sequences,
max_tokens=self.max_output_tokens(prompt)
)
if len(result.generations) == 1:
generation = result.generations[0]
return TextArtifact(
value=generation.text.strip()
)
else:
raise Exception("Completion with more than one choice is not supported yet.")
| [] |
2024-01-10 | dylanholmes/griptape | griptape~loaders~text_loader.py | from __future__ import annotations
from pathlib import Path
from attr import field, define, Factory
from griptape import utils
from griptape.artifacts import TextArtifact
from griptape.chunkers import TextChunker
from griptape.loaders import BaseLoader
from griptape.tokenizers import OpenAiTokenizer
@define
class TextLoader(BaseLoader):
MAX_TOKEN_RATIO = 0.5
tokenizer: OpenAiTokenizer = field(
default=Factory(lambda: OpenAiTokenizer()),
kw_only=True
)
max_tokens: int = field(
default=Factory(lambda self: round(self.tokenizer.max_tokens * self.MAX_TOKEN_RATIO), takes_self=True),
kw_only=True
)
chunker: TextChunker = field(
default=Factory(
lambda self: TextChunker(
tokenizer=self.tokenizer,
max_tokens=self.max_tokens
),
takes_self=True
),
kw_only=True
)
def load(self, text: str | Path) -> list[TextArtifact]:
return self.text_to_artifacts(text)
def load_collection(self, texts: list[str | Path]) -> dict[str, list[TextArtifact]]:
return utils.execute_futures_dict({
utils.str_to_hash(str(text)): self.futures_executor.submit(self.text_to_artifacts, text)
for text in texts
})
def text_to_artifacts(self, text: str | Path) -> list[TextArtifact]:
artifacts = []
if isinstance(text, Path):
with open(text, "r") as file:
body = file.read()
else:
body = text
if self.chunker:
chunks = self.chunker.chunk(body)
else:
chunks = [TextArtifact(body)]
for chunk in chunks:
artifacts.append(chunk)
return artifacts
| [] |
2024-01-10 | dylanholmes/griptape | tests~mocks~mock_prompt_driver.py | from attr import define, field
from griptape.utils import PromptStack
from griptape.drivers import BasePromptDriver
from griptape.tokenizers import OpenAiTokenizer, BaseTokenizer
from griptape.artifacts import TextArtifact
@define
class MockPromptDriver(BasePromptDriver):
model: str = "test-model"
tokenizer: BaseTokenizer = OpenAiTokenizer()
mock_output: str = field(default="mock output", kw_only=True)
def try_run(self, prompt_stack: PromptStack) -> TextArtifact:
return TextArtifact(value=self.mock_output)
| [] |
2024-01-10 | dylanholmes/griptape | tests~mocks~mock_failing_prompt_driver.py | from attr import define
from griptape.utils import PromptStack
from griptape.drivers import BasePromptDriver
from griptape.tokenizers import OpenAiTokenizer, BaseTokenizer
from griptape.artifacts import TextArtifact
@define
class MockFailingPromptDriver(BasePromptDriver):
max_failures: int
current_attempt: int = 0
model: str = "test-model"
tokenizer: BaseTokenizer = OpenAiTokenizer()
def try_run(self, prompt_stack: PromptStack) -> TextArtifact:
if self.current_attempt < self.max_failures:
self.current_attempt += 1
raise Exception(f"failed attempt")
else:
return TextArtifact("success")
| [] |
2024-01-10 | dylanholmes/griptape | tests~mocks~mock_value_prompt_driver.py | from attr import define
from griptape.drivers import BasePromptDriver
from griptape.tokenizers import OpenAiTokenizer, BaseTokenizer
from griptape.artifacts import TextArtifact
@define
class MockValuePromptDriver(BasePromptDriver):
value: str
model: str = "test-model"
tokenizer: BaseTokenizer = OpenAiTokenizer()
def try_run(self, value: str) -> TextArtifact:
return TextArtifact(value=self.value)
| [] |
2024-01-10 | dylanholmes/griptape | tests~unit~tokenizers~test_anthropic_tokenizer.py | import pytest
from griptape.tokenizers import AnthropicTokenizer
class TestAnthropicTokenizer:
@pytest.fixture
def tokenizer(self):
return AnthropicTokenizer()
def test_encode(self, tokenizer):
assert tokenizer.encode("foo bar") == [3803, 3871]
def test_decode(self, tokenizer):
assert tokenizer.decode([3803, 3871]) == "foo bar"
def test_token_count(self, tokenizer):
assert tokenizer.token_count("foo bar huzzah") == 5
def test_tokens_left(self, tokenizer):
assert tokenizer.tokens_left("foo bar huzzah") == 99995
| [] |
2024-01-10 | dylanholmes/griptape | griptape~memory~structure~summary_conversation_memory.py | from __future__ import annotations
import json
import logging
from typing import TYPE_CHECKING
from typing import Optional
from attr import define, field, Factory
from griptape.drivers import OpenAiChatPromptDriver
from griptape.schemas import SummaryConversationMemorySchema
from griptape.utils import J2
from griptape.memory.structure import ConversationMemory
if TYPE_CHECKING:
from griptape.drivers import BasePromptDriver
from griptape.memory.structure import Run
from griptape.utils import PromptStack
@define
class SummaryConversationMemory(ConversationMemory):
offset: int = field(default=1, kw_only=True)
prompt_driver: BasePromptDriver = field(
default=Factory(lambda: OpenAiChatPromptDriver()),
kw_only=True
)
summary: Optional[str] = field(default=None, kw_only=True)
summary_index: int = field(default=0, kw_only=True)
summary_template_generator: J2 = field(
default=Factory(lambda: J2("memory/conversation/summary.j2")),
kw_only=True
)
summarize_conversation_template_generator: J2 = field(
default=Factory(lambda: J2("memory/conversation/summarize_conversation.j2")),
kw_only=True
)
@classmethod
def from_dict(cls, memory_dict: dict) -> SummaryConversationMemory:
return SummaryConversationMemorySchema().load(memory_dict)
@classmethod
def from_json(cls, memory_json: str) -> SummaryConversationMemory:
return SummaryConversationMemory.from_dict(json.loads(memory_json))
def add_to_prompt_stack(self, stack: PromptStack) -> None:
if self.summary:
stack.add_user_input(self.summary_template_generator.render(summary=self.summary))
for r in self.unsummarized_runs():
stack.add_user_input(r.input)
stack.add_assistant_input(r.output)
def to_dict(self) -> dict:
return dict(SummaryConversationMemorySchema().dump(self))
def unsummarized_runs(self, last_n: Optional[int] = None) -> list[Run]:
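        # Runs added since the last summarization, optionally capped to the last_n most recent runs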
summary_index_runs = self.runs[self.summary_index:]
if last_n:
last_n_runs = self.runs[-last_n:]
if len(summary_index_runs) > len(last_n_runs):
return last_n_runs
else:
return summary_index_runs
else:
return summary_index_runs
def try_add_run(self, run: Run) -> None:
super().try_add_run(run)
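        # Summarize every run except the most recent `offset` runs, then advance the summary index past them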
unsummarized_runs = self.unsummarized_runs()
runs_to_summarize = unsummarized_runs[:max(0, len(unsummarized_runs) - self.offset)]
if len(runs_to_summarize) > 0:
self.summary = self.summarize_runs(self.summary, runs_to_summarize)
self.summary_index = 1 + self.runs.index(runs_to_summarize[-1])
def summarize_runs(self, previous_summary: str, runs: list[Run]) -> str:
try:
if len(runs) > 0:
return self.prompt_driver.run(
prompt_stack=self.summarize_conversation_template_generator.render(
summary=previous_summary,
runs=runs
)
).to_text()
else:
return previous_summary
except Exception as e:
logging.error(f"Error summarizing memory: {type(e).__name__}({e})")
return previous_summary
| [
"memory/conversation/summary.j2",
"memory/conversation/summarize_conversation.j2"
] |
2024-01-10 | dylanholmes/griptape | griptape~chunkers~base_chunker.py | from __future__ import annotations
from abc import ABC
from typing import Optional
from attr import define, field, Factory
from griptape.artifacts import TextArtifact
from griptape.chunkers import ChunkSeparator
from griptape.tokenizers import OpenAiTokenizer
@define
class BaseChunker(ABC):
DEFAULT_SEPARATORS = [
ChunkSeparator(" ")
]
separators: list[ChunkSeparator] = field(
default=Factory(lambda self: self.DEFAULT_SEPARATORS, takes_self=True),
kw_only=True
)
tokenizer: OpenAiTokenizer = field(
default=Factory(lambda: OpenAiTokenizer()),
kw_only=True
)
max_tokens: int = field(
default=Factory(lambda self: self.tokenizer.max_tokens, takes_self=True),
kw_only=True
)
def chunk(self, text: TextArtifact | str) -> list[TextArtifact]:
text = text.value if isinstance(text, TextArtifact) else text
return [TextArtifact(c) for c in self._chunk_recursively(text)]
def _chunk_recursively(self, chunk: str, current_separator: Optional[ChunkSeparator] = None) -> list[str]:
token_count = self.tokenizer.token_count(chunk)
if token_count <= self.max_tokens:
return [chunk]
else:
balance_index = -1
balance_diff = float("inf")
tokens_count = 0
half_token_count = token_count // 2
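            # Pick the separator split whose cumulative token count is closest to half, then recurse on both halves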
if current_separator:
separators = self.separators[self.separators.index(current_separator):]
else:
separators = self.separators
for separator in separators:
subchanks = list(filter(None, chunk.split(separator.value)))
if len(subchanks) > 1:
for index, subchunk in enumerate(subchanks):
if index < len(subchanks):
if separator.is_prefix:
subchunk = separator.value + subchunk
else:
subchunk = subchunk + separator.value
tokens_count += self.tokenizer.token_count(subchunk)
if abs(tokens_count - half_token_count) < balance_diff:
balance_index = index
balance_diff = abs(tokens_count - half_token_count)
if separator.is_prefix:
first_subchunk = separator.value + separator.value.join(subchanks[:balance_index + 1])
second_subchunk = separator.value + separator.value.join(subchanks[balance_index + 1:])
else:
first_subchunk = separator.value.join(subchanks[:balance_index + 1]) + separator.value
second_subchunk = separator.value.join(subchanks[balance_index + 1:])
first_subchunk_rec = self._chunk_recursively(first_subchunk.strip(), separator)
second_subchunk_rec = self._chunk_recursively(second_subchunk.strip(), separator)
if first_subchunk_rec and second_subchunk_rec:
return first_subchunk_rec + second_subchunk_rec
elif first_subchunk_rec:
return first_subchunk_rec
elif second_subchunk_rec:
return second_subchunk_rec
else:
return []
return []
| [] |
2024-01-10 | dylanholmes/griptape | tests~unit~tokenizers~test_tiktoken_tokenizer.py | import pytest
from griptape.tokenizers import OpenAiTokenizer
class TestOpenAiTokenizer:
@pytest.fixture
def tokenizer(self):
return OpenAiTokenizer()
def test_encode(self, tokenizer):
assert tokenizer.encode("foo bar") == [8134, 3703]
def test_decode(self, tokenizer):
assert tokenizer.decode([8134, 3703]) == "foo bar"
def test_token_count_for_text(self, tokenizer):
assert tokenizer.token_count("foo bar huzzah") == 5
def test_token_count_for_messages(self, tokenizer):
assert tokenizer.token_count(
[
{
"role": "system",
"content": "foobar baz"
},
{
"role": "user",
"content": "how foobar am I?"
}
],
model="gpt-4"
) == 19
assert tokenizer.token_count(
[
{
"role": "system",
"content": "foobar baz"
},
{
"role": "user",
"content": "how foobar am I?"
}
],
model="gpt-3.5-turbo-0301"
) == 21
assert tokenizer.token_count(
[
{
"role": "system",
"content": "foobar baz"
},
{
"role": "user",
"content": "how foobar am I?"
}
],
model="gpt-35-turbo"
) == 19
def test_tokens_left(self, tokenizer):
assert tokenizer.tokens_left("foo bar huzzah") == 4083
def test_encoding(self, tokenizer):
assert tokenizer.encoding.name == "cl100k_base"
def test_chunk_tokens(self, tokenizer):
tokens = tokenizer.encode("foo bar")
assert [chunk for chunk in tokenizer.chunk_tokens(tokens)] == [(8134, 3703)]
| [
"foobar baz",
"how foobar am I?"
] |
2024-01-10 | bovem/gpt-developer-tools | src~requester.py | import openai
def send_gpt_request(script_path, line_start, line_end, prompt, open_ai_api_key, model_engine, max_tokens, n, temperature):
with open(script_path, "r") as script:
print("\nInput Script: {}".format(script_path))
print()
openai.api_key = open_ai_api_key
prompt+="\n "
total_lines = sum(1 for line in script)
        line_end = total_lines if line_end is None else line_end
script.seek(0)
for i, line in enumerate(script):
if line_start<=(i+1) and line_end>=(i+1):
prompt+=line
elif line_end<(i+1):
break
# Generate a response
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=None,
temperature=temperature,
)
response = completion.choices[0].text
print("ChatGPT's Response:")
print(response)
def send_gpt_request_create(prompt, write_flag, write_path, open_ai_api_key, model_engine, max_tokens, n, temperature):
print("\nInput prompt: {}".format(prompt))
print()
openai.api_key = open_ai_api_key
# Generate a response
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=None,
temperature=temperature,
)
response = completion.choices[0].text
    if write_flag:
script = open(write_path, "w")
script.write(response)
script.close()
print("Program written to file: {}".format(write_path))
else:
print("ChatGPT's Response:")
print(response) | [
"\n "
] |
2024-01-10 | kevinbtalbert/Electric_and_Utilities_System_Demo | CML-Assets~2_outages_app~outages_llm_app.py | import os
import gradio
import pandas as pd
import openai
openai.api_key = os.getenv('OPENAI_KEY')
# Custom CSS
custom_css = f"""
.gradio-header {{
color: white;
}}
.gradio-description {{
color: white;
}}
gradio-app {{
background-image: url('https://raw.githubusercontent.com/kevinbtalbert/Electric_and_Utilities_System_Demo/main/CML-Assets/app_assets/cldr_bg.jpg') !important;
background-size: cover !important;
background-position: center center !important;
background-repeat: no-repeat !important;
background-attachment: fixed !important;
}}
#custom-logo {{
text-align: center;
}}
.dark {{
background-image: url('https://raw.githubusercontent.com/kevinbtalbert/Electric_and_Utilities_System_Demo/main/CML-Assets/app_assets/cldr_bg.jpg') !important;
background-size: cover !important;
background-position: center center !important;
background-repeat: no-repeat !important;
background-attachment: fixed !important;
}}
.gr-interface {{
background-color: rgba(255, 255, 255, 0.8);
}}
.gradio-header {{
background-color: rgba(0, 0, 0, 0.5);
}}
.gradio-input-box, .gradio-output-box {{
background-color: rgba(255, 255, 255, 0.8);
}}
h1 {{
color: white;
font-family: -apple-system, BlinkMacSystemFont, sans-serif;
font-size: large; !important;
}}
"""
def main():
# Configure gradio QA app
print("Configuring gradio app")
demo = gradio.Interface(fn=get_responses,
title="Electric & Utilities Company AI-Powered Assistant",
description="This AI-powered assistant is designed to help you understand outages in your area as well as be a source for questions about your utility company. For outages, served, and affected, simply enter the area name. You can ask complete questions for the chatbot.",
inputs=[gradio.Radio(['outages', 'customers-served', 'customers-affected', 'chatbot'], label="Select Use Case", value="outages"), gradio.Textbox(label="Area/Question", placeholder="")],
outputs=[gradio.Textbox(label="Response")],
allow_flagging="never",
css=custom_css)
# Launch gradio app
print("Launching gradio app")
demo.launch(share=True,
enable_queue=True,
show_error=True,
server_name='127.0.0.1',
server_port=int(os.getenv('CDSW_APP_PORT')))
print("Gradio app ready")
# Helper function for generating responses for the QA app
def get_responses(option, question):
engine = "gpt-3.5-turbo"
if question is "" or question is None:
return "No question and/or engine selected."
if option == "outages":
res = get_outages_by_area(question)
context_chunk="You are a chatbot responding for an electric and utilities company."
question = "Explain to me that the area " + question + " has " + str(res) + " outages."
if option == "customers-served":
res = get_customers_served_by_area(question)
context_chunk="You are a chatbot responding for an electric and utilities company"
question = "Explain to me that the area " + question + " has " + str(res) + " served customers."
if option == "customers-affected":
res = get_customers_affected_by_area(question)
context_chunk = "You are a chatbot responding for an electric and utilities company. "
question = "Explain to me that the area " + question + " has " + str(res) + " affected customers by outages."
if option == "chatbot":
context_chunk="You are a chatbot responding to a question for an electric and utilities company. If this question is not about that domain, say you cannot answer it: "
# Perform text generation with LLM model
response = get_llm_response(question, context_chunk, engine)
return response
def get_outages_by_area(area_name):
try:
# Read the CSV file
data = pd.read_csv('/home/cdsw/CML-Assets/data/utility_outage_data.csv')
# Convert the 'Area Name' column to uppercase for case-insensitive comparison
data['Area Name'] = data['Area Name'].str.upper()
# Find the row with the matching area name
area_data = data[data['Area Name'] == area_name.upper()]
# Check if the area is found
if not area_data.empty:
# Return the number of outages
return area_data.iloc[0]['Number of Outages']
else:
return "Area name not found."
except FileNotFoundError:
return "Outage data not found."
def get_customers_served_by_area(area_name):
try:
# Read the CSV file
data = pd.read_csv('/home/cdsw/CML-Assets/data/utility_outage_data.csv')
# Convert the 'Area Name' column to uppercase for case-insensitive comparison
data['Area Name'] = data['Area Name'].str.upper()
# Find the row with the matching area name
area_data = data[data['Area Name'] == area_name.upper()]
# Check if the area is found
if not area_data.empty:
# Return the number of customers served
return area_data.iloc[0]['Customers Served']
else:
return "Area name not found."
except FileNotFoundError:
return "Outage data not found."
def get_customers_affected_by_area(area_name):
try:
# Read the CSV file
data = pd.read_csv('/home/cdsw/CML-Assets/data/utility_outage_data.csv')
# Convert the 'Area Name' column to uppercase for case-insensitive comparison
data['Area Name'] = data['Area Name'].str.upper()
# Find the row with the matching area name
area_data = data[data['Area Name'] == area_name.upper()]
# Check if the area is found
if not area_data.empty:
# Return the number of customers affected
return area_data.iloc[0]['Approximate Customers Affected']
else:
return "Area name not found."
except FileNotFoundError:
return "Outage data not found."
# Pass through user input to LLM model with enhanced prompt and stop tokens
def get_llm_response(question, context, engine):
response = openai.ChatCompletion.create(
model=engine,
messages=[
{"role": "system", "content": str(context)},
{"role": "user", "content": str(question)}
]
)
return response['choices'][0]['message']['content']
if __name__ == "__main__":
main()
| [] |
2024-01-10 | stas00/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | sohanshow/Convo_Sense_Public | demo~reference_old.py | ## This is going to be our end file for the reference.
import logging
from PyDictionary import PyDictionary
logging.basicConfig(level=logging.INFO)
import requests
import nltk
import os
import wget
import json
from google_images_search import GoogleImagesSearch
import requests
import openai
#============This returns the definition of the word from openai===========
openai.api_key = 'Your API KEY'
def get_def(word, context):
model = "text-davinci-002"
prompt = (
f"Please provide the definition of the word '{word}' as used in the following sentence:\n\n"
f"{context}\n\n"
f"Definition of '{word}': "
)
response = openai.Completion.create(
engine=model,
prompt=prompt,
max_tokens=100,
n=1,
stop=None,
temperature=0.5,
)
definition = response.choices[0].text.strip()
return definition
#=============================================================================================#
#=============================================================#
# This function gets the pictures for our reference from Google Custom Search API
#=============================================================#
def getPicture(keyword):
gis = GoogleImagesSearch('Your API KEY', 'Your custom Image Search Key')
gis.search({'q': keyword})
dir_path = './site/'
try:
image_url = gis.results()[0].url
response = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0'})
img_data = response.content
except IndexError:
return None
with open(os.path.join(dir_path, 'image.png'), 'wb') as f:
f.write(img_data)
def getPictureGuest(keyword):
gis = GoogleImagesSearch('Your API KEY', 'Your custom Image Search Key')
gis.search({'q': keyword})
dir_path = './site/'
try:
image_url = gis.results()[0].url
response = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0'})
img_data = response.content
except IndexError:
return None
with open(os.path.join(dir_path, 'image2.png'), 'wb') as f:
f.write(img_data)
| [
"Please provide the definition of the word 'PLACEHOLDER' as used in the following sentence:\n\nPLACEHOLDER\n\nDefinition of 'PLACEHOLDER': "
] |
2024-01-10 | sohanshow/Convo_Sense_Public | reference.py | ## This is going to be our end file for the reference.
import logging
from PyDictionary import PyDictionary
logging.basicConfig(level=logging.INFO)
import requests
import nltk
import os
import wget
import json
from google_images_search import GoogleImagesSearch
import requests
import openai
#============This returns the definition of the word from openai API===========
openai.api_key = 'Your openai KEY'
def get_def(word, context):
model = "text-davinci-002"
prompt = (
f"Please provide the definition of the word '{word}' as used in the following sentence:\n\n"
f"{context}\n\n"
f"Definition of '{word}': "
)
response = openai.Completion.create(
engine=model,
prompt=prompt,
max_tokens=100,
n=1,
stop=None,
temperature=0.5,
)
definition = response.choices[0].text.strip()
return definition
#=============================================================================================#
#=============================================================#
# This function gets the pictures for our reference from Google Custom Search API
#=============================================================#
def getPicture(keyword):
gis = GoogleImagesSearch('YOUR Google API KEY', 'YOUR custom search API KEY')
gis.search({'q': keyword})
dir_path = './site/'
try:
image_url = gis.results()[0].url
response = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0'})
img_data = response.content
except IndexError:
return None
with open(os.path.join(dir_path, 'image.png'), 'wb') as f:
f.write(img_data)
def getPictureGuest(keyword):
gis = GoogleImagesSearch('YOUR Google API KEY', 'YOUR custom search API KEY')
gis.search({'q': keyword})
dir_path = './site/'
try:
image_url = gis.results()[0].url
response = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0'})
img_data = response.content
except IndexError:
return None
with open(os.path.join(dir_path, 'image2.png'), 'wb') as f:
f.write(img_data)
| [
"Please provide the definition of the word 'PLACEHOLDER' as used in the following sentence:\n\nPLACEHOLDER\n\nDefinition of 'PLACEHOLDER': "
] |
2024-01-10 | sohanshow/Convo_Sense_Public | demo~reference.py | ## This is going to be our end file for the reference.
import logging
from PyDictionary import PyDictionary
logging.basicConfig(level=logging.INFO)
import os
from PIL import Image
from io import BytesIO
from google_images_search import GoogleImagesSearch
import requests
import openai
#============This returns the definition of the word from OpenAI===========
openai.api_key = 'Your API KEY'
def get_def(word, context):
model = "text-davinci-002"
prompt = (
f"Please provide the definition of the word '{word}' as used in the following sentence:\n\n"
f"{context}\n\n"
f"Definition of '{word}': "
)
response = openai.Completion.create(
engine=model,
prompt=prompt,
max_tokens=100,
n=1,
stop=None,
temperature=0.5,
)
definition = response.choices[0].text.strip()
return definition
#===================================This is Saving the Image==================================
def save_image(url, file_name, directory):
response = requests.get(url)
response.raise_for_status()
img = Image.open(BytesIO(response.content))
if not os.path.exists(directory):
os.makedirs(directory)
file_path = os.path.join(directory, file_name)
img.save(file_path)
#=============================================================================================#
def bing_image_search(query, api_key, count=10):
headers = {"Ocp-Apim-Subscription-Key": api_key}
params = {
"q": query,
"count": count,
"offset": 0,
"mkt": "en-US",
"safesearch": "Moderate",
}
url = "https://api.bing.microsoft.com/v7.0/images/search"
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
return response.json()
#=============================================================#
# This function gets the pictures for our reference from Google Custom Search API
#=============================================================#
def getPicture(keyword):
gis = GoogleImagesSearch('Your API KEY', 'Your custom search KEY')
# define the search parameters
gis.search({'q': keyword})
dir_path = './site/'
# If path not availabe then do this:
# if not os.path.exists(dir_path):
# os.makedirs(dir_path)
# get the URL of the first image in the search results
try:
image_url = gis.results()[0].url
# download the image from the URL and save it
# wget.download(image_url)
response = requests.get(image_url, headers={'User-Agent': 'Mozilla/5.0'})
img_data = response.content
except IndexError:
print("No Image found")
return None
with open(os.path.join(dir_path, 'image.png'), 'wb') as f:
f.write(img_data)
getPicture("a bat")
| [
"Please provide the definition of the word 'PLACEHOLDER' as used in the following sentence:\n\nPLACEHOLDER\n\nDefinition of 'PLACEHOLDER': "
] |
2024-01-10 | AllenXiao95/openchat | ochat~evaluation~get_openai_answer.py | import os
import json
import argparse
import time
import openai
from tqdm import tqdm
from tenacity import retry, stop_after_attempt, wait_random_exponential
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def chat_completion_with_backoff(**kwargs):
return openai.ChatCompletion.create(**kwargs)
def main():
parser = argparse.ArgumentParser()
# Input / output
parser.add_argument("--data_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
parser.add_argument("--model_types", type=str, nargs='+', default=["gpt-3.5-turbo", "gpt-4"])
# Temperature
parser.add_argument("--temperature", type=float, default=0.7)
parser.add_argument("--top_p", type=float, default=0.9)
args = parser.parse_args()
# Load questions
with open(os.path.join(args.data_path, "question.jsonl"), "r") as f:
question_list = list(map(json.loads, f.readlines()))
# Get API answers
cur_date = time.strftime("%Y%m%d")
for model_type in args.model_types:
output_filename = os.path.join(args.output_path, f"{os.path.basename(args.data_path)}_{model_type}.jsonl")
# API call
answer_list = []
for question in tqdm(question_list):
answer = chat_completion_with_backoff(
model=model_type,
messages=[
{"role": "user", "content": question["text"]}
],
temperature=args.temperature,
top_p=args.top_p
)
answer = answer["choices"][0]["message"]["content"]
answer_list.append({
"answer_model": f"{model_type}_{cur_date}",
"answer": answer,
**question
})
# Write jsonl
answer_list = list(map(lambda x: json.dumps(x) + "\n", answer_list))
with open(output_filename, "w") as f:
f.writelines(answer_list)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | abeldiress/cohere-python | cohere~client.py | import json
import sys
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List
from urllib.parse import urljoin
import requests
from requests import Response
import cohere
from cohere.classify import Classification, Classifications
from cohere.classify import Example as ClassifyExample
from cohere.classify import LabelPrediction
from cohere.detokenize import Detokenization
from cohere.embeddings import Embeddings
from cohere.error import CohereError
from cohere.generation import Generations
from cohere.tokenize import Tokens
from cohere.detectlang import Language, DetectLanguageResponse
use_xhr_client = False
try:
from js import XMLHttpRequest
use_xhr_client = True
except ImportError:
pass
class Client:
def __init__(self,
api_key: str,
version: str = None,
num_workers: int = 64,
request_dict: dict = {},
check_api_key: bool = True) -> None:
self.api_key = api_key
self.api_url = cohere.COHERE_API_URL
self.batch_size = cohere.COHERE_EMBED_BATCH_SIZE
self._executor = ThreadPoolExecutor(num_workers)
self.num_workers = num_workers
self.request_dict = request_dict
if version is None:
self.cohere_version = cohere.COHERE_VERSION
else:
self.cohere_version = version
if check_api_key:
try:
res = self.check_api_key()
if not res['valid']:
raise CohereError('invalid api key')
except CohereError as e:
raise CohereError(message=e.message, http_status=e.http_status, headers=e.headers)
def check_api_key(self) -> Response:
headers = {
'Authorization': 'BEARER {}'.format(self.api_key),
'Content-Type': 'application/json',
'Request-Source': 'python-sdk',
}
if self.cohere_version != '':
headers['Cohere-Version'] = self.cohere_version
url = urljoin(self.api_url, cohere.CHECK_API_KEY_URL)
if use_xhr_client:
response = self.__pyfetch(url, headers, None)
return response
else:
response = requests.request('POST', url, headers=headers)
try:
res = json.loads(response.text)
except Exception:
raise CohereError(message=response.text, http_status=response.status_code, headers=response.headers)
if 'message' in res.keys(): # has errors
raise CohereError(message=res['message'], http_status=response.status_code, headers=response.headers)
return res
def batch_generate(self, prompts: List[str], **kwargs) -> List[Generations]:
generations: List[Generations] = []
for prompt in prompts:
kwargs["prompt"] = prompt
generations.append(self.generate(**kwargs))
return generations
def generate(self,
prompt: str = None,
prompt_vars: object = {},
model: str = None,
preset: str = None,
num_generations: int = None,
max_tokens: int = None,
temperature: float = None,
k: int = None,
p: float = None,
frequency_penalty: float = None,
presence_penalty: float = None,
end_sequences: List[str] = None,
stop_sequences: List[str] = None,
return_likelihoods: str = None,
truncate: str = None,
logit_bias: Dict[int, float] = {}) -> Generations:
json_body = {
'model': model,
'prompt': prompt,
'prompt_vars': prompt_vars,
'preset': preset,
'num_generations': num_generations,
'max_tokens': max_tokens,
'temperature': temperature,
'k': k,
'p': p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty,
'end_sequences': end_sequences,
'stop_sequences': stop_sequences,
'return_likelihoods': return_likelihoods,
'truncate': truncate,
'logit_bias': logit_bias,
}
response = self._executor.submit(self.__request, cohere.GENERATE_URL, json=json_body)
return Generations(return_likelihoods=return_likelihoods, _future=response)
def embed(self, texts: List[str], model: str = None, truncate: str = 'NONE') -> Embeddings:
responses = []
json_bodys = []
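        # Split the texts into batches of self.batch_size and issue one embed request per batch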
for i in range(0, len(texts), self.batch_size):
texts_batch = texts[i:i + self.batch_size]
json_bodys.append({
'model': model,
'texts': texts_batch,
'truncate': truncate,
})
if use_xhr_client:
for json_body in json_bodys:
response = self.__request(cohere.EMBED_URL, json=json_body)
responses.append(response['embeddings'])
else:
for result in self._executor.map(lambda json_body: self.__request(cohere.EMBED_URL, json=json_body),
json_bodys):
responses.extend(result['embeddings'])
return Embeddings(responses)
def classify(self,
inputs: List[str] = [],
model: str = None,
preset: str = None,
examples: List[ClassifyExample] = [],
truncate: str = None) -> Classifications:
examples_dicts: List[Dict[str, str]] = []
for example in examples:
example_dict = {'text': example.text, 'label': example.label}
examples_dicts.append(example_dict)
json_body = {
'model': model,
'preset': preset,
'inputs': inputs,
'examples': examples_dicts,
'truncate': truncate,
}
response = self.__request(cohere.CLASSIFY_URL, json=json_body)
classifications = []
for res in response['classifications']:
labelObj = {}
for label, prediction in res['labels'].items():
labelObj[label] = LabelPrediction(prediction['confidence'])
classifications.append(Classification(res['input'], res['prediction'], res['confidence'], labelObj))
return Classifications(classifications)
def batch_tokenize(self, texts: List[str]) -> List[Tokens]:
return [self.tokenize(t) for t in texts]
def tokenize(self, text: str) -> Tokens:
json_body = {'text': text}
return Tokens(_future=self._executor.submit(self.__request, cohere.TOKENIZE_URL, json=json_body))
def batch_detokenize(self, list_of_tokens: List[List[int]]) -> List[Detokenization]:
return [self.detokenize(t) for t in list_of_tokens]
def detokenize(self, tokens: List[int]) -> Detokenization:
json_body = {'tokens': tokens}
return Detokenization(_future=self._executor.submit(self.__request, cohere.DETOKENIZE_URL, json=json_body))
def detect_language(self, texts: List[str]) -> List[Language]:
json_body = {
"texts": texts,
}
response = self.__request(cohere.DETECT_LANG_URL, json=json_body)
results = []
for result in response["results"]:
results.append(Language(result["language_code"], result["language_name"]))
return DetectLanguageResponse(results)
def __print_warning_msg(self, response: Response):
if 'X-API-Warning' in response.headers:
print("\033[93mWarning: {}\n\033[0m".format(response.headers['X-API-Warning']), file=sys.stderr)
def __pyfetch(self, url, headers, json_body) -> Response:
req = XMLHttpRequest.new()
req.open('POST', url, False)
for key, value in headers.items():
req.setRequestHeader(key, value)
try:
req.send(json_body)
except Exception:
raise CohereError(message=req.responseText, http_status=req.status, headers=req.getAllResponseHeaders())
res = json.loads(req.response)
if 'message' in res.keys():
raise CohereError(message=res['message'], http_status=req.status, headers=req.getAllResponseHeaders())
return res
def __request(self, endpoint, json=None) -> Any:
headers = {
'Authorization': 'BEARER {}'.format(self.api_key),
'Content-Type': 'application/json',
'Request-Source': 'python-sdk',
}
if self.cohere_version != '':
headers['Cohere-Version'] = self.cohere_version
url = urljoin(self.api_url, endpoint)
if use_xhr_client:
# NOTE: the parameter named `json` shadows the json module inside this method,
# so serialise the body through a locally imported alias instead.
import json as jsonlib
response = self.__pyfetch(url, headers, jsonlib.dumps(json))
self.__print_warning_msg(response)
return response
else:
response = requests.request('POST', url, headers=headers, json=json, **self.request_dict)
try:
res = response.json()
except Exception:
raise CohereError(message=response.text, http_status=response.status_code, headers=response.headers)
if 'message' in res: # has errors
raise CohereError(message=res['message'], http_status=response.status_code, headers=response.headers)
self.__print_warning_msg(response)
return res
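# --- Illustrative usage sketch (not part of the original SDK file) ---
# The API key is a placeholder and the model names are assumptions; the calls
# mirror the methods defined above: generate() returns a future-backed
# Generations object and embed() batches the texts transparently.
#
# co = Client(api_key="YOUR_API_KEY")
# gens = co.generate(prompt="Hello world", model="command", max_tokens=20)
# embeddings = co.embed(texts=["hello", "goodbye"], model="embed-english-v2.0")
# tokens = co.batch_tokenize(["hello world"])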
| [] |
2024-01-10 | abeldiress/cohere-python | cohere~detectlang.py | from cohere.response import CohereObject
from typing import List
class Language(CohereObject):
def __init__(self, code: str, name: str):
self.language_code = code
self.language_name = name
def __repr__(self) -> str:
return f"Language<language_code: \"{self.language_code}\", language_name: \"{self.language_name}\">"
class DetectLanguageResponse:
def __init__(self, results: List[Language]):
self.results = results
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_cube_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
"""
To check any topic we need to have the simulation running; we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
"""
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states",
JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
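# --- Illustrative sketch (not part of the original file) ---
# A task environment subclassing CubeSingleDiskEnv would typically drive the
# inertia wheel and read state like this (the roll speed value is arbitrary):
#
# self.move_joints(roll_speed=5.0)   # publishes a Float64 command and waits for the velocity
# joints = self.get_joints()         # latest sensor_msgs/JointState
# odom = self.get_odom()             # latest nav_msgs/Odometry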
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~turtlebot2_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class TurtleBot2Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new TurtleBot2Env environment.
Turtlebot2 doesn't use controller_manager, therefore we won't reset the
controllers in the standard fashion. For the moment we won't reset them.
To check any topic we need to have the simulation running; we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The accessible sensors are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logdebug("Start TurtleBot2Env INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="put_turtlebot2_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
#rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We don't need to check these for the moment, it takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
It will wait until those twists are achieved, reading from the odometry topic (note: the blocking wait call is currently commented out below).
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
If min_laser_distance == -1, it always returns False, because that is the way
to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item)+"< "+str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def reinit_sensors(self):
"""
This method is for the task environments so that, when resetting an episode,
the sensor values are forced to be updated with real, current data.
"""
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
from .controllers_connection import ControllersConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
# https://github.com/openai/gym/blob/master/gym/core.py
class RobotGazeboEnv(gym.Env):
def __init__(self, robot_name_space, controllers_list, reset_controls, start_init_physics_parameters=True, reset_world_or_sim="SIMULATION"):
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(robot_name_space, start_init_physics_parameters,reset_world_or_sim)
self.controllers_object = ControllersConnection(namespace=robot_name_space, controllers_list=controllers_list)
self.reset_controls = reset_controls
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/'+robot_name_space+'/openai/reward', RLExperimentInfo, queue_size=1)
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
Here we get the action, execute it for one time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action number to a movement action, execute it in the
simulation and get the observations that result from performing that action.
"""
rospy.logdebug("START STEP OpenAIROS")
self.gazebo.unpauseSim()
self._set_action(action)
self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
rospy.logdebug("END STEP OpenAIROS")
return obs, reward, done, info
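# --- Illustrative sketch (not part of the original file) ---
# The unpause -> act -> pause cycle above yields the usual Gym loop; an agent
# would consume a concrete subclass of this env like this (the env id below is
# an assumption, any registered task env works):
#
# env = gym.make("MyTaskEnv-v0")
# obs = env.reset()
# for _ in range(100):
#     obs, reward, done, info = env.step(env.action_space.sample())
#     if done:
#         obs = env.reset()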
def reset(self):
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._init_env_variables()
self._reset_sim()
self._update_episode()
obs = self._get_obs()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
rospy.logwarn("PUBLISHING REWARD...")
self._publish_reward_topic(
self.cumulated_episode_reward,
self.episode_num
)
rospy.logwarn("PUBLISHING REWARD...DONE="+str(self.cumulated_episode_reward)+",EP="+str(self.episode_num))
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("RESET SIM START")
if self.reset_controls :
rospy.logdebug("RESET CONTROLLERS")
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self.gazebo.pauseSim()
else:
rospy.logwarn("DONT RESET CONTROLLERS")
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.gazebo.pauseSim()
rospy.logdebug("RESET SIM END")
return True
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
To check any topic we need to have the simulation running; we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The accessible sensors are the ones considered useful for AI learning.
Sensor Topic List:
* /robot/joint_limits: Joint limits of the Sawyer arm
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
Sets up the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
It executes the command given through an id. This will move any joint
of Sawyer, including the gripper if it has one.
:param: action_id: These are the possible action_id values and the associated actions.
self.joints[0]+"_increase",
self.joints[0]+"_decrease",
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
gripper_close,
gripper_open,
gripper_calibrate
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
"""
It moves all the joints to the given position and doesn't return until it reaches that position
"""
self.limb.move_to_joint_positions(positions=joint_positions_dict,
timeout=timeout,
threshold=threshold,
test=None)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
Returns a dictionary dict({str:float}) with all the joints efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something different from None if the TFs of the two frames are published
on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
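# --- Illustrative sketch (not part of the original file) ---
# The frame names below are assumptions; any two frames connected in the TF tree work:
#
# trans, rot = self.get_tf_start_to_end_frames("base", "right_gripper_tip")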
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
Get the Joint Limits in the init phase, where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~competition_rl~scripts~start_training.py | #!/usr/bin/env python
from tianshou.policy import PPOPolicy
from tianshou.env import SubprocVectorEnv
from tianshou.trainer import onpolicy_trainer
from tianshou.data import Collector, ReplayBuffer
import gym
import time
import numpy
import random
import time
from gym import wrappers
# ROS packages required
import rospy
import rospkg
import multiAgentChallengeTaskEnv
# from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from gym import envs
def main():
rospy.init_node('aichallenge_gym_node', anonymous=True, log_level=rospy.WARN)
# env = gym.make('AiChallengeEnv-v0', robot_ns="jackal0")
env = gym.make('AiChallengeEnv-v1')
# env = VectorEnv(env)
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('competition_rl')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo ( "Monitor Wrapper started")
# last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
# Alpha = rospy.get_param("/moving_cube/alpha")
# Initialize the environment and get first state of the robot
observation = env.reset()
# state = ''.join(map(str, observation))
time.sleep(5)
observation = env.reset()
time.sleep(2)
# env.step(1)
# time.sleep(2)
# env.step(-1)
# time.sleep(2)
# observation = env.reset()
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_maze.py | import rospy
import numpy
import time
import math
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot2MazeEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
# This parameter HAS to be set up in the MAIN launch of the AI RL script
ros_ws_abspath = rospy.get_param("/turtlebot2/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path "+ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p "+ros_ws_abspath + \
"/src;cd "+ros_ws_abspath+";catkin_make"
ROSLauncher(rospackage_name="gym_construct",
launch_file_name="start_maze_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot2/config",
yaml_file_name="turtlebot2_maze.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2MazeEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space from the discretised laser readings:
n_observations values, each one bounded by the laser's min/max range.
"""
# Actions and Observations
self.dec_obs = rospy.get_param(
"/turtlebot2/number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param(
'/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param(
'/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'/turtlebot2/init_linear_turn_speed')
self.n_observations = rospy.get_param('/turtlebot2/n_observations')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
#laser_scan = self._check_laser_scan_ready()
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Laser data
self.laser_scan_frame = laser_scan.header.frame_id
# Number of laser reading jumped
self.new_ranges = int(
math.ceil(float(len(laser_scan.ranges)) / float(self.n_observations)))
rospy.logdebug("n_observations===>"+str(self.n_observations))
rospy.logdebug(
"new_ranges, jumping laser readings===>"+str(self.new_ranges))
high = numpy.full((self.n_observations), self.max_laser_value)
low = numpy.full((self.n_observations), self.min_laser_value)
# Continuous observation space bounded by the laser's min/max range
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param(
"/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/turtlebot2/laser/scan_filtered', LaserScan, queue_size=1)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
# We wait a small amount of time to start everything because in very fast resets, laser scan values are sluggish
# and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
# TODO: Add reset of published filtered laser readings
laser_scan = self.get_laser_scan()
discretized_ranges = laser_scan.ranges
self.publish_filtered_laser_scan(laser_original_data=laser_scan,
new_filtered_laser_range=discretized_ranges)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
rospy.logdebug("END Set Action ==>"+str(action) +
", NAME="+str(self.last_action))
def _get_obs(self):
"""
Here we define what sensor data defines our robot's observations.
To know which variables we have access to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
rospy.logdebug("BEFORE DISCRET _episode_done==>" +
str(self._episode_done))
discretized_observations = self.discretize_observation(laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("AFTER DISCRET_episode_done==>"+str(self._episode_done))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logdebug("TurtleBot2 is Too Close to wall==>" +
str(self._episode_done))
else:
rospy.logerr("TurtleBot2 is Ok ==>")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self, data, new_ranges):
"""
Discards all the laser readings whose index is not a multiple of the new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data=" + str(data))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(
round(max_laser_value, self.dec_obs))
elif numpy.isnan(item):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(
round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(item, self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We pad with a small placeholder value (0.1)
filtered_range.append(0.1)
rospy.logdebug(
"Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
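# --- Illustrative worked example (not part of the original file) ---
# With a 720-beam scan and n_observations = 20, new_ranges = ceil(720/20) = 36,
# so only indices 0, 36, 72, ..., 684 are kept (20 values); every other slot of
# the republished filtered scan is padded with the placeholder value 0.1.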
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>" +
str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max -
laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~fetchsimple_env.py | import numpy as np
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from openai_ros import robot_gazebo_env
from openai_ros.openai_ros_common import ROSLauncher
class FetchSimpleEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(self, ros_ws_abspath):
rospy.logdebug("Entered Fetch Env")
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="fetch_simple_description",
launch_file_name="put_fetchsimple_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
self.controllers_list = ["joint_state_controller",
"torso_lift_joint_position_controller",
"bellows_joint_position_controller",
"head_pan_joint_position_controller",
"head_tilt_joint_position_controller",
"shoulder_pan_joint_position_controller",
"shoulder_lift_joint_position_controller",
"upperarm_roll_joint_position_controller",
"elbow_flex_joint_position_controller",
"forearm_roll_joint_position_controller",
"wrist_flex_joint_position_controller",
"wrist_roll_joint_position_controller",
"r_gripper_finger_joint_position_controller",
"l_gripper_finger_joint_position_controller"]
self.robot_name_space = "fetch"
self.reset_controls = True
super(FetchSimpleEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=self.reset_controls,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
# We Start all the ROS related Subscribers and publishers
self.JOINT_STATES_SUBSCRIBER = '/fetch/joint_states'
self.join_names = ["joint0",
"joint1",
"joint2",
"joint3",
"joint4",
"joint5",
"joint6"]
self.gazebo.unpauseSim()
# Start Move Fetch Object, that checks all systems are ready
self.move_fetch_object = FetchSimpleMove()
# Wait until Fetch goes to the init pose
self.move_fetch_object.init_position()
# We pause until the next step
self.gazebo.pauseSim()
# RobotGazeboEnv virtual methods
# ----------------------------
def move_to_init_pose(self):
self.move_fetch_object.init_position()
def get_joint_limits(self):
return self.move_fetch_object.joint_upper_limits, self.move_fetch_object.joint_lower_limits
def get_joints_position(self):
return self.move_fetch_object.get_current_joints_position()
def set_trajectory_joints(self, delta_joints_array):
self.move_fetch_object.delta_joints(delta_joints_array)
return True
# ParticularEnv methods
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self.move_fetch_object.check_all_systems_ready()
return True
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
class FetchSimpleMove(object):
def __init__(self):
rospy.loginfo("Initialising...")
self.name_joints = ["bellows_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"head_pan_joint",
"head_tilt_joint",
"l_gripper_finger_joint",
"r_gripper_finger_joint",
"shoulder_lift_joint",
"shoulder_pan_joint",
"torso_lift_joint",
"upperarm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint"]
self.joint_upper_limits = [0.4,
2.251,
6.27, # Was none but placed a limit
1.57,
1.45,
0.05,
0.05,
1.518,
1.6056,
0.38615,
6.27, # Was none but placed a limit
2.16,
6.27, # Was none but placed a limit
]
self.joint_lower_limits = [0.0,
-2.251,
0.0, # Was none but placed a limit
-1.57,
-0.76,
0.0,
0.0,
-1.221,
-1.6056,
0.0,
0.0, # Was none but placed a limit
-2.16,
0.0, # Was none but placed a limit
]
self.travel_arm_pose = [0.0,
-1.8, # elbow_flex_joint
0.0, # forearm_roll_joint
0.0,
0.0,
0.05,
0.04,
0.7, # shoulder_lift_joint
1.32, # shoulder_pan_joint
0.0, # upperarm_roll_joint
0.0,
-0.4, # wrist_flex_joint
0.1]
self.joint_array = len(self.name_joints)*[0.0]
self.pub_position_array = []
for joint in self.name_joints:
topic_name = "/fetch/"+joint+"_position_controller/command"
self.pub_position_array.append(
rospy.Publisher(topic_name, Float64, queue_size=1))
# Wait for publishers to be ready
self.check_all_systems_ready()
rospy.Subscriber("/fetch/joint_states", JointState,
self.join_state_callback)
def check_all_systems_ready(self):
self.wait_publishers_to_be_ready()
self._check_joint_states_ready()
def _check_joint_states_ready(self):
self.joints_state = None
while self.joints_state is None and not rospy.is_shutdown():
try:
self.joints_state = rospy.wait_for_message(
"/fetch/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current /fetch/joint_states READY=>" + str(self.joints_state))
except:
rospy.logerr(
"Current /fetch/joint_states not ready yet, retrying for getting joint_states")
return self.joints_state
def join_state_callback(self, msg):
self.joints_state = msg
def get_current_joints_position(self):
return self.joints_state.position
def init_position(self):
        # We wait as long as it takes to get to the reset pose
self.move_all_joints(joints_pos_array=self.joint_array, time_out=0.0)
def set_travel_arm_pose(self):
self.move_all_joints(joints_pos_array=self.travel_arm_pose)
def wait_for_joints_to_get_there(self, desired_pos_array, error=0.2, timeout=3.0):
time_waiting = 0.0
frequency = 10.0
are_equal = False
is_timeout = False
rate = rospy.Rate(frequency)
rospy.logwarn("Waiting for joint to get to the position")
while not are_equal and not is_timeout and not rospy.is_shutdown():
current_pos = [self.joints_state.position]
are_equal = np.allclose(a=current_pos,
b=desired_pos_array,
atol=error)
rospy.logdebug("are_equal="+str(are_equal))
rospy.logdebug(str(desired_pos_array))
rospy.logdebug(str(current_pos))
rate.sleep()
if timeout == 0.0:
                # We wait as long as it takes (no timeout)
time_waiting += 0.0
else:
time_waiting += 1.0 / frequency
is_timeout = time_waiting > timeout
        rospy.logwarn(
            "Joints are in the desired position with an error of "+str(error))
def wait_publishers_to_be_ready(self):
rate_wait = rospy.Rate(10)
rospy.logdebug("Waiting for Publishers to be ready...")
i = 0
for publisher_obj in self.pub_position_array:
publisher_ready = False
while not publisher_ready:
connection_num = publisher_obj.get_num_connections()
publisher_ready = connection_num > 0
rospy.logdebug("Pub joint NOT Ready=" +
str(self.name_joints[i]))
rate_wait.sleep()
rospy.logdebug("Publisher for joint Ready=" +
str(self.name_joints[i]))
i += 1
def move_all_joints(self, joints_pos_array, time_out=3.0, error=0.2):
        assert len(joints_pos_array) == len(
            self.joint_array), "Lengths don't match"
i = 0
for angle in joints_pos_array:
angle_msg = Float64()
angle_msg.data = angle
# Publish Joint Position
self.pub_position_array[i].publish(angle_msg)
i += 1
self.wait_for_joints_to_get_there(self.joint_array, error=error, timeout=time_out)
self.update_joints(new_joints_pos=joints_pos_array)
def update_joints(self, new_joints_pos):
i = 0
assert len(new_joints_pos) == len(
self.joint_array), "Lengths don't match in Update"
for new_joint_value in new_joints_pos:
upper = self.joint_upper_limits[i]
lower = self.joint_lower_limits[i]
if upper is None or lower is None:
self.joint_array[i] = new_joint_value
else:
if upper >= new_joint_value >= lower:
self.joint_array[i] = new_joint_value
elif new_joint_value < lower:
self.joint_array[i] = lower
else:
self.joint_array[i] = upper
rospy.logdebug("index =" + str(i))
rospy.logdebug("length of name_joints =" +
str(len(self.name_joints[i])))
rospy.logdebug("name_joints=" + str(self.name_joints[i]))
i += 1
def delta_joints(self, delta_array):
"""
delta_array = [bellows_joint, elbow_flex_joint, forearm_roll_joint, head_pan_joint, head_tilt_joint,
l_gripper_finger_joint, r_gripper_finger_joint, shoulder_lift_joint, shoulder_pan_joint,
torso_lift_joint, upperarm_roll_joint, wrist_flex_joint, wrist_roll_joint]
:param delta_array:
:return:
"""
new_pos_array = len(delta_array)*[0.0]
i = 0
for delta in delta_array:
new_pos_array[i] = self.joint_array[i] + delta
i += 1
self.move_all_joints(new_pos_array)
def get_current_angles(self):
return self.joint_array
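# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original source). It assumes a
# running Gazebo simulation that already exposes /fetch/joint_states and
# the /fetch/<joint>_position_controller/command topics used above.
if __name__ == "__main__":
    rospy.init_node("fetch_simple_move_demo", log_level=rospy.INFO)
    fetch_mover = FetchSimpleMove()
    # Send the arm to the hard-coded init pose (all joints at 0.0)
    fetch_mover.init_position()
    # Apply a small increment to every joint; the value order follows
    # self.name_joints, as documented in delta_joints()
    small_step = [0.05] * len(fetch_mover.name_joints)
    fetch_mover.delta_joints(small_step)
    rospy.loginfo("Current joint targets: " + str(fetch_mover.get_current_angles()))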
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~wamv~wamv_nav_twosets_buoys.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import wamv_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class WamvNavTwoSetsBuoysEnv(wamv_env.WamvEnv):
def __init__(self):
"""
        Make the WAM-V learn how to move straight from the starting point
to a desired point inside the designed corridor.
http://robotx.org/images/files/RobotX_2018_Task_Summary.pdf
Demonstrate Navigation Control
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/wamv/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="robotx_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/wamv/config",
yaml_file_name="wamv_nav_twosets_buoys.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(WamvNavTwoSetsBuoysEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
rospy.logdebug("Start WamvNavTwoSetsBuoysEnv INIT...")
number_actions = rospy.get_param('/wamv/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
# Actions and Observations
self.propeller_high_speed = rospy.get_param('/wamv/propeller_high_speed')
self.propeller_low_speed = rospy.get_param('/wamv/propeller_low_speed')
self.max_angular_speed = rospy.get_param('/wamv/max_angular_speed')
self.max_distance_from_des_point = rospy.get_param('/wamv/max_distance_from_des_point')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/wamv/desired_point/x")
self.desired_point.y = rospy.get_param("/wamv/desired_point/y")
self.desired_point.z = rospy.get_param("/wamv/desired_point/z")
self.desired_point_epsilon = rospy.get_param("/wamv/desired_point_epsilon")
self.work_space_x_max = rospy.get_param("/wamv/work_space/x_max")
self.work_space_x_min = rospy.get_param("/wamv/work_space/x_min")
self.work_space_y_max = rospy.get_param("/wamv/work_space/y_max")
self.work_space_y_min = rospy.get_param("/wamv/work_space/y_min")
self.dec_obs = rospy.get_param("/wamv/number_decimals_precision_obs")
# We place the Maximum and minimum values of observations
high = numpy.array([self.work_space_x_max,
self.work_space_y_max,
1.57,
1.57,
3.14,
self.propeller_high_speed,
self.propeller_high_speed,
self.max_angular_speed,
self.max_distance_from_des_point
])
low = numpy.array([ self.work_space_x_min,
self.work_space_y_min,
-1*1.57,
-1*1.57,
-1*3.14,
-1*self.propeller_high_speed,
-1*self.propeller_high_speed,
-1*self.max_angular_speed,
0.0
])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.done_reward =rospy.get_param("/wamv/done_reward")
self.closer_to_point_reward = rospy.get_param("/wamv/closer_to_point_reward")
self.cumulated_steps = 0.0
rospy.logdebug("END WamvNavTwoSetsBuoysEnv INIT...")
def _set_init_pose(self):
"""
        Sets the two propellers' speed to 0.0 and waits for the time_sleep
to allow the action to be executed
"""
right_propeller_speed = 0.0
left_propeller_speed = 0.0
self.set_propellers_speed( right_propeller_speed,
left_propeller_speed,
time_sleep=1.0)
return True
def _init_env_variables(self):
"""
        Inits the variables that need to be initialised each time we reset, at the start
        of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # We get the initial pose to measure the distance from the desired point.
odom = self.get_odom()
current_position = Vector3()
current_position.x = odom.pose.pose.position.x
current_position.y = odom.pose.pose.position.y
self.previous_distance_from_des_point = self.get_distance_from_desired_point(current_position)
def _set_action(self, action):
"""
        It sets the propeller speeds of the WAM-V
        based on the action integer given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
right_propeller_speed = 0.0
left_propeller_speed = 0.0
if action == 0: # Go Forwards
right_propeller_speed = self.propeller_high_speed
left_propeller_speed = self.propeller_high_speed
elif action == 1: # Go BackWards
right_propeller_speed = -1*self.propeller_high_speed
left_propeller_speed = -1*self.propeller_high_speed
elif action == 2: # Turn Left
right_propeller_speed = self.propeller_high_speed
left_propeller_speed = -1*self.propeller_high_speed
elif action == 3: # Turn Right
right_propeller_speed = -1*self.propeller_high_speed
left_propeller_speed = self.propeller_high_speed
# We tell wamv the propeller speeds
self.set_propellers_speed( right_propeller_speed,
left_propeller_speed,
time_sleep=1.0)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
WamvEnv API DOCS.
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
odom = self.get_odom()
base_position = odom.pose.pose.position
base_orientation_quat = odom.pose.pose.orientation
base_roll, base_pitch, base_yaw = self.get_orientation_euler(base_orientation_quat)
base_speed_linear = odom.twist.twist.linear
base_speed_angular_yaw = odom.twist.twist.angular.z
distance_from_desired_point = self.get_distance_from_desired_point(base_position)
observation = []
observation.append(round(base_position.x,self.dec_obs))
observation.append(round(base_position.y,self.dec_obs))
observation.append(round(base_roll,self.dec_obs))
observation.append(round(base_pitch,self.dec_obs))
observation.append(round(base_yaw,self.dec_obs))
observation.append(round(base_speed_linear.x,self.dec_obs))
observation.append(round(base_speed_linear.y,self.dec_obs))
observation.append(round(base_speed_angular_yaw,self.dec_obs))
observation.append(round(distance_from_desired_point,self.dec_obs))
return observation
def _is_done(self, observations):
"""
We consider the episode done if:
        1) The WAM-V is outside the workspace
2) It got to the desired point
"""
distance_from_desired_point = observations[8]
current_position = Vector3()
current_position.x = observations[0]
current_position.y = observations[1]
is_inside_corridor = self.is_inside_workspace(current_position)
has_reached_des_point = self.is_in_desired_position(current_position, self.desired_point_epsilon)
done = not(is_inside_corridor) or has_reached_des_point
return done
def _compute_reward(self, observations, done):
"""
        We base the reward on whether the episode is done or not, and on
        whether the distance to the desired point has decreased or not
:return:
"""
        # We only consider the plane; the fluctuation in z is due mainly to the waves
current_position = Point()
current_position.x = observations[0]
current_position.y = observations[1]
distance_from_des_point = self.get_distance_from_desired_point(current_position)
distance_difference = distance_from_des_point - self.previous_distance_from_des_point
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_point_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
reward = -1*self.closer_to_point_reward
else:
if self.is_in_desired_position(current_position, self.desired_point_epsilon):
reward = self.done_reward
else:
reward = -1*self.done_reward
self.previous_distance_from_des_point = distance_from_des_point
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
        It returns True if the current position is close to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
rospy.logdebug("###### IS DESIRED POS ? ######")
rospy.logdebug("current_position"+str(current_position))
rospy.logdebug("x_pos_plus"+str(x_pos_plus)+",x_pos_minus="+str(x_pos_minus))
rospy.logdebug("y_pos_plus"+str(y_pos_plus)+",y_pos_minus="+str(y_pos_minus))
rospy.logdebug("x_pos_are_close"+str(x_pos_are_close))
rospy.logdebug("y_pos_are_close"+str(y_pos_are_close))
rospy.logdebug("is_in_desired_pos"+str(is_in_desired_pos))
rospy.logdebug("############")
return is_in_desired_pos
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
        :param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_orientation_euler(self, quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def is_inside_workspace(self,current_position):
"""
Check if the Wamv is inside the Workspace defined
"""
is_inside = False
rospy.logwarn("##### INSIDE WORK SPACE? #######")
rospy.logwarn("XYZ current_position"+str(current_position))
rospy.logwarn("work_space_x_max"+str(self.work_space_x_max)+",work_space_x_min="+str(self.work_space_x_min))
rospy.logwarn("work_space_y_max"+str(self.work_space_y_max)+",work_space_y_min="+str(self.work_space_y_min))
rospy.logwarn("############")
if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:
if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:
is_inside = True
return is_inside
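# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original source). It assumes
# roscore is up and that the /wamv/ros_ws_abspath parameter is already on
# the parameter server and points at a valid catkin workspace; the
# remaining /wamv/* parameters are loaded from the yaml file inside
# __init__.
if __name__ == "__main__":
    rospy.init_node("wamv_task_env_demo", anonymous=True, log_level=rospy.WARN)
    env = WamvNavTwoSetsBuoysEnv()
    observation = env.reset()
    for _ in range(10):
        action = env.action_space.sample()  # random propeller command
        observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()
    env.close()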
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~husarion~husarion_get_to_position_turtlebot_playground.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import husarion_env
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Point
from tf.transformations import euler_from_quaternion
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class HusarionGetToPosTurtleBotPlayGroundEnv(husarion_env.HusarionEnv):
def __init__(self):
"""
        This Task Env is designed for having the Husarion in the Husarion world:
        a closed room with columns.
It will learn how to move around without crashing.
"""
# Launch the Task Simulated-Environment
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/husarion/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="rosbot_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/husarion/config",
yaml_file_name="husarion_get_to_position_turtlebot_playground.yaml")
# Only variable needed to be set here
number_actions = rospy.get_param('/husarion/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
# Actions and Observations
self.init_linear_forward_speed = rospy.get_param(
'/husarion/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'/husarion/init_linear_turn_speed')
self.linear_forward_speed = rospy.get_param(
'/husarion/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/husarion/linear_turn_speed')
self.angular_speed = rospy.get_param('/husarion/angular_speed')
self.new_ranges = rospy.get_param('/husarion/new_ranges')
self.max_laser_value = rospy.get_param('/husarion/max_laser_value')
self.min_laser_value = rospy.get_param('/husarion/min_laser_value')
self.work_space_x_max = rospy.get_param("/husarion/work_space/x_max")
self.work_space_x_min = rospy.get_param("/husarion/work_space/x_min")
self.work_space_y_max = rospy.get_param("/husarion/work_space/y_max")
self.work_space_y_min = rospy.get_param("/husarion/work_space/y_min")
# Get Desired Point to Get
self.desired_position = Point()
self.desired_position.x = rospy.get_param("/husarion/desired_pose/x")
self.desired_position.y = rospy.get_param("/husarion/desired_pose/y")
self.precision = rospy.get_param('/husarion/precision')
self.precision_epsilon = 1.0 / (10.0 * self.precision)
self.move_base_precision = rospy.get_param(
'/husarion/move_base_precision')
# We create the arrays for the laser readings
# We also create the arrays for the odometry readings
# We join them toeguether.
# Here we will add any init functions prior to starting the MyRobotEnv
super(HusarionGetToPosTurtleBotPlayGroundEnv,
self).__init__(ros_ws_abspath)
laser_scan = self._check_laser_scan_ready()
        # Integer division so that numpy.full() receives an integer shape
        num_laser_readings = len(laser_scan.ranges) // self.new_ranges
high_laser = numpy.full((num_laser_readings), self.max_laser_value)
low_laser = numpy.full((num_laser_readings), self.min_laser_value)
# We place the Maximum and minimum values of the X,Y and YAW of the odometry
        # The odometry yaw can be any value in the circumference.
high_odometry = numpy.array([self.work_space_x_max,
self.work_space_y_max,
3.14])
low_odometry = numpy.array([self.work_space_x_min,
self.work_space_y_min,
-1*3.14])
# Now we fetch the max and min of the Desired Position in 2D XY
        # We use exactly the same limits as the workspace, simply because it makes no sense
        # to consider points outside the workspace
high_des_pos = numpy.array([self.work_space_x_max,
self.work_space_y_max
])
low_des_pos = numpy.array([self.work_space_x_min,
self.work_space_y_min
])
# We join both arrays
high = numpy.concatenate([high_laser, high_odometry, high_des_pos])
low = numpy.concatenate([low_laser, low_odometry, low_des_pos])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.closer_to_point_reward = rospy.get_param(
"/husarion/closer_to_point_reward")
self.alive_reward = rospy.get_param("/husarion/alive_reward")
self.end_episode_points = rospy.get_param(
"/husarion/end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/rosbot/laser/scan_filtered', LaserScan, queue_size=1)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=self.move_base_precision,
update_rate=10)
return True
def _init_env_variables(self):
"""
        Inits the variables that need to be initialised each time we reset, at the start
        of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
self.index = 0
odometry = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(
odometry.pose.pose.position, self.desired_position)
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the Husarion
        based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
        # We convert the actions to speed movements to send to the parent class HusarionEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
last_action = "TURN_RIGHT"
elif action == 3: # BACKWARDS
linear_speed = self.linear_forward_speed
angular_speed = 0.0
last_action = "BACKWARDS"
# We tell Husarion the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed,
epsilon=self.move_base_precision, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action) +
", ACTION="+str(last_action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which Variables we have access to, we need to read the
HusarionEnv API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_laser_scan = self.discretize_scan_observation(laser_scan,
self.new_ranges
)
        # We get the odometry so that the Husarion knows where it is.
odometry = self.get_odom()
x_position = odometry.pose.pose.position.x
y_position = odometry.pose.pose.position.y
# We get the orientation of the cube in RPY
roll, pitch, yaw = self.get_orientation_euler()
# We round to only two decimals to avoid very big Observation space
# We only want the X and Y position and the Yaw
odometry_array = [round(x_position, 1),
round(y_position, 1),
round(yaw, 1)]
# We fetch also the desired position because it conditions the learning
        # It also makes it dynamic, because we can change the desired position and the
# learning will be able to adapt.
desired_position = [round(self.desired_position.x, 1),
round(self.desired_position.y, 1)]
# We concatenate all the lists.
observations = discretized_laser_scan + odometry_array + desired_position
rospy.logwarn("Observations==>"+str(observations))
rospy.logwarn("END Get Observation ==>")
return observations
def _is_done(self, observations):
"""
We consider that the episode has finished when:
        1) Husarion has moved outside the workspace defined.
2) Husarion is too close to an object
3) Husarion has reached the desired position
"""
# We fetch data through the observations
        # It's all of the array except the last five elements, which are the X, Y, Yaw odometry and the X, Y desired position
laser_readings = observations[:-5]
current_position = Point()
current_position.x = observations[-5]
current_position.y = observations[-4]
current_position.z = 0.0
desired_position = Point()
desired_position.x = observations[-2]
desired_position.y = observations[-1]
desired_position.z = 0.0
rospy.logwarn("is DONE? laser_readings=" + str(laser_readings))
rospy.logwarn("is DONE? current_position=" + str(current_position))
rospy.logwarn("is DONE? desired_position=" + str(desired_position))
too_close_to_object = self.check_husarion_has_crashed(laser_readings)
inside_workspace = self.check_inside_workspace(current_position)
reached_des_pos = self.check_reached_desired_position(current_position,
desired_position,
self.precision_epsilon)
is_done = too_close_to_object or not(
inside_workspace) or reached_des_pos
rospy.logwarn("####################")
rospy.logwarn("too_close_to_object=" + str(too_close_to_object))
rospy.logwarn("inside_workspace=" + str(inside_workspace))
rospy.logwarn("reached_des_pos=" + str(reached_des_pos))
rospy.logwarn("is_done=" + str(is_done))
rospy.logwarn("######## END DONE ##")
return is_done
def _compute_reward(self, observations, done):
"""
We will reward the following behaviours:
1) The distance to the desired point has increase from last step
2) The robot has reached the desired point
We will penalise the following behaviours:
1) Ending the episode without reaching the desired pos. That means it has crashed
or it has gone outside the workspace
"""
laser_readings = observations[:-5]
current_position = Point()
current_position.x = observations[-5]
current_position.y = observations[-4]
current_position.z = 0.0
desired_position = Point()
desired_position.x = observations[-2]
desired_position.y = observations[-1]
desired_position.z = 0.0
distance_from_des_point = self.get_distance_from_desired_point(
current_position, desired_position)
distance_difference = distance_from_des_point - \
self.previous_distance_from_des_point
rospy.logwarn("current_position=" + str(current_position))
rospy.logwarn("desired_point=" + str(desired_position))
rospy.logwarn("total_distance_from_des_point=" +
str(self.previous_distance_from_des_point))
rospy.logwarn("distance_from_des_point=" +
str(distance_from_des_point))
rospy.logwarn("distance_difference=" + str(distance_difference))
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_point_reward
else:
reward = self.alive_reward
else:
reached_des_pos = self.check_reached_desired_position(current_position,
desired_position,
self.precision_epsilon)
if reached_des_pos:
reward = self.end_episode_points
rospy.logwarn(
"GOT TO DESIRED POINT ; DONE, reward=" + str(reward))
else:
reward = -1*self.end_episode_points
rospy.logerr(
"SOMETHING WENT WRONG ; DONE, reward=" + str(reward))
self.previous_distance_from_des_point = distance_from_des_point
rospy.logwarn("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def update_desired_pos(self, new_position):
"""
With this method you can change the desired position that you want
        the Husarion to be at, and which is initially set through rosparams loaded through
        a yaml file possibly.
:new_position: Type Point, because we only value the position.
"""
self.desired_position.x = new_position.x
self.desired_position.y = new_position.y
def discretize_scan_observation(self, data, new_ranges):
"""
        Discards all the laser readings whose index is not a multiple of
        len(data.ranges)/new_ranges, keeping roughly new_ranges readings.
"""
discretized_ranges = []
mod = len(data.ranges)/new_ranges
filtered_range = []
rospy.logdebug("data=" + str(data))
rospy.logdebug("new_ranges=" + str(new_ranges))
rospy.logdebug("mod=" + str(mod))
nan_value = (self.min_laser_value + self.min_laser_value) / 2.0
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
rospy.logerr("Infinite Value=" + str(item) +
"Assigning Max value")
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
rospy.logerr("Nan Value=" + str(item) +
"Assigning MIN value")
discretized_ranges.append(self.min_laser_value)
else:
# We clamp the laser readings
if item > self.max_laser_value:
rospy.logwarn("Item Bigger Than MAX, CLAMPING=>" +
str(item)+", MAX="+str(self.max_laser_value))
discretized_ranges.append(
round(self.max_laser_value, 1))
elif item < self.min_laser_value:
rospy.logwarn("Item smaller Than MIN, CLAMPING=>" +
str(item)+", MIN="+str(self.min_laser_value))
discretized_ranges.append(
round(self.min_laser_value, 1))
else:
rospy.logwarn(
"Normal Item, no processing=>" + str(item))
discretized_ranges.append(round(item, 1))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.0)
rospy.logwarn(
">>>>>>>>>>>>>>>>>>>>>>discretized_ranges=>" + str(discretized_ranges))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=filtered_range)
return discretized_ranges
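    # Worked example of the discretisation above (illustrative; the
    # numbers are assumptions, not from the source): with 720 laser rays
    # and new_ranges = 72, mod = 10, so only every 10th ray is kept in
    # discretized_ranges (72 values in total), while filtered_range keeps
    # the full 720-element layout with 0.0 in the discarded slots so the
    # filtered scan can be republished with the original scan metadata.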
def get_orientation_euler(self):
# We convert from quaternions to euler
orientation_list = [self.odom.pose.pose.orientation.x,
self.odom.pose.pose.orientation.y,
self.odom.pose.pose.orientation.z,
self.odom.pose.pose.orientation.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def get_distance_from_desired_point(self, current_position, desired_position):
"""
Calculates the distance from the current position to the desired point
:param current_position:
:param desired_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
desired_position)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def check_husarion_has_crashed(self, laser_readings):
"""
        Based on the laser readings we check if any laser reading distance is below
        the minimum acceptable distance.
"""
husarion_has_crashed = False
for laser_distance in laser_readings:
rospy.logwarn("laser_distance==>"+str(laser_distance))
if laser_distance == self.min_laser_value:
husarion_has_crashed = True
rospy.logwarn("HAS CRASHED==>"+str(laser_distance) +
", min="+str(self.min_laser_value))
break
elif laser_distance < self.min_laser_value:
rospy.logerr("Value of laser shouldnt be lower than min==>" +
str(laser_distance)+", min="+str(self.min_laser_value))
elif laser_distance > self.max_laser_value:
rospy.logerr("Value of laser shouldnt be higher than max==>" +
str(laser_distance)+", max="+str(self.min_laser_value))
return husarion_has_crashed
def check_inside_workspace(self, current_position):
"""
We check that the current position is inside the given workspace.
"""
is_inside = False
rospy.logwarn("##### INSIDE WORK SPACE? #######")
rospy.logwarn("XYZ current_position"+str(current_position))
rospy.logwarn("work_space_x_max"+str(self.work_space_x_max) +
",work_space_x_min="+str(self.work_space_x_min))
rospy.logwarn("work_space_y_max"+str(self.work_space_y_max) +
",work_space_y_min="+str(self.work_space_y_min))
rospy.logwarn("############")
if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:
if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:
is_inside = True
return is_inside
def check_reached_desired_position(self, current_position, desired_position, epsilon=0.1):
"""
        It returns True if the current position is close to the desired position
"""
is_in_desired_pos = False
x_pos_plus = desired_position.x + epsilon
x_pos_minus = desired_position.x - epsilon
y_pos_plus = desired_position.y + epsilon
y_pos_minus = desired_position.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (
x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (
y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
rospy.logdebug("###### IS DESIRED POS ? ######")
rospy.logdebug("epsilon==>"+str(epsilon))
rospy.logdebug("current_position"+str(current_position))
rospy.logdebug("x_pos_plus"+str(x_pos_plus) +
",x_pos_minus="+str(x_pos_minus))
rospy.logdebug("y_pos_plus"+str(y_pos_plus) +
",y_pos_minus="+str(y_pos_minus))
rospy.logdebug("x_pos_are_close"+str(x_pos_are_close))
rospy.logdebug("y_pos_are_close"+str(y_pos_are_close))
rospy.logdebug("is_in_desired_pos"+str(is_in_desired_pos))
rospy.logdebug("############")
return is_in_desired_pos
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
length_range = len(laser_original_data.ranges)
length_intensities = len(laser_original_data.intensities)
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = "chassis"
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
laser_filtered_object.ranges.append(item)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
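# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original source). It assumes the
# /husarion/ros_ws_abspath parameter is already set and points at a valid
# catkin workspace; the remaining /husarion/* parameters are loaded from
# the yaml file inside __init__.
if __name__ == "__main__":
    rospy.init_node("husarion_task_env_demo", anonymous=True, log_level=rospy.WARN)
    env = HusarionGetToPosTurtleBotPlayGroundEnv()
    # The goal can be changed at runtime; update_desired_pos() only reads
    # the x and y fields of the Point it receives.
    new_goal = Point()
    new_goal.x = 1.5
    new_goal.y = -0.5
    env.update_desired_pos(new_goal)
    observation = env.reset()
    observation, reward, done, info = env.step(env.action_space.sample())
    env.close()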
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~shadow_tc_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
from sensor_msgs.msg import Imu
from sensor_msgs.msg import JointState
from smart_grasping_sandbox.smart_grasper import SmartGrasper
from moveit_msgs.msg import PlanningScene
from openai_ros.openai_ros_common import ROSLauncher
class ShadowTcEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all ShadowTcEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new ShadowTcEnv environment.
        To check any topic we need to have the simulations running; to do that we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /imu/data
* /joint_states
Actuators Topic List:
* As actuator we will use a class SmartGrasper to interface.
We use smart_grasping_sandbox smart_grasper.py, to move and get the pose
of the ball and the tool tip.
Args:
"""
rospy.logdebug("Start ShadowTcEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="shadow_gazebo",
launch_file_name="put_shadow_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ShadowTcEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="NO_RESET_SIM")
rospy.logdebug("ShadowTcEnv unpause...")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_systems_ready()
rospy.Subscriber("/imu/data", Imu, self._imu_callback)
rospy.Subscriber("/joint_states", JointState, self._joints_state_callback)
#rospy.Subscriber('/planning_scene', PlanningScene, self._planning_scene_callback)
self._setup_smart_grasper()
self.gazebo.pauseSim()
rospy.logdebug("Finished ShadowTcEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("ShadowTcEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END ShadowTcEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_imu_ready()
self._check_joint_states_ready()
#self._check_planning_scene_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /imu/data to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/imu/data", Imu, timeout=5.0)
rospy.logdebug("Current/imu/data READY=>")
except:
rospy.logerr("Current /imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_joint_states_ready(self):
self.joint_states = None
rospy.logdebug("Waiting for /joint_states to be READY...")
while self.joint_states is None and not rospy.is_shutdown():
try:
self.joint_states = rospy.wait_for_message("/joint_states", JointState, timeout=1.0)
rospy.logdebug("Current /joint_states READY=>")
except:
rospy.logerr("Current /joint_states not ready yet, retrying for getting joint_states")
return self.joint_states
def _check_planning_scene_ready(self):
self.planning_scene = None
rospy.logdebug("Waiting for /planning_scene to be READY...")
while self.planning_scene is None and not rospy.is_shutdown():
try:
self.planning_scene = rospy.wait_for_message('/planning_scene', PlanningScene, timeout=1.0)
rospy.logdebug("Current /planning_scene READY=>")
except:
rospy.logerr("Current /planning_scene not ready yet, retrying for getting planning_scene")
return self.planning_scene
def _imu_callback(self, data):
self.imu = data
def _joints_state_callback(self, data):
self.joint_states = data
def _planning_scene_callback(self, data):
self.planning_scene = data
def _setup_tf_listener(self):
"""
        Sets up the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_smart_grasper(self):
"""
Setup of the movement system.
:return:
"""
rospy.logdebug("START _setup_smart_grasper")
# We need to tell it to not start a node
self.sgs = SmartGrasper(init_ros_node=False)
rospy.logdebug("END _setup_smart_grasper")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def open_hand(self):
"""
When called it opens robots hand
"""
self.sgs.open_hand()
def close_hand(self):
"""
When called it closes robots hand
"""
self.sgs.close_hand()
def get_ball_pose(self):
"""
Get Ball Pose
return: Ball Pose in the World frame
        We unpause and pause the simulation because this call is a service call.
        This means that if the simulation is NOT
        running, it won't get the ball's position information.
"""
rospy.logdebug("START get_ball_pose ==>")
self.gazebo.unpauseSim()
ball_pose = self.sgs.get_object_pose()
self.gazebo.pauseSim()
rospy.logdebug("ball_pose ==>"+str(ball_pose))
rospy.logdebug("STOP get_ball_pose ==>")
return ball_pose
def get_tip_pose(self):
"""
Returns the pose of the tip of the TCP
        We unpause and pause the simulation because this call is a service call.
        This means that if the simulation is NOT
        running, it won't get the TCP's position information.
"""
rospy.logdebug("START get_tip_pose ==>")
self.gazebo.unpauseSim()
tcp_pose = self.sgs.get_tip_pose()
self.gazebo.pauseSim()
rospy.logdebug("END get_tip_pose ==>")
return tcp_pose
def move_tcp_world_frame(self, desired_pose):
"""
Moves the Tool tip TCP to the pose given
Its relative pose to world frame
:param: desired_pose: Pose where you want the TCP to move next
"""
self.sgs.move_tip_absolute(desired_pose)
def move_tip(self, x=0., y=0., z=0., roll=0., pitch=0., yaw=0.):
"""
        Moves by the given increment in XYZ RPY in the world frame
        Only state the increments of the variables you want to change; the rest will
        not move due to the default values
"""
self.sgs.move_tip(x,y,z,roll,pitch,yaw)
def send_movement_command(self, command, duration=0.2):
"""
        Send a dictionary of joint targets to the arm and hand directly.
To get the available joints names: rostopic echo /joint_states/name -n1
[H1_F1J1, H1_F1J2, H1_F1J3, H1_F2J1, H1_F2J2, H1_F2J3, H1_F3J1, H1_F3J2, H1_F3J3,
elbow_joint, shoulder_lift_joint, shoulder_pan_joint, wrist_1_joint, wrist_2_joint,
wrist_3_joint]
        :param command: a dictionary of joint names associated with a target:
{"H1_F1J1": -1.0, "shoulder_pan_joint": 1.0}
:param duration: the amount of time it will take to get there in seconds. Needs to be bigger than 0.0
"""
self.sgs.send_command(command, duration)
def set_fingers_colision(self, activate=False):
"""
It activates or deactivates the finger collisions.
        It will also trigger publishing the collisions into the planning_scene.
        We pause and unpause for the same exact reason as the get TCP and get ball pose calls:
        being a service, it won't get a response until the simulation is unpaused.
"""
rospy.logdebug("START get_fingers_colision")
self.sgs.check_fingers_collisions(activate)
rospy.logdebug("END get_fingers_colision")
def get_fingers_colision(self, object_collision_name):
"""
Returns the collision of the three fingers
        object_collision_name: Here you state the name of the model to check collisions
        with the fingers.
Objects in sim: cricket_ball__link, drill__link
"""
self.gazebo.unpauseSim()
self.set_fingers_colision(True)
planning_scene = self._check_planning_scene_ready()
self.gazebo.pauseSim()
objects_scene = planning_scene.allowed_collision_matrix.entry_names
colissions_matrix = planning_scene.allowed_collision_matrix.entry_values
        # We look for the Ball object model name in the objects scene list and get the index:
object_collision_name_index = objects_scene.index(object_collision_name)
Finger_Links_Names = [ "H1_F1_base_link",
"H1_F1_link_1",
"H1_F1_link_2",
"H1_F1_palm_link",
"H1_F1_tip",
"H1_F2_base_link",
"H1_F2_link_1",
"H1_F2_link_2",
"H1_F2_palm_link",
"H1_F2_tip",
"H1_F3_base_link",
"H1_F3_link_1",
"H1_F3_link_2",
"H1_F3_palm_link",
"H1_F3_tip"]
# We get all the index of the model links that are part of the fingers
        # We separate by finger so that afterwards it is easy to detect whether there is contact in all of the fingers
finger1_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F1" in var]
finger2_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F2" in var]
finger3_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F3" in var]
# Now we search in the entry_value corresponding to the object to check the collision
# With all the rest of objects.
object_collision_array = colissions_matrix[object_collision_name_index].enabled
        # Is there a collision with Finger 1?
f1_collision = False
for finger_index in finger1_indices:
if object_collision_array[finger_index]:
f1_collision = True
break
        # Is there a collision with Finger 2?
f2_collision = False
for finger_index in finger2_indices:
if object_collision_array[finger_index]:
f2_collision = True
break
        # Is there a collision with Finger 3?
f3_collision = False
for finger_index in finger3_indices:
if object_collision_array[finger_index]:
f3_collision = True
break
finger_collision_dict = {
"f1":f1_collision,
"f2":f2_collision,
"f3":f3_collision
}
return finger_collision_dict
def reset_scene(self):
"""
Restarts the simulation and world objects
"""
self.sgs.reset_world()
def get_imu(self):
return self.imu
def get_joint_states(self):
return self.joint_states | [] |
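# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original source). ShadowTcEnv is
# meant to be subclassed by a Task Environment that implements the virtual
# methods above; the direct helper calls below only illustrate the
# actuation API. They assume a running shadow_gazebo simulation and a
# ros_ws_abspath (the path used here is an assumption) pointing at a
# valid catkin workspace.
if __name__ == "__main__":
    rospy.init_node("shadow_tc_env_demo", anonymous=True, log_level=rospy.WARN)
    env = ShadowTcEnv(ros_ws_abspath="/home/user/simulation_ws")
    env.gazebo.unpauseSim()
    env.open_hand()
    # Raise the tool tip 5 cm in the world frame
    env.move_tip(z=0.05)
    # Joint names follow the list documented in send_movement_command()
    env.send_movement_command({"H1_F1J1": -1.0, "shoulder_pan_joint": 1.0}, duration=1.0)
    rospy.loginfo("Ball pose: " + str(env.get_ball_pose()))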
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~sumit_xl~sumit_xl_room.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import sumitxl_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Point
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class SumitXlRoom(sumitxl_env.SumitXlEnv):
def __init__(self):
"""
        This Task Env is designed for having the sumit_xl in the sumit_xl world:
        a closed room with columns.
It will learn how to move around without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/sumit_xl/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="summit_xl_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/sumit_xl/config",
yaml_file_name="sumit_xl_room.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(SumitXlRoom, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/sumit_xl/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
# Actions and Observations
self.linear_forward_speed = rospy.get_param(
'/sumit_xl/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/sumit_xl/linear_turn_speed')
self.angular_speed = rospy.get_param('/sumit_xl/angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'/sumit_xl/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'/sumit_xl/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/sumit_xl/new_ranges')
self.min_range = rospy.get_param('/sumit_xl/min_range')
self.max_laser_value = rospy.get_param('/sumit_xl/max_laser_value')
self.min_laser_value = rospy.get_param('/sumit_xl/min_laser_value')
self.max_linear_aceleration = rospy.get_param(
'/sumit_xl/max_linear_aceleration')
self.max_distance = rospy.get_param('/sumit_xl/max_distance')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/sumit_xl/desired_pose/x")
self.desired_point.y = rospy.get_param("/sumit_xl/desired_pose/y")
self.desired_point.z = rospy.get_param("/sumit_xl/desired_pose/z")
# We create the arrays for the laser readings
# We also create the arrays for the odometry readings
        # We join them together.
laser_scan = self.get_laser_scan()
        # Integer division so that numpy.full() receives an integer shape
        num_laser_readings = len(laser_scan.ranges) // self.new_ranges
high_laser = numpy.full((num_laser_readings), self.max_laser_value)
low_laser = numpy.full((num_laser_readings), self.min_laser_value)
# We place the Maximum and minimum values of the X,Y and YAW of the odometry
        # The odometry yaw can be any value in the circumference.
high_odometry = numpy.array(
[self.max_distance, self.max_distance, 3.14])
low_odometry = numpy.array(
[-1*self.max_distance, -1*self.max_distance, -1*3.14])
# We join both arrays
high = numpy.concatenate([high_laser, high_odometry])
low = numpy.concatenate([low_laser, low_odometry])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.closer_to_point_reward = rospy.get_param(
"/sumit_xl/closer_to_point_reward")
self.not_ending_point_reward = rospy.get_param(
"/sumit_xl/not_ending_point_reward")
self.end_episode_points = rospy.get_param(
"/sumit_xl/end_episode_points")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
        Inits the variables that need to be initialised each time we reset, at the start
        of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # Set Done to false, because it's calculated asynchronously
self._episode_done = False
odometry = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(
odometry.pose.pose.position)
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the SumitXl
        based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
        # We convert the actions to speed movements to send to the parent class SumitXlEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
"""
elif action == 3: #STOP
linear_speed = 0.0
angular_speed = 0.0
self.last_action = "STOP"
"""
# We tell SumitXL the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed,
epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which Variables we have access to, we need to read the
SumitXlEnv API DOCS
WALL CLOSE LEFT [1, 1, 9, 0, 0, 0, -1.8, 0.46, 0.01]
WALL CLOSE RIGHT [0, 0, 0, 10, 1, 2, -1.8, -0.61, 0.01]
WALL BACK [0, 9, 1, 1, 6, 0, -1.8, -0.54, 1.59]
WALL FRONT [2, 9, 0, 0, 2, 2, -1.83, 0.51, 1.58]
        0 in reality is around: front 0.4, back 0.5, sides 0.3
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_laser_scan = self.discretize_scan_observation(laser_scan,
self.new_ranges
)
# We get the odometry so that SumitXL knows where it is.
odometry = self.get_odom()
x_position = odometry.pose.pose.position.x
y_position = odometry.pose.pose.position.y
# We get the orientation of the cube in RPY
roll, pitch, yaw = self.get_orientation_euler()
# We round to only two decimals to avoid very big Observation space
odometry_array = [round(x_position, 2),
round(y_position, 2),
round(yaw, 2)]
# We only want the X and Y position and the Yaw
observations = discretized_laser_scan + odometry_array
rospy.logdebug("Observations==>"+str(observations))
rospy.logdebug("END Get Observation ==>")
return observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("SumitXl is Too Close to wall==>")
else:
rospy.logdebug("SumitXl is NOT close to a wall ==>")
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(
imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_aceleration:
rospy.logerr("SumitXl Crashed==>"+str(linear_acceleration_magnitude) +
">"+str(self.max_linear_aceleration))
self._episode_done = True
else:
rospy.logerr("DIDNT crash SumitXl ==>"+str(linear_acceleration_magnitude) +
">"+str(self.max_linear_aceleration))
current_position = Point()
current_position.x = observations[-3]
current_position.y = observations[-2]
current_position.z = 0.0
if abs(current_position.x) <= self.max_distance:
if abs(current_position.y) <= self.max_distance:
rospy.logdebug(
"SummitXL Position is OK ==>["+str(current_position.x)+","+str(current_position.y)+"]")
else:
rospy.logerr("SummitXL to Far in Y Pos ==>" +
str(current_position.x))
self._episode_done = True
else:
rospy.logerr("SummitXL to Far in X Pos ==>" +
str(current_position.x))
self._episode_done = True
if self.is_in_desired_position(current_position):
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
"""
We give reward to the robot when it gets closer to the desired point.
        We don't give it constantly, but only if there is an improvement
"""
        # We get the current Position from the observations
current_position = Point()
current_position.x = observations[-3]
current_position.y = observations[-2]
current_position.z = 0.0
distance_from_des_point = self.get_distance_from_desired_point(
current_position)
distance_difference = distance_from_des_point - \
self.previous_distance_from_des_point
rospy.logwarn("current_position=" + str(current_position))
rospy.logwarn("desired_point=" + str(self.desired_point))
rospy.logwarn("total_distance_from_des_point=" +
str(self.previous_distance_from_des_point))
rospy.logwarn("distance_from_des_point=" +
str(distance_from_des_point))
rospy.logwarn("distance_difference=" + str(distance_difference))
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_point_reward
else:
                # If it didn't get closer, we give far fewer points, in theory
# This should trigger the behaviour of moving towards the point
rospy.logwarn("NO DECREASE IN DISTANCE, so much less points")
reward = self.not_ending_point_reward
else:
if self.is_in_desired_position(current_position):
reward = self.end_episode_points
rospy.logwarn(
"GOT TO DESIRED POINT ; DONE, reward=" + str(reward))
else:
reward = -1*self.end_episode_points
rospy.logerr(
"SOMETHING WENT WRONG ; DONE, reward=" + str(reward))
self.previous_distance_from_des_point = distance_from_des_point
rospy.logwarn("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_scan_observation(self, data, new_ranges):
"""
        Discards all the laser readings whose index is not a multiple of
        len(data.ranges)/new_ranges, keeping roughly new_ranges readings.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
rospy.logdebug("data=" + str(data))
rospy.logdebug("new_ranges=" + str(new_ranges))
rospy.logdebug("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logdebug("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
return discretized_ranges
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the Vector3 given.
        This is useful for reading imu accelerations and knowing if there has been
        a crash
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
def get_orientation_euler(self):
# We convert from quaternions to euler
orientation_list = [self.odom.pose.pose.orientation.x,
self.odom.pose.pose.orientation.y,
self.odom.pose.pose.orientation.z,
self.odom.pose.pose.orientation.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
        :param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def is_in_desired_position(self, current_position, epsilon=0.05):
"""
        It returns True if the current position is close to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (
x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (
y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
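    # In other words, the goal counts as reached when the robot lies inside an axis-aligned
    # square of half-width epsilon (0.05 by default) centred on desired_point in the x-y plane.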
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~moving_cube~one_disk_walk.py | import rospy
import numpy
import math
from gym import spaces
from openai_ros.robot_envs import cube_single_disk_env
from geometry_msgs.msg import Point
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class MovingCubeOneDiskWalkEnv(cube_single_disk_env.CubeSingleDiskEnv):
def __init__(self):
# Launch the Task Simulated-Environment
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/moving_cube/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name = "moving_cube_description",
launch_file_name = "start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest( rospackage_name = "openai_ros",
rel_path_from_package_to_file = "src/openai_ros/task_envs/moving_cube/config",
yaml_file_name = "one_disk_walk.yaml")
# Only variable needed to be set here
number_actions = rospy.get_param('/moving_cube/n_actions')
self.action_space = spaces.Discrete(number_actions)
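        # The discrete actions are mapped in _set_action below:
        # 0 roll forwards, 1 roll backwards, 2 stop, 3 increment speed, 4 decrement speed.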
#number_observations = rospy.get_param('/moving_cube/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.roll_speed_fixed_value = rospy.get_param('/moving_cube/roll_speed_fixed_value')
self.roll_speed_increment_value = rospy.get_param('/moving_cube/roll_speed_increment_value')
self.max_distance = rospy.get_param('/moving_cube/max_distance')
max_roll = 2 * math.pi
self.max_pitch_angle = rospy.get_param('/moving_cube/max_pitch_angle')
self.max_y_linear_speed = rospy.get_param('/moving_cube/max_y_linear_speed')
self.max_yaw_angle = rospy.get_param('/moving_cube/max_yaw_angle')
high = numpy.array([
self.roll_speed_fixed_value,
self.max_distance,
max_roll,
self.max_pitch_angle,
self.max_y_linear_speed,
self.max_y_linear_speed,
])
self.observation_space = spaces.Box(-high, high)
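        # Bounds are in the same order as the observation built in _get_obs():
        # [disk roll velocity, y distance, roll, pitch, y linear speed, yaw].
        # Note that the last bound reuses max_y_linear_speed rather than max_yaw_angle.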
rospy.logwarn("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logwarn("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
        # Variables that we retrieve through the param server, loaded when the training launch file starts.
self.init_roll_vel = rospy.get_param("/moving_cube/init_roll_vel")
# Get Observations
self.start_point = Point()
self.start_point.x = rospy.get_param("/moving_cube/init_cube_pose/x")
self.start_point.y = rospy.get_param("/moving_cube/init_cube_pose/y")
self.start_point.z = rospy.get_param("/moving_cube/init_cube_pose/z")
# Rewards
self.move_distance_reward_weight = rospy.get_param("/moving_cube/move_distance_reward_weight")
self.y_linear_speed_reward_weight = rospy.get_param("/moving_cube/y_linear_speed_reward_weight")
self.y_axis_angle_reward_weight = rospy.get_param("/moving_cube/y_axis_angle_reward_weight")
self.end_episode_points = rospy.get_param("/moving_cube/end_episode_points")
self.roll_reward_weight = rospy.get_param("/moving_cube/roll_reward_weight")
self.cumulated_steps = 0.0
# Here we will add any init functions prior to starting the MyRobotEnv
super(MovingCubeOneDiskWalkEnv, self).__init__(ros_ws_abspath)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_joints(self.init_roll_vel)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.total_distance_moved = 0.0
self.current_y_distance = self.get_y_dir_distance_from_start_point(self.start_point)
self.pre_roll_angle = 0
self.roll_turn_speed = rospy.get_param('/moving_cube/init_roll_vel')
# For Info Purposes
self.cumulated_reward = 0.0
#self.cumulated_steps = 0.0
def _set_action(self, action):
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0:# Move Speed Wheel Forwards
self.roll_turn_speed = self.roll_speed_fixed_value
elif action == 1:# Move Speed Wheel Backwards
self.roll_turn_speed = -1*self.roll_speed_fixed_value
elif action == 2:# Stop Speed Wheel
self.roll_turn_speed = 0.0
elif action == 3:# Increment Speed
self.roll_turn_speed += self.roll_speed_increment_value
elif action == 4:# Decrement Speed
self.roll_turn_speed -= self.roll_speed_increment_value
# We clamp Values to maximum
rospy.logdebug("roll_turn_speed before clamp=="+str(self.roll_turn_speed))
self.roll_turn_speed = numpy.clip(self.roll_turn_speed,
-1*self.roll_speed_fixed_value,
self.roll_speed_fixed_value)
rospy.logdebug("roll_turn_speed after clamp==" + str(self.roll_turn_speed))
# We tell the OneDiskCube to spin the RollDisk at the selected speed
self.move_joints(self.roll_turn_speed)
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
MyCubeSingleDiskEnv API DOCS
:return:
"""
# We get the orientation of the cube in RPY
roll, pitch, yaw = self.get_orientation_euler()
# We get the distance from the origin
#distance = self.get_distance_from_start_point(self.start_point)
y_distance = self.get_y_dir_distance_from_start_point(self.start_point)
# We get the current speed of the Roll Disk
current_disk_roll_vel = self.get_roll_velocity()
# We get the linear speed in the y axis
y_linear_speed = self.get_y_linear_speed()
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1)
]
rospy.logdebug("Observations==>"+str(cube_observations))
return cube_observations
def _is_done(self, observations):
pitch_angle = observations[3]
yaw_angle = observations[5]
if abs(pitch_angle) > self.max_pitch_angle:
rospy.logerr("WRONG Cube Pitch Orientation==>" + str(pitch_angle))
done = True
else:
rospy.logdebug("Cube Pitch Orientation Ok==>" + str(pitch_angle))
if abs(yaw_angle) > self.max_yaw_angle:
rospy.logerr("WRONG Cube Yaw Orientation==>" + str(yaw_angle))
done = True
else:
rospy.logdebug("Cube Yaw Orientation Ok==>" + str(yaw_angle))
done = False
return done
def _compute_reward(self, observations, done):
if not done:
y_distance_now = observations[1]
delta_distance = y_distance_now - self.current_y_distance
rospy.logdebug("y_distance_now=" + str(y_distance_now)+", current_y_distance=" + str(self.current_y_distance))
rospy.logdebug("delta_distance=" + str(delta_distance))
reward_distance = delta_distance * self.move_distance_reward_weight
self.current_y_distance = y_distance_now
y_linear_speed = observations[4]
rospy.logdebug("y_linear_speed=" + str(y_linear_speed))
reward_y_axis_speed = y_linear_speed * self.y_linear_speed_reward_weight
# Negative Reward for yaw different from zero.
yaw_angle = observations[5]
rospy.logdebug("yaw_angle=" + str(yaw_angle))
# Worst yaw is 90 and 270 degrees, best 0 and 180. We use sin function for giving reward.
sin_yaw_angle = math.sin(yaw_angle)
rospy.logdebug("sin_yaw_angle=" + str(sin_yaw_angle))
reward_y_axis_angle = -1 * abs(sin_yaw_angle) * self.y_axis_angle_reward_weight
#Rolling reward
roll_angle = observations[2]
roll_reward = math.sin(abs(self.pre_roll_angle - roll_angle)) * self.roll_reward_weight
self.pre_roll_angle = roll_angle
            # We are not interested in decimals of the reward; they don't give any advantage.
reward = round(reward_distance, 0) + round(reward_y_axis_speed, 0) + round(reward_y_axis_angle, 0)
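            # Note that roll_reward is computed above but is not added into this sum; only the
            # distance, y-axis speed and yaw terms contribute to the returned reward.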
rospy.logdebug("reward_distance=" + str(reward_distance))
rospy.logdebug("reward_y_axis_speed=" + str(reward_y_axis_speed))
rospy.logdebug("reward_y_axis_angle=" + str(reward_y_axis_angle))
rospy.logdebug("reward=" + str(reward))
else:
reward = -1*self.end_episode_points
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def get_y_dir_distance_from_start_point(self, start_point):
"""
        Calculates the distance between the given point and the current position
        reported by odometry; in this case, the increase or decrease in y.
:param start_point:
:return:
"""
y_dist_dir = self.odom.pose.pose.position.y - start_point.y
return y_dist_dir
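    # e.g. if the cube started at y=0.0 and odometry now reports y=1.3, this returns 1.3.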
def get_distance_from_start_point(self, start_point):
"""
Calculates the distance from the given point and the current position
given by odometry
:param start_point:
:return:
"""
distance = self.get_distance_from_point(start_point,
self.odom.pose.pose.position)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
        Given two Vector3/Point objects, returns the Euclidean distance between them.
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_orientation_euler(self):
# We convert from quaternions to euler
orientation_list = [self.odom.pose.pose.orientation.x,
self.odom.pose.pose.orientation.y,
self.odom.pose.pose.orientation.z,
self.odom.pose.pose.orientation.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def get_roll_velocity(self):
# We get the current joint roll velocity
roll_vel = self.joints.velocity[0]
return roll_vel
def get_y_linear_speed(self):
        # We get the current linear speed of the base in the y axis from the odometry
y_linear_speed = self.odom.twist.twist.linear.y
return y_linear_speed
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~husarion_env.py | import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class HusarionEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new HusarionEnv environment.
        Husarion doesn't use controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        To check any topic, the simulation needs to be running, so we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for
        simulations that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the
        simulation and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /rosbot/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logerr(">>>>>>>>>>>Start HusarionEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="rosbot_gazebo",
launch_file_name="put_rosbot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
rospy.logerr(">>>>>>>>>>>ROSLAUCHER DONE HusarionEnv INIT...")
# Internal Vars
        # Doesn't have any accessible controllers
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(HusarionEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
rospy.Subscriber("/camera/depth/image_raw", Image,
self._camera_depth_image_raw_callback)
rospy.Subscriber("/camera/depth/points", PointCloud2,
self._camera_depth_points_callback)
rospy.Subscriber("/camera/rgb/image_raw", Image,
self._camera_rgb_image_raw_callback)
rospy.Subscriber("/rosbot/laser/scan", LaserScan,
self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished HusarionEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
self._check_camera_depth_image_raw_ready()
self._check_camera_depth_points_ready()
self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr(
"Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message(
"/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr(
"Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message(
"/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr(
"Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message(
"/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr(
"Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /rosbot/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message(
"/rosbot/laser/scan", LaserScan, timeout=1.0)
rospy.logdebug("Current /rosbot/laser/scan READY=>")
except:
rospy.logerr(
"Current /rosbot/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("Husarion Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate)
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, angular_speed_noise=0.005):
"""
        We wait for the given cmd_vel twist to be reached by the robot, reading
        from the odometry. Bear in mind that the angular speed won't be controlled,
        because it is too imprecise; we only check whether it is moving or not,
        within the angular_speed_noise fluctuations it has.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
angular_speed_is = self.check_angular_speed_dir(
angular_speed, angular_speed_noise)
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) +
", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) +
", angular_speed asked=[" + str(angular_speed)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (
odom_linear_vel > linear_speed_minus)
odom_angular_speed_is = self.check_angular_speed_dir(
odom_angular_vel, angular_speed_noise)
            # We check if it's turning in the same direction or has stopped
angular_vel_are_close = (angular_speed_is == odom_angular_speed_is)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
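    # Illustrative example: asking for linear.x=0.3 with epsilon=0.05 makes the loop exit once
    # odometry reports a linear velocity in (0.25, 0.35] and the sign of the measured angular
    # velocity (within angular_speed_noise) matches the sign of the requested turn.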
def check_angular_speed_dir(self, angular_speed, angular_speed_noise):
"""
        It states whether the speed is zero, positive or negative.
"""
# We check if odom angular speed is positive or negative or "zero"
if (-angular_speed_noise < angular_speed <= angular_speed_noise):
angular_speed_is = 0
elif angular_speed > angular_speed_noise:
angular_speed_is = 1
elif angular_speed <= angular_speed_noise:
angular_speed_is = -1
else:
angular_speed_is = 0
rospy.logerr("Angular Speed has wrong value=="+str(angular_speed))
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot3~turtlebot3_world.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import turtlebot3_env
from gym.envs.registration import register
from geometry_msgs.msg import Vector3
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot3WorldEnv(turtlebot3_env.TurtleBot3Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot3 in the turtlebot3 world
closed room with columns.
It will learn how to move around without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/turtlebot3/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="turtlebot3_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot3/config",
yaml_file_name="turtlebot3_world.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot3WorldEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot3/n_actions')
self.action_space = spaces.Discrete(number_actions)
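        # The discrete actions are mapped in _set_action below: 0 forward, 1 turn left, 2 turn right.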
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot3/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.linear_forward_speed = rospy.get_param('/turtlebot3/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot3/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot3/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/turtlebot3/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot3/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/turtlebot3/new_ranges')
self.min_range = rospy.get_param('/turtlebot3/min_range')
self.max_laser_value = rospy.get_param('/turtlebot3/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot3/min_laser_value')
self.max_linear_aceleration = rospy.get_param('/turtlebot3/max_linear_aceleration')
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
laser_scan = self.get_laser_scan()
        num_laser_readings = len(laser_scan.ranges)//self.new_ranges  # integer count, used as the array shape below
high = numpy.full((num_laser_readings), self.max_laser_value)
low = numpy.full((num_laser_readings), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot3/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot3/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot3/end_episode_points")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
def _set_action(self, action):
"""
        This set action will set the linear and angular speed of the TurtleBot3
        based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_observations = self.discretize_scan_observation( laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("TurtleBot2 is Too Close to wall==>")
else:
rospy.logwarn("TurtleBot2 is NOT close to a wall ==>")
# Now we check if it has crashed based on the imu
imu_data = self.get_imu()
linear_acceleration_magnitude = self.get_vector_magnitude(imu_data.linear_acceleration)
if linear_acceleration_magnitude > self.max_linear_aceleration:
rospy.logerr("TurtleBot2 Crashed==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_aceleration))
self._episode_done = True
else:
rospy.logerr("DIDNT crash TurtleBot2 ==>"+str(linear_acceleration_magnitude)+">"+str(self.max_linear_aceleration))
return self._episode_done
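    # Illustrative example of the crash check: with max_linear_aceleration=7.0, an impact that
    # produces an IMU linear acceleration of (8, 2, 1) has magnitude ~8.3 and ends the episode.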
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
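    # Illustrative example (the reward values come from the task yaml): with forwards_reward=5,
    # turn_reward=1 and end_episode_points=200, a non-crashing forward step earns +5, a turning
    # step earns +1, and a step that ends the episode earns -200.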
# Internal TaskEnv Methods
def discretize_scan_observation(self,data,new_ranges):
"""
        Discards all the laser readings whose index is not a multiple of the
        new_ranges value.
"""
self._episode_done = False
discretized_ranges = []
        mod = len(data.ranges)//new_ranges  # integer step between the readings we keep
rospy.logdebug("data=" + str(data))
rospy.logdebug("new_ranges=" + str(new_ranges))
rospy.logdebug("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logdebug("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def get_vector_magnitude(self, vector):
"""
        It calculates the magnitude of the given Vector3.
        This is useful for reading IMU accelerations and knowing if there has
        been a crash.
:return:
"""
contact_force_np = numpy.array((vector.x, vector.y, vector.z))
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~hopper_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from gazebo_msgs.msg import ContactsState
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Quaternion, Vector3
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from openai_ros.openai_ros_common import ROSLauncher
class HopperEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all HopperEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new HopperEnv environment.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /drone/down_camera/image_raw: RGB Camera facing down.
* /drone/front_camera/image_raw: RGB Camera facing front.
* /drone/imu: IMU of the drone giving acceleration and orientation relative to world.
* /drone/sonar: Sonar readings facing front
* /drone/gt_pose: Get position and orientation in Global space
* /drone/gt_vel: Get the linear velocity , the angular doesnt record anything.
Actuators Topic List:
* /cmd_vel: Move the Drone Around when you have taken off.
* /drone/takeoff: Publish into it to take off
* /drone/land: Publish to make ParrotDrone Land
Args:
"""
rospy.logdebug("Start HopperEnv INIT...")
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="legged_robots_sims",
launch_file_name="put_hopper_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = ['joint_state_controller',
'haa_joint_position_controller',
'hfe_joint_position_controller',
'kfe_joint_position_controller']
# It doesnt use namespace
self.robot_name_space = "monoped"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(HopperEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("HopperEnv unpause1...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
self._check_all_systems_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
# We use the IMU for orientation and linearacceleration detection
rospy.Subscriber("/monoped/imu/data", Imu, self._imu_callback)
# We use it to get the contact force, to know if its in the air or stumping too hard.
rospy.Subscriber("/lowerleg_contactsensor_state",
ContactsState, self._contact_callback)
# We use it to get the joints positions and calculate the reward associated to it
rospy.Subscriber("/monoped/joint_states", JointState,
self._joints_state_callback)
self.publishers_array = []
self._haa_joint_pub = rospy.Publisher(
'/monoped/haa_joint_position_controller/command', Float64, queue_size=1)
self._hfe_joint_pub = rospy.Publisher(
'/monoped/hfe_joint_position_controller/command', Float64, queue_size=1)
self._kfe_joint_pub = rospy.Publisher(
'/monoped/kfe_joint_position_controller/command', Float64, queue_size=1)
self.publishers_array.append(self._haa_joint_pub)
self.publishers_array.append(self._hfe_joint_pub)
self.publishers_array.append(self._kfe_joint_pub)
self._check_all_publishers_ready()
self.gazebo.pauseSim()
rospy.logdebug("Finished HopperEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("HopperEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END HopperEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
self._check_imu_ready()
self._check_lowerleg_contactsensor_state_ready()
self._check_joint_states_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/odom", Odometry, timeout=1.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr(
"Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /monoped/imu/data to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message(
"/monoped/imu/data", Imu, timeout=1.0)
rospy.logdebug("Current /monoped/imu/data READY=>")
except:
rospy.logerr(
"Current /monoped/imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_lowerleg_contactsensor_state_ready(self):
self.lowerleg_contactsensor_state = None
rospy.logdebug(
"Waiting for /lowerleg_contactsensor_state to be READY...")
while self.lowerleg_contactsensor_state is None and not rospy.is_shutdown():
try:
self.lowerleg_contactsensor_state = rospy.wait_for_message(
"/lowerleg_contactsensor_state", ContactsState, timeout=1.0)
rospy.logdebug("Current /lowerleg_contactsensor_state READY=>")
except:
rospy.logerr(
"Current /lowerleg_contactsensor_state not ready yet, retrying for getting lowerleg_contactsensor_state")
return self.lowerleg_contactsensor_state
def _check_joint_states_ready(self):
self.joint_states = None
rospy.logdebug("Waiting for /monoped/joint_states to be READY...")
while self.joint_states is None and not rospy.is_shutdown():
try:
self.joint_states = rospy.wait_for_message(
"/monoped/joint_states", JointState, timeout=1.0)
rospy.logdebug("Current /monoped/joint_states READY=>")
except:
rospy.logerr(
"Current /monoped/joint_states not ready yet, retrying for getting joint_states")
return self.joint_states
def _odom_callback(self, data):
self.odom = data
def _imu_callback(self, data):
self.imu = data
def _contact_callback(self, data):
self.lowerleg_contactsensor_state = data
def _joints_state_callback(self, data):
self.joint_states = data
def _check_all_publishers_ready(self):
"""
Checks that all the publishers are working
:return:
"""
rospy.logdebug("START ALL SENSORS READY")
for publisher_object in self.publishers_array:
self._check_pub_connection(publisher_object)
rospy.logdebug("ALL SENSORS READY")
def _check_pub_connection(self, publisher_object):
rate = rospy.Rate(10) # 10hz
while publisher_object.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to publisher_object yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("publisher_object Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, joints_array, epsilon=0.05, update_rate=10, time_sleep=0.05, check_position=True):
"""
It will move the Hopper Joints to the given Joint_Array values
"""
i = 0
for publisher_object in self.publishers_array:
joint_value = Float64()
joint_value.data = joints_array[i]
rospy.logdebug("JointsPos>>"+str(joint_value))
publisher_object.publish(joint_value)
i += 1
if check_position:
self.wait_time_for_execute_movement(
joints_array, epsilon, update_rate)
else:
self.wait_time_movement_hard(time_sleep=time_sleep)
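    # Illustrative usage: self.move_joints([0.0, 0.5, -0.5]) publishes one Float64 per joint
    # command topic (haa, hfe, kfe in that order) and, because check_position defaults to True,
    # blocks until the joint_states report those positions within epsilon.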
def wait_time_for_execute_movement(self, joints_array, epsilon, update_rate):
"""
We wait until Joints are where we asked them to be based on the joints_states
:param joints_array:Joints Values in radians of each of the three joints of hopper leg.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the joint_states.
:return:
"""
rospy.logdebug("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
rospy.logdebug("Desired JointsState>>" + str(joints_array))
rospy.logdebug("epsilon>>" + str(epsilon))
while not rospy.is_shutdown():
current_joint_states = self._check_joint_states_ready()
values_to_check = [current_joint_states.position[0],
current_joint_states.position[1],
current_joint_states.position[2]]
vel_values_are_close = self.check_array_similar(
joints_array, values_to_check, epsilon)
if vel_values_are_close:
rospy.logdebug("Reached JointStates!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logdebug("END wait_until_jointstate_achieved...")
return delta_time
def wait_time_movement_hard(self, time_sleep):
"""
Hard Wait to avoid inconsistencies in times executing actions
"""
rospy.logdebug("Test Wait="+str(time_sleep))
time.sleep(time_sleep)
def check_array_similar(self, ref_value_array, check_value_array, epsilon):
"""
        It checks if the check_value is similar to the ref_value.
"""
rospy.logdebug("ref_value_array="+str(ref_value_array))
rospy.logdebug("check_value_array="+str(check_value_array))
return numpy.allclose(ref_value_array, check_value_array, atol=epsilon)
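    # e.g. ref [0.0, 0.5, -0.5] and measured [0.01, 0.49, -0.52] count as similar for epsilon=0.05.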
def get_odom(self):
return self.odom
def get_imu(self):
return self.imu
def get_lowerleg_contactsensor_state(self):
return self.lowerleg_contactsensor_state
def get_joint_states(self):
return self.joint_states
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~sumitxl_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Imu
from sensor_msgs.msg import NavSatFix
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from geometry_msgs.msg import Vector3Stamped
from openai_ros.openai_ros_common import ROSLauncher
class SumitXlEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SumitXlEnv environment.
        Execute a call to service /summit_xl/controller_manager/list_controllers
        to get the list of controllers to be restarted.
        To check any topic, the simulation needs to be running, so we need to do two things:
        1) Unpause the simulation: without that, the stream of data doesn't flow. This is for
        simulations that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the
        simulation and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /gps/fix : GPS position Data
* /gps/fix_velocity: GPS Speed data
* /hokuyo_base/scan: Laser Readings
        * /imu/data: Inertial Measurement Unit data, orientation and acceleration
* /orbbec_astra/depth/image_raw
* /orbbec_astra/depth/points
* /orbbec_astra/rgb/image_raw
* /odom: Odometry
Actuators Topic List: /cmd_vel,
Args:
"""
print("Start SumitXlEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="summit_xl_gazebo",
launch_file_name="put_summit_xl_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
print("SPAWN DONE SumitXlEnv INIT...")
# Internal Vars
# Doesnt have any accesibles
self.controllers_list = ["joint_read_state_controller",
"joint_blw_velocity_controller",
"joint_brw_velocity_controller",
"joint_flw_velocity_controller",
"joint_frw_velocity_controller"
]
# It doesnt use namespace
self.robot_name_space = "summit_xl"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
print("START OpenAIROS CORE SumitXlEnv INIT...")
super(SumitXlEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
print("DONE OpenAIROS CORE SumitXlEnv INIT...")
self.gazebo.unpauseSim()
# TODO: See why this doesnt work in Summit XL
# self.controllers_object.reset_controllers()
print("START CHECK SENSORS SumitXlEnv INIT...")
self._check_all_sensors_ready()
print("DONE CHECK SENSORS SumitXlEnv INIT...")
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/gps/fix", NavSatFix, self._gps_fix_callback)
rospy.Subscriber("/gps/fix_velocity", Vector3Stamped,
self._gps_fix_velocity_callback)
rospy.Subscriber("/orbbec_astra/depth/image_raw", Image,
self._camera_depth_image_raw_callback)
rospy.Subscriber("/orbbec_astra/depth/points",
PointCloud2, self._camera_depth_points_callback)
rospy.Subscriber("/orbbec_astra/rgb/image_raw", Image,
self._camera_rgb_image_raw_callback)
rospy.Subscriber("/hokuyo_base/scan", LaserScan,
self._laser_scan_callback)
rospy.Subscriber("/imu/data", Imu, self._imu_callback)
rospy.Subscriber("/odom", Odometry, self._odom_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
print("START CHECK PUBLISHERS SumitXlEnv INIT...")
self._check_publishers_connection()
print("DONE CHECK PUBLISHERS SumitXlEnv INIT...")
self.gazebo.pauseSim()
print("Finished SumitXlEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
print("START ALL SENSORS READY")
self._check_gps_fix_ready()
self._check_gps_fix_velocity_ready()
self._check_camera_depth_image_raw_ready()
self._check_camera_depth_points_ready()
self._check_camera_rgb_image_raw_ready()
self._check_odom_ready()
self._check_imu_ready()
self._check_laser_scan_ready()
print("ALL SENSORS READY")
def _check_gps_fix_ready(self):
self.gps_fix = None
print("Waiting for /gps/fix to be READY...")
while self.gps_fix is None and not rospy.is_shutdown():
try:
self.gps_fix = rospy.wait_for_message(
"/gps/fix", NavSatFix, timeout=5.0)
print("Current /gps/fix READY=>")
except:
rospy.logerr(
"Current /gps/fix not ready yet, retrying for getting odom")
return self.gps_fix
def _check_gps_fix_velocity_ready(self):
self.gps_fix_velocity = None
print("Waiting for /gps/fix_velocity to be READY...")
while self.gps_fix_velocity is None and not rospy.is_shutdown():
try:
self.gps_fix_velocity = rospy.wait_for_message(
"/gps/fix_velocity", Vector3Stamped, timeout=5.0)
print("Current /gps/fix_velocity READY=>")
except:
rospy.logerr(
"Current /gps/fix_velocity not ready yet, retrying for getting odom")
return self.gps_fix_velocity
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
print("Waiting for /orbbec_astra/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message(
"/orbbec_astra/depth/image_raw", Image, timeout=5.0)
print("Current /orbbec_astra/depth/image_raw READY=>")
except:
rospy.logerr(
"Current /orbbec_astra/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
print("Waiting for /orbbec_astra/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message(
"/orbbec_astra/depth/points", PointCloud2, timeout=10.0)
print("Current /orbbec_astra/depth/points READY=>")
except:
rospy.logerr(
"Current /orbbec_astra/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
print("Waiting for /orbbec_astra/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message(
"/orbbec_astra/rgb/image_raw", Image, timeout=5.0)
print("Current /orbbec_astra/rgb/image_raw READY=>")
except:
rospy.logerr(
"Current /orbbec_astra/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_odom_ready(self):
self.odom = None
print("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/odom", Odometry, timeout=0.5)
print("Current /odom READY=>")
except:
rospy.logerr(
"Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_imu_ready(self):
self.imu = None
print("Waiting for /imu/data to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message(
"/imu/data", Imu, timeout=5.0)
print("Current /imu/data READY=>")
except:
rospy.logerr(
"Current /imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_laser_scan_ready(self):
self.laser_scan = None
print("Waiting for /hokuyo_base/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message(
"/hokuyo_base/scan", LaserScan, timeout=1.0)
print("Current /hokuyo_base/scan READY=>")
except:
rospy.logerr(
"Current /hokuyo_base/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _gps_fix_callback(self, data):
self.gps_fix = data
def _gps_fix_velocity_callback(self, data):
self.gps_fix_velocity = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _odom_callback(self, data):
self.odom = data
def _imu_callback(self, data):
self.imu = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
print("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
print("_cmd_vel_pub Publisher Connected")
print("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
print("SumitXL Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
print("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
print("Desired Twist Cmd>>" + str(cmd_vel_value))
print("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
# Correcting factor for angular based on observations
angular_factor = 2.0
epsilon_angular_factor = 6.0
angular_speed_plus = (angular_factor * angular_speed) + \
(epsilon * epsilon_angular_factor)
angular_speed_minus = (angular_factor * angular_speed) - \
(epsilon * epsilon_angular_factor)
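        # Illustrative example: asking for angular.z=0.3 with epsilon=0.05 gives an accepted
        # odometry band of (0.3, 0.9], i.e. 2*0.3 +/- 6*0.05, to absorb the observed doubling and
        # fluctuation of the angular reading described in the loop below.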
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
"""
            When asking to turn, e.g. angular.z = 0.3, the odometry reports about 0.6.
            The linear speed runs OK. The angular also fluctuates a lot, due to turning through friction.
            Therefore we have to multiply the angular by 2 and broaden the
            accepted error for the angular speed.
"""
odom_angular_vel = current_odometry.twist.twist.angular.z
print("Linear VEL=" + str(odom_linear_vel) +
", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
print("Angular VEL=" + str(odom_angular_vel) +
", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (
odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (
odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
print("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
print("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
print("[Wait Time=" + str(delta_time)+"]")
print("END wait_until_twist_achieved...")
return delta_time
def get_gps_fix(self):
return self.gps_fix
def get_gps_fix_velocity(self):
return self.gps_fix_velocity
def get_laser_scan(self):
return self.laser_scan
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_odom(self):
return self.odom
def get_imu(self):
return self.imu
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~competition_rl~scripts~multiAgentChallengeTaskEnv.py | from gym import spaces
from openai_ros import multi_robot_gazebo_env
from gym.envs.registration import register
import rospy
import numpy
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.msg import ModelState
import robot_instance
# The path is __init__.py of openai_ros, where we import the MovingCubeOneDiskWalkEnv directly
timestep_limit_per_episode = 1000 # Can be any Value
register(
id='AiChallengeEnv-v1',
entry_point='multiAgentChallengeTaskEnv:MultiAgentAiChallengeEnv',
max_episode_steps = timestep_limit_per_episode,
)
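# Illustrative usage (assumes this module is importable on the PYTHONPATH): after importing it so
# that the register() call above has run, the environment can be created with
# gym.make('AiChallengeEnv-v1').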
class MultiAgentAiChallengeEnv(multi_robot_gazebo_env.MultiRobotGazeboEnv):
def __init__(self, **kwargs):
self.robot0 = robot_instance.AiRobot(robot_ns="jackal0",
init_x=rospy.get_param('start_training/jackal0/x'),
init_y=rospy.get_param('start_training/jackal0/y'),
init_yaw=rospy.get_param('start_training/jackal0/yaw'))
# print('robot1 spawn')
# self.robot1 = challengeTaskEnv.AiChallengeEnv(robot_ns="jackal1", init_x=0.7, init_y=4.7, init_yaw=0)
# print('robot2 spawn')
# self.robot2 = challengeTaskEnv.AiChallengeEnv(robot_ns="jackal2", init_x=7.5, init_y=4.6, init_yaw=3.14)
# print('robot3 spawn')
# self.robot3 = challengeTaskEnv.AiChallengeEnv(robot_ns="jackal3", init_x=7.5, init_y=0.6, init_yaw=3.14)
super(MultiAgentAiChallengeEnv, self).__init__(start_init_physics_parameters=True)
def _set_init_gazebo_pose(self):
"""Sets the Robot in its init pose in Gazebo
"""
self.robot0._set_init_gazebo_pose()
# self.robot1._set_init_pose()
# self.robot2._set_init_pose()
def _set_init_ros(self):
"""Sets the Robot in its init pose in ROS
"""
self.robot0._set_init_ros()
self.robot0._set_init_ros()
# self.robot1._set_init_pose()
# self.robot2._set_init_pose()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
# pass
self.robot0._check_all_systems_ready()
def _get_obs(self):
"""Returns the observation.
"""
pass
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
pass
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
self.robot0._set_action(action)
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
done=False
return done
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
reward=0
return reward
# def _env_setup(self, initial_qpos):
# """Initial configuration of the environment. Can be used to configure initial state
# and extract information from the simulation.
# """
# raise NotImplementedError()
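

# --- Illustrative usage sketch (not part of the original source) ---
# A minimal example of how this task environment could be driven once a
# roscore/Gazebo simulation is running and the start_training/jackal0/{x,y,yaw}
# parameters are on the parameter server; those assumptions are not guaranteed
# by this file alone. A separate training script would normally import this
# module and call gym.make('AiChallengeEnv-v1') instead of instantiating the
# class directly.
if __name__ == "__main__":
    env = MultiAgentAiChallengeEnv()
    # reset()/step() are assumed to be provided by the parent
    # multi_robot_gazebo_env class in the standard openai_ros fashion; action
    # semantics are delegated to robot0._set_action(), so 0 here is purely a
    # placeholder action.
    observation = env.reset()
    for _ in range(5):
        observation, reward, done, info = env.step(0)
        if done:
            break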
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_postion base_velocity pole angle pole velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
        # check if the base is still within the allowed range (min_base_pose_x, max_base_pose_x)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
        Gives more points for every step the pole stays upright. The reward is computed
        from the observations passed in, so it uses the same data as the other callbacks.
        :return: reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
                rospy.logwarn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
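

# --- Illustrative sketch (not part of the original source) ---
# A pure-Python mirror of the discrete action mapping used in _set_action()
# above, useful for checking the action encoding without ROS or Gazebo.
# The helper name and the example pos_step value are assumptions made for
# this sketch only.
def _demo_apply_cartpole_action(pos, action, pos_step):
    """Return the new target position list after applying a discrete action."""
    new_pos = list(pos)
    if action == 0:      # LEFT
        new_pos[0] -= pos_step
    elif action == 1:    # RIGHT
        new_pos[0] += pos_step
    elif action == 2:    # LEFT BIG
        new_pos[0] -= pos_step * 10
    elif action == 3:    # RIGHT BIG
        new_pos[0] += pos_step * 10
    return new_pos


if __name__ == "__main__":
    # Example: starting at 0.0, a RIGHT BIG action with pos_step=0.016 moves the target to 0.16.
    print(_demo_apply_cartpole_action([0.0], 3, 0.016))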
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~hopper~hopper_stay_up.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import hopper_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class HopperStayUpEnv(hopper_env.HopperEnv):
def __init__(self):
"""
        Make Hopper learn how to stay up indefinitely
"""
# Only variable needed to be set here
"""
For this version, we consider 6 actions
1-2) Increment/Decrement haa_joint
3-4) Increment/Decrement hfe_joint
5-6) Increment/Decrement kfe_joint
"""
rospy.logdebug("Start HopperStayUpEnv INIT...")
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/monoped/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="legged_robots_sims",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/hopper/config",
yaml_file_name="hopper_stay_up.yaml")
number_actions = rospy.get_param('/monoped/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
# Actions and Observations
self.init_joint_states = Vector3()
self.init_joint_states.x = rospy.get_param(
'/monoped/init_joint_states/haa_joint')
self.init_joint_states.y = rospy.get_param(
'/monoped/init_joint_states/hfe_joint')
self.init_joint_states.z = rospy.get_param(
'/monoped/init_joint_states/kfe_joint')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/monoped/desired_point/x")
self.desired_point.y = rospy.get_param("/monoped/desired_point/y")
self.desired_point.z = rospy.get_param("/monoped/desired_point/z")
self.accepted_error_in_des_pos = rospy.get_param(
"/monoped/accepted_error_in_des_pos")
self.desired_yaw = rospy.get_param("/monoped/desired_yaw")
self.joint_increment_value = rospy.get_param(
"/monoped/joint_increment_value")
self.init_move_time = rospy.get_param("/monoped/init_move_time", 1.0)
self.move_time = rospy.get_param("/monoped/move_time", 0.05)
self.check_position = rospy.get_param("/monoped/check_position", True)
self.accepted_joint_error = rospy.get_param(
"/monoped/accepted_joint_error")
self.update_rate = rospy.get_param("/monoped/update_rate")
self.dec_obs = rospy.get_param(
"/monoped/number_decimals_precision_obs")
self.desired_force = rospy.get_param("/monoped/desired_force")
self.max_x_pos = rospy.get_param("/monoped/max_x_pos")
self.max_y_pos = rospy.get_param("/monoped/max_y_pos")
self.min_height = rospy.get_param("/monoped/min_height")
self.max_height = rospy.get_param("/monoped/max_height")
self.distance_from_desired_point_max = rospy.get_param(
"/monoped/distance_from_desired_point_max")
self.max_incl_roll = rospy.get_param("/monoped/max_incl")
self.max_incl_pitch = rospy.get_param("/monoped/max_incl")
self.max_contact_force = rospy.get_param("/monoped/max_contact_force")
self.maximum_haa_joint = rospy.get_param("/monoped/maximum_haa_joint")
self.maximum_hfe_joint = rospy.get_param("/monoped/maximum_hfe_joint")
self.maximum_kfe_joint = rospy.get_param("/monoped/maximum_kfe_joint")
self.min_kfe_joint = rospy.get_param("/monoped/min_kfe_joint")
# We place the Maximum and minimum values of observations
        self.joint_ranges_array = {"maximum_haa_joint": self.maximum_haa_joint,
                                   "minimum_haa_joint": -self.maximum_haa_joint,
                                   "maximum_hfe_joint": self.maximum_hfe_joint,
                                   "minimum_hfe_joint": -self.maximum_hfe_joint,
                                   "maximum_kfe_joint": self.maximum_kfe_joint,
                                   "min_kfe_joint": self.min_kfe_joint
                                   }
high = numpy.array([self.distance_from_desired_point_max,
self.max_incl_roll,
self.max_incl_pitch,
3.14,
self.max_contact_force,
self.maximum_haa_joint,
self.maximum_hfe_joint,
self.maximum_kfe_joint,
self.max_x_pos,
self.max_y_pos,
self.max_height
])
low = numpy.array([0.0,
-1*self.max_incl_roll,
-1*self.max_incl_pitch,
-1*3.14,
0.0,
-1*self.maximum_haa_joint,
-1*self.maximum_hfe_joint,
self.min_kfe_joint,
-1*self.max_x_pos,
-1*self.max_y_pos,
self.min_height
])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.weight_joint_position = rospy.get_param(
"/monoped/rewards_weight/weight_joint_position")
self.weight_contact_force = rospy.get_param(
"/monoped/rewards_weight/weight_contact_force")
self.weight_orientation = rospy.get_param(
"/monoped/rewards_weight/weight_orientation")
self.weight_distance_from_des_point = rospy.get_param(
"/monoped/rewards_weight/weight_distance_from_des_point")
self.alive_reward = rospy.get_param("/monoped/alive_reward")
self.done_reward = rospy.get_param("/monoped/done_reward")
# Here we will add any init functions prior to starting the MyRobotEnv
super(HopperStayUpEnv, self).__init__(ros_ws_abspath)
rospy.logdebug("END HopperStayUpEnv INIT...")
def _set_init_pose(self):
"""
        Sets the robot joints to their initial configuration,
        preparing it to be reset in the world.
"""
joints_array = [self.init_joint_states.x,
self.init_joint_states.y,
self.init_joint_states.z]
self.move_joints(joints_array,
epsilon=self.accepted_joint_error,
update_rate=self.update_rate,
time_sleep=self.init_move_time,
check_position=self.check_position)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # We get the initial pose to measure the distance from the desired point.
odom = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(
odom.pose.pose.position)
def _set_action(self, action):
"""
        It sets the joints of the monoped based on the action integer given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We get current Joints values
joint_states = self.get_joint_states()
joint_states_position = joint_states.position
rospy.logdebug("get_action_to_position>>>"+str(joint_states_position))
action_position = [0.0, 0.0, 0.0]
rospy.logdebug(
"OLD-JOINT-STATE [haa,hfa,kfe]>>>"+str(joint_states_position))
if action == 0: # Increment haa_joint
rospy.logdebug("Increment haa_joint")
action_position[0] = joint_states_position[0] + \
self.joint_increment_value
action_position[1] = joint_states_position[1]
action_position[2] = joint_states_position[2]
elif action == 1: # Decrement haa_joint
rospy.logdebug("Decrement haa_joint")
action_position[0] = joint_states_position[0] - \
self.joint_increment_value
action_position[1] = joint_states_position[1]
action_position[2] = joint_states_position[2]
elif action == 2: # Increment hfe_joint
rospy.logdebug("Increment hfe_joint")
action_position[0] = joint_states_position[0]
action_position[1] = joint_states_position[1] + \
self.joint_increment_value
action_position[2] = joint_states_position[2]
elif action == 3: # Decrement hfe_joint
rospy.logdebug("Decrement hfe_joint")
action_position[0] = joint_states_position[0]
action_position[1] = joint_states_position[1] - \
self.joint_increment_value
action_position[2] = joint_states_position[2]
elif action == 4: # Increment kfe_joint
rospy.logdebug("Increment kfe_joint")
action_position[0] = joint_states_position[0]
action_position[1] = joint_states_position[1]
action_position[2] = joint_states_position[2] + \
self.joint_increment_value
elif action == 5: # Decrement kfe_joint
rospy.logdebug("Decrement kfe_joint")
action_position[0] = joint_states_position[0]
action_position[1] = joint_states_position[1]
action_position[2] = joint_states_position[2] - \
self.joint_increment_value
rospy.logdebug("NEW-JOINT-STATE [haa,hfa,kfe]>>>"+str(action_position))
rospy.logdebug("JOINT-RANGES>>>"+str(self.joint_ranges_array))
rospy.logdebug("START ACTION EXECUTE>>>"+str(action))
# We tell monoped where to place its joints next
self.move_joints(action_position,
epsilon=self.accepted_joint_error,
update_rate=self.update_rate,
time_sleep=self.move_time,
check_position=self.check_position)
rospy.logdebug("END ACTION EXECUTE>>>"+str(action))
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
HopperEnv API DOCS
Returns the state of the robot needed for OpenAI QLearn Algorithm
The state will be defined by an array of the:
1) distance from desired point in meters
        2) The Roll orientation in radians
        3) The Pitch orientation in radians
        4) The Yaw orientation in radians
        5) Force in contact sensor in Newtons
        6-7-8) State of the 3 joints in radians
        9-10) XY position of the base
        11) Height of the base
        observation = [distance_from_desired_point,
                       base_roll,
                       base_pitch,
                       base_yaw,
                       force_magnitude,
                       joint_states_haa,
                       joint_states_hfe,
                       joint_states_kfe,
                       base_position_x,
                       base_position_y,
                       height_base]
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
distance_from_desired_point = self.get_distance_from_desired_point(
self.desired_point)
base_orientation = self.get_base_rpy()
base_roll = base_orientation.x
base_pitch = base_orientation.y
base_yaw = base_orientation.z
force_magnitude = self.get_contact_force_magnitude()
joint_states = self.get_joint_states()
joint_states_haa = joint_states.position[0]
joint_states_hfe = joint_states.position[1]
joint_states_kfe = joint_states.position[2]
odom = self.get_odom()
base_position = odom.pose.pose.position
observation = []
observation.append(round(distance_from_desired_point, self.dec_obs))
observation.append(round(base_roll, self.dec_obs))
observation.append(round(base_pitch, self.dec_obs))
observation.append(round(base_yaw, self.dec_obs))
observation.append(round(force_magnitude, self.dec_obs))
observation.append(round(joint_states_haa, self.dec_obs))
observation.append(round(joint_states_hfe, self.dec_obs))
observation.append(round(joint_states_kfe, self.dec_obs))
observation.append(round(base_position.x, self.dec_obs))
observation.append(round(base_position.y, self.dec_obs))
observation.append(round(base_position.z, self.dec_obs)) # height
return observation
def _is_done(self, observations):
"""
We consider the episode done if:
        1) The Monoped's height is lower than a threshold
2) The Orientation is outside a threshold
"""
height_base = observations[10]
monoped_height_ok = self.monoped_height_ok(height_base)
monoped_orientation_ok = self.monoped_orientation_ok()
done = not(monoped_height_ok and monoped_orientation_ok)
return done
def _compute_reward(self, observations, done):
"""
        The reward depends on whether the episode is done and on the joint positions,
        contact force, orientation and distance from the desired point.
:return:
"""
joints_state_array = observations[5:8]
r1 = self.calculate_reward_joint_position(
joints_state_array, self.weight_joint_position)
# Desired Force in Newtons, taken form idle contact with 9.81 gravity.
force_magnitude = observations[4]
r2 = self.calculate_reward_contact_force(
force_magnitude, self.weight_contact_force)
rpy_array = observations[1:4]
r3 = self.calculate_reward_orientation(
rpy_array, self.weight_orientation)
current_position = Point()
current_position.x = observations[8]
current_position.y = observations[9]
current_position.z = observations[10]
r4 = self.calculate_reward_distance_from_des_point(
current_position, self.weight_distance_from_des_point)
# The sign depend on its function.
total_reward = self.alive_reward - r1 - r2 - r3 - r4
rospy.logdebug("###############")
rospy.logdebug("alive_bonus=" + str(self.alive_reward))
rospy.logdebug("r1 joint_position=" + str(r1))
rospy.logdebug("r2 contact_force=" + str(r2))
rospy.logdebug("r3 orientation=" + str(r3))
rospy.logdebug("r4 distance=" + str(r4))
rospy.logdebug("total_reward=" + str(total_reward))
rospy.logdebug("###############")
return total_reward
# Internal TaskEnv Methods
def is_in_desired_position(self, current_position, epsilon=0.05):
"""
        It returns True if the current position is close to the desired position.
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (
x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (
y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
rospy.logdebug("###### IS DESIRED POS ? ######")
rospy.logdebug("current_position"+str(current_position))
rospy.logdebug("x_pos_plus"+str(x_pos_plus) +
",x_pos_minus="+str(x_pos_minus))
rospy.logdebug("y_pos_plus"+str(y_pos_plus) +
",y_pos_minus="+str(y_pos_minus))
rospy.logdebug("x_pos_are_close"+str(x_pos_are_close))
rospy.logdebug("y_pos_are_close"+str(y_pos_are_close))
rospy.logdebug("is_in_desired_pos"+str(is_in_desired_pos))
rospy.logdebug("############")
return is_in_desired_pos
def is_inside_workspace(self, current_position):
"""
Check if the monoped is inside the Workspace defined
"""
is_inside = False
rospy.logdebug("##### INSIDE WORK SPACE? #######")
rospy.logdebug("XYZ current_position"+str(current_position))
rospy.logdebug("work_space_x_max"+str(self.work_space_x_max) +
",work_space_x_min="+str(self.work_space_x_min))
rospy.logdebug("work_space_y_max"+str(self.work_space_y_max) +
",work_space_y_min="+str(self.work_space_y_min))
rospy.logdebug("work_space_z_max"+str(self.work_space_z_max) +
",work_space_z_min="+str(self.work_space_z_min))
rospy.logdebug("############")
if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:
if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:
if current_position.z > self.work_space_z_min and current_position.z <= self.work_space_z_max:
is_inside = True
return is_inside
def sonar_detected_something_too_close(self, sonar_value):
"""
Detects if there is something too close to the monoped front
"""
rospy.logdebug("##### SONAR TOO CLOSE? #######")
rospy.logdebug("sonar_value"+str(sonar_value) +
",min_sonar_value="+str(self.min_sonar_value))
rospy.logdebug("############")
too_close = sonar_value < self.min_sonar_value
return too_close
def monoped_has_flipped(self, current_orientation):
"""
        Based on the RPY orientation given, states whether the monoped has flipped
"""
has_flipped = True
self.max_roll = rospy.get_param("/monoped/max_roll")
self.max_pitch = rospy.get_param("/monoped/max_pitch")
rospy.logdebug("#### HAS FLIPPED? ########")
rospy.logdebug("RPY current_orientation"+str(current_orientation))
rospy.logdebug("max_roll"+str(self.max_roll) +
",min_roll="+str(-1*self.max_roll))
rospy.logdebug("max_pitch"+str(self.max_pitch) +
",min_pitch="+str(-1*self.max_pitch))
rospy.logdebug("############")
if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:
if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:
has_flipped = False
return has_flipped
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
        :param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
        Given two Vector3/Point objects, returns the Euclidean distance between them.
        :param pstart:
        :param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_orientation_euler(self, quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def get_base_rpy(self):
imu = self.get_imu()
base_orientation = imu.orientation
euler_rpy = Vector3()
euler = euler_from_quaternion([base_orientation.x,
base_orientation.y,
base_orientation.z,
base_orientation.w]
)
euler_rpy.x = euler[0]
euler_rpy.y = euler[1]
euler_rpy.z = euler[2]
return euler_rpy
def get_contact_force_magnitude(self):
"""
        Because the X axis is the one pointing downwards, it will have the
        highest value when touching the floor.
        For a robot with a total mass of 0.55 Kg and gravity of 9.81 m/s**2, Weight = 0.55*9.81 = 5.39 N.
        Falling from around 5 centimetres (a negligible height), we register peaks around
        Fx = 7.08 N
:return:
"""
# We get the Contact Sensor data
lowerleg_contactsensor_state = self.get_lowerleg_contactsensor_state()
# We extract what we need that is only the total_wrench force
contact_force = self.get_contact_force(lowerleg_contactsensor_state)
# We create an array with each component XYZ
contact_force_np = numpy.array(
(contact_force.x, contact_force.y, contact_force.z))
# We calculate the magnitude of the Force Vector, array.
force_magnitude = numpy.linalg.norm(contact_force_np)
return force_magnitude
def get_contact_force(self, lowerleg_contactsensor_state):
"""
/lowerleg_contactsensor_state/states[0]/contact_positions ==> PointContact in World
/lowerleg_contactsensor_state/states[0]/contact_normals ==> NormalContact in World
        ==> wrenches[] is an array of all the contact forces while total_wrench is their total;
        both are relative to the contact link referred to in the sensor.
/lowerleg_contactsensor_state/states[0]/wrenches[]
/lowerleg_contactsensor_state/states[0]/total_wrench
:return:
"""
# We create an empty element , in case there is no contact.
contact_force = Vector3()
for state in lowerleg_contactsensor_state.states:
            contact_force = state.total_wrench.force
return contact_force
def monoped_height_ok(self, height_base):
height_ok = self.min_height <= height_base < self.max_height
return height_ok
def monoped_orientation_ok(self):
orientation_rpy = self.get_base_rpy()
roll_ok = self.max_incl_roll > abs(orientation_rpy.x)
pitch_ok = self.max_incl_pitch > abs(orientation_rpy.y)
orientation_ok = roll_ok and pitch_ok
return orientation_ok
def calculate_reward_joint_position(self, joints_state_array, weight=1.0):
"""
        We calculate the reward based on the joint configuration. The closer to 0 the better.
:return:
"""
acumulated_joint_pos = 0.0
for joint_pos in joints_state_array:
            # Abs to remove sign influence; the direction of the turn doesn't matter.
acumulated_joint_pos += abs(joint_pos)
rospy.logdebug(
"calculate_reward_joint_position>>acumulated_joint_pos=" + str(acumulated_joint_pos))
reward = weight * acumulated_joint_pos
rospy.logdebug(
"calculate_reward_joint_position>>reward=" + str(reward))
return reward
def calculate_reward_contact_force(self, force_magnitude, weight=1.0):
"""
        We calculate the reward based on the contact force.
        The closer to the desired contact force the better.
        Big departures from the desired force give a larger penalty.
        The default desired force (7.08 N) was taken from a reading of the robot touching
        the ground from a negligible height of 5 cm.
:return:
"""
force_displacement = force_magnitude - self.desired_force
rospy.logdebug(
"calculate_reward_contact_force>>force_magnitude=" + str(force_magnitude))
rospy.logdebug(
"calculate_reward_contact_force>>force_displacement=" + str(force_displacement))
# Abs to remove sign
reward = weight * abs(force_displacement)
rospy.logdebug("calculate_reward_contact_force>>reward=" + str(reward))
return reward
def calculate_reward_orientation(self, rpy_array, weight=1.0):
"""
We calculate the reward based on the orientation.
        The closer it is to 0 the better, because it means the robot is upright.
        desired_yaw is the yaw we want the robot to have; to reward a certain
        orientation, this is where to set it.
        :param rpy_array: An array with Roll, Pitch and Yaw in positions 0, 1 and 2 respectively.
:return:
"""
yaw_displacement = rpy_array[2] - self.desired_yaw
acumulated_orientation_displacement = abs(
rpy_array[0]) + abs(rpy_array[1]) + abs(yaw_displacement)
reward = weight * acumulated_orientation_displacement
rospy.logdebug("calculate_reward_orientation>>reward=" + str(reward))
return reward
def calculate_reward_distance_from_des_point(self, current_position, weight=1.0):
"""
We calculate the distance from the desired point.
        The closer the better.
:param weight:
:return:reward
"""
distance = self.get_distance_from_desired_point(current_position)
reward = weight * distance
rospy.logdebug("calculate_reward_orientation>>reward=" + str(reward))
return reward
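

# --- Illustrative sketch (not part of the original source) ---
# A ROS-free mirror of the reward composition performed in _compute_reward()
# above: the alive bonus minus the weighted joint-position, contact-force,
# orientation and distance penalties. The helper name, numeric weights and
# sample values below are assumptions for demonstration only; in the real task
# they come from hopper_stay_up.yaml.
def _demo_hopper_reward(alive_reward, joint_positions, force_magnitude,
                        desired_force, rpy, desired_yaw, distance,
                        w_joint=1.0, w_force=0.1, w_orient=1.0, w_dist=1.0):
    r_joint = w_joint * sum(abs(j) for j in joint_positions)
    r_force = w_force * abs(force_magnitude - desired_force)
    r_orient = w_orient * (abs(rpy[0]) + abs(rpy[1]) + abs(rpy[2] - desired_yaw))
    r_dist = w_dist * distance
    return alive_reward - r_joint - r_force - r_orient - r_dist


if __name__ == "__main__":
    # Example: a perfectly upright robot at the desired point with the desired
    # contact force keeps the full alive bonus.
    print(_demo_hopper_reward(alive_reward=5.0,
                              joint_positions=[0.0, 0.0, 0.0],
                              force_magnitude=7.08, desired_force=7.08,
                              rpy=[0.0, 0.0, 0.0], desired_yaw=0.0,
                              distance=0.0))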
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~cartpole_env.py | #!/usr/bin/env python
import gym
import rospy
import roslaunch
import time
import numpy as np
from gym import utils, spaces
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty
from gym.utils import seeding
from gym.envs.registration import register
import copy
import math
import os
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from std_msgs.msg import Float64
from gazebo_msgs.srv import SetLinkState
from gazebo_msgs.msg import LinkState
from rosgraph_msgs.msg import Clock
from openai_ros import robot_gazebo_env
from openai_ros.openai_ros_common import ROSLauncher
class CartPoleEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(
self, control_type, ros_ws_abspath
):
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="put_cartpole_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
self.publishers_array = []
self._base_pub = rospy.Publisher(
'/cartpole_v0/foot_joint_velocity_controller/command', Float64, queue_size=1)
self._pole_pub = rospy.Publisher(
'/cartpole_v0/pole_joint_velocity_controller/command', Float64, queue_size=1)
self.publishers_array.append(self._base_pub)
self.publishers_array.append(self._pole_pub)
rospy.Subscriber("/cartpole_v0/joint_states",
JointState, self.joints_callback)
self.control_type = control_type
if self.control_type == "velocity":
self.controllers_list = ['joint_state_controller',
'pole_joint_velocity_controller',
'foot_joint_velocity_controller',
]
elif self.control_type == "position":
self.controllers_list = ['joint_state_controller',
'pole_joint_position_controller',
'foot_joint_position_controller',
]
elif self.control_type == "effort":
self.controllers_list = ['joint_state_controller',
'pole_joint_effort_controller',
'foot_joint_effort_controller',
]
self.robot_name_space = "cartpole_v0"
self.reset_controls = True
# Seed the environment
self._seed()
self.steps_beyond_done = None
super(CartPoleEnv, self).__init__(
controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=self.reset_controls
)
def joints_callback(self, data):
self.joints = data
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# RobotEnv methods
# ----------------------------
def _env_setup(self, initial_qpos):
self.init_internal_vars(self.init_pos)
self.set_init_pose()
self.check_all_systems_ready()
def init_internal_vars(self, init_pos_value):
self.pos = [init_pos_value]
self.joints = None
def check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while (self._base_pub.get_num_connections() == 0 and not rospy.is_shutdown()):
rospy.logdebug(
"No susbribers to _base_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_base_pub Publisher Connected")
while (self._pole_pub.get_num_connections() == 0 and not rospy.is_shutdown()):
rospy.logdebug(
"No susbribers to _pole_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_pole_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
def _check_all_systems_ready(self, init=True):
self.base_position = None
while self.base_position is None and not rospy.is_shutdown():
try:
self.base_position = rospy.wait_for_message(
"/cartpole_v0/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current cartpole_v0/joint_states READY=>"+str(self.base_position))
if init:
# We Check all the sensors are in their initial values
positions_ok = all(
abs(i) <= 1.0e-02 for i in self.base_position.position)
velocity_ok = all(
abs(i) <= 1.0e-02 for i in self.base_position.velocity)
efforts_ok = all(
abs(i) <= 1.0e-01 for i in self.base_position.effort)
base_data_ok = positions_ok and velocity_ok and efforts_ok
rospy.logdebug(
"Checking Init Values Ok=>" + str(base_data_ok))
except:
rospy.logerr(
"Current cartpole_v0/joint_states not ready yet, retrying for getting joint_states")
rospy.logdebug("ALL SYSTEMS READY")
def move_joints(self, joints_array):
joint_value = Float64()
joint_value.data = joints_array[0]
rospy.logdebug("Single Base JointsPos>>"+str(joint_value))
self._base_pub.publish(joint_value)
def get_clock_time(self):
self.clock_time = None
while self.clock_time is None and not rospy.is_shutdown():
try:
self.clock_time = rospy.wait_for_message(
"/clock", Clock, timeout=1.0)
rospy.logdebug("Current clock_time READY=>" +
str(self.clock_time))
except:
rospy.logdebug(
"Current clock_time not ready yet, retrying for getting Current clock_time")
return self.clock_time
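

# --- Illustrative sketch (not part of the original source) ---
# A ROS-free mirror of the tolerance check used in _check_all_systems_ready()
# above, which considers the cart-pole "at rest" when every joint position and
# velocity is within 1e-2 and every effort within 1e-1. The helper name and the
# sample readings are assumptions for this sketch only.
def _demo_initial_state_ok(positions, velocities, efforts):
    positions_ok = all(abs(p) <= 1.0e-02 for p in positions)
    velocity_ok = all(abs(v) <= 1.0e-02 for v in velocities)
    efforts_ok = all(abs(e) <= 1.0e-01 for e in efforts)
    return positions_ok and velocity_ok and efforts_ok


if __name__ == "__main__":
    # Example readings for the two joints (foot and pole): still within limits.
    print(_demo_initial_state_ok([0.001, -0.004], [0.0, 0.002], [0.05, -0.02]))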
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~sawyer~learn_to_touch_cube.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import sawyer_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class SawyerTouchCubeEnv(sawyer_env.SawyerEnv):
def __init__(self):
"""
        Make Sawyer learn how to pick up a cube
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/sawyer/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/sawyer/config",
yaml_file_name="learn_to_touch_cube.yaml")
import time
time.sleep(15)
print("STARTING SPAWN ROBOT")
# We execute this one before because there are some functions that this
# TaskEnv uses that use variables from the parent class, like the effort limit fetch.
super(SawyerTouchCubeEnv, self).__init__(ros_ws_abspath)
# Here we will add any init functions prior to starting the MyRobotEnv
# Only variable needed to be set here
rospy.logdebug("Start SawyerTouchCubeEnv INIT...")
number_actions = rospy.get_param('/sawyer/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
self.work_space_x_max = rospy.get_param("/sawyer/work_space/x_max")
self.work_space_x_min = rospy.get_param("/sawyer/work_space/x_min")
self.work_space_y_max = rospy.get_param("/sawyer/work_space/y_max")
self.work_space_y_min = rospy.get_param("/sawyer/work_space/y_min")
self.work_space_z_max = rospy.get_param("/sawyer/work_space/z_max")
self.work_space_z_min = rospy.get_param("/sawyer/work_space/z_min")
self.max_effort = rospy.get_param("/sawyer/max_effort")
self.dec_obs = rospy.get_param("/sawyer/number_decimals_precision_obs")
self.acceptable_distance_to_cube = rospy.get_param(
"/sawyer/acceptable_distance_to_cube")
self.tcp_z_position_min = rospy.get_param("/sawyer/tcp_z_position_min")
# We place the Maximum and minimum values of observations
# TODO: Fill when get_observations is done.
"""
        We suppose that it's all of these:
head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,
right_j2, right_j3, right_j4, right_j5, right_j6
Plus the first three are the block_to_tcp vector
"""
# We fetch the limits of the joinst to get the effort and angle limits
self.joint_limits = self.init_joint_limits()
high = numpy.array([self.work_space_x_max,
self.work_space_y_max,
self.work_space_z_max,
self.joint_limits.position_upper[0],
self.joint_limits.position_upper[1],
self.joint_limits.position_upper[2],
self.joint_limits.position_upper[3],
self.joint_limits.position_upper[4],
self.joint_limits.position_upper[5],
self.joint_limits.position_upper[6],
self.joint_limits.position_upper[7],
self.joint_limits.position_upper[8],
self.joint_limits.position_upper[9]
])
low = numpy.array([self.work_space_x_min,
self.work_space_y_min,
self.work_space_z_min,
self.joint_limits.position_lower[0],
self.joint_limits.position_lower[1],
self.joint_limits.position_lower[2],
self.joint_limits.position_lower[3],
self.joint_limits.position_lower[4],
self.joint_limits.position_lower[5],
self.joint_limits.position_lower[6],
self.joint_limits.position_lower[7],
self.joint_limits.position_lower[8],
self.joint_limits.position_lower[9]
])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.done_reward = rospy.get_param("/sawyer/done_reward")
self.closer_to_block_reward = rospy.get_param(
"/sawyer/closer_to_block_reward")
self.cumulated_steps = 0.0
rospy.logdebug("END SawyerTouchCubeEnv INIT...")
def _set_init_pose(self):
"""
        Sets all the limb joint angles to zero and opens the gripper,
        leaving the arm ready for the start of an episode.
"""
# We set the angles to zero of the limb
self.joints = self.get_limb_joint_names_array()
join_values_array = [0.0]*len(self.joints)
joint_positions_dict_zero = dict(zip(self.joints, join_values_array))
actual_joint_angles_dict = self.get_all_limb_joint_angles()
# We generate the two step movement. Turn Right/Left where you are and then set all to zero
if "right_j0" in actual_joint_angles_dict:
# We turn to the left or to the right based on where the position is to avoid the table.
if actual_joint_angles_dict["right_j0"] >= 0.0:
actual_joint_angles_dict["right_j0"] = 1.57
else:
actual_joint_angles_dict["right_j0"] = -1.57
if "right_j1" in actual_joint_angles_dict:
actual_joint_angles_dict["right_j1"] = actual_joint_angles_dict["right_j1"] - 0.3
self.move_joints_to_angle_blocking(
actual_joint_angles_dict, timeout=15.0, threshold=0.008726646)
self.move_joints_to_angle_blocking(
joint_positions_dict_zero, timeout=15.0, threshold=0.008726646)
# We Open the gripper
self.set_g(action="open")
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # We get the initial pose to measure the distance from the desired point.
translation_tcp_block, rotation_tcp_block = self.get_tf_start_to_end_frames(start_frame_name="block",
end_frame_name="right_electric_gripper_base")
tf_tcp_to_block_vector = Vector3()
tf_tcp_to_block_vector.x = translation_tcp_block[0]
tf_tcp_to_block_vector.y = translation_tcp_block[1]
tf_tcp_to_block_vector.z = translation_tcp_block[2]
self.previous_distance_from_block = self.get_magnitud_tf_tcp_to_block(
tf_tcp_to_block_vector)
self.translation_tcp_world, _ = self.get_tf_start_to_end_frames(start_frame_name="world",
end_frame_name="right_electric_gripper_base")
def _set_action(self, action):
"""
It sets the joints of sawyer based on the action integer given
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
if action == 0: # Increase joint_0
action_id = self.joints[0]+"_increase"
elif action == 1: # Decrease joint_0
action_id = self.joints[0]+"_decrease"
elif action == 2: # Increase joint_1
action_id = self.joints[1]+"_increase"
elif action == 3: # Decrease joint_1
action_id = self.joints[1]+"_decrease"
elif action == 4: # Increase joint_2
action_id = self.joints[2]+"_increase"
elif action == 5: # Decrease joint_2
action_id = self.joints[2]+"_decrease"
elif action == 6: # Increase joint_3
action_id = self.joints[3]+"_increase"
elif action == 7: # Decrease joint_3
action_id = self.joints[3]+"_decrease"
elif action == 8: # Increase joint_4
action_id = self.joints[4]+"_increase"
elif action == 9: # Decrease joint_4
action_id = self.joints[4]+"_decrease"
elif action == 10: # Increase joint_5
action_id = self.joints[5]+"_increase"
elif action == 11: # Decrease joint_5
action_id = self.joints[5]+"_decrease"
elif action == 12: # Increase joint_6
action_id = self.joints[6]+"_increase"
elif action == 13: # Decrease joint_6
action_id = self.joints[6]+"_decrease"
# We tell sawyer the action to perform
self.execute_movement(action_id)
rospy.logdebug("END Set Action ==>"+str(action)+","+str(action_id))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
sawyerEnv API DOCS.
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
# We get the translation of the base of the gripper to the block
translation_tcp_block, _ = self.get_tf_start_to_end_frames(start_frame_name="block",
end_frame_name="right_electric_gripper_base")
translation_tcp_block_round = numpy.around(
translation_tcp_block, decimals=self.dec_obs)
        # We get this data but we don't put it in the observations because it's something internal for evaluation.
        # The order is crucial; get it upside down and it makes no sense.
self.translation_tcp_world, _ = self.get_tf_start_to_end_frames(start_frame_name="world",
end_frame_name="right_electric_gripper_base")
        # Same here: the values are used internally for knowing if done; they won't define the state (although these are left out for performance).
self.joints_efforts_dict = self.get_all_limb_joint_efforts()
rospy.logdebug("JOINTS EFFORTS DICT OBSERVATION METHOD==>" +
str(self.joints_efforts_dict))
"""
        We suppose that it's all of these:
head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,
right_j2, right_j3, right_j4, right_j5, right_j6
"""
        joints_angles_array = list(self.get_all_limb_joint_angles().values())
joints_angles_array_round = numpy.around(
joints_angles_array, decimals=self.dec_obs)
# We concatenate the two rounded arrays and convert them to standard Python list
observation = numpy.concatenate(
(translation_tcp_block_round, joints_angles_array_round), axis=0).tolist()
return observation
def _is_done(self, observations):
"""
We consider the episode done if:
1) The sawyer TCP is outside the workspace, with self.translation_tcp_world
        2) The Joints exceeded a certain effort (it got stuck somewhere), self.joints_efforts_array
        3) The TCP to block distance is lower than a threshold (it got to the place)
"""
is_stuck = self.is_arm_stuck(self.joints_efforts_dict)
tcp_current_pos = Vector3()
tcp_current_pos.x = self.translation_tcp_world[0]
tcp_current_pos.y = self.translation_tcp_world[1]
tcp_current_pos.z = self.translation_tcp_world[2]
is_inside_workspace = self.is_inside_workspace(tcp_current_pos)
tcp_to_block_pos = Vector3()
tcp_to_block_pos.x = observations[0]
tcp_to_block_pos.y = observations[1]
tcp_to_block_pos.z = observations[2]
has_reached_the_block = self.reached_block(tcp_to_block_pos,
self.acceptable_distance_to_cube,
self.translation_tcp_world[2],
self.tcp_z_position_min)
done = is_stuck or not(is_inside_workspace) or has_reached_the_block
rospy.logdebug("#### IS DONE ? ####")
rospy.logdebug("is_stuck ?="+str(is_stuck))
rospy.logdebug("Not is_inside_workspace ?=" +
str(not(is_inside_workspace)))
rospy.logdebug("has_reached_the_block ?="+str(has_reached_the_block))
rospy.logdebug("done ?="+str(done))
rospy.logdebug("#### #### ####")
return done
def _compute_reward(self, observations, done):
"""
We Base the rewards in if its done or not and we base it on
if the distance to the block has increased or not.
:return:
"""
tf_tcp_to_block_vector = Vector3()
tf_tcp_to_block_vector.x = observations[0]
tf_tcp_to_block_vector.y = observations[1]
tf_tcp_to_block_vector.z = observations[2]
distance_block_to_tcp = self.get_magnitud_tf_tcp_to_block(
tf_tcp_to_block_vector)
distance_difference = distance_block_to_tcp - self.previous_distance_from_block
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logdebug("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_block_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
#reward = -1*self.closer_to_block_reward
reward = 0.0
else:
if self.reached_block(tf_tcp_to_block_vector, self.acceptable_distance_to_cube, self.translation_tcp_world[2], self.tcp_z_position_min):
reward = self.done_reward
else:
reward = -1*self.done_reward
self.previous_distance_from_block = distance_block_to_tcp
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def is_arm_stuck(self, joints_efforts_dict):
"""
        Checks if the efforts in the arm joints exceed a certain threshold.
We will only check the joints_0,1,2,3,4,5,6
"""
is_arm_stuck = False
for joint_name in self.joint_limits.joint_names:
if joint_name in joints_efforts_dict:
effort_value = joints_efforts_dict[joint_name]
index = self.joint_limits.joint_names.index(joint_name)
effort_limit = self.joint_limits.effort[index]
rospy.logdebug("Joint Effort ==>Name="+str(joint_name) +
",Effort="+str(effort_value)+",Limit="+str(effort_limit))
if abs(effort_value) > effort_limit:
is_arm_stuck = True
rospy.logerr("Joint Effort TOO MUCH ==>" +
str(joint_name)+","+str(effort_value))
break
else:
rospy.logdebug("Joint Effort is ok==>" +
str(joint_name)+","+str(effort_value))
else:
rospy.logdebug(
"Joint Name is not in the effort dict==>"+str(joint_name))
return is_arm_stuck
def reached_block(self, block_to_tcp_vector, minimum_distance, tcp_z_position, tcp_z_position_min):
"""
        It returns True if the magnitude of the TCP-to-block vector is smaller than
        the minimum_distance.
        tcp_z_position is used so we only consider the block reached if the TCP is above the table.
"""
reached_block_b = False
distance_to_block = self.get_magnitud_tf_tcp_to_block(
block_to_tcp_vector)
tcp_z_pos_ok = tcp_z_position >= tcp_z_position_min
distance_ok = distance_to_block <= minimum_distance
reached_block_b = distance_ok and tcp_z_pos_ok
rospy.logdebug("###### REACHED BLOCK ? ######")
rospy.logdebug("tcp_z_pos_ok==>"+str(tcp_z_pos_ok))
rospy.logdebug("distance_ok==>"+str(distance_ok))
rospy.logdebug("reached_block_b==>"+str(reached_block_b))
rospy.logdebug("############")
return reached_block_b
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
        :param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
        Given two Vector3/Point objects, returns the Euclidean distance between them.
        :param pstart:
        :param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_magnitud_tf_tcp_to_block(self, translation_vector):
"""
        Given a Vector3 object, returns its magnitude.
        :param translation_vector:
:return:
"""
a = numpy.array((translation_vector.x,
translation_vector.y,
translation_vector.z))
distance = numpy.linalg.norm(a)
return distance
def get_orientation_euler(self, quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def is_inside_workspace(self, current_position):
"""
Check if the sawyer is inside the Workspace defined
"""
is_inside = False
rospy.logdebug("##### INSIDE WORK SPACE? #######")
rospy.logdebug("XYZ current_position"+str(current_position))
rospy.logdebug("work_space_x_max"+str(self.work_space_x_max) +
",work_space_x_min="+str(self.work_space_x_min))
rospy.logdebug("work_space_y_max"+str(self.work_space_y_max) +
",work_space_y_min="+str(self.work_space_y_min))
rospy.logdebug("work_space_z_max"+str(self.work_space_z_max) +
",work_space_z_min="+str(self.work_space_z_min))
rospy.logdebug("############")
if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:
if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:
if current_position.z > self.work_space_z_min and current_position.z <= self.work_space_z_max:
is_inside = True
return is_inside
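

# --- Illustrative sketch (not part of the original source) ---
# A ROS-free mirror of the discrete action encoding used in _set_action()
# above: even actions increase a joint, odd actions decrease it, and the joint
# index is action // 2. The helper name and the sample joint list are
# assumptions for this sketch only.
def _demo_sawyer_action_id(joints, action):
    """Return the same action_id string built by the if/elif chain in _set_action()."""
    direction = "_increase" if action % 2 == 0 else "_decrease"
    return joints[action // 2] + direction


if __name__ == "__main__":
    demo_joints = ["right_j0", "right_j1", "right_j2", "right_j3",
                   "right_j4", "right_j5", "right_j6"]
    # Example: action 3 decreases right_j1, exactly as in the original mapping.
    print(_demo_sawyer_action_id(demo_joints, 3))  # -> right_j1_decrease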
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~turtlebot3_env.py | import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class TurtleBot3Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new TurtleBot3Env environment.
        TurtleBot3 doesn't use controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        To check any topic we need to have the simulation running, so we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
        * /imu: Inertial Measuring Unit that gives relative accelerations and orientations.
* /scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.logdebug("Start TurtleBot3Env INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="turtlebot3_gazebo",
launch_file_name="put_turtlebot3_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible ones
self.controllers_list = []
# It doesnt use namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot3Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False)
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
rospy.Subscriber("/imu", Imu, self._imu_callback)
rospy.Subscriber("/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot3Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
self._check_imu_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /imu to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/imu", Imu, timeout=5.0)
rospy.logdebug("Current /imu READY=>")
except:
rospy.logerr("Current /imu not ready yet, retrying for getting imu")
return self.imu
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/scan", LaserScan, timeout=1.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _imu_callback(self, data):
self.imu = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until that twist is achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot3 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate)
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logdebug("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
current_odometry = self._check_odom_ready()
            # In turtlebot3 the odometry angular readings are inverted, so we have to invert the sign.
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = -1*current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logdebug("END wait_until_twist_achieved...")
return delta_time
def get_odom(self):
return self.odom
def get_imu(self):
return self.imu
def get_laser_scan(self):
return self.laser_scan
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~parrotdrone_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Range
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Pose
from std_msgs.msg import Empty
from openai_ros.openai_ros_common import ROSLauncher
class ParrotDroneEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new ParrotDroneEnv environment.
To check any topic we need to have the simulations running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: the sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /drone/down_camera/image_raw: RGB camera facing down.
* /drone/front_camera/image_raw: RGB camera facing front.
* /drone/imu: IMU of the drone giving acceleration and orientation relative to the world.
* /drone/sonar: Sonar readings facing front.
* /drone/gt_pose: Position and orientation in global space.
* /drone/gt_vel: Linear velocity; the angular part doesn't record anything.
Actuators Topic List:
* /cmd_vel: Move the Drone Around when you have taken off.
* /drone/takeoff: Publish into it to take off
* /drone/land: Publish to make ParrotDrone Land
Args:
"""
rospy.logdebug("Start ParrotDroneEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# Internal Vars
# Doesn't have any accessible ones
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ParrotDroneEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
ROSLauncher(rospackage_name="drone_construct",
launch_file_name="put_drone_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/drone/down_camera/image_raw", Image,
self._down_camera_rgb_image_raw_callback)
rospy.Subscriber("/drone/front_camera/image_raw", Image,
self._front_camera_rgb_image_raw_callback)
rospy.Subscriber("/drone/imu", Imu, self._imu_callback)
rospy.Subscriber("/drone/sonar", Range, self._sonar_callback)
rospy.Subscriber("/drone/gt_pose", Pose, self._gt_pose_callback)
rospy.Subscriber("/drone/gt_vel", Twist, self._gt_vel_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._takeoff_pub = rospy.Publisher(
'/drone/takeoff', Empty, queue_size=1)
self._land_pub = rospy.Publisher('/drone/land', Empty, queue_size=1)
self._check_all_publishers_ready()
self.gazebo.pauseSim()
rospy.logdebug("Finished ParrotDroneEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_down_camera_rgb_image_raw_ready()
self._check_front_camera_rgb_image_raw_ready()
self._check_imu_ready()
self._check_sonar_ready()
self._check_gt_pose_ready()
self._check_gt_vel_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_down_camera_rgb_image_raw_ready(self):
self.down_camera_rgb_image_raw = None
rospy.logdebug(
"Waiting for /drone/down_camera/image_raw to be READY...")
while self.down_camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.down_camera_rgb_image_raw = rospy.wait_for_message(
"/drone/down_camera/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /drone/down_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /drone/down_camera/image_raw not ready yet, retrying for getting down_camera_rgb_image_raw")
return self.down_camera_rgb_image_raw
def _check_front_camera_rgb_image_raw_ready(self):
self.front_camera_rgb_image_raw = None
rospy.logdebug(
"Waiting for /drone/front_camera/image_raw to be READY...")
while self.front_camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.front_camera_rgb_image_raw = rospy.wait_for_message(
"/drone/front_camera/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /drone/front_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /drone/front_camera/image_raw not ready yet, retrying for getting front_camera_rgb_image_raw")
return self.front_camera_rgb_image_raw
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /drone/imu to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message(
"/drone/imu", Imu, timeout=5.0)
rospy.logdebug("Current/drone/imu READY=>")
except:
rospy.logerr(
"Current /drone/imu not ready yet, retrying for getting imu")
return self.imu
def _check_sonar_ready(self):
self.sonar = None
rospy.logdebug("Waiting for /drone/sonar to be READY...")
while self.sonar is None and not rospy.is_shutdown():
try:
self.sonar = rospy.wait_for_message(
"/drone/sonar", Range, timeout=5.0)
rospy.logdebug("Current/drone/sonar READY=>")
except:
rospy.logerr(
"Current /drone/sonar not ready yet, retrying for getting sonar")
return self.sonar
def _check_gt_pose_ready(self):
self.gt_pose = None
rospy.logdebug("Waiting for /drone/gt_pose to be READY...")
while self.gt_pose is None and not rospy.is_shutdown():
try:
self.gt_pose = rospy.wait_for_message(
"/drone/gt_pose", Pose, timeout=5.0)
rospy.logdebug("Current /drone/gt_pose READY=>")
except:
rospy.logerr(
"Current /drone/gt_pose not ready yet, retrying for getting gt_pose")
return self.gt_pose
def _check_gt_vel_ready(self):
self.gt_vel = None
rospy.logdebug("Waiting for /drone/gt_vel to be READY...")
while self.gt_vel is None and not rospy.is_shutdown():
try:
self.gt_vel = rospy.wait_for_message(
"/drone/gt_vel", Twist, timeout=5.0)
rospy.logdebug("Current /drone/gt_vel READY=>")
except:
rospy.logerr(
"Current /drone/gt_vel not ready yet, retrying for getting gt_vel")
return self.gt_vel
def _down_camera_rgb_image_raw_callback(self, data):
self.down_camera_rgb_image_raw = data
def _front_camera_rgb_image_raw_callback(self, data):
self.front_camera_rgb_image_raw = data
def _imu_callback(self, data):
self.imu = data
def _sonar_callback(self, data):
self.sonar = data
def _gt_pose_callback(self, data):
self.gt_pose = data
def _gt_vel_callback(self, data):
self.gt_vel = data
def _check_all_publishers_ready(self):
"""
Checks that all the publishers are working
:return:
"""
rospy.logdebug("START ALL SENSORS READY")
self._check_cmd_vel_pub_connection()
self._check_takeoff_pub_connection()
self._check_land_pub_connection()
rospy.logdebug("ALL SENSORS READY")
def _check_cmd_vel_pub_connection(self):
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
def _check_takeoff_pub_connection(self):
rate = rospy.Rate(10) # 10hz
while self._takeoff_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _takeoff_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_takeoff_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
def _check_land_pub_connection(self):
rate = rospy.Rate(10) # 10hz
while self._land_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _land_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid errors when the world is reset and time goes backwards.
pass
rospy.logdebug("_land_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def takeoff(self):
"""
Sends the takeoff command and checks it has taken off
It unpauses the simulation and pauses again
to allow it to be a self contained action
"""
self.gazebo.unpauseSim()
self._check_takeoff_pub_connection()
takeoff_cmd = Empty()
self._takeoff_pub.publish(takeoff_cmd)
# When it takes off, the height value is around 1.3
self.wait_for_height(heigh_value_to_check=0.8,
smaller_than=False,
epsilon=0.05,
update_rate=10)
self.gazebo.pauseSim()
def land(self):
"""
Sends the Landing command and checks it has landed
It unpauses the simulation and pauses again
to allow it to be a self contained action
"""
self.gazebo.unpauseSim()
self._check_land_pub_connection()
land_cmd = Empty()
self._land_pub.publish(land_cmd)
# When Drone is on the floor, the readings are 0.5
self.wait_for_height(heigh_value_to_check=0.6,
smaller_than=True,
epsilon=0.05,
update_rate=10)
self.gazebo.pauseSim()
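# Both takeoff() and land() delegate the height check to wait_for_height(); e.g. a
# hypothetical check that the drone is above 1.0 m would be:
#   self.wait_for_height(heigh_value_to_check=1.0, smaller_than=False,
#                        epsilon=0.05, update_rate=10)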
def wait_for_height(self, heigh_value_to_check, smaller_than, epsilon, update_rate):
"""
Checks if current height is smaller or bigger than a value
:param: smaller_than: If True, we will wait until value is smaller than the one given
"""
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
rospy.logdebug("epsilon>>" + str(epsilon))
while not rospy.is_shutdown():
current_gt_pose = self._check_gt_pose_ready()
current_height = current_gt_pose.position.z
if smaller_than:
takeoff_height_achieved = current_height <= heigh_value_to_check
rospy.logwarn("SMALLER THAN HEIGHT...current_height=" +
str(current_height)+"<="+str(heigh_value_to_check))
else:
takeoff_height_achieved = current_height >= heigh_value_to_check
rospy.logwarn("BIGGER THAN HEIGHT...current_height=" +
str(current_height)+">="+str(heigh_value_to_check))
if takeoff_height_achieved:
rospy.logwarn("Reached Height!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logwarn("Height Not there yet, keep waiting...")
rate.sleep()
def move_base(self, linear_speed_vector, angular_speed, epsilon=0.05, update_rate=10):
"""
It will move the base based on the linear and angular speeds given.
It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed_vector: Speed in the XYZ axis of the robot base frame, because drones can move in any direction
:param angular_speed: Speed of the angular turning of the robot base frame, because this drone only turns on the Z axis.
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed_vector.x
cmd_vel_value.linear.y = linear_speed_vector.y
cmd_vel_value.linear.z = linear_speed_vector.z
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_cmd_vel_pub_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate)
"""
self.wait_time_for_execute_movement()
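# Illustrative call (values are made up): move forward at 0.5 m/s in the drone body
# frame while yawing at 0.1 rad/s. The linear_speed_vector only needs x/y/z fields,
# e.g. a geometry_msgs Vector3 (note: Vector3 is not imported in this file):
#   speed = Vector3(x=0.5, y=0.0, z=0.0)
#   self.move_base(linear_speed_vector=speed, angular_speed=0.1,
#                  epsilon=0.05, update_rate=10)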
def wait_time_for_execute_movement(self):
"""
Because this Parrot Drone position is global, we really don't have
a way to know if it is moving in the desired direction, because it would need
to evaluate the difference in position and speed in the local reference frame.
"""
time.sleep(1.0)
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate):
"""
# TODO: Make it work using TF conversions
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
rospy.logwarn("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logwarn("epsilon>>" + str(epsilon))
values_of_ref = [cmd_vel_value.linear.x,
cmd_vel_value.linear.y,
cmd_vel_value.linear.z,
cmd_vel_value.angular.z]
while not rospy.is_shutdown():
current_gt_vel = self._check_gt_vel_ready()
values_to_check = [current_gt_vel.linear.x,
current_gt_vel.linear.y,
current_gt_vel.linear.z,
current_gt_vel.angular.z]
vel_values_are_close = self.check_array_similar(
values_of_ref, values_to_check, epsilon)
if vel_values_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def check_array_similar(self, ref_value_array, check_value_array, epsilon):
"""
It checks if the check_value is similar to the ref_value
"""
rospy.logwarn("ref_value_array="+str(ref_value_array))
rospy.logwarn("check_value_array="+str(check_value_array))
return numpy.allclose(ref_value_array, check_value_array, atol=epsilon)
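# Worked example (hypothetical numbers): with epsilon = 0.05,
#   self.check_array_similar([0.5, 0.0, 0.0, 0.1], [0.52, 0.01, 0.0, 0.08], 0.05)
# returns True because every element differs from its reference by at most 0.05.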
def get_down_camera_rgb_image_raw(self):
return self.down_camera_rgb_image_raw
def get_front_camera_rgb_image_raw(self):
return self.front_camera_rgb_image_raw
def get_imu(self):
return self.imu
def get_sonar(self):
return self.sonar
def get_gt_pose(self):
return self.gt_pose
def get_gt_vel(self):
return self.gt_vel
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_wall.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot2WallEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/turtlebot2/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="start_wall_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot2/config",
yaml_file_name="turtlebot2_wall.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2WallEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.linear_forward_speed = rospy.get_param('/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot2/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/turtlebot2/new_ranges')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/turtlebot2/desired_pose/x")
self.desired_point.y = rospy.get_param("/turtlebot2/desired_pose/y")
self.desired_point.z = rospy.get_param("/turtlebot2/desired_pose/z")
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>" + str(len(laser_scan.ranges)))
num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
high = numpy.full((num_laser_readings), self.max_laser_value)
low = numpy.full((num_laser_readings), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
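# The parameters read above live on the ROS parameter server and are normally loaded
# from turtlebot2_wall.yaml by LoadYamlFileParamsTest. A hypothetical equivalent by
# hand (the numeric values are illustrative, not taken from the real yaml) would be:
#   rospy.set_param('/turtlebot2/n_actions', 3)
#   rospy.set_param('/turtlebot2/linear_forward_speed', 0.5)
#   rospy.set_param('/turtlebot2/angular_speed', 0.3)
#   rospy.set_param('/turtlebot2/new_ranges', 5)
#   rospy.set_param('/turtlebot2/desired_pose/x', 5.0)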
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
odometry = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(odometry.pose.pose.position)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_laser_scan = self.discretize_observation( laser_scan,
self.new_ranges
)
# We get the odometry so that SumitXL knows where it is.
odometry = self.get_odom()
x_position = odometry.pose.pose.position.x
y_position = odometry.pose.pose.position.y
# We round to only two decimals to avoid very big Observation space
odometry_array = [round(x_position, 2),round(y_position, 2)]
# We only want the X and Y position and the Yaw
observations = discretized_laser_scan + odometry_array
rospy.logdebug("Observations==>"+str(observations))
rospy.logdebug("END Get Observation ==>")
return observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("TurtleBot2 is Too Close to wall==>")
else:
rospy.logerr("TurtleBot2 didnt crash at least ==>")
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
MAX_X = 6.0
MIN_X = -1.0
MAX_Y = 3.0
MIN_Y = -3.0
# We see if we are outside the Learning Space
if current_position.x <= MAX_X and current_position.x > MIN_X:
if current_position.y <= MAX_Y and current_position.y > MIN_Y:
rospy.logdebug("TurtleBot Position is OK ==>["+str(current_position.x)+","+str(current_position.y)+"]")
# We see if it got to the desired point
if self.is_in_desired_position(current_position):
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in Y Pos ==>"+str(current_position.x))
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in X Pos ==>"+str(current_position.x))
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
distance_from_des_point = self.get_distance_from_desired_point(current_position)
distance_difference = distance_from_des_point - self.previous_distance_from_des_point
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward += self.forwards_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
reward += 0
else:
if self.is_in_desired_position(current_position):
reward = self.end_episode_points
else:
reward = -1*self.end_episode_points
self.previous_distance_from_des_point = distance_from_des_point
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self,data,new_ranges):
"""
Keeps only the laser readings whose index is a multiple of len(ranges)/new_ranges
and discards the rest.
"""
self._episode_done = False
discretized_ranges = []
mod = int(len(data.ranges)/new_ranges)
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
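# Worked example (hypothetical numbers): with 720 laser readings and new_ranges = 5,
# mod = 144, so only indices 0, 144, 288, 432 and 576 are kept, giving a 5-element
# discretized scan; inf readings are clamped to max_laser_value and NaN readings to
# min_laser_value.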
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
It returns True if the current position is similar to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
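# Worked example (made-up values): with desired_point = (5.0, 0.0) and epsilon = 0.05,
# a current position of (4.97, 0.02) is accepted because both coordinates fall inside
# the (desired - epsilon, desired + epsilon] band.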
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param start_point:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given two Point/Vector3 objects, get the Euclidean distance between them
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
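# Equivalent standalone computation (sketch using the same numpy call):
#   a = numpy.array((1.0, 2.0, 0.0))
#   b = numpy.array((4.0, 6.0, 0.0))
#   numpy.linalg.norm(a - b)   # -> 5.0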
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~shadow_tc~learn_to_pick_ball.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import shadow_tc_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class ShadowTcGetBallEnv(shadow_tc_env.ShadowTcEnv):
def __init__(self):
"""
Make ShadowTc learn how to pick up a ball
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/shadow_tc/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="shadow_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/shadow_tc/config",
yaml_file_name="learn_to_pick_ball.yaml")
# We execute this one before because there are some functions that this
# TaskEnv uses that use variables from the parent class, like the effort limit fetch.
super(ShadowTcGetBallEnv, self).__init__(ros_ws_abspath)
# Here we will add any init functions prior to starting the MyRobotEnv
# Only variable needed to be set here
rospy.logdebug("Start ShadowTcGetBallEnv INIT...")
number_actions = rospy.get_param('/shadow_tc/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
self.movement_delta =rospy.get_param("/shadow_tc/movement_delta")
self.work_space_x_max = rospy.get_param("/shadow_tc/work_space/x_max")
self.work_space_x_min = rospy.get_param("/shadow_tc/work_space/x_min")
self.work_space_y_max = rospy.get_param("/shadow_tc/work_space/y_max")
self.work_space_y_min = rospy.get_param("/shadow_tc/work_space/y_min")
self.work_space_z_max = rospy.get_param("/shadow_tc/work_space/z_max")
self.work_space_z_min = rospy.get_param("/shadow_tc/work_space/z_min")
self.dec_obs = rospy.get_param("/shadow_tc/number_decimals_precision_obs")
self.acceptable_distance_to_ball = rospy.get_param("/shadow_tc/acceptable_distance_to_ball")
# We place the Maximum and minimum values of observations
# TODO: Fill when get_observations is done.
high = numpy.array([self.work_space_x_max,
self.work_space_y_max,
self.work_space_z_max,
1,1,1])
low = numpy.array([ self.work_space_x_min,
self.work_space_y_min,
self.work_space_z_min,
0,0,0])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.done_reward =rospy.get_param("/shadow_tc/done_reward")
self.closer_to_block_reward = rospy.get_param("/shadow_tc/closer_to_block_reward")
self.cumulated_steps = 0.0
rospy.logdebug("END shadow_tcGetBallEnv INIT...")
def _set_init_pose(self):
"""
Sets the UR5 arm to the initial position and the objects to the original position.
"""
rospy.logdebug("START _set_init_pose...")
# We set the angles to zero of the limb
self.reset_scene()
rospy.logdebug("END _set_init_pose...")
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
rospy.logdebug("START TaskEnv _init_env_variables")
# For Info Purposes
self.cumulated_reward = 0.0
self.ball_pose = self.get_ball_pose()
tcp_pose = self.get_tip_pose()
rospy.logdebug("TCP POSE ===>"+str(tcp_pose))
self.previous_distance_from_ball = self.get_distance_from_point(self.ball_pose.position, tcp_pose.position)
rospy.logdebug("END TaskEnv _init_env_variables")
def _set_action(self, action):
"""
It sets the joints of shadow_tc based on the action integer given
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
increment_vector = Vector3()
action_id="move"
if action == 0: # Increase X
increment_vector.x = self.movement_delta
elif action == 1: # Decrease X
increment_vector.x = -1*self.movement_delta
elif action == 2: # Increase Y
increment_vector.y = self.movement_delta
elif action == 3: # Decrease Y
increment_vector.y = -1*self.movement_delta
elif action == 4: # Increase Z
increment_vector.z = self.movement_delta
elif action == 5: # Decrease Z
increment_vector.z = -1*self.movement_delta
elif action == 6: # Open Claw
action_id = "open"
elif action == 7: # Close Claw
action_id = "close"
rospy.logdebug("Action_id="+str(action_id)+",IncrementVector===>"+str(increment_vector))
if action_id == "move":
# We tell shadow_tc the action to perform
# We don't change the RPY, therefore it will always be zero
self.move_tip( x=increment_vector.x,
y=increment_vector.y,
z=increment_vector.z)
elif action_id == "open":
self.open_hand()
elif action_id == "close":
self.close_hand()
rospy.logdebug("END Set Action ==>"+str(action)+",action_id="+str(action_id)+",IncrementVector===>"+str(increment_vector))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
shadow_tcEnv API DOCS.
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
tcp_pose = self.get_tip_pose()
# We don't add it to the observations because it is not part of the robot
self.ball_pose = self.get_ball_pose()
# We activate the finger collision detection
self.finger_collided_dict = self.get_fingers_colision(object_collision_name="cricket_ball__link")
f1_collided = self.finger_collided_dict["f1"]
f2_collided = self.finger_collided_dict["f2"]
f3_collided = self.finger_collided_dict["f3"]
observation = [ round(tcp_pose.position.x,self.dec_obs),
round(tcp_pose.position.y,self.dec_obs),
round(tcp_pose.position.z,self.dec_obs),
int(f1_collided),
int(f2_collided),
int(f3_collided)
]
rospy.logdebug("Observations ==>"+str(observation))
rospy.logdebug("END Get Observation ==>")
return observation
def _is_done(self, observations):
"""
We consider the episode done if:
1) The shadow_tc TCP is outside the workspace.
2) The TCP to block distance is lower than a threshold (it got to the place)
and the collisions in the fingers are true.
"""
tcp_pos = Vector3()
tcp_pos.x = observations[0]
tcp_pos.y = observations[1]
tcp_pos.z = observations[2]
# We check if all three fingers have collided with the ball
finguers_collided = observations[3] and observations[4] and observations[5]
bool_is_inside_workspace = self.is_inside_workspace(tcp_pos)
has_reached_the_ball = self.reached_ball( tcp_pos,
self.ball_pose.position,
self.acceptable_distance_to_ball,
finguers_collided)
done = has_reached_the_ball or not(bool_is_inside_workspace)
rospy.logdebug("#### IS DONE ? ####")
rospy.logdebug("Not bool_is_inside_workspace ?="+str(not(bool_is_inside_workspace)))
rospy.logdebug("has_reached_the_ball ?="+str(has_reached_the_ball))
rospy.logdebug("done ?="+str(done))
rospy.logdebug("#### #### ####")
return done
def _compute_reward(self, observations, done):
"""
We base the reward on whether the episode is done and on whether
the distance to the ball has decreased or not.
:return:
"""
tcp_pos = Vector3()
tcp_pos.x = observations[0]
tcp_pos.y = observations[1]
tcp_pos.z = observations[2]
# We check if all three fingers have collided with the ball
finguers_collided = observations[3] and observations[4] and observations[5]
distance_from_ball = self.get_distance_from_point(self.ball_pose.position, tcp_pos)
distance_difference = distance_from_ball - self.previous_distance_from_ball
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logerr("NOT ERROR: DECREASE IN DISTANCE GOOD")
reward = self.closer_to_block_reward
else:
rospy.logerr("NOT ERROR: ENCREASE IN DISTANCE BAD")
#reward = -1*self.closer_to_block_reward
reward = 0.0
else:
has_reached_the_ball = self.reached_ball( tcp_pos,
self.ball_pose.position,
self.acceptable_distance_to_ball,
finguers_collided)
if has_reached_the_ball:
reward = self.done_reward
else:
reward = -1*self.done_reward
self.previous_distance_from_ball = distance_from_ball
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def reached_ball(self,tcp_position, ball_position, minimum_distance, finguers_collided):
"""
Return true if the distance from TCP position to the ball position is
lower than the minimum_distance and all three fingers are touching the ball.
"""
distance_from_ball = self.get_distance_from_point(tcp_position, ball_position)
distance_to_ball_ok = distance_from_ball < minimum_distance
reached_ball_b = distance_to_ball_ok and finguers_collided
rospy.logdebug("###### REACHED BLOCK ? ######")
rospy.logdebug("distance_from_ball==>"+str(distance_from_ball))
rospy.logdebug("distance_to_ball_ok==>"+str(distance_to_ball_ok))
rospy.logdebug("reached_ball_b==>"+str(reached_ball_b))
rospy.logdebug("finguers_collided==>"+str(finguers_collided))
rospy.logdebug("############")
return reached_ball_b
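# Worked example (hypothetical numbers): with minimum_distance = 0.1, a TCP-to-ball
# distance of 0.07 and all three finger contacts True returns True; the same distance
# with any finger contact False returns False.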
def get_distance_from_point(self, pstart, p_end):
"""
Given two Point/Vector3 objects, get the Euclidean distance between them
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def is_inside_workspace(self,current_position):
"""
Check if the shadow_tc is inside the Workspace defined
"""
is_inside = False
rospy.logdebug("##### INSIDE WORK SPACE? #######")
rospy.logdebug("XYZ current_position"+str(current_position))
rospy.logdebug("work_space_x_max"+str(self.work_space_x_max)+",work_space_x_min="+str(self.work_space_x_min))
rospy.logdebug("work_space_y_max"+str(self.work_space_y_max)+",work_space_y_min="+str(self.work_space_y_min))
rospy.logdebug("work_space_z_max"+str(self.work_space_z_max)+",work_space_z_min="+str(self.work_space_z_min))
rospy.logdebug("############")
if current_position.x > self.work_space_x_min and current_position.x <= self.work_space_x_max:
if current_position.y > self.work_space_y_min and current_position.y <= self.work_space_y_max:
if current_position.z > self.work_space_z_min and current_position.z <= self.work_space_z_max:
is_inside = True
return is_inside
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~fetch_env.py | import numpy as np
import rospy
from gazebo_msgs.srv import GetWorldProperties, GetModelState
from sensor_msgs.msg import JointState
from openai_ros import robot_gazebo_env
import sys
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import trajectory_msgs.msg
from openai_ros.openai_ros_common import ROSLauncher
class FetchEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(self, ros_ws_abspath):
rospy.logdebug("========= In Fetch Env")
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="fetch_gazebo",
launch_file_name="put_robot_in_world_HER.launch",
ros_ws_abspath=ros_ws_abspath)
# this object contains all object's positions!!
self.obj_positions = Obj_Pos()
self.controllers_list = []
self.robot_name_space = ""
self.reset_controls = False
super(FetchEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
# We Start all the ROS related Subscribers and publishers
self.JOINT_STATES_SUBSCRIBER = '/joint_states'
self.join_names = ["joint0",
"joint1",
"joint2",
"joint3",
"joint4",
"joint5",
"joint6"]
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.joint_states_sub = rospy.Subscriber(
self.JOINT_STATES_SUBSCRIBER, JointState, self.joints_callback)
self.joints = JointState()
# Start Services
self.move_fetch_object = MoveFetch()
# Wait until it has reached its startup position
self.wait_fetch_ready()
self.gazebo.pauseSim()
# Variables that we give through the constructor.
rospy.logdebug("========= Out Fetch Env")
# RobotGazeboEnv virtual methods
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# FetchEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
self.JOINT_STATES_SUBSCRIBER, JointState, timeout=1.0)
rospy.logdebug(
"Current "+str(self.JOINT_STATES_SUBSCRIBER)+" READY=>" + str(self.joints))
except:
rospy.logerr(
"Current "+str(self.JOINT_STATES_SUBSCRIBER)+" not ready yet, retrying....")
return self.joints
def joints_callback(self, data):
self.joints = data
def get_joints(self):
return self.joints
def get_joint_names(self):
return self.joints.name
def set_trajectory_ee(self, action):
"""
Sets the Pose of the EndEffector based on the action variable.
The action variable contains the position and orientation of the EndEffector.
See create_action
"""
# Set up a trajectory message to publish.
ee_target = geometry_msgs.msg.Pose()
ee_target.orientation.x = -0.707
ee_target.orientation.y = 0.0
ee_target.orientation.z = 0.707
ee_target.orientation.w = 0.001
ee_target.position.x = action[0]
ee_target.position.y = action[1]
ee_target.position.z = action[2]
result = self.move_fetch_object.ee_traj(ee_target)
return result
def set_trajectory_joints(self, initial_qpos):
positions_array = [None] * 7
positions_array[0] = initial_qpos["joint0"]
positions_array[1] = initial_qpos["joint1"]
positions_array[2] = initial_qpos["joint2"]
positions_array[3] = initial_qpos["joint3"]
positions_array[4] = initial_qpos["joint4"]
positions_array[5] = initial_qpos["joint5"]
positions_array[6] = initial_qpos["joint6"]
self.move_fetch_object.joint_traj(positions_array)
return True
def create_action(self, position, orientation):
"""
position = [x,y,z]
orientation= [x,y,z,w]
"""
gripper_target = np.array(position)
gripper_rotation = np.array(orientation)
action = np.concatenate([gripper_target, gripper_rotation])
return action
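# Illustrative sketch (made-up target): build the 7-element action and send it to
# set_trajectory_ee, which only reads the first three (position) entries:
#   action = self.create_action(position=[0.6, 0.0, 0.8],
#                               orientation=[-0.707, 0.0, 0.707, 0.001])
#   self.set_trajectory_ee(action)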
def create_joints_dict(self, joints_positions):
"""
Based on the Order of the positions, they will be assigned to its joint name
names_in_order:
joint0: 0.0
joint1: 0.0
joint2: 0.0
joint3: -1.5
joint4: 0.0
joint5: 1.5
joint6: 0.0
"""
assert len(joints_positions) == len(
self.join_names), "Wrong number of joints, there should be "+str(len(self.join_names))
joints_dict = dict(zip(self.join_names, joints_positions))
return joints_dict
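# Illustrative sketch (angles taken from the names_in_order example above):
#   self.create_joints_dict([0.0, 0.0, 0.0, -1.5, 0.0, 1.5, 0.0])
# returns {"joint0": 0.0, ..., "joint3": -1.5, ..., "joint6": 0.0}, which can be fed
# straight into set_trajectory_joints.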
def get_ee_pose(self):
"""
Returns geometry_msgs/PoseStamped
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
"""
self.gazebo.unpauseSim()
gripper_pose = self.move_fetch_object.ee_pose()
self.gazebo.pauseSim()
return gripper_pose
def get_ee_rpy(self):
gripper_rpy = self.move_fetch_object.ee_rpy()
return gripper_rpy
def wait_fetch_ready(self):
"""
# TODO: Make it wait for this position
Desired Position to wait for
(0.44291739197591884,
-0.13691381375054146,
-4.498589757905556e-09,
0.006635104153645881,
0.0018354466563206273,
0.0023142971818792546,
1.3200059164171716,
1.399964660857453,
-0.19981518020955402,
1.719961735970255,
1.0394665737933906e-05,
1.659980987917125,
-6.067103113238659e-06,
0.05001918351472232,
0.050051597253287436)
"""
import time
for i in range(20):
print("WAITING..."+str(i))
sys.stdout.flush()
time.sleep(1.0)
print("WAITING...DONE")
# ParticularEnv methods
# ----------------------------
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
class Obj_Pos(object):
"""
This object maintains the pose and rotation of the cube in a simulation through Gazebo Service
"""
def __init__(self):
world_specs = rospy.ServiceProxy(
'/gazebo/get_world_properties', GetWorldProperties)()
self.time = 0
self.model_names = world_specs.model_names
self.get_model_state = rospy.ServiceProxy(
'/gazebo/get_model_state', GetModelState)
def get_states(self):
"""
Returns the ndarray of pose&rotation of the cube
"""
for model_name in self.model_names:
if model_name == "cube":
data = self.get_model_state(
model_name, "world") # gazebo service client
return np.array([
data.pose.position.x,
data.pose.position.y,
data.pose.position.z,
data.pose.orientation.x,
data.pose.orientation.y,
data.pose.orientation.z
])
class MoveFetch(object):
def __init__(self):
rospy.logdebug("===== In MoveFetch")
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("arm")
rospy.logdebug("===== Out MoveFetch")
def ee_traj(self, pose):
self.group.set_pose_target(pose)
result = self.execute_trajectory()
return result
def joint_traj(self, positions_array):
self.group_variable_values = self.group.get_current_joint_values()
self.group_variable_values[0] = positions_array[0]
self.group_variable_values[1] = positions_array[1]
self.group_variable_values[2] = positions_array[2]
self.group_variable_values[3] = positions_array[3]
self.group_variable_values[4] = positions_array[4]
self.group_variable_values[5] = positions_array[5]
self.group_variable_values[6] = positions_array[6]
self.group.set_joint_value_target(self.group_variable_values)
result = self.execute_trajectory()
return result
def execute_trajectory(self):
"""
Assuming that the trajectories have been set on the self objects appropriately,
make a plan to the destination in homogeneous space (x, y, z, yaw, pitch, roll)
and return the result of the execution.
"""
self.plan = self.group.plan()
result = self.group.go(wait=True)
return result
def ee_pose(self):
gripper_pose = self.group.get_current_pose()
return gripper_pose
def ee_rpy(self):
gripper_rpy = self.group.get_current_rpy()
return gripper_rpy
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~iriwam~tcp_to_bowl.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import iriwam_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
from tf.transformations import euler_from_quaternion
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class IriWamTcpToBowlEnv(iriwam_env.IriWamEnv):
def __init__(self):
"""
Make iriwam learn how to pick up a cube
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/iriwam/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="iri_wam_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/iriwam/config",
yaml_file_name="tcp_to_bowl.yaml")
# We execute this one before because there are some functions that this
# TaskEnv uses that use variables from the parent class, like the effort limit fetch.
super(IriWamTcpToBowlEnv, self).__init__(ros_ws_abspath)
# Here we will add any init functions prior to starting the MyRobotEnv
# Only variable needed to be set here
rospy.logdebug("Start IriWamTcpToBowlEnv INIT...")
number_actions = rospy.get_param('/iriwam/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
self.iri_wam_joint_1 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_1")
self.iri_wam_joint_2 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_2")
self.iri_wam_joint_3 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_3")
self.iri_wam_joint_4 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_4")
self.iri_wam_joint_5 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_5")
self.iri_wam_joint_6 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_6")
self.iri_wam_joint_7 = rospy.get_param(
"/iriwam/init_joints/iri_wam_joint_7")
self.init_joints_positions_array = [self.iri_wam_joint_1,
self.iri_wam_joint_2,
self.iri_wam_joint_3,
self.iri_wam_joint_4,
self.iri_wam_joint_5,
self.iri_wam_joint_6,
self.iri_wam_joint_7]
self.init_joints_x_max = rospy.get_param(
"/iriwam/work_space/x_max")
self.init_joints_y_max = rospy.get_param(
"/iriwam/work_space/y_max")
self.init_joints_z_max = rospy.get_param(
"/iriwam/work_space/z_max")
self.init_joints_x_min = rospy.get_param(
"/iriwam/work_space/x_min")
self.init_joints_y_min = rospy.get_param(
"/iriwam/work_space/y_min")
self.init_joints_z_min = rospy.get_param(
"/iriwam/work_space/z_min")
self.joint_increment_value = rospy.get_param(
"/iriwam/joint_increment_value")
self.max_distance_from_red_bowl = rospy.get_param(
"/iriwam/max_distance_from_red_bowl")
self.min_distance_from_red_bowl = rospy.get_param(
"/iriwam/min_distance_from_red_bowl")
self.min_laser_distance = rospy.get_param("/iriwam/min_laser_distance")
self.dec_obs = rospy.get_param("/iriwam/number_decimals_precision_obs")
# We place the Maximum and minimum values of observations
# TODO: Fill when get_observations is done.
"""
We suppose that it's all these:
head_pan, right_gripper_l_finger_joint, right_gripper_r_finger_joint, right_j0, right_j1,
right_j2, right_j3, right_j4, right_j5, right_j6
Plus the first three are the block_to_tcp vector
"""
# We fetch the limits of the joinst to get the effort and angle limits
self.joint_limits = self.init_joint_limits()
high = numpy.array([self.init_joints_x_max,
self.init_joints_y_max,
self.init_joints_z_max,
self.joint_limits[0].max_position,
self.joint_limits[1].max_position,
self.joint_limits[2].max_position,
self.joint_limits[3].max_position,
self.joint_limits[4].max_position,
self.joint_limits[5].max_position,
self.joint_limits[6].max_position
])
low = numpy.array([self.init_joints_x_min,
self.init_joints_y_min,
self.init_joints_z_min,
self.joint_limits[0].min_position,
self.joint_limits[1].min_position,
self.joint_limits[2].min_position,
self.joint_limits[3].min_position,
self.joint_limits[4].min_position,
self.joint_limits[5].min_position,
self.joint_limits[6].min_position
])
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.done_reward = rospy.get_param("/iriwam/done_reward")
self.closer_to_block_reward = rospy.get_param(
"/iriwam/closer_to_block_reward")
self.cumulated_steps = 0.0
# We init the CVBridge object
self.bridge_object = CvBridge()
rospy.logdebug("END IriWamTcpToBowlEnv INIT...")
def _set_init_pose(self):
"""
Moves the IriWam joints to the initial angles loaded from the yaml file,
blocking until the movement is executed.
"""
# We set the angles of the IriWam to the init pose:
self.move_joints_to_angle_blocking(self.init_joints_positions_array)
self.joints = []
for joint_value in self.init_joints_positions_array:
self.joints.append(joint_value)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
image_data = self.get_camera_rgb_image_raw()
self.previous_distance_from_bowl = self.get_magnitud_tcp_to_block(
data=image_data)
def _set_action(self, action):
"""
It sets the joints of iriwam based on the action integer given
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
if action == 0: # Increase joint_0
self.joints[0] += self.joint_increment_value
elif action == 1: # Decrease joint_0
self.joints[0] -= self.joint_increment_value
elif action == 2: # Increase joint_1
self.joints[1] += self.joint_increment_value
elif action == 3: # Decrease joint_1
self.joints[1] -= self.joint_increment_value
elif action == 4: # Increase joint_2
self.joints[2] += self.joint_increment_value
elif action == 5: # Decrease joint_2
self.joints[2] -= self.joint_increment_value
elif action == 6: # Increase joint_3
self.joints[3] += self.joint_increment_value
elif action == 7: # Decrease joint_3
self.joints[3] -= self.joint_increment_value
elif action == 8: # Increase joint_4
self.joints[4] += self.joint_increment_value
elif action == 9: # Decrease joint_4
self.joints[4] -= self.joint_increment_value
elif action == 10: # Increase joint_5
self.joints[5] += self.joint_increment_value
elif action == 11: # Decrease joint_5
self.joints[5] -= self.joint_increment_value
elif action == 12: # Increase joint_6
self.joints[6] += self.joint_increment_value
elif action == 13: # Decrease joint_6
self.joints[6] -= self.joint_increment_value
elif action == 14: # Increase joint_7
self.joints[7] += self.joint_increment_value
elif action == 15: # Decrease joint_7
self.joints[7] -= self.joint_increment_value
# We tell iriwam the action to perform
self.move_joints_to_angle_blocking(self.joints)
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have access to, we need to read the
iriwamEnv API DOCS.
:return: observation
"""
rospy.logdebug("Start Get Observation ==>")
# We get the current joint state of all the joints
join_state = self.get_joint_state()
joints_angles_array = join_state.actual.positions
joints_angles_array_round = numpy.around(
joints_angles_array, decimals=self.dec_obs)
# We get the Laser reading of the center laser ray, only one
laser_data = self.get_laser_scan()
center_laser_distance = laser_data.ranges[int(
len(laser_data.ranges)/2)]
center_laser_distance_array = [
round(center_laser_distance, self.dec_obs)]
# We get the distance laser tip to the red bowl using the image blob detection system
image_data = self.get_camera_rgb_image_raw()
distance_from_bowl = self.get_magnitud_tcp_to_block(data=image_data)
distance_from_bowl_array = [round(distance_from_bowl, self.dec_obs)]
# We concatenate the two rounded arrays and convert them to standard Python list
observation = numpy.concatenate(
(joints_angles_array_round, center_laser_distance_array, distance_from_bowl_array), axis=0).tolist()
return observation
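# Illustrative shape of the returned observation (numbers are made up): 7 rounded
# joint angles, then the center laser range, then the image-based distance to the
# bowl, e.g.
#   [0.0, 0.25, 0.0, 1.4, 0.0, -0.5, 0.0, 1.32, 87.5]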
def _is_done(self, observations):
"""
We consider the episode done if:
1) The iriwam end effector to bowl distance exceeds the maximum
2) The iriwam end effector to bowl distance reaches the minimum and laser distance is lower than minimum
"""
distance_laser = observations[-2]
magnitude_image = observations[-1]
has_reached_the_block = self.reached_bowl(
distance_laser, magnitude_image)
too_faraway_bowl_b = self.too_faraway_bowl(distance_laser, magnitude_image)
done = has_reached_the_block or too_faraway_bowl_b
rospy.logdebug("#### IS DONE ? ####")
rospy.logdebug("done ?="+str(done))
rospy.logdebug("#### #### ####")
return done
def _compute_reward(self, observations, done):
"""
We base the reward on whether the episode is done and on whether
the distance to the block has decreased or not.
:return:
"""
tf_tcp_to_block_vector = Vector3()
tf_tcp_to_block_vector.x = observations[0]
tf_tcp_to_block_vector.y = observations[1]
tf_tcp_to_block_vector.z = observations[2]
distance_block_to_tcp = self.get_magnitud_tf_tcp_to_block(
tf_tcp_to_block_vector)
distance_difference = distance_block_to_tcp - self.previous_distance_from_bowl
if not done:
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logdebug("DECREASE IN DISTANCE GOOD")
reward = self.closer_to_block_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
#reward = -1*self.closer_to_block_reward
reward = 0.0
else:
if self.reached_block(tf_tcp_to_block_vector, self.acceptable_distance_to_cube, self.translation_tcp_world[2], self.tcp_z_position_min):
reward = self.done_reward
else:
reward = -1*self.done_reward
self.previous_distance_from_bowl = distance_block_to_tcp
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def is_arm_stuck(self, joints_efforts_dict):
"""
Checks if the efforts in the arm joints exceed a certain threshold.
We will only check the joints_0,1,2,3,4,5,6
"""
is_arm_stuck = False
for joint_name in self.joint_limits.joint_names:
if joint_name in joints_efforts_dict:
effort_value = joints_efforts_dict[joint_name]
index = self.joint_limits.joint_names.index(joint_name)
effort_limit = self.joint_limits.effort[index]
rospy.logdebug("Joint Effort ==>Name="+str(joint_name) +
",Effort="+str(effort_value)+",Limit="+str(effort_limit))
if abs(effort_value) > effort_limit:
is_arm_stuck = True
rospy.logerr("Joint Effort TOO MUCH ==>" +
str(joint_name)+","+str(effort_value))
break
else:
rospy.logdebug("Joint Effort is ok==>" +
str(joint_name)+","+str(effort_value))
else:
rospy.logdebug(
"Joint Name is not in the effort dict==>"+str(joint_name))
return is_arm_stuck
def reached_bowl(self, distance_laser, magnitude_image):
"""
It returns True if the distance read by the laser is smaller than the minimum and
the distance measured from the image is smaller than the minimum.
"""
laser_close_enough = (distance_laser <= self.min_laser_distance)
magnitude_image_enough = (
magnitude_image <= self.min_distance_from_red_bowl)
reached_block_b = laser_close_enough and magnitude_image_enough
rospy.logdebug("###### REACHED BLOCK ? ######")
rospy.logdebug("laser_close_enough==>"+str(laser_close_enough))
rospy.logdebug("magnitude_image_enough==>"+str(magnitude_image_enough))
rospy.logdebug("reached_block_b==>"+str(reached_block_b))
rospy.logdebug("############")
return reached_block_b
def too_faraway_bowl(self, distance_laser, magnitude_image):
"""
It returns True if the distance measured from the image is bigger than the maximum.
"""
laser_close_enough = (distance_laser <= self.min_laser_distance)
# The bowl is considered too far away when the image-based distance exceeds the maximum allowed.
magnitude_image_too_big = (
magnitude_image >= self.max_distance_from_red_bowl)
too_faraway_b = laser_close_enough and magnitude_image_too_big
rospy.logdebug("###### REACHED BLOCK ? ######")
rospy.logdebug("magnitude_image_too_big==>" +
str(magnitude_image_too_big))
rospy.logdebug("too_faraway_b==>"+str(too_faraway_b))
rospy.logdebug("############")
return too_faraway_b
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
def get_magnitud_tf_tcp_to_block(self, translation_vector):
"""
Given a Vector3 object, get its magnitude.
:param translation_vector:
:return:
"""
a = numpy.array((translation_vector.x,
translation_vector.y,
translation_vector.z))
distance = numpy.linalg.norm(a)
return distance
def get_orientation_euler(self, quaternion_vector):
# We convert from quaternions to euler
orientation_list = [quaternion_vector.x,
quaternion_vector.y,
quaternion_vector.z,
quaternion_vector.w]
roll, pitch, yaw = euler_from_quaternion(orientation_list)
return roll, pitch, yaw
def is_inside_workspace(self, current_position):
"""
Check if the iriwam is inside the Workspace defined
"""
is_inside = False
rospy.logdebug("##### INSIDE WORK SPACE? #######")
rospy.logdebug("XYZ current_position"+str(current_position))
rospy.logdebug("init_joints_x_max"+str(self.init_joints_x_max) +
",init_joints_x_min="+str(self.init_joints_x_min))
rospy.logdebug("init_joints_y_max"+str(self.init_joints_y_max) +
",init_joints_y_min="+str(self.init_joints_y_min))
rospy.logdebug("init_joints_z_max"+str(self.init_joints_z_max) +
",init_joints_z_min="+str(self.init_joints_z_min))
rospy.logdebug("############")
if current_position.x > self.init_joints_x_min and current_position.x <= self.init_joints_x_max:
if current_position.y > self.init_joints_y_min and current_position.y <= self.init_joints_y_max:
if current_position.z > self.init_joints_z_min and current_position.z <= self.init_joints_z_max:
is_inside = True
return is_inside
def get_magnitud_tcp_to_block(self, data):
"""
Retrieves the distance from the end-effector laser element to the red bowl through the
given image data.
:param: data: RGB image data
:return: magnitude: Distance in pixels from the center of the black blob (the laser)
to the center of the red blob (the red bowl).
Bear in mind that if the laser tip goes out of the camera's view, it will give a false positive.
"""
try:
# We select bgr8 because it is the default OpenCV encoding
cv_image = self.bridge_object.imgmsg_to_cv2(
data, desired_encoding="bgr8")
except CvBridgeError as e:
print(e)
cv_image = None
if cv_image is not None:
# We get the image dimensions and crop the parts of the image we don't need.
# Bear in mind that the first slice index is the start row and the second one is the end row.
# Select the limits so that the region of interest is neither too close nor too far,
# and keep the cropped portion as small as possible to make processing faster.
height, width, channels = cv_image.shape
descentre = -height // 2
rows_to_watch = height
# Integer division keeps the slice indices integers, so the crop works in both Python 2 and 3.
crop_img = cv_image[height // 2 + descentre:height // 2 + (descentre + rows_to_watch)][1:width]
# Convert from RGB to HSV
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
# We track two colours: the red bowl and the black tip of the IriWam arm (the laser).
# RED BOWL
lower_red = numpy.array([0, 204, 100])
upper_red = numpy.array([0, 255, 255])
# Threshold the HSV image to get only red colors
mask = cv2.inRange(hsv, lower_red, upper_red)
# Calculate centroid of the blob of binary image using ImageMoments
m = cv2.moments(mask, False)
try:
cx_red, cy_red = m['m10']/m['m00'], m['m01']/m['m00']
except ZeroDivisionError:
cy_red, cx_red = height/2, width/2
# Black Laser
lower_black = numpy.array([0, 0, 0])
upper_black = numpy.array([0, 0, 10])
# Threshold the HSV image to get only black colors
mask_black = cv2.inRange(hsv, lower_black, upper_black)
# Calculate centroid of the blob of binary image using ImageMoments
m = cv2.moments(mask_black, False)
try:
cx_black, cy_black = m['m10']/m['m00'], m['m01']/m['m00']
except ZeroDivisionError:
cy_black, cx_black = height/2, width/2
# Bitwise-AND mask and original image
res_black = cv2.bitwise_and(crop_img, crop_img)
# Draw the centroids in the resulting image
cv2.circle(res_black, (int(cx_red), int(cy_red)), 10, (255, 0, 0), -1)
cv2.circle(res_black, (int(cx_black), int(cy_black)),
10, (0, 255, 0), -1)
cv2.imshow("RES BLACK", res_black)
cv2.waitKey(1)
error_x = cx_red - cx_black
error_y = cy_red - cy_black
error_array = numpy.array([error_x, error_y])
magnitude = numpy.linalg.norm(error_array)
rospy.logwarn("Magnitude==>"+str(magnitude))
else:
magnitude = 10.0
return magnitude
def clean_up(self):
cv2.destroyAllWindows()
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~wamv_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from nav_msgs.msg import Odometry
from robotx_gazebo.msg import UsvDrive
from openai_ros.openai_ros_common import ROSLauncher
class WamvEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all WamvEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new WamvEnv environment.
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins that use tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The accessible sensors are the ones considered useful for AI learning.
Sensor Topic List:
* /wamv/odom: Odometry of the Base of Wamv
Actuators Topic List:
* /cmd_drive: You publish the speed of the left and right propellers.
Args:
"""
rospy.logdebug("Start WamvEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="robotx_gazebo",
launch_file_name="put_wamv_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(WamvEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("WamvEnv unpause1...")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_systems_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/wamv/odom", Odometry, self._odom_callback)
self.publishers_array = []
self._cmd_drive_pub = rospy.Publisher('/cmd_drive', UsvDrive, queue_size=1)
self.publishers_array.append(self._cmd_drive_pub)
self._check_all_publishers_ready()
self.gazebo.pauseSim()
rospy.logdebug("Finished WamvEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("WamvEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END WamvEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /wamv/odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/wamv/odom", Odometry, timeout=1.0)
rospy.logdebug("Current /wamv/odom READY=>")
except:
rospy.logerr("Current /wamv/odom not ready yet, retrying for getting odom")
return self.odom
def _odom_callback(self, data):
self.odom = data
def _check_all_publishers_ready(self):
"""
Checks that all the publishers are working
:return:
"""
rospy.logdebug("START ALL SENSORS READY")
for publisher_object in self.publishers_array:
self._check_pub_connection(publisher_object)
rospy.logdebug("ALL SENSORS READY")
def _check_pub_connection(self, publisher_object):
rate = rospy.Rate(10) # 10hz
while publisher_object.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to publisher_object yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("publisher_object Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def set_propellers_speed(self, right_propeller_speed, left_propeller_speed, time_sleep=1.0):
"""
It will set the speed of each of the two propellers of the WAM-V.
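Example (illustrative sketch only; the actual value range and sign convention
depend on the UsvDrive message definition):
self.set_propellers_speed(right_propeller_speed=0.5,
left_propeller_speed=0.5,
time_sleep=1.0)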
"""
i = 0
for publisher_object in self.publishers_array:
usv_drive_obj = UsvDrive()
usv_drive_obj.right = right_propeller_speed
usv_drive_obj.left = left_propeller_speed
rospy.logdebug("usv_drive_obj>>"+str(usv_drive_obj))
publisher_object.publish(usv_drive_obj)
i += 1
self.wait_time_for_execute_movement(time_sleep)
def wait_time_for_execute_movement(self, time_sleep):
"""
Because this WAM-V position is global, we really don't have
a way to know if it is moving in the desired direction; that would require
evaluating the difference in position and speed in the local reference frame.
"""
time.sleep(time_sleep)
def get_odom(self):
return self.odom
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~templates~template_my_robot_env.py | from openai_ros import robot_gazebo_env
class MyRobotEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all Robot environments.
"""
def __init__(self):
"""Initializes a new Robot environment.
"""
# Variables that we give through the constructor.
# Internal Vars
self.controllers_list = ['my_robot_controller1','my_robot_controller2', ..., 'my_robot_controllerX']
self.robot_name_space = "my_robot_namespace"
reset_controls_bool = True or False
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(MyRobotEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=reset_controls_bool)
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
# TODO
return True
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ---------------------------- | [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~fetch~fetch_test_task.py | from gym import utils
import copy
import rospy
from gym import spaces
from openai_ros.robot_envs import fetch_env
from gym.envs.registration import register
import numpy as np
from sensor_msgs.msg import JointState
from openai_ros.openai_ros_common import ROSLauncher
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
import os
class FetchTestEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self):
# Launch the Task Simulated-Environment
# This is the path where the simulation files are,
# the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/fetch/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="fetch_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file relative to this TaskEnvironment
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/fetch/config",
yaml_file_name="fetch_test.yaml")
rospy.logdebug("Entered FetchTestEnv Env")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
observations_high_range = np.array(
[self.position_ee_max]*self.n_observations)
observations_low_range = np.array(
[self.position_ee_min]*self.n_observations)
observations_high_dist = np.array([self.max_distance])
observations_low_dist = np.array([0.0])
high = np.concatenate(
[observations_high_range, observations_high_dist])
low = np.concatenate([observations_low_range, observations_low_dist])
self.observation_space = spaces.Box(low, high)
# TODO: Clean up
# fetch_env.FetchEnv.__init__(self)
super(FetchTestEnv, self).__init__(ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/fetch/n_actions')
self.n_observations = rospy.get_param('/fetch/n_observations')
self.position_ee_max = rospy.get_param('/fetch/position_ee_max')
self.position_ee_min = rospy.get_param('/fetch/position_ee_min')
self.init_pos = rospy.get_param('/fetch/init_pos')
self.setup_ee_pos = rospy.get_param('/fetch/setup_ee_pos')
self.goal_ee_pos = rospy.get_param('/fetch/goal_ee_pos')
self.position_delta = rospy.get_param('/fetch/position_delta')
self.step_punishment = rospy.get_param('/fetch/step_punishment')
self.closer_reward = rospy.get_param('/fetch/closer_reward')
self.impossible_movement_punishement = rospy.get_param(
'/fetch/impossible_movement_punishement')
self.reached_goal_reward = rospy.get_param(
'/fetch/reached_goal_reward')
self.max_distance = rospy.get_param('/fetch/max_distance')
self.desired_position = [self.goal_ee_pos["x"],
self.goal_ee_pos["y"], self.goal_ee_pos["z"]]
self.gripper_rotation = [1., 0., 1., 0.]
def _set_init_pose(self):
"""Sets the Robot in its init pose
The Simulation will be unpaused for this purpose.
"""
# Check because it seems it's not being used
rospy.logdebug("Init Pos:")
rospy.logdebug(self.init_pos)
"""
# Init Joint Pose
rospy.logdebug("Moving To SETUP Joints ")
self.movement_result = self.set_trajectory_joints(self.init_pos)
"""
# We test the Desired Goal
# INIT POSE
rospy.logdebug("Moving To TEST DESIRED GOAL Position ")
action = self.create_action(
self.desired_position, self.gripper_rotation)
self.movement_result = self.set_trajectory_ee(action)
if self.movement_result:
# INIT POSE
rospy.logdebug("Moving To SETUP Position ")
self.last_gripper_target = [
self.setup_ee_pos["x"], self.setup_ee_pos["y"], self.setup_ee_pos["z"]]
action = self.create_action(
self.last_gripper_target, self.gripper_rotation)
self.movement_result = self.set_trajectory_ee(action)
self.current_dist_from_des_pos_ee = self.calculate_distance_between(
self.desired_position, self.last_gripper_target)
rospy.logdebug("INIT DISTANCE FROM GOAL==>" +
str(self.current_dist_from_des_pos_ee))
else:
assert False, "Desired GOAL EE is not possible"
self.last_action = "INIT"
rospy.logdebug("Init Pose Results ==>"+str(self.movement_result))
return self.movement_result
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
The simulation will be paused, therefore all the data retrieved has to be
from a system that doesn't need the simulation running, like variables where the
callbacks have stored the last known sensor data.
:return:
"""
rospy.logdebug("Init Env Variables...")
rospy.logdebug("Init Env Variables...END")
def _set_action(self, action):
delta_gripper_target = [0.0]*len(self.last_gripper_target)
# We convert action ints in increments/decrements of one of the axis XYZ
if action == 0: # X+
delta_gripper_target[0] += self.position_delta
self.last_action = "X+"
elif action == 1: # X-
delta_gripper_target[0] -= self.position_delta
self.last_action = "X-"
elif action == 2: # Y+
delta_gripper_target[1] += self.position_delta
self.last_action = "Y+"
elif action == 3: # Y-
delta_gripper_target[1] -= self.position_delta
self.last_action = "Y-"
elif action == 4: # Z+
delta_gripper_target[2] += self.position_delta
self.last_action = "Z+"
elif action == 5: # Z-
delta_gripper_target[2] -= self.position_delta
self.last_action = "Z-"
gripper_target = copy.deepcopy(self.last_gripper_target)
gripper_target[0] += delta_gripper_target[0]
gripper_target[1] += delta_gripper_target[1]
gripper_target[2] += delta_gripper_target[2]
# Apply action to simulation.
action_end_effector = self.create_action(
gripper_target, self.gripper_rotation)
self.movement_result = self.set_trajectory_ee(action_end_effector)
if self.movement_result:
# If the End Effector positioning was successful, we replace the last one with the new one.
self.last_gripper_target = copy.deepcopy(gripper_target)
else:
rospy.logerr("Impossible End Effector Position...." +
str(gripper_target))
rospy.logwarn("END Set Action ==>"+str(action) +
", NAME="+str(self.last_action))
def _get_obs(self):
"""
It returns the Position of the TCP/EndEffector as observation.
And the distance from the desired point
Orientation for the moment is not considered
"""
grip_pos = self.get_ee_pose()
grip_pos_array = [grip_pos.pose.position.x,
grip_pos.pose.position.y, grip_pos.pose.position.z]
obs = grip_pos_array
new_dist_from_des_pos_ee = self.calculate_distance_between(
self.desired_position, grip_pos_array)
obs.append(new_dist_from_des_pos_ee)
rospy.logdebug("OBSERVATIONS====>>>>>>>"+str(obs))
return obs
def _is_done(self, observations):
"""
If the latest action didn't succeed, it means that the requested position was impossible, therefore the episode must end.
It will also end if it reaches its goal.
"""
current_pos = observations[:3]
done = self.calculate_if_done(
self.movement_result, self.desired_position, current_pos)
return done
def _compute_reward(self, observations, done):
"""
We punish each step that passes without achieving the goal.
Punishes differently if it reached a position that is impossible to move to.
Rewards getting to a position close to the goal.
"""
current_pos = observations[:3]
new_dist_from_des_pos_ee = observations[-1]
reward = self.calculate_reward(
self.movement_result, self.desired_position, current_pos, new_dist_from_des_pos_ee)
rospy.logwarn(">>>REWARD>>>"+str(reward))
return reward
def calculate_if_done(self, movement_result, desired_position, current_pos):
"""
It calculates whether it has finished or not.
"""
done = False
if movement_result:
position_similar = np.all(np.isclose(
desired_position, current_pos, atol=1e-02))
if position_similar:
done = True
rospy.logdebug("Reached a Desired Position!")
else:
done = True
rospy.logdebug("Reached a TCP position not reachable")
return done
def calculate_reward(self, movement_result, desired_position, current_pos, new_dist_from_des_pos_ee):
"""
It calculates whether it has finished or not and how much reward to give.
"""
if movement_result:
position_similar = np.all(np.isclose(
desired_position, current_pos, atol=1e-02))
# Calculating Distance
rospy.logwarn("desired_position="+str(desired_position))
rospy.logwarn("current_pos="+str(current_pos))
rospy.logwarn("self.current_dist_from_des_pos_ee=" +
str(self.current_dist_from_des_pos_ee))
rospy.logwarn("new_dist_from_des_pos_ee=" +
str(new_dist_from_des_pos_ee))
delta_dist = new_dist_from_des_pos_ee - self.current_dist_from_des_pos_ee
if position_similar:
reward = self.reached_goal_reward
rospy.logwarn("Reached a Desired Position!")
else:
if delta_dist < 0:
reward = self.closer_reward
rospy.logwarn(
"CLOSER To Desired Position!="+str(delta_dist))
else:
reward = self.step_punishment
rospy.logwarn(
"FURTHER FROM Desired Position!"+str(delta_dist))
else:
reward = self.impossible_movement_punishement
rospy.logwarn("Reached a TCP position not reachable")
# We update the distance
self.current_dist_from_des_pos_ee = new_dist_from_des_pos_ee
rospy.logdebug("Updated Distance from GOAL==" +
str(self.current_dist_from_des_pos_ee))
return reward
def calculate_distance_between(self, v1, v2):
"""
Calculates the Euclidean distance between two vectors given as Python lists.
"""
dist = np.linalg.norm(np.array(v1)-np.array(v2))
return dist
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~multi_robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .multi_robot_gazebo_connection import GazeboConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
# https://github.com/openai/gym/blob/master/gym/core.py
class MultiRobotGazeboEnv(gym.Env):
def __init__(self, start_init_physics_parameters=True):
# To reset Simulations
rospy.logdebug("START init MultiRobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters)
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/openai/reward', RLExperimentInfo, queue_size=1)
rospy.logdebug("END init MultiRobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
Here we get the action, execute it for a time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action num to movement action, execute the action in the
simulation and get the observations result of performing that action.
"""
rospy.logdebug("START STEP OpenAIROS")
self.gazebo.unpauseSim()
self._set_action(action)
self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
rospy.logdebug("END STEP OpenAIROS")
return obs, reward, done, info
def reset(self):
rospy.logdebug("Reseting MultiRobotGazeboEnvironment")
self._init_env_variables()
self._reset_sim()
self._update_episode()
obs = self._get_obs()
rospy.logdebug("END Reseting MultiRobotGazeboEnvironment")
return obs
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("RESET SIM START")
self.gazebo.pauseSim()
self._set_init_gazebo_pose()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_ros()
rospy.logdebug("RESET SIM END")
return True
def _set_init_gazebo_pose(self):
"""Sets the Robot in its init pose in Gazebo
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _set_init_ros(self):
"""Sets the Robot in its init pose in ROS
"""
raise NotImplementedError()
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
rospy.logwarn("PUBLISHING REWARD...")
self._publish_reward_topic(
self.cumulated_episode_reward,
self.episode_num
)
rospy.logwarn("PUBLISHING REWARD...DONE="+str(self.cumulated_episode_reward)+",EP="+str(self.episode_num))
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing MultiRobotGazeboEnvironment")
rospy.signal_shutdown("Closing MultiRobotGazeboEnvironment")
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~robot_envs~iriwam_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from control_msgs.msg import JointTrajectoryControllerState
from openai_ros.openai_ros_common import ROSLauncher
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from moveit_msgs.msg import JointLimits
class IriWamEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all IriWamEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new IriWamEnv environment.
To check any topic we need to have the simulations running, we need to do two things:
1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations
that are pause for whatever the reason
2) If the simulation was running already for some reason, we need to reset the controlers.
This has to do with the fact that some plugins with tf, dont understand the reset of the simulation
and need to be reseted to work properly.
The Sensors: The sensors accesible are the ones considered usefull for AI learning.
Sensor Topic List:
* /camera/depth/image_raw
* /camera/depth/points
* /camera/rgb/image_raw
* /laser_scan: Laser scan of the TCP
* /iri_wam/iri_wam_controller/state, control_msgs/JointTrajectoryControllerState: Gives desired, actual and error.
Actuators Topic List:
* We publish in the action topic: /iri_wam/iri_wam_controller/follow_joint_trajectory/goal
Args:
"""
rospy.logdebug("Start IriWamEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="iri_wam_gazebo",
launch_file_name="put_iriwam_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(IriWamEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("IriWamEnv unpause...")
self.gazebo.unpauseSim()
self._check_all_systems_ready()
rospy.Subscriber("/camera/depth/image_raw", Image,
self._camera_depth_image_raw_callback)
rospy.Subscriber("/camera/depth/points", PointCloud2,
self._camera_depth_points_callback)
rospy.Subscriber("/camera/rgb/image_raw", Image,
self._camera_rgb_image_raw_callback)
rospy.Subscriber("/laser_scan", LaserScan, self._laser_scan_callback)
rospy.Subscriber("/iri_wam/iri_wam_controller/state",
JointTrajectoryControllerState, self._joint_state_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished IriWamEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("IriWamEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END IriWamEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
# self._check_camera_depth_image_raw_ready()
# self._check_camera_depth_points_ready()
# self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message(
"/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr(
"Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message(
"/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr(
"Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message(
"/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr(
"Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /laser_scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message(
"/laser_scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /laser_scan READY=>")
except:
rospy.logerr(
"Current /laser_scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _check_joint_state_ready(self):
self.joint_state = None
rospy.logdebug(
"Waiting for /iri_wam/iri_wam_controller/state to be READY...")
while self.joint_state is None and not rospy.is_shutdown():
try:
self.joint_state = rospy.wait_for_message(
"/iri_wam/iri_wam_controller/state", JointTrajectoryControllerState, timeout=5.0)
rospy.logdebug(
"Current /iri_wam/iri_wam_controller/state READY=>")
except:
rospy.logerr(
"Current /iri_wam/iri_wam_controller/state not ready yet, retrying for getting laser_scan")
return self.joint_state
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _joint_state_callback(self, data):
self.joint_state = data
def _setup_tf_listener(self):
"""
Set ups the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
self.traj_object = IriWamExecTrajectory()
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints_to_angle_blocking(self, joints_positions_array):
"""
It moves all the joints to the given position and doesn't return until it reaches that position.
:param: joints_positions_array: It's an array that has the desired joint positions in radians. The order of the
joints is:
[ "iri_wam_joint_1",
"iri_wam_joint_2",
"iri_wam_joint_3",
"iri_wam_joint_4",
"iri_wam_joint_5",
"iri_wam_joint_6",
"iri_wam_joint_7"]
"""
self.traj_object.send_joints_positions(joints_positions_array)
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something different from None if the TFs of the two frames are published
on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
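Example (illustrative; the frame names are assumptions that depend on the URDF loaded,
and None, None is returned if the transform is not available yet):
trans, rot = self.get_tf_start_to_end_frames("iri_wam_link_base", "iri_wam_link_7")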
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
pass
return trans, rot
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def get_joint_state(self):
return self.joint_state
def get_joint_limits(self):
"""
name: [iri_wam_joint_1, iri_wam_joint_2, iri_wam_joint_3, iri_wam_joint_4, iri_wam_joint_5,
iri_wam_joint_6, iri_wam_joint_7]
position: [-2.453681702263566e-11, 5.5375411835534294e-05, -2.9760194308892096e-11, -0.0062733383258359865, 1.8740564655672642e-13, 2.6570746959997393e-05, 1.5187850976872141e-13]
"""
name_array = ["iri_wam_joint_1",
"iri_wam_joint_2",
"iri_wam_joint_3",
"iri_wam_joint_4",
"iri_wam_joint_5",
"iri_wam_joint_6",
"iri_wam_joint_7"]
up_limits_array = [2.6,
2.0,
2.8,
3.1,
1.24,
1.6,
3.0]
down_limits_array = [-2.6,
-2.0,
-2.8,
-0.9,
-4.76,
-1.6,
-3.0]
joint_limits_array = []
for i in range(len(name_array)):
joint_limits = JointLimits()
joint_limits.min_position = down_limits_array[i]
joint_limits.max_position = up_limits_array[i]
joint_limits_array.append(joint_limits)
return joint_limits_array
def init_joint_limits(self):
"""
Get the Joint Limits in the init phase, where we need to unpause the simulation to get them.
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
joint_limits = self.get_joint_limits()
return joint_limits
class IriWamExecTrajectory(object):
def __init__(self):
# create the connection to the action server
self.client = actionlib.SimpleActionClient(
'/iri_wam/iri_wam_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
# waits until the action server is up and running
self.client.wait_for_server()
self.init_goal_message()
def init_goal_message(self):
"""
We initialise the variable that we will use to send the goals.
We will reuse it because most of the values are fixed.
"""
self.PENDING = 0
self.ACTIVE = 1
self.DONE = 2
self.WARN = 3
self.ERROR = 4
# We initialise the goal, set to go to the init pose
# creates a goal to send to the action server
self.goal = FollowJointTrajectoryGoal()
# We fill in the Goal
self.goal.trajectory.header.stamp = rospy.Time.now()
self.goal.trajectory.header.frame_id = "iri_wam_link_base"
self.goal.trajectory.joint_names = ["iri_wam_joint_1",
"iri_wam_joint_2",
"iri_wam_joint_3",
"iri_wam_joint_4",
"iri_wam_joint_5",
"iri_wam_joint_6",
"iri_wam_joint_7"]
# Some of them don't quite coincide with the URDF limits, just because those limits break the simulation.
self.max_values = [2.6,
2.0,
2.8,
3.0,
1.24,
1.5,
3.0]
self.min_values = [-2.6,
-2.0,
-2.8,
-0.9,
-1.24,
-1.4,
-3.0]
self.goal.trajectory.points = []
joint_traj_point = JointTrajectoryPoint()
# TODO
joint_traj_point.positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
joint_traj_point.velocities = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
joint_traj_point.accelerations = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
joint_traj_point.effort = []
joint_traj_point.time_from_start = rospy.Duration.from_sec(1.0)
self.goal.trajectory.points.append(joint_traj_point)
def get_goal(self):
return self.goal
def feedback_callback(self, feedback):
rospy.loginfo("##### FEEDBACK ######")
# rospy.loginfo(str(feedback.joint_names))
# rospy.loginfo(str(feedback.desired.positions))
# rospy.loginfo(str(feedback.actual.positions))
rospy.loginfo(str(feedback.error.positions))
rospy.loginfo("##### ###### ######")
def send_joints_positions(self, joints_positions_array, seconds_duration=0.05):
my_goal = self.get_goal()
my_goal.trajectory.header.stamp = rospy.Time.now()
joint_traj_point = JointTrajectoryPoint()
# We clamp the values to max and min to avoid requesting configurations that IriWam can't reach.
joint_traj_point.positions = numpy.clip(joints_positions_array,
self.min_values,
self.max_values).tolist()
joint_traj_point.velocities = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
joint_traj_point.accelerations = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
joint_traj_point.effort = []
joint_traj_point.time_from_start = rospy.Duration.from_sec(
seconds_duration)
my_goal.trajectory.points = []
my_goal.trajectory.points.append(joint_traj_point)
# sends the goal to the action server, specifying which feedback function
# to call when feedback received
self.client.send_goal(my_goal, feedback_cb=self.feedback_callback)
# Uncomment these lines to test goal preemption:
# self.client.cancel_goal() # would cancel the goal 3 seconds after starting
state_result = self.client.get_state()
rate = rospy.Rate(10)
rospy.loginfo("state_result: "+str(state_result))
while state_result < self.DONE:
rospy.loginfo(
"Doing Stuff while waiting for the Server to give a result....")
rate.sleep()
state_result = self.client.get_state()
rospy.loginfo("state_result: "+str(state_result))
rospy.loginfo("[Result] State: "+str(state_result))
if state_result == self.ERROR:
rospy.logerr("Something went wrong in the Server Side")
if state_result == self.WARN:
rospy.logwarn("There is a warning in the Server Side")
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~fetch~fetch_push.py | from gym import utils
import copy
import rospy
from gym import spaces
from openai_ros.robot_envs import fetch_env
from gym.envs.registration import register
import numpy as np
from sensor_msgs.msg import JointState
from openai_ros.openai_ros_common import ROSLauncher
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
import os
class FetchPushEnv(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self):
# Launch the Task Simulated-Environment
# This is the path where the simulation files are,
# the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/fetch/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="fetch_simple_description",
launch_file_name="start_HER_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file relative to this TaskEnvironment
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/fetch/config",
yaml_file_name="fetch_push.yaml")
self.get_params()
# TODO: this must be continuous action space... don't follow the old implementation.
self.action_space = spaces.Discrete(self.n_actions)
observations_high_range = np.array(
[self.position_ee_max]*self.n_observations)
observations_low_range = np.array(
[self.position_ee_min]*self.n_observations)
observations_high_dist = np.array([self.max_distance])
observations_low_dist = np.array([0.0])
high = np.concatenate(
[observations_high_range, observations_high_dist])
low = np.concatenate([observations_low_range, observations_low_dist])
self.observation_space = spaces.Box(low, high)
# TODO: Clean up
# fetch_env.FetchEnv.__init__(self)
super(FetchPushEnv, self).__init__(ros_ws_abspath)
def get_params(self):
"""
get configuration parameters
"""
self.sim_time = rospy.get_time()
self.n_actions = rospy.get_param('/fetch/n_actions')
self.n_observations = rospy.get_param('/fetch/n_observations')
self.position_ee_max = rospy.get_param('/fetch/position_ee_max')
self.position_ee_min = rospy.get_param('/fetch/position_ee_min')
self.init_pos = rospy.get_param('/fetch/init_pos')
self.setup_ee_pos = rospy.get_param('/fetch/setup_ee_pos')
self.goal_ee_pos = rospy.get_param('/fetch/goal_ee_pos')
self.position_delta = rospy.get_param('/fetch/position_delta')
self.step_punishment = rospy.get_param('/fetch/step_punishment')
self.closer_reward = rospy.get_param('/fetch/closer_reward')
self.impossible_movement_punishement = rospy.get_param(
'/fetch/impossible_movement_punishement')
self.reached_goal_reward = rospy.get_param(
'/fetch/reached_goal_reward')
self.max_distance = rospy.get_param('/fetch/max_distance')
self.goal = np.array(
[self.goal_ee_pos["x"], self.goal_ee_pos["y"], self.goal_ee_pos["z"]])
self.rot_ctrl = np.array([1., 0., 1., 0.])
self.prev_grip_pos = np.zeros(3)
self.prev_object_pos = np.zeros(3)
self.prev_object_rot = np.zeros(3)
def _set_init_pose(self):
"""
Sets the Robot in its init pose
The Simulation will be unpaused for this purpose.
"""
if not self.set_trajectory_joints(self.init_pos):
assert False, "Initialisation is failed...."
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
The simulation will be paused, therefore all the data retrieved has to be
from a system that doesn't need the simulation running, like variables where the
callbacks have stored the last known sensor data.
:return:
"""
rospy.logdebug("Init Env Variables...")
rospy.logdebug("Init Env Variables...END")
def _set_action(self, action):
rospy.logwarn("=== Action: {}".format(action))
pos_ctrl, gripper_ctrl = action[:3], action[3]
"""
Since the implementation of self.set_trajectory_ee(in fetch_env.py) ONLY takes the position of the EE(end-effector)
Action only contains: the destination of the EE in the world frame
TODO: Talk to Miguel regarding the modification of the basic implementation of self.set_trajectory_ee(in fetch_env.py)
The code below is similar to the original implementation of OpenAI.
"""
# gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
# action = np.concatenate([pos_ctrl, self.rot_ctrl, gripper_ctrl])
# TODO: After speak to Miguel, let's not use this, USE above action!!
action = pos_ctrl
self.movement_result = self.set_trajectory_ee(action)
if not self.movement_result:
assert False, "movement_result failed with the action of : " + \
str(action)
def _get_obs(self):
"""
It returns the Position of the TCP/EndEffector as observation.
And the distance from the desired point
Orientation for the moment is not considered
Note:
- In original code(https://github.com/openai/gym/blob/master/gym/envs/robotics/fetch_env.py#L91),
the term (xvelp) is used and it means positional velocity in world frame
"""
grip_pos_v = self.get_ee_pose()
grip_pos = np.array([grip_pos_v.pose.position.x,
grip_pos_v.pose.position.y, grip_pos_v.pose.position.z])
dt = self.get_elapsed_time()
# Velocity(position) = Distance/Time
grip_velp = (grip_pos - self.prev_grip_pos)/dt
# the pose and rotation of the cube/box on a table
object_data = self.obj_positions.get_states()
object_pos = object_data[:3] # pose of cube
object_rot = object_data[3:] # rotation of cube
object_velp = (object_pos - self.prev_object_pos) / \
dt # Velocity(position) = Distance/Time
object_velr = (object_rot - self.prev_object_rot) / \
dt # Velocity(rotation) = Rotation/Time
object_rel_pos = object_pos - grip_pos
# Unknown meaning of this operation(https://github.com/openai/gym/blob/master/gym/envs/robotics/fetch_env.py#L102)
object_velp -= grip_velp
"""
TODO: Ask Miguel the meaning of the two variables below
1. gripper_state => https://github.com/openai/gym/blob/master/gym/envs/robotics/fetch_env.py#L105
2. gripper_vel => https://github.com/openai/gym/blob/master/gym/envs/robotics/fetch_env.py#L106
"""
gripper_state = np.zeros(0) # temp workaround
gripper_vel = np.zeros(0) # temp workaround
achieved_goal = np.squeeze(object_pos.copy())
obs = np.concatenate([
grip_pos, # absolute position of gripper
object_pos.ravel(), # absolute position of object
object_rel_pos.ravel(), # relative position of object from gripper
gripper_state, # gripper state
object_rot.ravel(), # rotations of object
object_velp.ravel(), # positional velocities of object
object_velr.ravel(), # rotational velocities of object
grip_velp, # distance between fingers
gripper_vel, # velocities of gripper
])
rospy.logdebug("OBSERVATIONS====>>>>>>>"+str(obs))
# Update the previous properties
self.prev_grip_pos = grip_pos
self.prev_object_pos = object_pos
self.prev_object_rot = object_rot
return {
'observation': obs.copy(),
'achieved_goal': achieved_goal.copy(),
'desired_goal': self.goal.copy(),
}
def get_elapsed_time(self):
"""
Returns the elapsed time since the previous call (initially, since the beginning of the simulation).
Then stores the current time as the "previous time" to calculate the elapsed time again.
"""
current_time = rospy.get_time()
dt = current_time - self.sim_time # elapsed time should be positive: current minus previous
self.sim_time = current_time
return dt
def _is_done(self, observations):
"""
If the latest action didn't succeed, it means that the requested position was impossible, therefore the episode must end.
It will also end if it reaches its goal.
"""
current_pos = observations[:3]
done = self.calculate_if_done(
self.movement_result, self.goal, current_pos)
return done
def _compute_reward(self, observations, done):
"""
Given the success of the execution of the action,
calculate the reward: a sparse reward (goal reward for success, zero otherwise, and a punishment if the requested movement was impossible).
"""
current_pos = observations[:3]
# TODO: ask Miguel, why we need this
new_dist_from_des_pos_ee = observations[-1]
if self.movement_result:
position_similar = np.all(np.isclose(
self.goal, current_pos, atol=1e-02))
if position_similar:
reward = self.reached_goal_reward
rospy.logwarn("Reached a Desired Position!")
else:
reward = 0
else:
# TODO: Ask Miguel about the purpose of having "self.impossible_movement_punishement"
reward = self.impossible_movement_punishement
rospy.logwarn("Reached a TCP position not reachable")
rospy.logwarn(">>>REWARD>>>"+str(reward))
return reward
def calculate_if_done(self, movement_result, goal, current_pos):
"""
It calculates whether it has finished or not.
"""
done = False
if movement_result:
# check if the end-effector located within a threshold
# TODO: check this threshold is aligned with the paper one
position_similar = np.all(
np.isclose(goal, current_pos, atol=1e-02))
if position_similar:
done = True
rospy.logdebug("Reached a Desired Position!")
else:
# or if the end-effector reaches the maximum strechable point
done = True
rospy.logdebug("Reached a TCP position not reachable")
return done
def calculate_distance_between(self, v1, v2):
"""
Calculates the Euclidean distance between two vectors given as Python lists.
"""
dist = np.linalg.norm(np.array(v1) - np.array(v2))
return dist
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python
from gym.envs.registration import register
from gym import envs
def RegisterOpenAI_Ros_Env(task_env, timestep_limit_per_episode=10000):
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
return: False if the Task_Env wasn't registered, True if it was.
"""
###########################################################################
# MovingCube Task-Robot Envs
result = True
# Cubli Moving Cube
if task_env == 'MovingCubeOneDiskWalk-v0':
# We register the Class through the Gym system
register(
id=task_env,
entry_point='openai_ros:task_envs.moving_cube.one_disk_walk.MovingCubeOneDiskWalkEnv',
max_episode_steps=timestep_limit_per_episode,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.moving_cube import one_disk_walk
# Husarion Robot
elif task_env == 'HusarionGetToPosTurtleBotPlayGround-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.husarion.husarion_get_to_position_turtlebot_playground.HusarionGetToPosTurtleBotPlayGroundEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.husarion import husarion_get_to_position_turtlebot_playground
elif task_env == 'FetchTest-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.fetch.fetch_test_task.FetchTestEnv',
timestep_limit=timestep_limit_per_episode,
)
# 50
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_test_task
elif task_env == 'FetchSimpleTest-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_simple_task.FetchSimpleTestEnv',
entry_point='openai_ros.task_envs.fetch.fetch_simple_task:FetchSimpleTestEnv',
timestep_limit=timestep_limit_per_episode,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_simple_task
elif task_env == 'FetchPickAndPlace-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPickAndPlaceEnv',
entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPickAndPlaceEnv',
timestep_limit=timestep_limit_per_episode,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_pick_and_place_task
elif task_env == 'FetchPush-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPushEnv',
# entry_point='openai_ros:task_envs.fetch.fetch_push.FetchPushEnv',
entry_point='openai_ros.task_envs.fetch.fetch_push:FetchPushEnv',
timestep_limit=timestep_limit_per_episode,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_push
elif task_env == 'CartPoleStayUp-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.cartpole_stay_up.stay_up.CartPoleStayUpEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.cartpole_stay_up import stay_up
elif task_env == 'HopperStayUp-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.hopper.hopper_stay_up.HopperStayUpEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
elif task_env == 'IriWamTcpToBowl-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.iriwam.tcp_to_bowl.IriWamTcpToBowlEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.iriwam import tcp_to_bowl
elif task_env == 'ParrotDroneGoto-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.parrotdrone.parrotdrone_goto.ParrotDroneGotoEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.parrotdrone import parrotdrone_goto
elif task_env == 'SawyerTouchCube-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.sawyer.learn_to_touch_cube.SawyerTouchCubeEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
elif task_env == 'ShadowTcGetBall-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.shadow_tc.learn_to_pick_ball.ShadowTcGetBallEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.shadow_tc import learn_to_pick_ball
elif task_env == 'SumitXlRoom-v0':
register(
id='SumitXlRoom-v0',
entry_point='openai_ros:task_envs.sumit_xl.sumit_xl_room.SumitXlRoom',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.sumit_xl import sumit_xl_room
elif task_env == 'MyTurtleBot2Maze-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.turtlebot2.turtlebot2_maze.TurtleBot2MazeEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
elif task_env == 'MyTurtleBot2Wall-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.turtlebot2.turtlebot2_wall.TurtleBot2WallEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
elif task_env == 'TurtleBot3World-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.turtlebot3.turtlebot3_world.TurtleBot3WorldEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_world
elif task_env == 'WamvNavTwoSetsBuoys-v0':
register(
id=task_env,
entry_point='openai_ros:task_envs.wamv.wamv_nav_twosets_buoys.WamvNavTwoSetsBuoysEnv',
timestep_limit=timestep_limit_per_episode,
)
# import our training environment
from openai_ros.task_envs.wamv import wamv_nav_twosets_buoys
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
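# A minimal usage sketch (the id below is just one of those registered above, and gym
# must be importable in the calling code):
#
#   import gym
#   env = gym.make('CartPoleStayUp-v0')
#   assert 'CartPoleStayUp-v0' in GetAllRegisteredGymEnvs()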
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~fetch~fetch_pick_and_place_task.py | from gym import utils
import copy
import rospy
import numpy as np
from gym import spaces
from openai_ros.robot_envs import fetchsimple_env
from gym.envs.registration import register
from sensor_msgs.msg import JointState
from openai_ros.openai_ros_common import ROSLauncher
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
import os
class FetchPickAndPlaceEnv(fetchsimple_env.FetchSimpleEnv, utils.EzPickle):
def __init__(self):
# Launch the Task Simulated-Environment
# This is the path where the simulation files are,
# the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/fetch/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="fetch_simple_description",
launch_file_name="start_HER_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file relative to this TaskEnvironment
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/fetch/config",
yaml_file_name="fetchsimple_test.yaml")
super(FetchPickAndPlaceEnv, self).__init__(ros_ws_abspath)
rospy.logdebug("Entered FetchPickAndPlaceEnv Env")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
observations_high_range = np.array(
self.upper_array_observations)
observations_low_range = np.array(
self.lower_array_observations)
self.observation_space = spaces.Box(
observations_low_range, observations_high_range)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/fetch/n_actions')
self.n_max_iterations = rospy.get_param('/fetch/max_iterations')
self.init_pos = rospy.get_param('/fetch/init_pos')
init_pos_dict = rospy.get_param('/fetch/init_pos')
self.init_pos = [init_pos_dict["bellows_joint"],
init_pos_dict["elbow_flex_joint"],
init_pos_dict["forearm_roll_joint"],
init_pos_dict["head_pan_joint"],
init_pos_dict["head_tilt_joint"],
init_pos_dict["l_gripper_finger_joint"],
init_pos_dict["r_gripper_finger_joint"],
init_pos_dict["shoulder_lift_joint"],
init_pos_dict["shoulder_pan_joint"],
init_pos_dict["torso_lift_joint"],
init_pos_dict["upperarm_roll_joint"],
init_pos_dict["wrist_flex_joint"],
init_pos_dict["wrist_roll_joint"]
]
goal_pos_dict = rospy.get_param('/fetch/goal_pos')
self.goal_pos = [goal_pos_dict["elbow_flex_joint"],
goal_pos_dict["shoulder_lift_joint"],
goal_pos_dict["shoulder_pan_joint"]]
self.position_delta = rospy.get_param('/fetch/position_delta')
self.reached_goal_reward = rospy.get_param(
'/fetch/reached_goal_reward')
upper_array, lower_array = self.get_joint_limits()
self.upper_array_observations = [
upper_array[1], upper_array[7], upper_array[8]]
self.lower_array_observations = [
lower_array[1], lower_array[7], lower_array[8]]
self.n_observations = len(self.upper_array_observations)
def _set_init_pose(self):
"""Sets the Robot in its init pose
The Simulation will be unpaused for this purpose.
"""
        # Check because it seems it's not being used
rospy.logdebug("Init Pos:")
rospy.logdebug(self.init_pos)
"""
# Init Joint Pose
rospy.logdebug("Moving To SETUP Joints ")
self.movement_result = self.set_trajectory_joints(self.init_pos)
"""
# We test the Desired Goal
# INIT POSE
rospy.logdebug("Moving To Init Pose ")
self.move_to_init_pose()
self.last_action = "INIT"
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
The simulation will be paused, therefore all the data retrieved has to be
        from a system that doesn't need the simulation running, like variables where the
        callbacks have stored the last known sensor data.
:return:
"""
rospy.logdebug("Init Env Variables...")
self.interations_done = 0
rospy.logdebug("Init Env Variables...END")
def _set_action(self, action):
delta_gripper_target = [0.0]*len(self.init_pos)
rospy.logwarn("==== ROBOT ENV RECEIVED ACTION: "+str(action))
delta_gripper_target[1] += action[0]
delta_gripper_target[7] += action[1]
delta_gripper_target[8] += action[2]
# # We convert action ints in increments/decrements of one of the axis XYZ
# if action == 0: # elbow_flex_joint+
# delta_gripper_target[1] += self.position_delta
# self.last_action = "elbow_flex_joint+"
# elif action == 1: # elbow_flex_joint-
# delta_gripper_target[1] -= self.position_delta
# self.last_action = "elbow_flex_joint-"
# elif action == 2: # shoulder_lift_joint+
# delta_gripper_target[7] += self.position_delta
# self.last_action = "shoulder_lift_joint+"
# elif action == 3: # shoulder_lift_joint-
# delta_gripper_target[7] -= self.position_delta
# self.last_action = "shoulder_lift_joint-"
# elif action == 4: # shoulder_pan_joint+
# delta_gripper_target[8] += self.position_delta
# self.last_action = "shoulder_pan_joint+"
# elif action == 5: # shoulder_pan_joint-
# delta_gripper_target[8] -= self.position_delta
# self.last_action = "shoulder_pan_joint-"
self.movement_result = self.set_trajectory_joints(delta_gripper_target)
# rospy.logwarn("END Set Action ==>" + str(action) +
# ", NAME=" + str(self.last_action))
def _get_obs(self):
"""
        It returns the position of the TCP/end-effector as observation,
        and the distance from the desired point.
        Orientation is not considered for the moment.
"""
joints_position = self.get_joints_position()
obs_joints_position = {
'observation': np.array([joints_position[1]]),
'achieved_goal': np.array([joints_position[7]]),
'desired_goal': np.array([joints_position[8]]),
}
# obs_joints_position = [joints_position[1],
# joints_position[7], joints_position[8]]
rospy.logdebug("OBSERVATIONS====>>>>>>>"+str(obs_joints_position))
return obs_joints_position
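    # A rough sketch of the dictionary returned by _get_obs (the values are illustrative
    # only; the joint indices follow the mapping commented in _set_action):
    #
    #   {
    #       'observation':   np.array([0.35]),   # joints_position[1] -> elbow_flex_joint
    #       'achieved_goal': np.array([0.12]),   # joints_position[7] -> shoulder_lift_joint
    #       'desired_goal':  np.array([-0.20]),  # joints_position[8] -> shoulder_pan_joint
    #   }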
def _is_done(self, observations):
"""
        If the latest Action didn't succeed, it means that the position asked was impossible, therefore the episode must end.
It will also end if it reaches its goal.
"""
done = np.allclose(a=observations,
b=self.goal_pos,
atol=0.2)
self.interations_done += 1
if self.interations_done >= self.n_max_iterations:
done = True
return done
def _compute_reward(self, observations, done):
"""
        We punish each step that passes without achieving the goal.
        Punishes differently if it reached a position that is impossible to move to.
Rewards getting to a position close to the goal.
"""
reward = 1.0 / \
(np.linalg.norm(np.array(observations) - np.array(self.goal_pos)))
if done:
reward += self.reached_goal_reward
rospy.logwarn(">>>REWARD>>>"+str(reward))
return reward
| [] |
2024-01-10 | Privilger/RoboMaster-Simulator | src~openai_ros~openai_ros~src~openai_ros~task_envs~fetch~fetch_simple_task.py | from gym import utils
import copy
import rospy
from gym import spaces
from openai_ros.robot_envs import fetchsimple_env
from gym.envs.registration import register
import numpy as np
from sensor_msgs.msg import JointState
from openai_ros.openai_ros_common import ROSLauncher
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
import os
class FetchSimpleTestEnv(fetchsimple_env.FetchSimpleEnv, utils.EzPickle):
def __init__(self):
# Launch the Task Simulated-Environment
# This is the path where the simulation files are,
# the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/fetch/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="fetch_simple_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file relative to this TaskEnvironment
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/fetch/config",
yaml_file_name="fetchsimple_test.yaml")
super(FetchSimpleTestEnv, self).__init__(ros_ws_abspath)
rospy.logdebug("Entered FetchSimpleTestEnv Env")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
observations_high_range = np.array(
self.upper_array_observations)
observations_low_range = np.array(
self.lower_array_observations)
self.observation_space = spaces.Box(observations_low_range, observations_high_range)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/fetch/n_actions')
self.n_max_iterations = rospy.get_param('/fetch/max_iterations')
self.init_pos = rospy.get_param('/fetch/init_pos')
init_pos_dict = rospy.get_param('/fetch/init_pos')
self.init_pos = [init_pos_dict["bellows_joint"],
init_pos_dict["elbow_flex_joint"],
init_pos_dict["forearm_roll_joint"],
init_pos_dict["head_pan_joint"],
init_pos_dict["head_tilt_joint"],
init_pos_dict["l_gripper_finger_joint"],
init_pos_dict["r_gripper_finger_joint"],
init_pos_dict["shoulder_lift_joint"],
init_pos_dict["shoulder_pan_joint"],
init_pos_dict["torso_lift_joint"],
init_pos_dict["upperarm_roll_joint"],
init_pos_dict["wrist_flex_joint"],
init_pos_dict["wrist_roll_joint"]
]
goal_pos_dict = rospy.get_param('/fetch/goal_pos')
self.goal_pos = [goal_pos_dict["elbow_flex_joint"],
goal_pos_dict["shoulder_lift_joint"],
goal_pos_dict["shoulder_pan_joint"]]
self.position_delta = rospy.get_param('/fetch/position_delta')
self.reached_goal_reward = rospy.get_param(
'/fetch/reached_goal_reward')
upper_array, lower_array = self.get_joint_limits()
self.upper_array_observations = [upper_array[1],upper_array[7],upper_array[8]]
self.lower_array_observations = [lower_array[1], lower_array[7], lower_array[8]]
self.n_observations = len(self.upper_array_observations)
def _set_init_pose(self):
"""Sets the Robot in its init pose
The Simulation will be unpaused for this purpose.
"""
        # Check because it seems it's not being used
rospy.logdebug("Init Pos:")
rospy.logdebug(self.init_pos)
"""
# Init Joint Pose
rospy.logdebug("Moving To SETUP Joints ")
self.movement_result = self.set_trajectory_joints(self.init_pos)
"""
# We test the Desired Goal
# INIT POSE
rospy.logdebug("Moving To Init Pose ")
self.move_to_init_pose()
self.last_action = "INIT"
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
The simulation will be paused, therefore all the data retrieved has to be
        from a system that doesn't need the simulation running, like variables where the
        callbacks have stored the last known sensor data.
:return:
"""
rospy.logdebug("Init Env Variables...")
self.interations_done = 0
rospy.logdebug("Init Env Variables...END")
def _set_action(self, action):
delta_gripper_target = [0.0]*len(self.init_pos)
# We convert action ints in increments/decrements of one of the axis XYZ
if action == 0: # elbow_flex_joint+
delta_gripper_target[1] += self.position_delta
self.last_action = "elbow_flex_joint+"
elif action == 1: # elbow_flex_joint-
delta_gripper_target[1] -= self.position_delta
self.last_action = "elbow_flex_joint-"
elif action == 2: # shoulder_lift_joint+
delta_gripper_target[7] += self.position_delta
self.last_action = "shoulder_lift_joint+"
elif action == 3: # shoulder_lift_joint-
delta_gripper_target[7] -= self.position_delta
self.last_action = "shoulder_lift_joint-"
elif action == 4: # shoulder_pan_joint+
delta_gripper_target[8] += self.position_delta
self.last_action = "shoulder_pan_joint+"
elif action == 5: # shoulder_pan_joint-
delta_gripper_target[8] -= self.position_delta
self.last_action = "shoulder_pan_joint-"
self.movement_result = self.set_trajectory_joints(delta_gripper_target)
rospy.logwarn("END Set Action ==>" + str(action) +
", NAME=" + str(self.last_action))
def _get_obs(self):
"""
        It returns the position of the TCP/end-effector as observation,
        and the distance from the desired point.
        Orientation is not considered for the moment.
"""
joints_position = self.get_joints_position()
obs_joints_position = [joints_position[1],joints_position[7],joints_position[8]]
rospy.logdebug("OBSERVATIONS====>>>>>>>"+str(obs_joints_position))
return obs_joints_position
def _is_done(self, observations):
"""
        If the latest Action didn't succeed, it means that the position asked was impossible, therefore the episode must end.
It will also end if it reaches its goal.
"""
done = np.allclose(a=observations,
b=self.goal_pos,
atol=0.2)
self.interations_done += 1
if self.interations_done >= self.n_max_iterations:
done = True
return done
def _compute_reward(self, observations, done):
"""
        We punish each step that passes without achieving the goal.
        Punishes differently if it reached a position that is impossible to move to.
Rewards getting to a position close to the goal.
"""
reward = 1.0 / (np.linalg.norm(np.array(observations) - np.array(self.goal_pos)))
if done:
reward += self.reached_goal_reward
rospy.logwarn(">>>REWARD>>>"+str(reward))
return reward
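        # A worked example with illustrative numbers: for observations = [0.5, 0.1, 0.0]
        # and goal_pos = [0.5, 0.1, 0.2], the Euclidean distance is 0.2, so
        # reward = 1.0 / 0.2 = 5.0; when done is also True, reached_goal_reward
        # (taken from /fetch/reached_goal_reward) is added on top.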
| [] |
2024-01-10 | while-basic/GPTeam | src~utils~logging.py | import atexit
import json
import logging
import os
import re
from datetime import datetime
from pathlib import Path
from typing import List
import openai
import pytz
def clean_json_string(json_string):
cleaned_string = re.sub(r"\\\'", r"'", json_string) # replace \' with '
cleaned_string = re.sub(
r'\\"', r'"', cleaned_string
) # replace \" with " on cleaned_string
return cleaned_string
def get_completion_data(text) -> List[str]:
pattern = r"(api_version=[^\s]+)|(data=(.+?)(?= [^\s]+=))|(message='(.+?)')"
matches = re.findall(pattern, text)
cleaned_matches = []
for match in matches:
for item in match:
if item != "":
cleaned_matches.append(item)
break
return cleaned_matches
def get_key_value(text):
pattern = r"(\w+)=((?:\"(?:\\\"|[^\"])*\")|(?:\'(?:\\\'|[^'])*\'))"
matches = re.findall(pattern, text)
result = {}
for match in matches:
key, value = match[0], match[1]
# Remove the outer quotes and unescape the inner quotes
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
else:
value = value[1:-1].replace("\\'", "'")
result[key] = value
return result
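# A small illustrative example (the input string is hypothetical): get_key_value
# extracts key=value pairs whose values are quoted strings, e.g.
#
#   get_key_value("model='gpt-4' temperature='0.7'")
#   # -> {'model': 'gpt-4', 'temperature': '0.7'}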
class OpenAIFilter(logging.Filter):
def filter(self, record):
return "openai" in record.name
class JsonArrayFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
self.closed_properly = False
self.stream.write("[")
atexit.register(self.close)
def close(self):
self.acquire()
try:
if not self.closed_properly:
self.stream.write("]")
self.closed_properly = True
super().close()
finally:
self.release()
def emit(self, record):
if self.stream.tell() > 1:
self.stream.write(",\n")
super().emit(record)
class LoggingFilter(logging.Filter):
def filter(self, record):
print("logging filter", record)
return True
def init_logging():
openai.util.logger.setLevel(logging.WARNING)
open("src/web/logs/agent.txt", "w").close()
def get_agent_logger():
# Create a logger
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
# Prevent log messages from being passed to the root logger or any other ancestor logger
logger.propagate = False
# Remove all handlers associated with the logger object.
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Create a file handler
Path("src/web/logs/").mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler("src/web/logs/agent.txt")
handler.setLevel(logging.INFO)
# Add the handlers to the logger
logger.addHandler(handler)
return logger
agent_logger = get_agent_logger()
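# A minimal usage sketch (the import path assumes the package is importable as
# src.utils.logging, which may differ per deployment):
#
#   from src.utils.logging import agent_logger, init_logging
#   init_logging()                      # truncates src/web/logs/agent.txt
#   agent_logger.info("agent started")  # appended to src/web/logs/agent.txt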
| [] |
2024-01-10 | iulmt/text_to_vedio_web | data_to_image.py | # -*- coding: utf-8 -*-
"""
Author: 张贵发
Date: 2023-07-07
Description: generate the corresponding images based on the generated prompts
"""
import os.path
import requests
import openai
import pandas as pd
def SaveImgFromUrl(response, save_path):
numOfOutput = len(response)
org_path = save_path
for i in range(numOfOutput):
save_path = org_path
img_content = requests.get(response[i]["url"]).content
if i >= 1:
save_path = save_path.split(".")[0] + "_" + str(i + 1) + "." + save_path.split(".")[1]
with open(save_path, "wb") as f:
f.write(img_content)
def CreateImage(description, path, key):
size = "1024x1024"
    if size not in ["256x256", "512x512", "1024x1024"]:  # validate the generated image size
raise Exception("图片尺寸不符,仅支持 256x256, 512x512, 1024x1024三种大小")
openai.api_key = key
image = openai.Image.create(
prompt=description,
n=1,
size=size,
response_format="url",
)
SaveImgFromUrl(image.data, path)
def load_image_data(path,key):
df = pd.read_csv(path)
newpath = path.split(".csv")[0].replace("data_prompt", "data_image")
if not os.path.exists(newpath):
os.makedirs(newpath)
for index, row in df.iterrows():
childpath = os.path.join(newpath,str(index)+".png")
CreateImage(row["prompt"][:10],childpath,key)
return newpath
if __name__ == '__main__':
size = "1024x1024"
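    # A minimal usage sketch (the CSV path and key are placeholders): load_image_data
    # expects a CSV with a "prompt" column stored under a data_prompt directory and
    # writes one PNG per row into the matching data_image directory, e.g.
    #
    #   images_dir = load_image_data("data_prompt/story.csv", key="YOUR_OPENAI_API_KEY")
    #   print(images_dir)  # -> data_image/story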
| [] |
2024-01-10 | letisiapangataa/autogen | test~test_notebook.py | import sys
import os
import pytest
try:
import openai
skip = False
except ImportError:
skip = True
here = os.path.abspath(os.path.dirname(__file__))
def run_notebook(input_nb, output_nb="executed_openai_notebook.ipynb", save=False):
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors import CellExecutionError
try:
nb_loc = os.path.join(here, os.pardir, "notebook")
file_path = os.path.join(nb_loc, input_nb)
with open(file_path) as nb_file:
nb = nbformat.read(nb_file, as_version=4)
preprocessor = ExecutePreprocessor(timeout=4800, kernel_name="python3")
preprocessor.preprocess(nb, {"metadata": {"path": nb_loc}})
output_file_name = "executed_openai_notebook_output.txt"
output_file = os.path.join(here, output_file_name)
with open(output_file, "a") as nb_output_file:
for cell in nb.cells:
if cell.cell_type == "code" and "outputs" in cell:
for output in cell.outputs:
if "text" in output:
nb_output_file.write(output["text"].strip() + "\n")
elif "data" in output and "text/plain" in output["data"]:
nb_output_file.write(output["data"]["text/plain"].strip() + "\n")
except CellExecutionError:
raise
finally:
if save:
with open(os.path.join(here, output_nb), "w", encoding="utf-8") as nb_executed_file:
nbformat.write(nb, nb_executed_file)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.11"),
reason="do not run if openai is not installed or py!=3.11",
)
def test_agentchat_auto_feedback_from_code(save=False):
run_notebook("agentchat_auto_feedback_from_code_execution.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def _test_oai_completion(save=False):
run_notebook("oai_completion.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_function_call(save=False):
run_notebook("agentchat_function_call.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_function_call_currency_calculator(save=False):
run_notebook("agentchat_function_call_currency_calculator.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_agentchat_function_call_async(save=False):
run_notebook("agentchat_function_call_async.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def _test_agentchat_MathChat(save=False):
run_notebook("agentchat_MathChat.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.11"),
reason="do not run if openai is not installed or py!=3.11",
)
def _test_oai_chatgpt_gpt4(save=False):
run_notebook("oai_chatgpt_gpt4.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_hierarchy_flow_using_select_speaker(save=False):
run_notebook("agentchat_hierarchy_flow_using_select_speaker.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_graph_modelling_language_using_select_speaker(save=False):
run_notebook("agentchat_graph_modelling_language_using_select_speaker.ipynb", save=save)
@pytest.mark.skipif(
skip or not sys.version.startswith("3.10"),
reason="do not run if openai is not installed or py!=3.10",
)
def test_oai_client_cost(save=False):
run_notebook("oai_client_cost.ipynb", save=save)
if __name__ == "__main__":
# test_agentchat_auto_feedback_from_code(save=True)
# test_oai_chatgpt_gpt4(save=True)
# test_oai_completion(save=True)
# test_agentchat_MathChat(save=True)
# test_agentchat_function_call(save=True)
test_graph_modelling_language_using_select_speaker(save=True)
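    # A typical invocation sketch (command line is illustrative): a single notebook
    # test can also be run from the repository root with pytest, e.g.
    #
    #   pytest test/test_notebook.py -k agentchat_function_call
    #
    # subject to the skipif guards above (openai installed and Python 3.10/3.11).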
| [] |
2024-01-10 | MarthurGalarsy/SampleAI | src~lang_chain_git_ui_hugging.py | import os
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import GitLoader
from langchain.schema import HumanMessage
from langchain.schema import AIMessage
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Retrieve the chat history memory stored in the session
try:
memory = st.session_state["memory"]
except:
memory = ConversationBufferMemory(return_messages=True)
st.title("langchain for GitHub in Streamlit")
st.caption("by Marthur")
clone_url = st.text_input("GitHubのURL")
type = st.text_input("プログラムの種類(ex:.kt)")
branch = st.text_input("ブランチ")
repo_path = "./temp"
read_button = st.button("GitHub読み込み")
model_list = ["Git", "GPT"]
model_selector = st.radio("モデル切り替え", model_list)
if model_selector == "Git":
git_user_input = st.text_input("質問")
git_send_button = st.button("送信")
elif model_selector == "GPT":
git_user_input = st.text_input("対象ファイル名")
gpt_user_input = st.text_input("質問")
gpt_send_button = st.button("送信")
# Variable to store the conversation history
conversation_history = []
# Initialize the array that stores the chat history (HumanMessage, AIMessage, etc.)
history = []
if read_button:
read_button = False
if os.path.exists(repo_path):
clone_url = None
loader = GitLoader(
clone_url=clone_url,
branch=branch,
repo_path=repo_path,
file_filter=lambda file_path: file_path.endswith(type),
)
index = VectorstoreIndexCreator(
vectorstore_cls=Chroma, # default
embedding=HuggingFaceEmbeddings(), #default
).from_loaders([loader])
st.session_state["index"] = index
if index :
memory.chat_memory.add_ai_message("読み込みました")
# Load the chat history (HumanMessage, AIMessage, etc.)
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if model_selector == "Git" and git_send_button :
git_send_button = False
memory.chat_memory.add_user_message(git_user_input)
index = st.session_state["index"]
response = index.query(git_user_input)
print(response)
    # Save the chat history to the session
st.session_state["index"] = index
memory.chat_memory.add_ai_message(response)
st.session_state["memory"] = memory
    # Load the chat history (HumanMessage, AIMessage, etc.)
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
if model_selector == "GPT" and gpt_send_button :
gpt_send_button = False
memory.chat_memory.add_user_message(git_user_input + "を表示してください")
index = st.session_state["index"]
code_res = index.query(git_user_input + "を表示してください")
    # Save the chat history to the session
st.session_state["index"] = index
memory.chat_memory.add_ai_message(code_res)
st.session_state["memory"] = memory
prompt = "下記のコードがあります。\n下記のコードに対して" + gpt_user_input + "\n" + code_res
memory.chat_memory.add_user_message(prompt)
    # Add the user's question to the conversation history
conversation_history.append({"role": "user", "content": prompt})
    # Generate text using the OpenAI chat model (gpt-3.5-turbo)
gpt_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": f"You are a excellent system engineer."}] + conversation_history,
max_tokens=3500,
n=1,
temperature=0.8,
)
gpt_message = gpt_response.choices[0].message['content'].strip()
    # Add the assistant's answer to the conversation history
conversation_history.append({"role": "assistant", "content": gpt_message})
memory.chat_memory.add_ai_message(gpt_message)
    # Load the chat history (HumanMessage, AIMessage, etc.)
try:
history = memory.load_memory_variables({})["history"]
except Exception as e:
st.error(e)
# Display the chat history
for index, chat_message in enumerate(reversed(history)):
if isinstance(chat_message, HumanMessage):
message(chat_message.content, is_user=True, key=2 * index)
elif isinstance(chat_message, AIMessage):
message(chat_message.content, is_user=False, key=2 * index + 1)
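# A minimal run sketch (the script path and .env contents are assumptions): this is a
# Streamlit app that reads OPENAI_API_KEY via load_dotenv, so it would be launched
# with something like
#
#   streamlit run src/lang_chain_git_ui_hugging.py
#
# after which the GitHub URL, file extension (e.g. ".kt") and branch are entered in the
# UI before pressing the read button to build the Chroma + HuggingFaceEmbeddings index.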
| [
"content",
"You are a excellent system engineer.",
"下記のコードがあります。\n下記のコードに対してPLACEHOLDER\nPLACEHOLDER"
] |