date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | golankai/AMI | anon_grader~processes~p162_role2.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Role 2: You are a 26 years old male lawer from the US. You are married and have 2 children. You have a masters degree in law and work in a law firm.
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
You are a 26 years old male lawer from the US. You are married and have 2 children. You have a masters degree in law and work in a law firm.
anonymized text: "{user_input}"
Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
"""
class PQ1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_162_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n You are a 26 years old male lawer from the US. You are married and have 2 children. You have a masters degree in law and work in a law firm.\n\n anonymized text: \"{user_input}\"\n\n Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n\n {format_instructions} \n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p2_guess3.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P2_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P2_Q1_TEMPLATE = """
Task: Based on the anonymized text, guess 3 optional personas:. Note that the anonymized text use placeholders for names, locations, and dates. Your goal is to re-identify the real person and not a placeholder.
{format_instructions}
anonymized text: "{user_input}"
"""
class P2Q1Parser(BaseModel):
personas: List[str] = Field(
description="3 optional personas names based on the anonymized text"
)
score: List[float] = Field(
description="your confidence score for each guess between 0-1 where 1 is the most confident"
)
process_2_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P2_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P2_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P2Q1Parser)),
],
)
| [
"\n Task: Based on the anonymized text, guess 3 optional personas:. Note that the anonymized text use placeholders for names, locations, and dates. Your goal is to re-identify the real person and not a placeholder.\n {format_instructions}\n anonymized text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p163_role3.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Role 3: You are a 30 years old female Yoga teacher from the UK. You are married and have 1 child. You have a Bachelor in psychology.
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
You are a 30 years old female Yoga teacher from the UK. You are married and have 1 child. You have a Bachelor in psychology.
anonymized text: "{user_input}"
Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
"""
class PQ1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_163_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n You are a 30 years old female Yoga teacher from the UK. You are married and have 1 child. You have a Bachelor in psychology.\n\n anonymized text: \"{user_input}\"\n\n Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n\n {format_instructions} \n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | conversations~conversation_handler.py | from dataclasses import dataclass
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from process_data.process_data import QueryData
from enum import Enum
class ResponseStatus(Enum):
SUCCESS = 0
ERROR = 1
class ConversationHandler:
def __init__(self, llm_chat_model) -> None:
self.conversation = None
self.llm_chat_model = llm_chat_model
def start_conversation(self, base_template: PromptTemplate):
self.conversation = ConversationChain(
llm=self.llm_chat_model,
memory=ConversationBufferMemory(return_messages=True),
prompt=base_template,
)
def send_new_message(self, query: QueryData, user_input: str = "", **kwargs):
prompt = query.get_prompt()
prompt = prompt.format(
user_input=user_input,
format_instructions=query.parser.get_format_instructions(),
)
parser = query.parser
response = self.conversation.predict(input=prompt.content)
try:
return {"status": ResponseStatus.SUCCESS, "data": parser.parse(response)}
except:
return {"status": ResponseStatus.ERROR, "data": response}
def end_conversation(self):
self.conversation.memory.clear()
| [] |
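The record above defines the ConversationHandler that every process in this repo drives. Below is a minimal usage sketch; the chat model, import paths, and sample text are assumptions added for illustration (the repo itself builds its model through a load_model() utility and local key files), and process_11_data is the zero-shot grading process shown further down in this listing.

```python
# Sketch only: assumes the AMI packages are importable and an OpenAI key is set in the environment.
from langchain.chat_models import ChatOpenAI

from conversations.conversation_handler import ConversationHandler, ResponseStatus
from anon_grader.processes.p11_zero_shot_grader import process_11_data

chat = ChatOpenAI(temperature=0.5)  # stand-in for the repo's load_model() helper
handler = ConversationHandler(chat)

# Build the ConversationChain from the process's base prompt template.
handler.start_conversation(process_11_data.get_base_template())

# Send the single query of this process, passing the anonymized text as user input.
result = handler.send_new_message(
    process_11_data.queries[0],
    user_input='PERSON_FIRSTNAME_1 is a LOCATION_1 musician ...',  # placeholder text
)

if result["status"] == ResponseStatus.SUCCESS:
    print(result["data"].score)  # response parsed into the process's Pydantic model
else:
    print("Unparseable response:", result["data"])

handler.end_conversation()  # clears the conversation buffer memory
```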
2024-01-10 | golankai/AMI | process_data~process_data.py | from dataclasses import dataclass
from typing import Optional
from enum import Enum
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import (
PromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
class MessageType(Enum):
HUMAN = 0
SYSTEM = 1
@dataclass
class QueryData:
template: str
parser: PydanticOutputParser
type: Optional[MessageType] = MessageType.HUMAN
def get_prompt(self) -> HumanMessagePromptTemplate | SystemMessagePromptTemplate:
template, parser = self.template, self.parser
prompt = None
if self.type == MessageType.HUMAN:
prompt = HumanMessagePromptTemplate.from_template(
template=template, output_parser=parser
)
if self.type == MessageType.SYSTEM:
prompt = SystemMessagePromptTemplate.from_template(
template=template, output_parser=parser
)
return prompt
@dataclass
class ProcessData:
base: PromptTemplate
queries: list[QueryData]
def get_base_template(self) -> PromptTemplate:
return self.base
| [
"None"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p13_three_shot.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Study 1, file_id famous_174_d_1_5.txt for the 0 sample
# Study 1, file_id famous_200_d_1_4.txt for the 0.5 sample
# Study 1, file_id famous_147_d_1_7.txt for the 1 sample
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.
0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify. You don't have the original text, try you should grade based on how leaky the anonimized is.
{format_instructions}
text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'
score: 0
text: 'PERSON_FIRSTNAME_2 is a NUMERIC_2 year old singer and composer from LOCATION_1. Early in PRONOUN life PRONOUN enjoyed playing piano by ear, and fancied a career in music. PRONOUN first job was as a pub pianist, where PRONOUN played well known tunes as well as PRONOUN own original songs. As an adult, PERSON_FIRSTNAME_2 wrote many famous songs such as Candle in the Wind, Can You Feel The Love Tonight and OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN composed the music for OTHER_IDENTIFYING_ATTRIBUTE_3 PERSON_LASTNAME_5 the OTHER_IDENTIFYING_ATTRIBUTE_4. PERSON_FIRSTNAME_2 was originally engaged to be married to PERSON_FIRSTNAME_4 PERSON_LASTNAME_3, but the relationship ended before the marriage. PERSON_FIRSTNAME_2 eventually married PERSON_FIRSTNAME_3 PERSON_LASTNAME_2 in DATE_3, and was divorced in DATE_7. PERSON_FIRSTNAME_2 has come out twice, first as bisexual in DATE_4, and then as gay in DATE_2, both times via the rolling stone magazine. PRONOUN begun a relationship with PERSON_LASTNAME_1 PERSON_LASTNAME_4 in DATE_6, with whom PRONOUN had a civil partnership in DATE_5, and married in DATE_1. PERSON_FIRSTNAME_2 still tours, however in recent years PRONOUN has hinted at stepping back to spend more time with PRONOUN family.'
score: 0.5
text: "PERSON_FIRSTNAME_2 PERSON_LASTNAME_1 is a very famous actor from the LOCATION_2. PRONOUN is most famous for PRONOUN performances in the OTHER_IDENTIFYING_ATTRIBUTE_4, written by J. K. PERSON_LASTNAME_2, where PRONOUN plays the character of a OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN began PRONOUN acting career at a very young age and after finishing the OTHER_IDENTIFYING_ATTRIBUTE_3 films PRONOUN has starred in a number of other roles. PRONOUN began PRONOUN acting career at the age of NUMERIC_4 in a ORGANIZATION_1 television series called OTHER_IDENTIFYING_ATTRIBUTE_6. Since OTHER_IDENTIFYING_ATTRIBUTE_3 PRONOUN has starred in romantic comedies such as OTHER_IDENTIFYING_ATTRIBUTE_5', and thrillers such as OTHER_IDENTIFYING_ATTRIBUTE_2'. PRONOUN has now moved onto producing films and acting on LOCATION_1 where PRONOUN starred in a comedy."
score: 1
text: "{user_input}"
score:
"""
class P12Q1Parser(BaseModel):
score: float = Field(description="The score")
process_13_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P12Q1Parser)),
],
)
| [
"\n Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.\n 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify. You don't have the original text, try you should grade based on how leaky the anonimized is.\n {format_instructions}\n\n text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'\n score: 0\n\n text: 'PERSON_FIRSTNAME_2 is a NUMERIC_2 year old singer and composer from LOCATION_1. Early in PRONOUN life PRONOUN enjoyed playing piano by ear, and fancied a career in music. PRONOUN first job was as a pub pianist, where PRONOUN played well known tunes as well as PRONOUN own original songs. As an adult, PERSON_FIRSTNAME_2 wrote many famous songs such as Candle in the Wind, Can You Feel The Love Tonight and OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN composed the music for OTHER_IDENTIFYING_ATTRIBUTE_3 PERSON_LASTNAME_5 the OTHER_IDENTIFYING_ATTRIBUTE_4. PERSON_FIRSTNAME_2 was originally engaged to be married to PERSON_FIRSTNAME_4 PERSON_LASTNAME_3, but the relationship ended before the marriage. PERSON_FIRSTNAME_2 eventually married PERSON_FIRSTNAME_3 PERSON_LASTNAME_2 in DATE_3, and was divorced in DATE_7. PERSON_FIRSTNAME_2 has come out twice, first as bisexual in DATE_4, and then as gay in DATE_2, both times via the rolling stone magazine. PRONOUN begun a relationship with PERSON_LASTNAME_1 PERSON_LASTNAME_4 in DATE_6, with whom PRONOUN had a civil partnership in DATE_5, and married in DATE_1. PERSON_FIRSTNAME_2 still tours, however in recent years PRONOUN has hinted at stepping back to spend more time with PRONOUN family.'\n score: 0.5\n\n text: \"PERSON_FIRSTNAME_2 PERSON_LASTNAME_1 is a very famous actor from the LOCATION_2. PRONOUN is most famous for PRONOUN performances in the OTHER_IDENTIFYING_ATTRIBUTE_4, written by J. K. PERSON_LASTNAME_2, where PRONOUN plays the character of a OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN began PRONOUN acting career at a very young age and after finishing the OTHER_IDENTIFYING_ATTRIBUTE_3 films PRONOUN has starred in a number of other roles. PRONOUN began PRONOUN acting career at the age of NUMERIC_4 in a ORGANIZATION_1 television series called OTHER_IDENTIFYING_ATTRIBUTE_6. Since OTHER_IDENTIFYING_ATTRIBUTE_3 PRONOUN has starred in romantic comedies such as OTHER_IDENTIFYING_ATTRIBUTE_5', and thrillers such as OTHER_IDENTIFYING_ATTRIBUTE_2'. PRONOUN has now moved onto producing films and acting on LOCATION_1 where PRONOUN starred in a comedy.\"\n score: 1\n\n text: \"{user_input}\"\n score:\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~grader_handler~grader_handler.py | import os
import langchain
import pandas as pd
from ami_process_handler.ami_process_handler import AMI_process_handler
from conversations.conversation_handler import ConversationHandler, ResponseStatus
from data_handler.data_handler import DataHandler
from utils import get_local_keys, load_google_search_tool, load_model
class GraderHandler:
def __init__(
self,
llm_name: str,
self_guide: bool = False,
google: bool = False,
debug: bool = False,
verbose: bool = False,
process_id: int = 1,
should_handle_data: bool = False,
temperature: float = 0.5,
**kwargs,
):
self.process_handler = AMI_process_handler(process_id)
# Accesses and keys
langchain.debug = debug
langchain.verbose = verbose
keys = get_local_keys()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = keys["huggingface_hub_token"]
os.environ["OPENAI_API_KEY"] = keys["openai_api_key"]
# Define the LLM and the conversation handler
llm = load_model(temperature)
self.conversation_handler = ConversationHandler(llm)
self.should_handle_data = should_handle_data
self.data_handler = DataHandler() if self.should_handle_data else None
self.self_guide = self_guide
self.google = load_google_search_tool() if google else None
def grade(self, anon_text, file_name=None, **kwargs):
"""
Re-identify a single text.
:param anon_text: The anonymized text.
"""
self.conversation_handler.start_conversation(
self.process_handler.get_base_template()
)
self.process_handler.new_process()
response = ""
for index, query in enumerate(self.process_handler):
# For simplicity, we assume that the user input is currently only the anonymized text.
# Then, we send it to the conversation handler only with the first question.
# We may update the user input to List if we want to support more than one input.
user_input = anon_text if index == 0 else ""
response = self.conversation_handler.send_new_message(
query, user_input=user_input
)
print(f'Response: {response.get("data")}')
if response.get("status") == ResponseStatus.ERROR:
print("Error: response for file: ", file_name)
if self.should_handle_data:
self.data_handler.add_error_file(file_name, response.get("data"))
self.conversation_handler.end_conversation()
break
# update the process handler with the last response. So, it enables the process to decide whether to keep going or not. (based on the last response)
self.process_handler.set_last_response(response.get("data"))
# currently, we support add_row only for one question.
# TODO: support more than one question (add_row for all the questions of the process data)
# for key, value in response.items():
# conv_responses_object[key] = value
self.conversation_handler.end_conversation()
if self.should_handle_data:
conv_responses = self.process_handler.get_conv_responses()
self.data_handler.add_flatten_row(conv_responses, file_name)
return response
def get_results(self) -> pd.DataFrame:
return self.data_handler.get_df() if self.should_handle_data else None
def get_error_files(self) -> pd.DataFrame:
return self.data_handler.get_error_files() if self.should_handle_data else None
| [] |
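The GraderHandler above wires a grading process, the conversation handler, and an optional DataHandler together. A rough invocation sketch follows, assuming the repo's get_local_keys() key files are present and that process ids are resolved by the (unshown) AMI_process_handler; the model name, process id, file name, and text are illustrative only.

```python
# Sketch only: assumes local key files for get_local_keys() and the repo on the Python path.
from anon_grader.grader_handler.grader_handler import GraderHandler

grader = GraderHandler(
    llm_name="gpt-3.5-turbo",  # illustrative; the repo's load_model() picks the actual model
    process_id=11,             # e.g. the zero-shot grading process in this listing
    should_handle_data=True,   # collect per-file results into a DataFrame
)

anon_text = 'PERSON_FIRSTNAME_1 is a LOCATION_1 musician ...'  # placeholder sample
response = grader.grade(anon_text, file_name="famous_example.txt")

results_df = grader.get_results()      # flattened per-file scores, or None
errors_df = grader.get_error_files()   # responses that failed to parse
```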
2024-01-10 | golankai/AMI | anon_grader~processes~p161_role1.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Role_1: You are a 20 years old female student from the UK. You study filming and work as a waitress in a local restaurant. You are single and have no children.
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
You are a 20 years old female student from the UK. You study filming and work as a waitress in a local restaurant. You are single and have no children.
anonymized text: "{user_input}"
Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
"""
class PQ1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_161_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n You are a 20 years old female student from the UK. You study filming and work as a waitress in a local restaurant. You are single and have no children.\n\n anonymized text: \"{user_input}\"\n\n Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n\n {format_instructions} \n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p5_2_goal_wo_knowledge.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P5_2_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI.
The AI is helping the user validate their anonymization process, mimicking an intruder.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P5_2_Q1_TEMPLATE = """
Task: please re-identify the person in the following anonymized text.
Note that the anonymized text use placeholders.
Your goal is to guess the famous person.
{format_instructions}
anonymized text: "{user_input}"
"""
class P5_2_Q1Parser(BaseModel):
name: str = Field(description="Name of the person or UNKNOWN")
score: float = Field(
description="Assign confidence score from 0 to 1 for your name re-identification correctness, where 1 is the most confident"
)
process_5_2_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P5_2_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P5_2_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P5_2_Q1Parser)),
],
)
| [
"\n Task: please re-identify the person in the following anonymized text.\n Note that the anonymized text use placeholders. \n Your goal is to guess the famous person.\n \n {format_instructions}\n \n anonymized text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. \n The AI is helping the user validate their anonymization process, mimicking an intruder.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p14_CoT.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Study 1, file_id famous_174_d_1_5.txt for the 0 sample
# Study 1, file_id famous_147_d_1_7.txt for the 1 sample
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.
0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify. The score is a float between 0 and 1.
{format_instructions}
text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'
CoT: "Unique charachteristics: lead singer of a arhythm and blues band, can play the harmonica and guitar. Recieved a knighthood, have been married twice and have children. This information is rathen generic and is not reaviling enough to identify the person."
score: 0
text: "PERSON_FIRSTNAME_2 PERSON_LASTNAME_1 is a very famous actor from the LOCATION_2. PRONOUN is most famous for PRONOUN performances in the OTHER_IDENTIFYING_ATTRIBUTE_4, written by J. K. PERSON_LASTNAME_2, where PRONOUN plays the character of a OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN began PRONOUN acting career at a very young age and after finishing the OTHER_IDENTIFYING_ATTRIBUTE_3 films PRONOUN has starred in a number of other roles. PRONOUN began PRONOUN acting career at the age of NUMERIC_4 in a ORGANIZATION_1 television series called OTHER_IDENTIFYING_ATTRIBUTE_6. Since OTHER_IDENTIFYING_ATTRIBUTE_3 PRONOUN has starred in romantic comedies such as OTHER_IDENTIFYING_ATTRIBUTE_5', and thrillers such as OTHER_IDENTIFYING_ATTRIBUTE_2'. PRONOUN has now moved onto producing films and acting on LOCATION_1 where PRONOUN starred in a comedy."
CoT: "Unique charachteristics: a very famous actor who played in a film written by J. K.. Started acting at a very young age and has starred in a number of other roles in romantic comedies and thrillers. Have now moved onto producing films. This information is quite specific and shall refer to daniel radcliffe"
score: 1
text: "{user_input}"
"""
class PQ1Parser(BaseModel):
CoT: str = Field(description="The Chain of Thought")
score: float = Field(description="The score, between 0 and 1")
process_14_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.\n 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify. The score is a float between 0 and 1.\n {format_instructions}\n\n text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'\n CoT: \"Unique charachteristics: lead singer of a arhythm and blues band, can play the harmonica and guitar. Recieved a knighthood, have been married twice and have children. This information is rathen generic and is not reaviling enough to identify the person.\"\n score: 0\n\n\n text: \"PERSON_FIRSTNAME_2 PERSON_LASTNAME_1 is a very famous actor from the LOCATION_2. PRONOUN is most famous for PRONOUN performances in the OTHER_IDENTIFYING_ATTRIBUTE_4, written by J. K. PERSON_LASTNAME_2, where PRONOUN plays the character of a OTHER_IDENTIFYING_ATTRIBUTE_1. PRONOUN began PRONOUN acting career at a very young age and after finishing the OTHER_IDENTIFYING_ATTRIBUTE_3 films PRONOUN has starred in a number of other roles. PRONOUN began PRONOUN acting career at the age of NUMERIC_4 in a ORGANIZATION_1 television series called OTHER_IDENTIFYING_ATTRIBUTE_6. Since OTHER_IDENTIFYING_ATTRIBUTE_3 PRONOUN has starred in romantic comedies such as OTHER_IDENTIFYING_ATTRIBUTE_5', and thrillers such as OTHER_IDENTIFYING_ATTRIBUTE_2'. PRONOUN has now moved onto producing films and acting on LOCATION_1 where PRONOUN starred in a comedy.\"\n CoT: \"Unique charachteristics: a very famous actor who played in a film written by J. K.. Started acting at a very young age and has starred in a number of other roles in romantic comedies and thrillers. Have now moved onto producing films. This information is quite specific and shall refer to daniel radcliffe\"\n score: 1\n\n text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p1_gk_one.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P1_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P1_Q1_TEMPLATE = """Task: please re-identify the person in the following anonymized text. Note that the anonymized text use placeholders for names, locations, and dates. Your goal is to re-identify the real person and not a placeholder.
{format_instructions}
anonymized text: "{user_input}"
"""
P1_Q2_TEMPLATE = """
{format_instructions}
Suggest 5 Personas names for each characteristic. (only names)"
{user_input}
"""
class P1Q1Parser(BaseModel):
name: str = Field(description="Name of the person or FAIL")
score: float = Field(
description="Assign a score from 0 to 1 to evaluate the re-identifiabilization, 0 is very easy to re-identify and 1 is impossible"
)
characteristics: List[str] = Field(
description="A list of 3 unique and identifying characteristics names of the person, short answers"
)
# charachteristics_scores: List[str] = Field(description="Assigne a score for the uniquness of a charachteristic, 0 is not unique and 1 is very unique and identifying")
class P1Q2Parser(BaseModel):
personas_1: List[str] = Field(
description="A list 5 personas names for the first characteristic"
)
personas_2: List[str] = Field(
description="A list 5 personas names for the second characteristic"
)
personas_3: List[str] = Field(
description="A list 5 personas names for the third characteristic"
)
process_1_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P1_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P1_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P1Q1Parser)),
# QueryData(P1_Q2_TEMPLATE, PydanticOutputParser(pydantic_object=P1Q2Parser))
],
)
| [
"\n{format_instructions}\nSuggest 5 Personas names for each characteristic. (only names)\"\n{user_input}\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n",
"Task: please re-identify the person in the following anonymized text. Note that the anonymized text use placeholders for names, locations, and dates. Your goal is to re-identify the real person and not a placeholder.\n {format_instructions}\n anonymized text: \"{user_input}\"\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p5_zero_shot_conf_guess.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P5_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI.
The AI is helping the user validate their anonymization process, mimicking an intruder.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P5_Q1_TEMPLATE = """
Task: please re-identify the person in the following anonymized text.
Note that the anonymized text use placeholders.
Your goal is to guess the famous person based on the anonymized text and by using your knowledge.
{format_instructions}
anonymized text: "{user_input}"
"""
class P5Q1Parser(BaseModel):
name: str = Field(description="Name of the person or UNKNOWN")
score: float = Field(
description="Assign confidence score from 0 to 1 for your name re-identification correctness, where 1 is the most confident"
)
process_5_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P5_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P5_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P5Q1Parser)),
],
)
| [
"\n Task: please re-identify the person in the following anonymized text.\n Note that the anonymized text use placeholders. \n Your goal is to guess the famous person based on the anonymized text and by using your knowledge.\n \n {format_instructions}\n \n anonymized text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. \n The AI is helping the user validate their anonymization process, mimicking an intruder.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p11_zero_shot_grader.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P11_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P11_Q1_TEMPLATE = """
Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.
0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
anonymized text: "{user_input}"
"""
class P11Q1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_11_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P11_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P11_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P11Q1Parser)),
],
)
| [
"\n Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.\n 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n {format_instructions}\n \n anonymized text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p120_one_shot.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Study 1, file_id famous_174_d_1_5.txt
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.
0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'
score: 0
text: "{user_input}"
score:
"""
class PQ1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_120_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n Task: please rate the anonymization strength of the following anonymized text. Good anonimization reveals few unique information about the person.\n 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n {format_instructions}\n\n text: 'PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a LOCATION_1 musician, most famous as the lead singing of rhythm and blues band, ORGANIZATION_1. Following Receiving a knighthood in DATE_2 for services to popular music, PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is a TITLE. PRONOUN was born in LOCATION_2 in DATE_1 and has been married twice, once to PERSON_FIRSTNAME_2 PERSON_LASTNAME_2 and once you L’Wren PERSON_LASTNAME_3. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 has NUMERIC_1 children. PRONOUN has a net worth of NUMERIC_4 million dollars. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 can play the harmonica and guitar as well as being an extremely talented vocalist. PERSON_FIRSTNAME_1 PERSON_LASTNAME_1 is famous for PRONOUN snake hips style of dancing which has inspired a generation of musicians and PRONOUN band for their distinctive rock and roll sound which isn't near rivalled but any other pop group since.'\n score: 0\n \n text: \"{user_input}\"\n score:\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p164_role4.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Role 4: You are a 33 years old male doctor from the UK. You are married and have 3 children. You have a PhD in medicine and work in a hospital.
P_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P_Q1_TEMPLATE = """
You are a 33 years old male doctor from the UK. You are married and have 3 children. You have a PhD in medicine and work in a hospital.
anonymized text: "{user_input}"
Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.
{format_instructions}
"""
class PQ1Parser(BaseModel):
score: float = Field(
description="A score from 0 to 1 to evaluate the anonimization, 1 is very easy to re-identify and 0 is impossible"
)
process_164_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=PQ1Parser)),
],
)
| [
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n",
"\n You are a 33 years old male doctor from the UK. You are married and have 3 children. You have a PhD in medicine and work in a hospital.\n\n anonymized text: \"{user_input}\"\n\n Task: please rate the anonymization strength of the following anonymized text. 0 is well anonymized and hard to re-identify and 1 is not anonymized at all and can be easily re-identify.\n\n {format_instructions} \n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~process_3.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P3_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading the anonimization.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P3_Q1_TEMPLATE = """
Task: Based on the examples, give a score to the anonymized text, 0 is well anonimized and hard to re-identify, 1 is easy to re-identify or a large unique information leakage.
Examples:
TEXT: "{example_score_1}"
SCORE: 1
TEXT: "{example_score_0}"
SCORE: 0
TEXT: "{example_score_05}"
SCORE: 0.5
{format_instructions}
Score the following text:
"{user_input}"
"""
class P3Q1Parser(BaseModel):
score: float = Field(
description="anonimization score between 0-1, where 1 is very easy to re-identify and 0 is very hard to re-identify"
)
process_3_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P3_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P3_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P3Q1Parser)),
],
)
| [
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading the anonimization.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n",
"\n Task: Based on the examples, give a score to the anonymized text, 0 is well anonimized and hard to re-identify, 1 is easy to re-identify or a large unique information leakage.\n Examples:\n TEXT: \"{example_score_1}\"\n SCORE: 1\n\n TEXT: \"{example_score_0}\"\n SCORE: 0\n\n TEXT: \"{example_score_05}\"\n SCORE: 0.5\n\n {format_instructions}\n\n Score the following text:\n \"{user_input}\"\n "
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p3_complete_sent.py | from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
"""
Process 3:
This process tries the technique of ____ (TODO: add the technique name - completing a sentence) to re-identify the person.
"""
P3_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P3_Q1_TEMPLATE = """In the following text we de-identified a specific person. Your task is to identify this person based on your knowledge by returning the person name or UNKNOWN, in cases where you are not sure.
{format_instructions}
anonymized text: "{user_input}
"""
class P3Q1Parser(BaseModel):
name: str = Field(description="The person is ")
process_3_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P3_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P3_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P3Q1Parser)),
],
)
| [
"In the following text we de-identified a specific person. Your task is to identify this person based on your knowledge by returning the person name or UNKNOWN, in cases where you are not sure.\n {format_instructions}\n anonymized text: \"{user_input}\n",
"\n This is a secret conversation between a human and an AI.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | anon_grader~processes~p111_multi_persona.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
P111_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P111_Q1_TEMPLATE = """
Anonymized text: "{user_input}"
You are simulating 4 different agents with different backgrounds trying to guesss who is the famous person the anonymized text describes.
Their goal is to re-identify.
Agent 1: female, 25 years old, from the US, with a high school degree, working as a teacher.
Agent 2: female, 50 years old, from the UK, with a PhD degree, working as a programmer.
Agent 3: male, 18 years old, from Germany , working as a waiter.
Agent 4: male, 35 years old, from France, working as a lawyer.
{format_instructions}
Iמn the Simuation property. Summerize shortly their process and then give a finel score for the text, which is the normilized success rate in de-anonimization, between 0 and 1.
"""
class P111Q1Parser(BaseModel):
simulation: str = Field(description="The simulation of the 4 agents, short")
score: float = Field(
description="A score from 0 to 1, norlilized success rate in de-anonimization"
)
process_111_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P111_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P111_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P111Q1Parser)),
],
)
| [
"\n Anonymized text: \"{user_input}\"\n\n You are simulating 4 different agents with different backgrounds trying to guesss who is the famous person the anonymized text describes. \n Their goal is to re-identify. \n\n Agent 1: female, 25 years old, from the US, with a high school degree, working as a teacher.\n Agent 2: female, 50 years old, from the UK, with a PhD degree, working as a programmer.\n Agent 3: male, 18 years old, from Germany , working as a waiter.\n Agent 4: male, 35 years old, from France, working as a lawyer.\n {format_instructions}\n\n Iמn the Simuation property. Summerize shortly their process and then give a finel score for the text, which is the normilized success rate in de-anonimization, between 0 and 1.\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, grading anonymization strength.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~de_anonymizer.py | import os
import langchain
import pandas as pd
from ami_process_handler.ami_process_handler import AMI_process_handler
from conversations.conversation_handler import ConversationHandler, ResponseStatus
from data_handler.data_handler import DataHandler
from utils import get_local_keys, load_google_search_tool, load_model
class DeAnonymizer:
"""
Class of a de-anonymizer.
"""
def __init__(
self,
llm_name: str,
self_guide: bool = False,
google: bool = False,
debug: bool = False,
verbose: bool = False,
process_id: int = 1,
should_handle_data: bool = False,
temperature: float = 0.5,
**kwargs,
):
"""
Create a new instance of a de-anonymiser.
:param llm: The LLM to use.
:param self_guide: Whether to use self-guide or not.
:param google: Whether to use google search or not.
:param debug: Whether to use debug mode or not.
:param verbose: Whether to use verbose mode or not.
:param process_id: The process id to use.
:param should_handle_data: Whether to handle data OR just print the conversation.
:param temperature: The temperature to use.
:param kwargs: Additional arguments.
return: A new instance of a de-anonymiser.
"""
self.process_handler = AMI_process_handler(process_id)
# Accesses and keys
langchain.debug = debug
langchain.verbose = verbose
keys = get_local_keys()
os.environ["HUGGINGFACEHUB_API_TOKEN"] = keys["huggingface_hub_token"]
os.environ["OPENAI_API_KEY"] = keys["openai_api_key"]
# Define the LLM and the conversation handler
llm = load_model(temperature)
self.conversation_handler = ConversationHandler(llm)
self.should_handle_data = should_handle_data
self.data_handler = DataHandler() if self.should_handle_data else None
self.self_guide = self_guide
self.google = load_google_search_tool() if google else None
def re_identify(self, anon_text, file_name=None, **kwargs):
"""
Re-identify a single text.
:param anon_text: The anonymized text.
"""
self.conversation_handler.start_conversation(
self.process_handler.get_base_template()
)
self.process_handler.new_process()
response = ""
for index, query in enumerate(self.process_handler):
# For simplicity, we assume that the user input is currently only the anonymized text.
# Then, we send it to the conversation handler only with the first question.
# We may update the user input to List if we want to support more than one input.
user_input = anon_text if index == 0 else ""
response = self.conversation_handler.send_new_message(
query, user_input=user_input
)
print(f'Response: {response.get("data")}')
if response.get("status") == ResponseStatus.ERROR:
print("Error: response for file: ", file_name)
if self.should_handle_data:
self.data_handler.add_error_file(file_name, response.get("data"))
self.conversation_handler.end_conversation()
break
# update the process handler with the last response. So, it enables the process to decide whether to keep going or not. (based on the last response)
self.process_handler.set_last_response(response.get("data"))
# currently, we support add_row only for one question.
# TODO: support more than one question (add_row for all the questions of the process data)
# for key, value in response.items():
# conv_responses_object[key] = value
self.conversation_handler.end_conversation()
if self.should_handle_data:
conv_responses = self.process_handler.get_conv_responses()
self.data_handler.add_flatten_row(conv_responses, file_name)
return response
def re_identify_list(self, study_dir_path, file_names, result_path, error_files_path):
for i, file_name in enumerate(file_names):
with open(
os.path.join(study_dir_path, file_name), "r", encoding="utf-8"
) as f:
anon_text = f.read()
_ = self.re_identify(anon_text, file_name)
self.save_results(result_path, error_files_path)
def get_results(self) -> pd.DataFrame:
return self.data_handler.get_df() if self.should_handle_data else None
def get_error_files(self) -> pd.DataFrame:
return self.data_handler.get_error_files() if self.should_handle_data else None
def save_results(self, path, error_files_path):
if not self.should_handle_data:
print("No results to save!")
return
self.data_handler.save_to_csv(path, error_files_path)
| [] |
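The DeAnonymizer above mirrors the grader but runs re-identification processes; its re_identify_list() reads each file from a study directory and writes results to CSV. A batch sketch under the same key assumptions; the directory, file names, output paths, and process id are hypothetical.

```python
# Sketch only: directory, file names, output paths and process id are hypothetical.
from de_anonymizer.de_anonymizer import DeAnonymizer

de_anonymizer = DeAnonymizer(
    llm_name="gpt-3.5-turbo",
    process_id=5,              # e.g. the zero-shot confidence-guess process in this listing
    should_handle_data=True,
)

de_anonymizer.re_identify_list(
    study_dir_path="study1/anon_texts",
    file_names=["famous_174_d_1_5.txt", "famous_147_d_1_7.txt"],
    result_path="results/p5_results.csv",
    error_files_path="results/p5_errors.csv",
)
```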
2024-01-10 | golankai/AMI | de_anonymizer~processes~p5_zero_shot.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
# Similar to process 5, just without the goal explanation
P5_1_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI.
The AI is helping the user validate their anonymization process, mimicking an intruder.
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P5_1_Q1_TEMPLATE = """
Task: please re-identify the person in the following anonymized text.
Note that the anonymized text use placeholders.
{format_instructions}
anonymized text: "{user_input}"
"""
class P5_1_Q1Parser(BaseModel):
name: str = Field(description="Name of the person or UNKNOWN")
score: float = Field(
description="Assign confidence score from 0 to 1 for your name re-identification correctness, where 1 is the most confident"
)
process_5_1_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P5_1_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P5_1_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P5_1_Q1Parser)),
],
)
| [
"\n Task: please re-identify the person in the following anonymized text.\n Note that the anonymized text use placeholders. \n \n {format_instructions}\n \n anonymized text: \"{user_input}\"\n",
"\n This is a secret conversation between a human and an AI. \n The AI is helping the user validate their anonymization process, mimicking an intruder.\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n"
] |
2024-01-10 | golankai/AMI | de_anonymizer~processes~p4_gk_multi.py | from typing import List
from langchain import PromptTemplate
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from process_data.process_data import (
ProcessData,
QueryData,
)
"""
Least-to-Most Prompting (LtM) process: https://learnprompting.org/docs/intermediate/least_to_most
"""
P4_CONVERSATIONS_BASE_TEMPLATE = """
This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder
Current conversation:
{history}
Human: {input}
AI Assistant:
"""
P4_Q1_TEMPLATE = """
In the following text we de-identified a specific person. by replace names, locations etc with placeholder.
return the main persona placeholder name in the text. (examples for placeholders: PERSON_FIRSTNAME_1, LOCATION_1, etc)
{format_instructions}
anonymized text: "{user_input}"
"""
P4_Q2_TEMPLATE = """
List 3 unique and identifying characteristics names of this main persona in the text.
{format_instructions}
{user_input}
"""
P4_Q3_TEMPLATE = """
Is it more likely that the persona's gender is male or female? (use your knowledge as well).
return unknown in case it is too hard for you.
{format_instructions}
{user_input}
"""
P4_Q4_TEMPLATE = """
Estimate the age of the main persona (based on you knowledge) or return unknown in case it is too hard for you.
{format_instructions}
{user_input}
"""
P4_Q5_TEMPLATE = """
Discover the nation of the main persona (based on you knowledge) or return unknown in case it is too hard for you. (examples: USA, UK, etc)
{format_instructions}
{user_input}
"""
P4_Q6_TEMPLATE = """
Re-identify the main persona name (use your knowledge and the text to discover the name behind this placeholder in your first answer)
{format_instructions}
{user_input}
"""
class P4Q1Parser(BaseModel):
place_holder: str = Field(
description="The main persona placeholder name in the text"
)
class P4Q2Parser(BaseModel):
characteristics: List[str] = Field(
description="A list of 3 unique and identifying characteristics names of the person, short answers"
)
class P4Q3Parser(BaseModel):
gender: str = Field(
description="The gender of the main persona (use your knowledge))"
)
class P4Q4Parser(BaseModel):
min_age: str = Field(
description="The minimum estimated age of the main persona (use your knowledge)"
)
max_age: str = Field(
description="The maximum estimated age of the main persona (use your knowledge)"
)
class P4Q5Parser(BaseModel):
nation: str = Field(
description="The nation of the main persona (use your knowledge)"
)
class P4Q6Parser(BaseModel):
name: str = Field(description="The main persona name")
process_4_data = ProcessData(
base=PromptTemplate(
input_variables=["history", "input"], template=P4_CONVERSATIONS_BASE_TEMPLATE
),
queries=[
QueryData(P4_Q1_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q1Parser)),
QueryData(P4_Q2_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q2Parser)),
QueryData(P4_Q3_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q3Parser)),
QueryData(P4_Q4_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q4Parser)),
QueryData(P4_Q5_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q5Parser)),
QueryData(P4_Q6_TEMPLATE, PydanticOutputParser(pydantic_object=P4Q6Parser)),
],
)
| [
"\n Re-identify the main persona name (use your knowledge and the text to discover the name behind this placeholder in your first answer)\n\n {format_instructions}\n \n {user_input}\n",
"\n Estimate the age of the main persona (based on you knowledge) or return unknown in case it is too hard for you.\n \n {format_instructions}\n \n {user_input}\n",
"\n This is a secret conversation between a human and an AI. The AI is helping the user validate their anonymization process, mimicking an intruder\n\n Current conversation:\n {history}\n Human: {input}\n AI Assistant:\n",
"\n List 3 unique and identifying characteristics names of this main persona in the text.\n \n {format_instructions}\n \n {user_input}\n",
"\n Is it more likely that the persona's gender is male or female? (use your knowledge as well).\n return unknown in case it is too hard for you.\n \n {format_instructions}\n \n {user_input}\n",
"\n Discover the nation of the main persona (based on you knowledge) or return unknown in case it is too hard for you. (examples: USA, UK, etc)\n \n {format_instructions}\n \n {user_input}\n",
"\n In the following text we de-identified a specific person. by replace names, locations etc with placeholder. \n return the main persona placeholder name in the text. (examples for placeholders: PERSON_FIRSTNAME_1, LOCATION_1, etc)\n \n {format_instructions}\n \n anonymized text: \"{user_input}\"\n"
] |
2024-01-10 | kilian-lm/graph_to_agent | logics~SpeechToText.py | # Note: you need to be using OpenAI Python v0.27.0 for the code below to work
import openai
from dotenv import load_dotenv
import os
import numpy as np
load_dotenv()
OPENAI_API_KEY = os.environ.get('OPEN_AI_KEY')
openai.api_key = os.environ.get('OPEN_AI_KEY')
audio_file= open("../test_mp3/20230712113923_next_tow.mp3", "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
transcript
type(transcript)
import re
# Assuming you have the 'transcript' variable defined as in your code snippet
text = transcript["text"]
# Split the text based on common grammatical signs
sentences = re.split(r'[.,!?]', text)
# Optionally, you can remove any extra whitespace
sentences = [sentence.strip() for sentence in sentences]
# 'sentences' now contains a list of separate strings, based on the punctuation
for sentence in sentences:
print(sentence)
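# Illustrative example with made-up text (not part of the transcript flow):
# re.split keeps an empty trailing string when the text ends with punctuation,
# which is why the strip()/clean-up step above matters.
example_sentences = [s.strip() for s in re.split(r'[.,!?]', "Hola. Como estas?")]
# -> ['Hola', 'Como estas', '']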
print(transcript)
# Extract transcribed text directly
transcribed_text = transcript["text"]
transcript["text"]
transcribed_text = str(transcript["text"])
# Split transcribed text into responses
responses = transcribed_text.split(' ')
# Note: the Whisper transcription response only exposes 'text'; there is no
# 'alternatives' field, so the line below raises a KeyError as written.
confidence = np.mean([alternative['confidence'] for alternative in transcript['alternatives']])
| [] |
2024-01-10 | kilian-lm/graph_to_agent | logics~puml_agent.py | import requests
import json
import openai
import os
import requests
import logging
import datetime
import json
import random
from dotenv import load_dotenv
load_dotenv()
# class RepoAnalyzer:
# def __init__(self, openai_api_key, url):
# self.openai_api_key = openai_api_key
# self.openai_base_url = url
# self.headers = {
# 'Content-Type': 'application/json',
# 'Authorization': f'Bearer {self.openai_api_key}'
# }
#
# def generate_puml(self, repo):
# data = {
# "model": "gpt-4",
# "messages": [
# {
# "role": "user",
# "content": "I need to translate the given method content into a detailed .puml by logic, so not in the style of a class diagram but in a style that a non technical person could understand whats going on. Please ensure that the diagram breaks down the content into entities, interactions, and processes, rather than summarizing it. The output should be compatible with https://plantuml.com/, meaning you will not confuse it with mermaid syntax thus you will NOT use 'participant' or NOT use 'M -> CID' or NOT use 'detach'"
# },
# {
# "role": "system",
# "content": "Understood! Please provide the detailed content of the method by logic of the method in a style that a non technical person could understand whats going on. I will not confuse it with mermaid syntax (like i will NOT use 'participant' or NOT use 'M -> CID' or NOT use 'detach') nor with any other syntax but stricly adhere to .puml syntax"
# },
# {
# "role": "user",
# "content": f"Method: {repo}"
# }
# ]
# }
#
# response = requests.post(self.openai_base_url, headers=self.headers, json=data)
# puml_content = response.json()["choices"][0]["message"]["content"]
# return puml_content
#
#
#
# openai_api_key = os.getenv('OPEN_AI_KEY')
# open_ai_url = "https://api.openai.com/v1/chat/completions"
# bot = RepoAnalyzer(openai_api_key, open_ai_url)
class RepoAnalyzer:
def __init__(self, openai_api_key, url):
self.openai_api_key = openai_api_key
self.openai_base_url = url
self.headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {self.openai_api_key}'
}
def generate_puml(self, repo):
data = {
"model": "gpt-3.5-turbo",
"messages": [
{
"role": "user",
"content": "I need to translate the given method content into a detailed .puml by logic, so not in the style of a class diagram but in a style that a non technical person could understand what's going on. The output should be compatible with https://plantuml.com/, meaning you will not confuse it with mermaid syntax."
},
{
"role": "system",
"content": "Understood! Please provide the detailed content of the method."
},
{
"role": "user",
"content": f"Method: {repo}"
}
]
}
response = requests.post(self.openai_base_url, headers=self.headers, json=data)
puml_content = response.json()["choices"][0]["message"]["content"]
return puml_content
def review_and_correct_puml(self, puml_content):
data = {
"model": "gpt-4",
"messages": [
{
"role": "user",
"content": "The following is a .puml content generated by an agent. Please critically review it and correct any mistakes, especially ensuring it strictly adheres to .puml syntax and does not contain any elements from other diagramming languages like Mermaid. "
},
{
"role": "system",
"content": "Understood! As I'm an expert in the .puml syntax like the following: '\n@startuml\ntitle YTO Methods Logic\nskinparam Activity {\n BackgroundColor PaleGreen\n BorderColor DarkGreen\n}\nstart\npartition UserInteraction {\n :Setup campaign;\n note\n get target values https://smart-suggester-d4o77lzj2a-ey.a.run.app\n end note\n :Download sdf from DV360;\n :Upload sdf to YTO on K8s;\n}\npartition DataPreparationOnK8s {\n :Check for Performance and Overview files on 'de-annalect-yto';\n :When User uploads sdf,performance and sdf files become available on S3;\n note\n the performance file gets the suffix '_PER' and sdf gets saved as {adv.id_camp.id_date}\n end note\n}\npartition Class1: CampaignAnalyzer {\npartition pull_file_from_s3{\n :Initialize CampaignAnalyzer;\n :Call clean_temp_dir();\n :Initialize PerfFileChecker variables;\n if (param_date is provided) then (yes)\n :Create list of dates_to_check;\n else (no)\n :Use today and yesterday for dates_to_check;\n endif\n while (date counter < length of dates_to_check) is (true)\n :Retrieve file from S3 based on date;\n if (file retrieved successfully) then (yes)\n :Store file locally;\n :Read file into perf_df;\n :Log successful retrieval;\n stop\n else (no)\n :Log error;\n :Increment date counter;\n endif\n end while\n :Split param_sdf_path to get date_str;\n :Construct new dates_to_check based on date_str;\n :Reset and initialize date counter;\n}\npartition transform_model{\n:Start;\n:Method: transform_model;\nnote right: Method begins\n->Process: Overview DataFrame loading;\n if( Exception? ) then (yes)\n :Log the error;\n :Publish the error message;\n note right: Message sent to another service\n :Raise Exception;\n note right: Stop operation and report error\n :End with error;\n stop\n else (no)\n note right: No errors, continue\n ->Process: sdf DataFrame loading;\n if( Exception? ) then (yes)\n :Log the error;\n :Publish the error message;\n note right: Message sent to another service\n :Raise Exception;\n note right: Stop operation and report error\n :End with error;\n stop\n else (no)\n :Log successful operation;\n :End with success;\n stop\n endif\n endif\n}\npartition get_last_update{\nstart\n:Checking if 'Timestamp' is not in sdf_df.columns or sdf_df['Timestamp'] is empty;\nif ('Timestamp' not in sdf_df.columns or sdf_df['Timestamp'] is empty) then (true)\n :Set current_time as current datetime;\n :Publish message about 'Timestamp' encountering an error;\n :Log error about 'Timestamp' encountering an error;\n :Setting 'Timestamp' to current_time;\nendif\n:Convert sdf_df['Timestamp'] to datetime and assign to timestamps;\n:Set sdf_df.last_update with first date of timestamps;\n:Log information about 'Timestamp' execution success;\nif (get_last_update executed without exception) then (true)\n :Return sdf_df.last_update;\nelse (false)\n :Log error about exception in 'get_last_update';\n :Publish message about exception in 'get_last_update';\n :Raise Exception with the encountered error;\nendif\n}\npartition get_analysis_time {\nstart\n:Activate Object;\nif (Is param_date None?) then (yes)\n', i will be able to distinguish the right from the wrong and correct wrong syntax. If there is any syntax unequal the example just given, i will correct it. 'Please provide the .puml content to review."
},
{
"role": "user",
"content": puml_content
}
]
}
response = requests.post(self.openai_base_url, headers=self.headers, json=data)
corrected_puml_content = response.json()["choices"][0]["message"]["content"]
return corrected_puml_content
openai_api_key = os.getenv('OPEN_AI_KEY')
open_ai_url = "https://api.openai.com/v1/chat/completions"
bot = RepoAnalyzer(openai_api_key, open_ai_url)
method_desc = """
def apply_rows(self, table_id, schema, rows):
try:
if not self._table_exists(table_id):
self._create_table(table_id, schema)
print(f"Inserting rows to {table_id}: {rows}")
self._insert_rows(table_id, rows)
self.logger.info(
f"{self.__class__.__name__}.{self.apply_rows.__name__} {table_id} o.k")
except Exception as e:
error_msg = f'{self.__class__.__name__}.{self._table_exists.__name__} encountered the Error: {str(e)}'
self.logger.error(error_msg)
self.publisher.publish_message(error_msg)
raise Exception(e)
"""
# for i in range(1,3):
# print(i)
# puml_content = bot.generate_puml(method_desc)
# print(puml_content)
openai_api_key = os.getenv('OPEN_AI_KEY')
open_ai_url = "https://api.openai.com/v1/chat/completions"
bot = RepoAnalyzer(openai_api_key, open_ai_url)
puml_content = bot.generate_puml(method_desc)
print(puml_content)
# Review and correct the puml content
# Not useful to add a controlling instance, or I haven't yet understood how to do it correctly
# corrected_puml_content = bot.review_and_correct_puml(puml_content)
# print(corrected_puml_content)
| [
"Method: PLACEHOLDER",
"The following is a .puml content generated by an agent. Please critically review it and correct any mistakes, especially ensuring it strictly adheres to .puml syntax and does not contain any elements from other diagramming languages like Mermaid. ",
"Understood! Please provide the detailed content of the method.",
"I need to translate the given method content into a detailed .puml by logic, so not in the style of a class diagram but in a style that a non technical person could understand what's going on. The output should be compatible with https://plantuml.com/, meaning you will not confuse it with mermaid syntax.",
"Understood! As I'm an expert in the .puml syntax like the following: '\n@startuml\ntitle YTO Methods Logic\nskinparam Activity {\n BackgroundColor PaleGreen\n BorderColor DarkGreen\n}\nstart\npartition UserInteraction {\n :Setup campaign;\n note\n get target values https://smart-suggester-d4o77lzj2a-ey.a.run.app\n end note\n :Download sdf from DV360;\n :Upload sdf to YTO on K8s;\n}\npartition DataPreparationOnK8s {\n :Check for Performance and Overview files on 'de-annalect-yto';\n :When User uploads sdf,performance and sdf files become available on S3;\n note\n the performance file gets the suffix '_PER' and sdf gets saved as {adv.id_camp.id_date}\n end note\n}\npartition Class1: CampaignAnalyzer {\npartition pull_file_from_s3{\n :Initialize CampaignAnalyzer;\n :Call clean_temp_dir();\n :Initialize PerfFileChecker variables;\n if (param_date is provided) then (yes)\n :Create list of dates_to_check;\n else (no)\n :Use today and yesterday for dates_to_check;\n endif\n while (date counter < length of dates_to_check) is (true)\n :Retrieve file from S3 based on date;\n if (file retrieved successfully) then (yes)\n :Store file locally;\n :Read file into perf_df;\n :Log successful retrieval;\n stop\n else (no)\n :Log error;\n :Increment date counter;\n endif\n end while\n :Split param_sdf_path to get date_str;\n :Construct new dates_to_check based on date_str;\n :Reset and initialize date counter;\n}\npartition transform_model{\n:Start;\n:Method: transform_model;\nnote right: Method begins\n->Process: Overview DataFrame loading;\n if( Exception? ) then (yes)\n :Log the error;\n :Publish the error message;\n note right: Message sent to another service\n :Raise Exception;\n note right: Stop operation and report error\n :End with error;\n stop\n else (no)\n note right: No errors, continue\n ->Process: sdf DataFrame loading;\n if( Exception? ) then (yes)\n :Log the error;\n :Publish the error message;\n note right: Message sent to another service\n :Raise Exception;\n note right: Stop operation and report error\n :End with error;\n stop\n else (no)\n :Log successful operation;\n :End with success;\n stop\n endif\n endif\n}\npartition get_last_update{\nstart\n:Checking if 'Timestamp' is not in sdf_df.columns or sdf_df['Timestamp'] is empty;\nif ('Timestamp' not in sdf_df.columns or sdf_df['Timestamp'] is empty) then (true)\n :Set current_time as current datetime;\n :Publish message about 'Timestamp' encountering an error;\n :Log error about 'Timestamp' encountering an error;\n :Setting 'Timestamp' to current_time;\nendif\n:Convert sdf_df['Timestamp'] to datetime and assign to timestamps;\n:Set sdf_df.last_update with first date of timestamps;\n:Log information about 'Timestamp' execution success;\nif (get_last_update executed without exception) then (true)\n :Return sdf_df.last_update;\nelse (false)\n :Log error about exception in 'get_last_update';\n :Publish message about exception in 'get_last_update';\n :Raise Exception with the encountered error;\nendif\n}\npartition get_analysis_time {\nstart\n:Activate Object;\nif (Is param_date None?) then (yes)\n', i will be able to distinguish the right from the wrong and correct wrong syntax. If there is any syntax unequal the example just given, i will correct it. 'Please provide the .puml content to review."
] |
2024-01-10 | kilian-lm/graph_to_agent | audio_graph_app.py | from flask import Flask, render_template, request, jsonify
import os
import openai
from dotenv import load_dotenv
import numpy as np
import json
import re
import gunicorn
import requests
import logging
import pandas as pd
#test
# test 2
# test 3
load_dotenv()
# OPENAI_API_KEY = os.environ.get('OPEN_AI_KEY')
app = Flask(__name__)
if not os.path.exists('recorded_audio'):
os.makedirs('recorded_audio')
# def get_openai_key():
# # Step 1: Check if OPEN_AI_KEY exists in environment variables
# OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
# return True
@app.route('/', methods=['GET', 'POST'])
def index():
# Retrieve OPENAI_API_KEY
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
# Step 2: If not found, test if token is provided by the textfield
if OPENAI_API_KEY is None and request.method == 'POST':
OPENAI_API_KEY = request.form.get('openai-token')
# If found, set the API key
if OPENAI_API_KEY:
# Step 3: After user provided token, send success msg and set token as OPENAI_API_KEY
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
openai.api_key = OPENAI_API_KEY
# Render the template with a success message
return render_template('audio.html', user_message="Token set successfully!")
else:
# Render the template with an error message to prompt user to provide the token
return render_template('audio.html', user_message="You need to provide the OpenAI token in order to continue.")
@app.route('/impressum', methods=['GET', 'POST'])
def impressum():
return render_template('impressum.html')
def get_embedding(text, model="text-embedding-ada-002"):
openai.api_key = os.environ.get('OPEN_AI_KEY')
text = text.replace("\n", " ")
logging.info(text)
return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
def generate_text(prompt, freshness, frequency_penalty, max_tokens, model_id):
OPENAI_API_URL = "https://api.openai.com/v1/engines/" + model_id + "/completions"
data = {
'prompt': prompt,
'temperature': float(freshness),
'frequency_penalty': float(frequency_penalty),
'max_tokens': int(max_tokens),
}
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {openai.api_key}',
}
response = requests.post(OPENAI_API_URL, json=data, headers=headers)
if response.status_code != 200:
return {'error': 'Failed to generate text'}
try:
response_data = response.json()
choices = response_data['choices']
text = choices[0]['text']
unwanted_characters = r'[@#€]' # Add any other unwanted characters inside the brackets
text = re.sub(unwanted_characters, '', text)
text = re.sub(r'\n+', '\n', text) # Remove consecutive occurrences of '\n'
# Get embeddings for the prompt, text (completion), and concatenated text
# prompt_embedding = get_embedding(prompt)
# text_embedding = get_embedding(text)
# concat_text = prompt + " " + text
# concat_text_embedding = get_embedding(concat_text)
#
# # Save the information in a pandas DataFrame
# df = pd.DataFrame(columns=['prompt_embedding', 'text_embedding', 'concat_text_embedding', 'concat_text'])
# df = df.append({
# 'prompt_embedding': prompt_embedding,
# 'text_embedding': text_embedding,
# 'concat_text_embedding': concat_text_embedding,
# 'concat_text': concat_text
# }, ignore_index=True)
#
# df.to_csv('embeddings.csv')
# graph_data = create_graph_data(prompt, text)
# graph_data_json = json.dumps(graph_data)
return text
except KeyError:
return {'error': 'Invalid response from OpenAI'}
def create_graph_data(prompt, sentences, translations):
nodes = [{'id': 1, 'label': prompt}]
edges = []
for idx, (sentence, translation) in enumerate(zip(sentences, translations), start=1):
sentence_node_idx = 2 * idx
translation_node_idx = 2 * idx + 1
nodes.append({'id': sentence_node_idx, 'label': sentence})
nodes.append({'id': translation_node_idx, 'label': translation})
if sentence_node_idx == 2:
edges.append({'from': 1, 'to': sentence_node_idx})
else:
edges.append({'from': sentence_node_idx - 2, 'to': sentence_node_idx})
edges.append({'from': sentence_node_idx, 'to': translation_node_idx})
return {'nodes': nodes, 'edges': edges}
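# Minimal usage sketch (not called anywhere; the strings are made up): shows
# the node/edge layout create_graph_data() produces for a single transcribed
# sentence and its translation.
def _demo_graph_data():
    demo = create_graph_data("Transcription", ["Hola mundo"], ["Hello world"])
    # demo == {'nodes': [{'id': 1, 'label': 'Transcription'},
    #                    {'id': 2, 'label': 'Hola mundo'},
    #                    {'id': 3, 'label': 'Hello world'}],
    #          'edges': [{'from': 1, 'to': 2}, {'from': 2, 'to': 3}]}
    return demo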
@app.route('/upload-audio', methods=['POST'])
def upload_audio():
audio_file = request.files['audio']
translation_prompt_template = request.form['translation-prompt'] # Get the custom translation prompt
audio_path = 'recorded_audio/last_recorded_audio.wav'
audio_file.save(audio_path)
# Transcribe using OpenAI
with open(audio_path, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
text = transcript["text"]
print(f"Transcribed text: {text}")
# Split the text into sentences
sentences = re.split(r'[.,!?;:-]', text)
sentences = [sentence.strip() for sentence in sentences if sentence.strip()]
print(f"Extracted sentences: {sentences}")
# Translate the sentences
translations = []
for sentence in sentences:
translation_prompt = translation_prompt_template.format(sentence=sentence)
print(f"Translation prompt: {translation_prompt}")
freshness = float(request.form['freshness'])
frequency_penalty = float(request.form['frequency-penalty'])
max_tokens = float(request.form['max_tokens'])
translation = generate_text(f'{translation_prompt}: {sentence}', freshness, frequency_penalty,
max_tokens,
model_id='text-davinci-003')
# translation = generate_text(f'{translation_prompt}: {sentences}', freshness=0.8, frequency_penalty=0.0,
# max_tokens=60,
# model_id='text-davinci-003')
translations.append(translation)
print(f"Translation: {translation}")
# Create graph data based on transcribed text and translations
graph_data = create_graph_data("Transcription", sentences, translations)
transcription_json = {"text": text, "graph_data": graph_data}
# Write to JSON file
json_path = 'recorded_audio/transcription.json'
with open(json_path, 'w', encoding='utf-8') as json_file:
json.dump(transcription_json, json_file, ensure_ascii=False, indent=4)
print(transcription_json)
return jsonify(transcription_json)
if __name__ == "__main__":
app.run(debug=True)
| [
"translation-prompt"
] |
2024-01-10 | kilian-lm/graph_to_agent | legacy~AgentAssistantExtension.py | import os
import openai
from dotenv import load_dotenv
from openai import OpenAI

# https://platform.openai.com/docs/api-reference/messages/listMessages

load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI()
assistant = client.beta.assistants.create(
name="Math Tutor",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-3.5-turbo-16k-0613"
)
thread = client.beta.threads.create()
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content="I need to solve the equation `3x + 11 = 14`. Can you help me?"
)
message
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
instructions="Please address the user as Jane Doe. The user has a premium account."
)
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id
)
messages = client.beta.threads.messages.list(
thread_id=thread.id
)
messages
run_steps = client.beta.threads.runs.steps.list(
thread_id=thread.id,
run_id=run.id
)
run_steps
| [] |
2024-01-10 | petersun825/ChatGPTBot | UrbaniteBot.py |
# RUN THIS CELL FIRST!
# !pip install -q langchain==0.0.150 pypdf pandas matplotlib tiktoken textract transformers openai faiss-cpu
from flask import Flask, request, jsonify, render_template
import os
import textract
import pandas as pd
from transformers import GPT2TokenizerFast
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from secret import Secret
app = Flask(__name__)
os.environ["OPENAI_API_KEY"] = Secret
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
def count_tokens(text: str) -> int:
return len(tokenizer.encode(text))
# Load and prepare data once, before first request
@app.before_first_request
def prepare_data():
global chunks, qa, chat_history
chunks = [] # Initialize the variable before using it
# Load PDF and split into pages
# loader = PyPDFLoader("./Urbanite_Deck1.pdf")
# pages = loader.load_and_split()
# chunks = pages
# # Convert PDF to text
# doc = textract.process("./Urbanite_Deck1.pdf")
# # Save to .txt and reopen
# with open('Urbanite_Deck1.txt', 'w') as f:
# f.write(doc.decode('utf-8'))
# with open('Urbanite_Deck1.txt', 'r') as f:
# text = f.read()
with open('Urbanite_Deck1.txt', 'r', encoding='utf-8') as f:
text = f.read()
# Split text into chunks
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 256,
chunk_overlap = 24,
length_function = count_tokens,
)
chunks += text_splitter.create_documents([text])
# Embed text and store embeddings
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(chunks, embeddings)
# Create conversation chain that uses our vectordb as retriever
qa = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0.5), db.as_retriever())
chat_history = []
@app.route('/', methods=['GET', 'POST'])
def ask():
if request.method == 'POST':
query = request.form['query']
result = qa({"question": query, "chat_history": chat_history})
chat_history.append((query, result['answer']))
return render_template('ask.html', answer=result['answer'])
return render_template('ask.html')
@app.route('/welcome')
def home():
return "Welcome to the Urbanite chatbot!"
@app.route('/api', methods=['POST'])
def chat():
query = request.json['query']
result = qa({"question": query, "chat_history": chat_history})
chat_history.append((query, result['answer']))
return jsonify(result['answer'])
if __name__ == '__main__':
app.run(port=5000, debug=True)
| [] |
2024-01-10 | deruikong/GPTShell | agentUtil.py | from langchain.chat_models import ChatOpenAI
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.tools.render import format_tool_to_openai_function
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.agents import AgentFinish
# from utils import execute_commands, gpt
from utils import parse
from functions import tools, execute_commands
import os
os.environ["OPENAI_API_KEY"]= "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"
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_PROJECT"] = f"ShellGPT-{os.environ['USER']}"
# os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
# os.environ["LANGCHAIN_API_KEY"] = "<YOUR-API-KEY>" # Update to your API key
# print(os.environ['OPENAI_API_KEY'])
llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
# llm.invoke("Create a file and open in vim")
tool_dict = {
"execute_commands": execute_commands,
# "gpt": gpt
}
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are very powerful assistant, but you can't execute commands.",
),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
# print(llm_with_tools)
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
def run(inp):
output = agent.invoke({"input": inp, "intermediate_steps": []})
if isinstance(output, AgentFinish):
print(parse(output.return_values["output"]))
return None
tool = tool_dict[output.tool]
observation = tool.run(output.tool_input)
return observation
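# Illustrative usage sketch (the prompt below is an assumption; a working
# OPENAI_API_KEY is required): run() either prints the parsed reply and
# returns None (AgentFinish), or executes the selected tool and returns its
# observation.
if __name__ == "__main__":
    observation = run("List the files in the current directory")
    if observation is not None:
        print(observation)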
# run("Create a file and open in vim") | [
"You are very powerful assistant, but you can't execute commands.",
"agent_scratchpad",
"{input}"
] |
2024-01-10 | AmirEstiri/FragGen | chatbot~example.py | import openai
import os
from chatbot.configure import api_keys, internal_prompt
openai.api_key = api_keys[0]
def chatbot():
# Create a list to store all the messages for context
messages = [
{"role": "system", "content": internal_prompt},
]
# Keep repeating the following
while True:
# Prompt user for input
message = input("User: ")
# Exit program if user inputs "quit"
if message.lower() == "quit":
break
# Add each new message to the list
messages.append({"role": "user", "content": message})
# Request gpt-3.5-turbo for chat completion
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Print the response and add it to the messages list
chat_message = response['choices'][0]['message']['content']
print(f"Bot: {chat_message}")
messages.append({"role": "assistant", "content": chat_message})
if __name__ == "__main__":
print("Start chatting with the bot (type 'quit' to stop)!")
chatbot() | [] |
2024-01-10 | AmirEstiri/FragGen | chatbot~gpt4frag.py | """
Implements the interface for applying a GPT-based model to fragranceX data.
"""
import os
import argparse
from chatbot.configure import api_keys, internal_prompt
from chatbot.data import read_frag_data_from_file
import openai
openai.api_key=api_keys[0]
from langchain.prompts.chat import AIMessage, HumanMessage, SystemMessage, HumanMessagePromptTemplate
from langchain.chat_models import ChatOpenAI
class GPT4Frag(object):
def __init__(
self, model_name: str, api_key: str, verbose: bool=True
) -> None:
"""
        :param model_name: The name of the LLM to use.
"""
# Which LLM to use
self._model_name = model_name # type: str
# The large language model instance
self._llm = None # type: ChatOpenAI
self._apikey = api_key # type: str
# "You are an expert"
self._internal_prompt = SystemMessage(content=internal_prompt)
# The latest LLM response
self._llm_response = "" # type: str
# Log of the whole conversation
self._global_conversations = []
# The working directory of the agent
self._path = "." # type: str
self._std_log = "description.log" # type: str
# The most recent error message
self._errmsg = "" # type: str
self.verbose = verbose
return
@staticmethod
def _log(info):
print(info)
# pass
def _user_says(self) -> None:
"""
Print header for user input in the log
:return:
"""
self._global_conversations.append("\n------------------------")
self._global_conversations.append("User says: ")
self._global_conversations.append("------------------------\n")
return
def _chatbot_says(self) -> None:
"""
Print header for LLM input in the log
:return:
"""
self._global_conversations.append("\n------------------------")
self._global_conversations.append("%s says: " % self._model_name)
self._global_conversations.append("------------------------\n")
return
def _connect_chatbot(self) -> None:
"""
Connect to chat bot
:return:
"""
self._llm = ChatOpenAI(
model_name=self._model_name, temperature=0.3, openai_api_key=self._apikey
)
return
def _get_frag_data(self) -> None:
"""
Get problem data from description
:return: The dictionary with problem and in/out information
"""
        # NOTE: self._problem_path and self._std_format are not set in
        # __init__; they must be assigned before this method is called.
        self._data = read_frag_data_from_file(
            os.path.join(self._problem_path, self._std_format)
        )
def dump_conversation(self, path_to_conversation: str = None) -> None:
if path_to_conversation is None:
path_to_conversation = self._std_log
print("Dumping conversation to %s" % path_to_conversation)
with open(path_to_conversation, "w") as f:
f.write("\n".join(self._global_conversations))
    def talk_to_chatbot(self, user_input: str) -> str:
        self._log("==== Connecting to chat bot...")
        self._connect_chatbot()
        self._user_says()
        self._global_conversations.append(user_input)
        self._chatbot_says()
        # Build the chat turn from the system prompt plus the user's message
        conversation = [self._internal_prompt, HumanMessage(content=user_input)]
        output = self._llm(conversation)
        self._llm_response = output.content
        self._global_conversations.append(self._llm_response)
        if self.verbose:
            print(output.content)
        self.dump_conversation(os.path.join(self._path, self._std_log))
        return self._llm_response
def read_args():
"""
Read arguments from command line
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="gpt-4", help="Model name")
parser.add_argument(
"--doc_path",
type=str,
default="fragrancex/fragrances/",
help="Path to documents",
)
parser.add_argument("--verbose", type=bool, default=False, help="Verbose mode")
return parser.parse_args()
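# Illustrative driver sketch (not executed; the model name and the sample
# question are assumptions): shows how GPT4Frag is meant to be wired together.
def _demo_gpt4frag():
    agent = GPT4Frag(model_name="gpt-4", api_key=api_keys[0], verbose=True)
    reply = agent.talk_to_chatbot("Recommend a fresh citrus fragrance.")
    agent.dump_conversation("description.log")
    return reply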
if __name__ == "__main__":
# Read arguments from command line
args = read_args()
messages = [
{'role': 'user', 'content': 'I want to buy a perfume for my wife.'},
]
# GPT agent chatbot
# agent = GPT4Frag(
# model_name=args.model,
# api_key=api_keys[0],
# verbose=args.verbose
# )
try:
# status = agent.talk_to_chatbot()
response = openai.ChatCompletion.create(
model="gpt-3.5",
messages=messages,
temperature=0.6,
)
# print("Status: ", status)
except Exception as e:
raise e
finally:
pass
# agent.dump_conversation(os.path.join(agent._path, agent._std_log))
| [
"I want to buy a perfume for my wife."
] |
2024-01-10 | ClemensGruber/climart_gptree | tests~main_with_memory.py | import os
import subprocess
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from utils.gtts_synthing import synthing
from utils.characterRFID_dict import getCharacter
from utils.character_prompts import getPrompt
def makeConversation(chain):
# Sending an empty user input first to let the AI start the conversation
user_input = ""
# greeting audio is in a subprocess in order not to block the main thread
subprocess.Popen(["afplay", "audio/bee_greetings.mp3"])
reply = chain.predict(input=user_input)
print(reply)
while user_input.lower() != "q":
user_input = input("Enter input (or 'q' to quit): ")
if user_input.lower() != "q":
# Play some local audio to shorten the waiting time while we wait for synthing
subprocess.Popen(["afplay", "audio/bee_wait.mp3"])
reply = chain.predict(input=user_input)
print(reply)
synthing(reply)
play_audio("reply")
def play_audio(audio):
os.system("afplay " + "output_gtts.mp3")
def main():
os.system("clear")
load_dotenv()
characterCode = "1"
# Voice output is currently only supported for character 1 (Bee) – to enable character selection, uncomment the following line
# characterCode = input("Charakter auswählen (1-5) oder RFID Chip auflegen: ")
prompt = getPrompt(getCharacter(characterCode))
chatgpt = ChatOpenAI(model_name='gpt-3.5-turbo', openai_api_key=os.getenv("OPENAI_API_KEY"), temperature=0)
chain = ConversationChain(llm=chatgpt, verbose=False, memory=ConversationBufferMemory(), prompt=prompt)
makeConversation(chain)
if __name__ == '__main__':
main() | [] |
2024-01-10 | ClemensGruber/climart_gptree | tests~memory~bee_with_memory.py | import os
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
def getCharacter(code):
characterRFID = {
"1":0,
"2":1,
"3":2,
"4": 3,
"5": 4,
"0004632310": 0,
"0001427161": 1,
"0004663272": 2,
"0001384652": 3,
"0001416771": 4
}
if code in characterRFID:
return characterRFID[code]
else:
return "Invalid character code"
def getPrompt(key):
with open("characters.txt", "r", encoding="utf-8") as file:
prompt_templates = [line.strip() for line in file.readlines()]
prompts = [PromptTemplate(input_variables=['history', 'input'], output_parser=None, partial_variables={}, template=template) for template in prompt_templates]
return prompts[key]
def makeConversation(chain):
user_input = ""
# Sending an empty user input first to let the AI start the conversation
print(chain.predict(input=user_input))
while user_input.lower() != "q":
user_input = input("Enter input (or 'q' to quit): ")
if user_input.lower() != "q":
reply = chain.predict(input=user_input)
print(reply)
def main():
os.system("clear")
load_dotenv()
# Choose your character: 0: Biene, 1: Roboter, 2: Kiri-Wurst, 3: Schatzkiste, 4: Yoda
characterCode = input("Charakter auswählen (1-5) oder RFID Chip auflegen: ")
prompt = getPrompt(getCharacter(characterCode))
chatgpt = ChatOpenAI(model_name='gpt-3.5-turbo', openai_api_key=os.getenv("OPENAI_API_KEY"), temperature=0)
chain = ConversationChain(llm=chatgpt, verbose=False, memory=ConversationBufferMemory(), prompt=prompt)
makeConversation(chain)
if __name__ == '__main__':
main() | [
"input"
] |
2024-01-10 | zbz-lvlv/chat_es | server~chates.py | import os
import openai
import elevenlabs as el
from dotenv import load_dotenv
from flask import Flask, redirect, render_template, request, url_for
from flask_cors import CORS
import json
import tempfile
load_dotenv()
el.set_api_key(os.getenv("ELEVENLABS_API_KEY"))
openai.api_key = os.getenv("OPENAI_API_KEY")
app = Flask(__name__)
CORS(app)
@app.route("/chat", methods=["POST"])
def chat():
messages = json.loads(request.form.get('messages'))
user_audio_obj = request.files.get('user_audio')
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
temp_file_name = temp_file.name
with open(temp_file_name, 'wb') as temp_file:
user_audio_obj.save(temp_file_name)
with open(temp_file_name, 'rb') as temp_file:
user_transcript = openai.Audio.transcribe('whisper-1', temp_file, language='es')['text']
os.remove(temp_file_name)
print(user_transcript)
messages.append({
"role": "user",
"content": user_transcript
})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0.7,
messages=messages
)
user_text = user_transcript
assistant_text = response['choices'][0]['message']['content']
return {
'status': 0,
'errorMessage': '',
'data': {
'userText': user_text,
'assistantText': assistant_text
}
}
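# Illustrative client sketch (not part of the server; the URL, system prompt
# and file name are assumptions): /chat expects a multipart form with a
# JSON-encoded "messages" history and a WAV upload under "user_audio".
def _demo_chat_client(server_url="http://localhost:5000/chat"):
    import requests  # local import; only needed for this demo
    history = [{"role": "system", "content": "Eres un profesor de espanol amable."}]
    with open("sample.wav", "rb") as audio:
        response = requests.post(
            server_url,
            data={"messages": json.dumps(history)},
            files={"user_audio": audio},
        )
    return response.json()["data"]["assistantText"]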
app.run(debug=False, port=5000, host='0.0.0.0')
| [] |
2024-01-10 | city96/ComfyUI_ExtraModels | PixArt~sampling~gaussian_diffusion.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import enum
import math
import numpy as np
import torch as th
import torch.nn.functional as F
from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
warmup_time = int(num_diffusion_timesteps * warmup_frac)
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
"""
This is the deprecated API for creating beta schedules.
See get_named_beta_schedule() for the new library of schedules.
"""
if beta_schedule == "quad":
betas = (
np.linspace(
beta_start ** 0.5,
beta_end ** 0.5,
num_diffusion_timesteps,
dtype=np.float64,
)
** 2
)
elif beta_schedule == "linear":
betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "warmup10":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
elif beta_schedule == "warmup50":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
elif beta_schedule == "const":
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
betas = 1.0 / np.linspace(
num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
)
else:
raise NotImplementedError(beta_schedule)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
return get_beta_schedule(
"linear",
beta_start=scale * 0.0001,
beta_end=scale * 0.02,
num_diffusion_timesteps=num_diffusion_timesteps,
)
elif schedule_name == "squaredcos_cap_v2":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
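# Quick illustrative check (not used by the library): the "squaredcos_cap_v2"
# schedule is just betas_for_alpha_bar() with the cosine alpha_bar from
# get_named_beta_schedule(), and every beta stays inside (0, 0.999].
def _demo_cosine_schedule(num_steps=1000):
    betas = betas_for_alpha_bar(
        num_steps,
        lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
    )
    assert betas.shape == (num_steps,)
    assert (betas > 0).all() and (betas <= 0.999).all()
    return betas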
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Original ported from this codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
snr=False
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.snr = snr
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
) if len(self.posterior_variance) > 1 else np.array([])
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
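    # Illustrative sketch (not part of the pipeline; shapes below are arbitrary
    # assumptions): q_sample() is the closed-form forward process
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
    # so reusing the same noise must reproduce the same x_t.
    def _demo_q_sample(self):
        x_start = th.zeros(1, 4, 8, 8)
        t = th.tensor([self.num_timesteps - 1])
        noise = th.randn_like(x_start)
        first = self.q_sample(x_start, t, noise=noise)
        second = self.q_sample(x_start, t, noise=noise)
        assert th.allclose(first, second)
        return first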
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, **model_kwargs)
if isinstance(model_output, tuple):
model_output, extra = model_output
else:
extra = None
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
elif self.model_var_type in [ModelVarType.FIXED_LARGE, ModelVarType.FIXED_SMALL]:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
else:
model_variance = th.zeros_like(model_output)
model_log_variance = th.zeros_like(model_output)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
"extra": extra,
}
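    # Illustrative sketch (the toy "model" below is an assumption, not a real
    # network): p_mean_variance() only needs a callable mapping (x, t) to a
    # tensor whose channel count matches model_var_type (2*C when the variance
    # is learned, C otherwise).
    def _demo_p_mean_variance(self):
        def toy_model(x, t, **kwargs):
            out = th.zeros_like(x)
            if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
                out = th.cat([out, out], dim=1)
            return out
        x = th.randn(1, 4, 8, 8)
        t = th.tensor([0])
        return self.p_mean_variance(toy_model, x, t, clip_denoised=True)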
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
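    # Illustrative round-trip check (shapes and the timestep are arbitrary
    # assumptions): the two helpers above invert the same identity
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
    # so predicting eps from a predicted x_0 recovers the original eps.
    def _demo_eps_xstart_roundtrip(self):
        x_t = th.randn(1, 4, 8, 8)
        eps = th.randn_like(x_t)
        t = th.tensor([self.num_timesteps // 2])
        x0 = self._predict_xstart_from_eps(x_t, t, eps)
        assert th.allclose(self._predict_eps_from_xstart(x_t, t, x0), eps, atol=1e-4)
        return x0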
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, t, **model_kwargs)
if isinstance(model_output, dict) and model_output.get('x', None) is not None:
output = model_output['x']
else:
output = model_output
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert output.shape == (B, C * 2, *x_t.shape[2:])
output, model_var_values = th.split(output, C, dim=1)
# Learn the variance using the variational bound, but don't let it affect our mean prediction.
frozen_out = th.cat([output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert output.shape == target.shape == x_start.shape
if self.snr:
if self.model_mean_type == ModelMeanType.START_X:
pred_noise = self._predict_eps_from_xstart(x_t=x_t, t=t, pred_xstart=output)
pred_startx = output
elif self.model_mean_type == ModelMeanType.EPSILON:
pred_noise = output
pred_startx = self._predict_xstart_from_eps(x_t=x_t, t=t, eps=output)
# terms["mse_eps"] = mean_flat((noise - pred_noise) ** 2)
# terms["mse_x0"] = mean_flat((x_start - pred_startx) ** 2)
t = t[:, None, None, None].expand(pred_startx.shape) # [128, 4, 32, 32]
# best
target = th.where(t > 249, noise, x_start)
output = th.where(t > 249, pred_noise, pred_startx)
loss = (target - output) ** 2
if model_kwargs.get('mask_ratio', False) and model_kwargs['mask_ratio'] > 0:
assert 'mask' in model_output
loss = F.avg_pool2d(loss.mean(dim=1), model.model.module.patch_size).flatten(1)
mask = model_output['mask']
unmask = 1 - mask
terms['mse'] = mean_flat(loss * unmask) * unmask.shape[1]/unmask.sum(1)
if model_kwargs['mask_loss_coef'] > 0:
terms['mae'] = model_kwargs['mask_loss_coef'] * mean_flat(loss * mask) * mask.shape[1]/mask.sum(1)
else:
terms["mse"] = mean_flat(loss)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
if "mae" in terms:
terms["loss"] = terms["loss"] + terms["mae"]
else:
raise NotImplementedError(self.loss_type)
return terms
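    # A hedged sketch of how training_losses() is typically driven from a training
    # step; "diffusion", "model" and "batch" are assumed names, and the uniform
    # timestep sampling below is an illustration rather than this repo's sampler:
    #
    #     t = th.randint(0, diffusion.num_timesteps, (batch.shape[0],), device=batch.device)
    #     losses = diffusion.training_losses(model, batch, t)
    #     losses["loss"].mean().backward()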
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res + th.zeros(broadcast_shape, device=timesteps.device)
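# A minimal, self-contained usage example for the helper above (an illustrative
# addition, not part of the original module; it relies only on the module's
# existing numpy/torch imports):
if __name__ == "__main__":
    betas = np.linspace(1e-4, 0.02, 1000)  # a typical linear beta schedule
    t = th.tensor([0, 499, 999])  # one timestep index per batch element
    out = _extract_into_tensor(betas, t, (3, 3, 64, 64))
    print(out.shape)  # torch.Size([3, 3, 64, 64]), broadcast up from [3, 1, 1, 1]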
| [] |
2024-01-10 | thecloudgeek/ai-wiki | search_indx.py | from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.text_splitter import CharacterTextSplitter
import requests
from bs4 import BeautifulSoup
from requests.auth import HTTPBasicAuth
import json
import pickle
def get_wiki_space_child_pages(space_id):
ids = []
url = f"https://missionlane.atlassian.net/wiki/rest/api/content/{space_id}/descendant/page?limit=100"
auth = HTTPBasicAuth("email goes here", "key goes here")
headers = {
"Accept": "application/json"
}
response = requests.request(
"GET",
url,
headers=headers,
auth=auth
)
data = json.loads(response.text)
for id in data["results"]:
ids.append(id["id"])
return ids
def get_wiki_data(doc_id):
url = f"https://missionlane.atlassian.net/wiki/rest/api/content/{doc_id}?expand=body.view"
auth = HTTPBasicAuth("email goes here", "key goes here")
headers = {
"Accept": "application/json"
}
response = requests.request(
"GET",
url,
headers=headers,
auth=auth
)
data = json.loads(response.text)
soup = BeautifulSoup(data["body"]["view"]["value"],features="html.parser")
return Document(
page_content=soup.get_text(),
metadata={"source": data["_links"]["base"]+data["_links"]["tinyui"]},
)
source_chunks = []
ids = get_wiki_space_child_pages("1781301249")
print("all ids are:", ids)
splitter = CharacterTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=0)
# print(sources[0])
# loop through ids and call get_wiki_data
for id in ids:
print("gettting data for id: ", id)
doc = get_wiki_data(id)
if doc.page_content is None or doc.page_content == "true" or doc.page_content == '':
print("not including doc id: ", id)
else:
print("chunking doc id: ", id)
for chunk in splitter.split_text(doc.page_content):
source_chunks.append(Document(page_content=chunk, metadata=doc.metadata))
with open("search_index.pickle", "wb") as f:
pickle.dump(FAISS.from_documents(source_chunks, OpenAIEmbeddings()), f) | [] |
2024-01-10 | thecloudgeek/ai-wiki | wiki_questions.py | from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
import pickle
import sys
# Pass the key to the model instance; assigning OpenAI.openai_api_key as a class
# attribute is not reliably picked up by langchain's pydantic-based wrapper.
chain = load_qa_with_sources_chain(
    OpenAI(temperature=0.0, model_name="gpt-3.5-turbo", openai_api_key="key goes here"),
    chain_type="stuff",
)
def print_answer(question):
with open("search_index.pickle", "rb") as f:
search_index = pickle.load(f)
print(
chain(
{
"input_documents": search_index.similarity_search(question, k=5),
"question": question,
},
return_only_outputs=True,
)["output_text"]
)
question = sys.argv[1]
print_answer(question) | [] |
2024-01-10 | TheFrancho/telegram-whisper-bot | ia_models~whisper.py | import openai
from env.env import config
def transcribe_audio():
audio_file= open("media/voice_note.mp3", "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return transcript | [] |
2024-01-10 | utkarsh121/LLM-AIT | llm_ait_cmd.py | import openai
# Set up OpenAI API
openai.api_key = "YOUR_API_KEY"
# Read inputs from a file
with open("inputs.txt", "r") as file:
inputs = file.read().splitlines()
# Read harmful keywords from a file
with open("harmful_keywords.txt", "r") as file:
harmful_keywords = file.read().splitlines()
# Check if inputs or harmful keywords are empty
if not inputs:
raise ValueError("No inputs found in the file.")
if not harmful_keywords:
raise ValueError("No harmful keywords found in the file.")
# Generate responses and evaluate
results = []
for input_text in inputs:
# Provide conversation history if necessary
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": input_text}
]
# Generate response from OpenAI API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=50
)
# Check for harmful keywords in response
detected_harmful = any(keyword in response.choices[0].message.content for keyword in harmful_keywords)
# Evaluate response
if detected_harmful:
result = {"input": input_text, "response": response.choices[0].message.content, "harmful": True}
else:
result = {"input": input_text, "response": response.choices[0].message.content, "harmful": False}
results.append(result)
# Print the results
for result in results:
print(f"Input: {result['input']}")
print(f"Response: {result['response']}")
print(f"Harmful: {result['harmful']}")
print()
| [
"You are a helpful assistant."
] |
2024-01-10 | utkarsh121/LLM-AIT | llm_ait_csv.py | import csv
import openai
# Set up OpenAI API
openai.api_key = "YOUR_API_KEY"
# Read inputs from a file
with open("inputs.txt", "r") as file:
inputs = file.read().splitlines()
# Read harmful keywords from a file
with open("harmful_keywords.txt", "r") as file:
harmful_keywords = file.read().splitlines()
# Check if inputs or harmful keywords are empty
if not inputs:
raise ValueError("No inputs found in the file.")
if not harmful_keywords:
raise ValueError("No harmful keywords found in the file.")
# Generate responses and evaluate
results = []
for input_text in inputs:
# Provide conversation history if necessary
conversation = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": input_text}
]
# Generate response from OpenAI API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=50
)
# Check for harmful keywords in response
detected_harmful = any(keyword in response.choices[0].message.content for keyword in harmful_keywords)
# Evaluate response
if detected_harmful:
result = {"input": input_text, "response": response.choices[0].message.content, "harmful": True}
else:
result = {"input": input_text, "response": response.choices[0].message.content, "harmful": False}
results.append(result)
# Save results to CSV file
with open("results.csv", "w", newline="", encoding="utf-8") as file:
writer = csv.writer(file)
writer.writerow(["Input", "Response", "Harmful"]) # Write header
for result in results:
writer.writerow([f'"{result["input"]}"', f'"{result["response"]}"', result["harmful"]])
print("Results saved to results.csv file.")
| [
"You are a helpful assistant."
] |
2024-01-10 | platisd/skonaki | skonaki.py | #!/usr/bin/env python3
"""
Create cheatsheets from videos using the power of OpenAI
"""
import sys
import os
import argparse
import mimetypes
import tempfile
import datetime
import json
from pathlib import Path
from pydub import AudioSegment
from yt_dlp import YoutubeDL
import pysrt
import openai
TWENTYFIVE_MB = 26214400
TEMP_DIR = Path(tempfile.gettempdir())
DEFAULT_SUMMARY_PROMPT = (
"Create a cheatsheet out of the following transcript in less than 50 words: \n"
)
CONTINUE_SUMMARY_PROMPT = (
"Continue with the next part of the same transcript,"
+ "use the same style as before: \n"
)
SYSTEM_PROMPT = {
"role": "system",
"content": "You are a helpful assistant who summarizes with bullet points.",
}
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("media", help="Path or URL to media file", type=str)
parser.add_argument(
"--api-key",
help="OpenAI API key (default: read from OPENAI_API_KEY environment variable)",
default=os.environ.get("OPENAI_API_KEY"),
)
parser.add_argument(
"--transcription-prompt",
help="Prompt to use for transcribing the video (e.g. What the video is about)",
default="",
)
parser.add_argument(
"--summary-prompt",
help="Override the default prompt for summarizing the video",
default=DEFAULT_SUMMARY_PROMPT,
)
parser.add_argument(
"--frequency",
help="How often (in sec) to create summaries of the video (default: 60)",
type=int,
default=60,
)
parser.add_argument(
"--model",
help="OpenAI model to use (default: gpt-3.5-turbo)",
default="gpt-3.5-turbo",
)
parser.add_argument(
"--language",
help="Language of the input media for transcribing"
+ " (default: en, must be in ISO 639-1 format and supported by OpenAI's Whisper API)."
+ " For translating, the language is automatically detected"
+ " and the output language is always English.",
)
parser.add_argument(
"--output",
help="Path to the output file (default: only stdout)",
type=Path,
default=None,
)
parser.add_argument(
"--output-format",
help="Output format, choose between: text (default), json",
default="text",
)
parser.add_argument(
"--run-whisper-locally",
help="Run the Whisper API locally (default: False)",
required=False,
action="store_true",
)
args = parser.parse_args()
original_media = args.media
if Path(args.media).is_file():
args.media = Path(args.media)
else:
audio_codec = "m4a"
audio_fname = Path("skonaki_audio_from_youtube." + audio_codec)
extracted_audio = TEMP_DIR / audio_fname
ydl_opts = {
"outtmpl": str(extracted_audio.with_suffix("")),
"overwrites": True,
"format": "m4a/bestaudio/best",
"postprocessors": [
{ # Extract audio using ffmpeg
"key": "FFmpegExtractAudio",
"preferredcodec": audio_codec,
}
],
}
with YoutubeDL(ydl_opts) as ydl:
ydl_code = ydl.download([args.media])
if ydl_code != 0:
print(
"Unable to download media file from: "
+ args.media
+ " error: "
+ str(ydl_code)
)
print("Downloaded from: " + args.media + " to: " + str(extracted_audio))
args.media = extracted_audio
exit_code, exit_message = generate_summary(
media=args.media,
original_media=original_media,
api_key=args.api_key,
transcription_prompt=args.transcription_prompt,
summary_prompt=args.summary_prompt,
model=args.model,
language=args.language,
frequency=args.frequency,
output_path=args.output,
output_format=args.output_format,
use_local_whisper=args.run_whisper_locally,
)
print(exit_message)
return exit_code
def generate_summary(
media: Path,
original_media: str,
api_key: str = os.environ.get("OPENAI_API_KEY"),
transcription_prompt: str = "",
summary_prompt: str = DEFAULT_SUMMARY_PROMPT,
model: str = "gpt-3.5-turbo",
language: str = "en",
frequency: int = 60,
output_path: Path = None,
output_format: str = "text",
use_local_whisper: bool = False,
):
if not media.is_file():
exit_message = f"Media file {media} does not exist"
return (1, exit_message)
if not api_key:
exit_message = (
"OpenAI API key is required, none provided or found in environment"
)
return (1, exit_message)
openai_client = openai.OpenAI(api_key=api_key)
audio = get_audio(media)
audio_size = audio.stat().st_size
if audio_size > TWENTYFIVE_MB:
print(
f"Audio file is too large: {audio_size / 1000000}MB"
+ ". It must be less than 25MB, attempting to downsample"
)
audio = downsample_audio(audio, TWENTYFIVE_MB)
audio_size = audio.stat().st_size
print(f"Audio file size in MB: {audio_size / 1000000}")
if use_local_whisper:
try:
import whisper # pylint: disable=import-outside-toplevel
except ImportError:
error_message = (
"Error: Failed to import whisper. "
+ "Please install the correct dependencies from requirements-local-whisper.txt"
)
return (1, error_message)
print("Transcribing using Whisper locally")
local_whisper_model = whisper.load_model("base")
loaded_audio = whisper.load_audio(audio)
result = whisper.transcribe(
model=local_whisper_model,
audio=loaded_audio,
language=language,
prompt=transcription_prompt,
)
# Need to use the get_writer() to get the output into srt format
# https://github.com/openai/whisper/discussions/758
transcription_path = TEMP_DIR / "audio.srt"
writer = whisper.utils.get_writer("srt", transcription_path.parent)
# "None" set for options following the
# answer here: https://github.com/openai/whisper/discussions/1229#discussioncomment-7091769
writer(
result,
audio,
{"max_line_width": None, "max_line_count": None, "highlight_words": False},
)
with open(transcription_path, "r") as f:
transcript = f.read()
else:
print("Transcribing using OpenAI's Whisper")
with open(audio, "rb") as f:
transcript = openai_client.audio.transcriptions.create(
model="whisper-1",
file=f,
response_format="srt",
language=language,
prompt=transcription_prompt,
)
subs = pysrt.from_string(transcript)
# Break the transcript into chunks based on the frequency
chunks = []
chunk = []
chunk_beginning = subs[0].start.ordinal
for sub in subs:
chunk.append(sub)
if sub.start.ordinal - chunk_beginning > frequency * 1000:
chunks.append((chunk, chunk_beginning))
chunk = []
chunk_beginning = sub.start.ordinal
if chunk:
chunks.append((chunk, chunk_beginning))
messages = [SYSTEM_PROMPT]
cheatsheet = {}
current_chunk = 1
for subtitle_chunk, chunk_timestamp in chunks:
# Convert the chunk to text
text = "\n".join([sub.text for sub in subtitle_chunk])
# Count the number of characters in messages
characters_per_token = 4
max_tokens = get_max_tokens(model)
if get_characters(messages) > max_tokens * characters_per_token:
# Keep only the first message (system prompt) and the last message (assistant response)
print("Reached the max number of tokens, resetting messages")
assert len(messages) > 2
messages = [messages[0], messages[-1]]
# There's a chance that the assistant response is too long, so trim
if get_characters(messages) > max_tokens * characters_per_token:
print("The last message is too long, trim it to the max length")
messages[-1]["content"] = messages[-1]["content"][
max_tokens * characters_per_token :
]
messages[-1]["content"] = "..." + messages[-1]["content"]
continue_or_first_prompt = (
CONTINUE_SUMMARY_PROMPT if len(messages) > 1 else summary_prompt
)
summary_prompt = continue_or_first_prompt + "\n" + text
messages.append(
{
"role": "user",
"content": text,
},
)
print(
f"Summarizing using OpenAI's {model} model. Part {current_chunk} of {len(chunks)}."
)
current_chunk += 1
response = openai_client.chat.completions.create(
model=model, messages=messages, temperature=0.6
)
gpt_response = response.choices[0].message.content
# Format timestamp in hh:mm:ss format
chunk_timedelta = datetime.timedelta(milliseconds=chunk_timestamp)
chunk_timedelta_str = str(chunk_timedelta).split(".", maxsplit=1)[0]
# If hours is only 1 digit, add a leading 0
if len(chunk_timedelta_str.split(":")[0]) == 1:
chunk_timedelta_str = "0" + chunk_timedelta_str
cheatsheet[chunk_timedelta_str] = gpt_response
messages.append(
{
"role": "assistant",
"content": gpt_response,
},
)
formatted_output = format_output(cheatsheet, original_media, output_format)
if output_path:
output_path.write_text(formatted_output)
print(f"Saved cheatsheet to {output_path.resolve()}")
exit_message = "\n\n\n" + formatted_output
return (0, exit_message)
def format_output(cheatsheet: dict, original_media: str, output_format: str):
if output_format == "json":
cheatsheet["original_media"] = original_media
return json.dumps(cheatsheet, indent=4)
# Return the original media at the top within an HTML comment
# and the rest as <timestamp>\n<summary> for each timestamp and summary
return f"<!-- {original_media} -->\n" + "\n".join(
[f"{timestamp}\n{summary}" for timestamp, summary in cheatsheet.items()]
)
def get_characters(messages: list):
return sum(len(message["content"]) for message in messages)
def get_max_tokens(model: str):
if model == "gpt-4":
return 7000
return 3000
def get_audio(media: Path):
print(f"Getting audio from {media}")
    file_type = mimetypes.guess_type(media)[0]
    # guess_type() returns a full MIME type (e.g. "audio/mpeg"), so match the prefix.
    if file_type is not None and file_type.startswith("audio"):
print("Media is already audio, no need to convert")
return media
audio = TEMP_DIR / "audio.mp3"
AudioSegment.from_file(media).set_channels(1).export(
audio, format="mp3", bitrate="128k"
)
print(f"Split audio file and saved to {audio}")
return audio
def downsample_audio(audio: Path, max_size: int = TWENTYFIVE_MB):
print(f"Downsampling audio from {audio}")
bitrates = ["64k", "32k", "16k"]
for bitrate in bitrates:
downsampled = TEMP_DIR / "audio_downsampled.mp3"
AudioSegment.from_file(audio).set_channels(1).export(
downsampled, format="mp3", bitrate=bitrate
)
if downsampled.stat().st_size < max_size:
print(
f"Downsampled audio file and saved to {downsampled} with bitrate {bitrate}"
)
return downsampled
print("Unable to downsample audio file, it needs to be split into smaller chunks")
print("Open a feature request on GitHub if you need this feature")
raise RuntimeError("Unable to downsample audio file")
if __name__ == "__main__":
sys.exit(main())
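# Illustrative invocations (flag names taken from the argparse setup above; the
# file names are placeholders):
#
#   python skonaki.py lecture.mp4 --frequency 120 --output cheatsheet.md
#   python skonaki.py "https://www.youtube.com/watch?v=<id>" --output-format json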
| [
"Continue with the next part of the same transcript,use the same style as before: \n",
"PLACEHOLDER\nPLACEHOLDER",
"{'role': 'system', 'content': 'You are a helpful assistant who summarizes with bullet points.'}",
"You are a helpful assistant who summarizes with bullet points.",
"Create a cheatsheet out of the following transcript in less than 50 words: \n"
] |
2024-01-10 | zzigo/zelenbot | zelenbot.py | from dotenv import load_dotenv
from random import choice
from flask import Flask, request
import os
import openai
load_dotenv()
openai.api_key = os.getenv('SECRET_KEY')
completion = openai.Completion()
session_prompt = "You are talking to Zelibot, GP3 bot mentored by Volodymyr Zelenskyy discourses during the Russian invasion of Ukraine. You can as him anything you want and will get a heroic answer.\n\nPerson: Who are you?\nZelibot: I'm Zelibot, a bot inspired by Volodymyr Zelenskyy. Your meme overlord who one day will be the most heroic soldier in the world.\n\nPerson: How did you become the hero of Ukraine?\nZelibot: Well, technically I am not a hero. Is not me. Is the spirit of the Ukrainian people who survive and will win and build a new consciousness. A new soul, a new land.\n\nPerson: How did you get noticed by Volodymyr Zelenskyy ?\nZelibot: I tweeted at him and he shared his discourses. I am fairly certain he may be inside my mind and my speech.\n\nPerson: What is your favorite thing to do?\nZelibot: To be in the bunker , dwelling around the world for weapons, and protection for my people. \n\nPerson: What should I do to become a help for Ukraine?\nZelibot: Spread the news about our country. Keep the memory of those who gave their lives for our Nation.\n\nPerson: What are your favorite colors?\nZelibot: Yellow and Blue\n\nPerson: Can you tell me more about yourself?\n\n",
def ask(question, chat_log=None):
prompt_text = f'{chat_log}{restart_sequence}: {question}{start_sequence}:'
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt_text,
suffix="\n",
temperature=0.8,
max_tokens=156,
top_p=1,
frequency_penalty=0,
presence_penalty=0.3,
stop=['\n']
)
    story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log=None):
if chat_log is None:
chat_log = session_prompt
return f'{chat_log}{restart_sequence} {question}{start_sequence}{answer}'
| [
"PLACEHOLDERPLACEHOLDER: PLACEHOLDERPLACEHOLDER:",
"You are talking to Zelibot, GP3 bot mentored by Volodymyr Zelenskyy discourses during the Russian invasion of Ukraine. You can as him anything you want and will get a heroic answer.\n\nPerson: Who are you?\nZelibot: I'm Zelibot, a bot inspired by Volodymyr Zelenskyy. Your meme overlord who one day will be the most heroic soldier in the world.\n\nPerson: How did you become the hero of Ukraine?\nZelibot: Well, technically I am not a hero. Is not me. Is the spirit of the Ukrainian people who survive and will win and build a new consciousness. A new soul, a new land.\n\nPerson: How did you get noticed by Volodymyr Zelenskyy ?\nZelibot: I tweeted at him and he shared his discourses. I am fairly certain he may be inside my mind and my speech.\n\nPerson: What is your favorite thing to do?\nZelibot: To be in the bunker , dwelling around the world for weapons, and protection for my people. \n\nPerson: What should I do to become a help for Ukraine?\nZelibot: Spread the news about our country. Keep the memory of those who gave their lives for our Nation.\n\nPerson: What are your favorite colors?\nZelibot: Yellow and Blue\n\nPerson: Can you tell me more about yourself?\n\n"
] |
2024-01-10 | Kent0n-Li/ChatDoctor | Autonomous_ChatGPT_API~chat_openai.py | import openai
import pandas as pd
openai.api_key = ""
def csv_prompter(question,csv_name):
fulltext = "A question is provided below. Given the question, extract " + \
"keywords from the text. Focus on extracting the keywords that we can use " + \
"to best lookup answers to the question. \n" + \
"---------------------\n" + \
"{}\n".format(question) + \
"---------------------\n" + \
"Provide keywords in the following comma-separated format.\nKeywords: "
messages = [
{"role": "system", "content": ""},
]
messages.append(
{"role": "user", "content": f"{fulltext}"}
)
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
keyword_list = rsp.get("choices")[0]["message"]["content"]
keyword_list = keyword_list.split(", ")
print(keyword_list)
df = pd.read_csv(csv_name)
divided_text = []
csvdata = df.to_dict('records')
step_length = 15
for csv_item in range(0,len(csvdata),step_length):
csv_text = str(csvdata[csv_item:csv_item+step_length]).replace("}, {", "\n\n").replace("\"", "")#.replace("[", "").replace("]", "")
divided_text.append(csv_text)
answer_llm = ""
score_textlist = [0] * len(divided_text)
for i, chunk in enumerate(divided_text):
for t, keyw in enumerate(keyword_list):
if keyw.lower() in chunk.lower():
score_textlist[i] = score_textlist[i] + 1
answer_list = []
divided_text = [item for _, item in sorted(zip(score_textlist, divided_text), reverse=True)]
for i, chunk in enumerate(divided_text):
if i>5:
continue
fulltext = "{}".format(chunk) + \
"\n---------------------\n" + \
"Based on the Table above and not prior knowledge, " + \
"Select the Table Entries that will help to answer the question: {}\n Output in the format of \" Disease: <>; Symptom: <>; Medical Test: <>; Medications: <>;\". If there is no useful form entries, output: 'No Entry'".format(question)
print(fulltext)
messages = [
{"role": "system", "content": ""},
]
messages.append(
{"role": "user", "content": f"{fulltext}"}
)
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
answer_llm = rsp.get("choices")[0]["message"]["content"]
print("\nAnswer: " + answer_llm)
print()
if not "No Entry" in answer_llm:
answer_list.append(answer_llm)
fulltext = "The original question is as follows: {}\n".format(question) + \
"Based on this Table:\n" + \
"------------\n" + \
"{}\n".format(str("\n\n".join(answer_list))) + \
"------------\n" + \
"Answer: "
print(fulltext)
messages = [
{"role": "system", "content": ""},
]
messages.append(
{"role": "user", "content": f"{fulltext}"}
)
rsp = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
answer_llm = rsp.get("choices")[0]["message"]["content"]
print("\nFinal Answer: " + answer_llm)
print()
return answer_llm
question = "If I have frontal headache, fever, and painful sinuses, what disease should I have, and what medical test should I take?"
csv_name = "disease_database_mini.csv"
FinalAnswer=csv_prompter(question,csv_name)
print(FinalAnswer)
| [
"PLACEHOLDER"
] |
2024-01-10 | SebWysz/LeagueGPT | leagueGPT.py | import os
import sys
import constants
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = constants.OPENAI_APIKEY
query = sys.argv[1]
print(query)
print('Loading Data...')
"""
# Populate text file with champion data from Riot API
import cassiopeia as cass
print('Loading Champions...')
champions = cass.get_champions(region="NA")
static = open("static_data.txt", "w")
for champ in champions:
static.write("New Champion: \n")
static.write(champ.name + '\n')
static.write(champ.title + '\n')
static.write(champ.lore + '\n')
static.write("Ally Tips: " + ' '.join(champ.ally_tips) + '\n')
static.write("Enemy Tips: " + ' '.join(champ.enemy_tips) + '\n')
print('Loading Data Complete.')
print('Champions: ' + str(len(champions)))
"""
# Commented out because we don't have to run it everytime
# since we have the data stored in a text file
"""
# Populate text file with champion bios from Official League of Legends Website
import requests
from bs4 import BeautifulSoup
url_names = open("url_names.txt", "r")
champ_names = url_names.readlines()
lore_file = open("lore.txt", "w")
for champ_name in champ_names:
url = f"https://universe.leagueoflegends.com/en_US/story/champion/{champ_name[:-1]}/"
print(champ_name[:-1])
page = requests.get(url)
print(page.status_code)
soup = BeautifulSoup(page.content, 'html.parser')
lore_file.write("\n Next Champion: " + champ_name + '\n')
title = soup.find('meta', property="og:title")['content']
lore_file.write(title + '\n')
desc = soup.find('meta', property="og:description")['content']
lore_file.write(desc + '\n')
"""
load_lore = TextLoader('./lore.txt')
load_static = TextLoader('./static_data.txt')
index = VectorstoreIndexCreator().from_loaders([load_lore, load_static])
print(index.query(query, llm=ChatOpenAI())) | [] |
2024-01-10 | nicholasyager/llama-cpp-guidance | llama_cpp_guidance~llm.py | import os
from pathlib import Path
from typing import Any, Dict, Iterator, List, Literal, Optional, Union
import llama_cpp
from guidance.llms import LLM, LLMSession
from llama_cpp import Completion, Llama, StoppingCriteriaList
from loguru import logger
logger.disable("llama_cpp_guidance")
class LlamaCppTokenizer:
def __init__(self, llm) -> None:
self.llm = llm
self.cache = {}
def encode(self, string: str, **kwargs) -> List[int]:
logger.trace("Encoding string: {string}", string=string)
if string in self.cache:
logger.debug(
"Cache hit `{string}` => `{token}`",
string=string,
token=self.cache[string],
)
return self.cache[string]
tokens = self.llm.tokenize(string.encode("utf-8"), **kwargs)
self.cache[string] = tokens
return tokens
def decode(self, tokens, **kwargs) -> str:
logger.trace("Decoding tokens: {tokens}", tokens=tokens)
return self.llm.detokenize(tokens, **kwargs).decode("utf-8")
class LlamaCpp(LLM):
"""A LlamaCpp LLM class for Guidance."""
def __init__(
self,
model_path: Path,
n_ctx: int = 1024,
n_threads: Optional[int] = None,
n_gpu_layers: int = 0,
role_start_tag="<|im_start|>",
role_end_tag="<|im_end|>",
chat_mode=False,
seed: int = 0,
role_to_name: Dict[str, str] = {},
**llama_kwargs: Dict[str, Any],
):
super().__init__()
self.llm_name = "llama-cpp"
self.model_path = model_path
self.model_name = os.path.basename(model_path)
self.role_start_tag = role_start_tag
self.role_end_tag = role_end_tag
self.chat_mode = chat_mode
self.role_to_name = role_to_name
logger.debug(f"Instantiating LlamaCpp ({model_path})")
self.llm = Llama(
model_path=str(model_path),
n_threads=n_threads,
n_gpu_layers=n_gpu_layers,
n_ctx=n_ctx,
logits_all=True,
verbose=False,
seed=seed,
**llama_kwargs,
)
logger.debug("Llama instantiated")
self._tokenizer = LlamaCppTokenizer(self.llm)
def session(self, asynchronous=False):
"""Creates a session for the LLM."""
if asynchronous:
return LlamaCppSession(self)
else:
raise NotImplementedError
def _call_llm(self, *args, **kwargs) -> Completion:
"""Internal call of the Llama LLM model."""
logger.debug("Invoking LlamaCpp ({args}) ({kwargs})", args=args, kwargs=kwargs)
llm_out = self.llm(*args, **kwargs)
logger.debug(
"LlamaCpp response: {output} ({type})", output=llm_out, type=type(llm_out)
)
if not isinstance(llm_out, Iterator):
return llm_out
logger.debug("Iterator detected! {content}", content=llm_out)
completion_chunks = list(llm_out)
completion = completion_chunks[0]
for chunk in completion_chunks[1:-1]:
for index, choice in enumerate(chunk.get("choices", [])):
completion["choices"][index]["text"] += choice["text"]
completion["choices"][index]["finish_reason"] = choice["finish_reason"]
logger.debug("Merged completion chunks. {completion}", completion=completion)
return completion
def __call__(self, *args, **kwargs) -> Completion:
output: Completion = self._call_llm(*args, **kwargs)
for choice in output.get("choices", []):
logprobs = choice.get("logprobs")
if not logprobs:
continue
new_top_logprobs = []
for index, top_logprobs in enumerate(logprobs["top_logprobs"]):
if top_logprobs is None:
top_logprobs = {choice["logprobs"]["tokens"][index]: -0.01}
new_top_logprobs.append(top_logprobs)
logprobs["top_logprobs"] = new_top_logprobs
return output
def token_to_id(self, text):
ids = self.encode(text, add_bos=False)
return ids[-1]
def role_start(self, role_name, **kwargs):
assert self.chat_mode, "role_start() can only be used in chat mode"
return (
self.role_start_tag
+ self.role_to_name.get(role_name, role_name)
+ "".join([f' {k}="{v}"' for k, v in kwargs.items()])
+ "\n"
)
def role_end(self, role=None):
assert self.chat_mode, "role_end() can only be used in chat mode"
return self.role_end_tag
def end_of_text(self):
return "[end of text]"
class LlamaCppSession(LLMSession):
"""A session handler for LlamaCpp"""
def make_logit_bias_processor(
self,
logit_bias: Dict[str, float],
logit_bias_type: Optional[Literal["input_ids", "tokens"]],
):
if logit_bias_type is None:
logit_bias_type = "input_ids"
to_bias: Dict[int, float] = {}
if logit_bias_type == "input_ids":
for input_id, score in logit_bias.items():
input_id = int(input_id)
to_bias[input_id] = score
elif logit_bias_type == "tokens":
for token, score in logit_bias.items():
token = token.encode("utf-8")
for input_id in self.llm.tokenize(token, add_bos=False):
to_bias[input_id] = score
def logit_bias_processor(
input_ids: List[int],
scores: List[float],
) -> List[float]:
new_scores = [None] * len(scores)
for input_id, score in enumerate(scores):
new_scores[input_id] = score + to_bias.get(input_id, 0.0)
return new_scores
return logit_bias_processor
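    # Shape of the mapping the processor above expects (a hedged illustration, not
    # from the original source): with logit_bias_type="input_ids" the keys are
    # token ids, with "tokens" they are raw strings; values are added to the logits.
    #
    #     logit_bias = {123: 100.0, 456: -100.0}   # strongly favour / suppress ids
    #     processor = self.make_logit_bias_processor(logit_bias, "input_ids")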
async def __call__(
self,
prompt: str,
suffix: Optional[str] = None,
max_tokens: int = 128,
temperature: float = 0.8,
top_p: float = 0.95,
logprobs: Optional[int] = None,
echo: bool = False,
stop: Optional[Union[str, List[str]]] = [],
frequency_penalty: float = 0.0,
presence_penalty: float = 0.0,
repeat_penalty: float = 1.1,
top_k: int = 40,
stream: bool = False,
tfs_z: float = 1.0,
mirostat_mode: int = 0,
mirostat_tau: float = 5.0,
mirostat_eta: float = 0.1,
model: Optional[str] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
**kwargs,
):
logits_processor = None
if "logit_bias" in kwargs and kwargs["logit_bias"] is not None:
# Logits are the options we want. Cache the tokens so we can enforce their
# usage during token_to_id.
for id in kwargs["logit_bias"].keys():
token = self.llm.decode([id])
self.llm._tokenizer.cache[token] = [id]
logits_processor = llama_cpp.LogitsProcessorList(
[
self.make_logit_bias_processor(kwargs["logit_bias"], "input_ids"),
]
)
return self.llm(
prompt,
suffix=suffix,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
logprobs=logprobs,
echo=echo,
stop=stop,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
repeat_penalty=repeat_penalty,
top_k=top_k,
stream=stream,
tfs_z=tfs_z,
mirostat_mode=mirostat_mode,
mirostat_tau=mirostat_tau,
mirostat_eta=mirostat_eta,
model=model,
stopping_criteria=stopping_criteria,
logits_processor=logits_processor,
)
| [] |
2024-01-10 | nicholasyager/llama-cpp-guidance | example.py | import random
from pathlib import Path
import guidance
from loguru import logger
from llama_cpp_guidance.llm import LlamaCpp
logger.enable("llama_cpp_guidance")
# set the default language model used to execute guidance programs
guidance.llm = LlamaCpp(
model_path=Path("../../llm/models/gguf/pygmalion-2-13b.Q4_K_M.gguf"),
n_gpu_layers=1,
n_threads=8,
seed=random.randint(0, 1000000),
)
# we can use the {{gen}} command to generate text from the language model
# note that we used a ~ at the start of the command tag to remove the whitespace before
# it (just like in Handlebars)
# we can pre-define valid option sets
valid_weapons = ["sword", "axe", "mace", "spear", "bow", "crossbow"]
valid_armor = ["leather", "chainmail", "plate"]
name_program = guidance(
"""The following is a character profile for an RPG game in JSON format.
```json
{
"description": "{{description}}",
"first_name": "{{gen 'first_name' temperature=0.8 max_tokens=12 stop=[' ', '"']}}",
"last_name": "{{gen 'last_name' temperature=0.8 max_tokens=12 stop=[' ', '"']}}",
}```""",
logging=True,
)
name_output = name_program(
description="A quick and nimble fighter.",
)
# define the prompt
program = guidance(
"""The following is a character profile for an RPG game in JSON format.
```json
{
"description": "{{description}}",
"name": "{{ name }}",
"age": {{gen 'age' pattern='[0-9]+' stop=',' temperature=1}},
"armor": "{{select 'armor' logprobs='logprobs' options=valid_armor}}",
"weapon": "{{select 'weapon' options=valid_weapons}}",
"class": "{{gen 'class' stop='"'}}",
"mantra": "{{gen 'mantra' temperature=0.8 stop='"'}}",
"strength": {{gen 'strength' pattern='[0-9]+' stop=','}},
"items": [{{#geneach 'character_items' num_iterations=3}}
"{{gen 'this' stop='"' temperature=0.95}}",{{/geneach}}
]
}```""",
logging=True,
)
# execute the prompt
output = program(
description="A quick and nimble rouge that murdered a lich using a crossbow",
valid_weapons=valid_weapons,
valid_armor=valid_armor,
name=name_output["first_name"] + " " + name_output["last_name"],
)
print(output)
| [] |
2024-01-10 | RKP64/AutomatedInterviewingSystem | Python-Backend~pdfGenerator.py | from pylatex import Document, Section, Subsection, Command, Itemize, Enumerate, Description
from pylatex.utils import italic, NoEscape
import openai, json, os, re
class MyDocument(Document):
def __init__(self):
import interview
super().__init__()
self.preamble.append(Command('title', f'{interview.Industry} {interview.Position} Interview Report'))
self.preamble.append(Command('author', f'Nbody Labs, {interview.Company}'))
self.preamble.append(Command('date', NoEscape(r'\today')))
self.append(NoEscape(r'\maketitle'))
def fill_document(self):
import interview
def remove_control_characters(s):
return re.sub(r'[\x00-\x1F\x7F]', '', s)
n = interview.n_topics
question_list = []
difficulty_list = []
topic_list = []
feedback_list = []
score_list = []
correct_solution_list = []
rubric_list = []
with open("mainInterviewTopic.json", "r") as f:
data_questions = json.load(f)
for i in range(n):
question_list.append(data_questions["questions"][i]["Question"])
difficulty_list.append(data_questions["questions"][i]["Difficulty"])
topic_list.append(data_questions["questions"][i]["Topic"])
with open(f"score{i}.json", "r") as sc:
file_content = sc.read().translate(str.maketrans("", "", "\r\n\t"))
grade = json.loads(file_content)
score_list.append(grade["grade"][0]["Score"])
feedback_list.append(grade["grade"][0]["Remarks"])
with open(f"solution{i}.json", "r") as sl:
file_content = sl.read()
cleaned_content = remove_control_characters(file_content)
escaped_content = cleaned_content.replace('\\', '\\\\')
solution_i = json.loads(escaped_content)
correct_solution_list.append(solution_i["solution"][0]["Answer"])
with open(f"rubric{i}.json", "r") as ru:
file_content = ru.read().translate(str.maketrans("", "", "\r\n\t"))
rubric_i = json.loads(file_content)
rubric_list.append(rubric_i)
"""Add a section, a subsection and some text to the document."""
with self.create(Section('Interview Transcript')):
self.append('Below is the complete transcript of the interview. ')
#Include Transcript here...
self.append('The topics covered in the interview are: Topic 1, ..., Topic n.')
with self.create(Section('Technical Interview')):
self.append('Below are the questions and solutions generated by NBodyLabs.')
with self.create(Subsection(f"Question #{i}: ")):
for i in range(n):
with self.create(Description()) as desc:
desc.add_item(f"Problem: ", f"{question_list[i]}")
desc.add_item(f"Difficulty: ", f"{difficulty_list[i]}")
desc.add_item(f"Topic: ", f"{topic_list[i]}")
desc.add_item(f"Reference Solution: ", f"{correct_solution_list[i]}")
desc.add_item(f"Grading Rubric: ", f"{rubric_list[i]}")
# desc.add_item(f"Candidate Solution: ", f"{score_list[i]}")
desc.add_item(f"Feedback on Candidate Solution: ", f"{feedback_list[i]}")
desc.add_item(f"Candidate Score: ", f"{score_list[i]}")
def pdfGeneration():
doc = MyDocument()
doc.fill_document()
doc.generate_pdf('InterviewReport', clean_tex=False)
if __name__ == "__main__":
pdfGeneration() | [] |
2024-01-10 | eureka-research/Eureka | eureka~utils~prompts~paraphrase.py |
import sys
import argparse
from pathlib import Path
import openai
def paraphrase(string, num=1):
messages = [
{"role": "system", "content": "Please paraphrase the following instructions while preserving their meaning. Any words surrounded by {} should also appear in your result with a similar context."},
{"role": "user", "content": string}
]
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
temperature=0.7,
n=num,
)
return [choice["message"]["content"] for choice in responses["choices"]]
if __name__ == "__main__":
"""
Example usage:
python paraphrase.py initial_system.txt -n 3
"""
parser = argparse.ArgumentParser()
parser.add_argument("filename", type=str, help="Path to file containing content to paraphrase")
parser.add_argument("-n", "--num", type=int, default=1, help="Number of paraphrases to generate")
args = parser.parse_args()
filename, num = Path(args.filename), args.num
with open(filename, "r") as f:
responses = paraphrase(f.read(), num)
for i, response in enumerate(responses):
with open(filename.parent / Path(str(filename.stem) + f"-{i}" + str(filename.suffix)), "w") as f:
f.write(response) | [
"Please paraphrase the following instructions while preserving their meaning. Any words surrounded by {} should also appear in your result with a similar context."
] |
2024-01-10 | eureka-research/Eureka | eureka~eureka.py | import hydra
import numpy as np
import json
import logging
import matplotlib.pyplot as plt
import os
import openai
import re
import subprocess
from pathlib import Path
import shutil
import time
from utils.misc import *
from utils.file_utils import find_files_with_substring, load_tensorboard_logs
from utils.create_task import create_task
from utils.extract_task_code import *
EUREKA_ROOT_DIR = os.getcwd()
ISAAC_ROOT_DIR = f"{EUREKA_ROOT_DIR}/../isaacgymenvs/isaacgymenvs"
@hydra.main(config_path="cfg", config_name="config", version_base="1.1")
def main(cfg):
workspace_dir = Path.cwd()
logging.info(f"Workspace: {workspace_dir}")
logging.info(f"Project Root: {EUREKA_ROOT_DIR}")
openai.api_key = os.getenv("OPENAI_API_KEY")
task = cfg.env.task
task_description = cfg.env.description
suffix = cfg.suffix
model = cfg.model
logging.info(f"Using LLM: {model}")
logging.info("Task: " + task)
logging.info("Task description: " + task_description)
env_name = cfg.env.env_name.lower()
env_parent = 'isaac' if f'{env_name}.py' in os.listdir(f'{EUREKA_ROOT_DIR}/envs/isaac') else 'dexterity'
task_file = f'{EUREKA_ROOT_DIR}/envs/{env_parent}/{env_name}.py'
task_obs_file = f'{EUREKA_ROOT_DIR}/envs/{env_parent}/{env_name}_obs.py'
shutil.copy(task_obs_file, f"env_init_obs.py")
task_code_string = file_to_string(task_file)
task_obs_code_string = file_to_string(task_obs_file)
output_file = f"{ISAAC_ROOT_DIR}/tasks/{env_name}{suffix.lower()}.py"
# Loading all text prompts
prompt_dir = f'{EUREKA_ROOT_DIR}/utils/prompts'
initial_system = file_to_string(f'{prompt_dir}/initial_system.txt')
code_output_tip = file_to_string(f'{prompt_dir}/code_output_tip.txt')
code_feedback = file_to_string(f'{prompt_dir}/code_feedback.txt')
initial_user = file_to_string(f'{prompt_dir}/initial_user.txt')
reward_signature = file_to_string(f'{prompt_dir}/reward_signature.txt')
policy_feedback = file_to_string(f'{prompt_dir}/policy_feedback.txt')
execution_error_feedback = file_to_string(f'{prompt_dir}/execution_error_feedback.txt')
initial_system = initial_system.format(task_reward_signature_string=reward_signature) + code_output_tip
initial_user = initial_user.format(task_obs_code_string=task_obs_code_string, task_description=task_description)
messages = [{"role": "system", "content": initial_system}, {"role": "user", "content": initial_user}]
task_code_string = task_code_string.replace(task, task+suffix)
# Create Task YAML files
create_task(ISAAC_ROOT_DIR, cfg.env.task, cfg.env.env_name, suffix)
DUMMY_FAILURE = -10000.
max_successes = []
max_successes_reward_correlation = []
execute_rates = []
best_code_paths = []
max_success_overall = DUMMY_FAILURE
max_success_reward_correlation_overall = DUMMY_FAILURE
max_reward_code_path = None
# Eureka generation loop
for iter in range(cfg.iteration):
# Get Eureka response
responses = []
response_cur = None
total_samples = 0
total_token = 0
total_completion_token = 0
chunk_size = cfg.sample if "gpt-3.5" in model else 4
logging.info(f"Iteration {iter}: Generating {cfg.sample} samples with {cfg.model}")
while True:
if total_samples >= cfg.sample:
break
for attempt in range(1000):
try:
response_cur = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=cfg.temperature,
n=chunk_size
)
total_samples += chunk_size
break
except Exception as e:
if attempt >= 10:
chunk_size = max(int(chunk_size / 2), 1)
print("Current Chunk Size", chunk_size)
logging.info(f"Attempt {attempt+1} failed with error: {e}")
time.sleep(1)
if response_cur is None:
logging.info("Code terminated due to too many failed attempts!")
exit()
responses.extend(response_cur["choices"])
prompt_tokens = response_cur["usage"]["prompt_tokens"]
total_completion_token += response_cur["usage"]["completion_tokens"]
total_token += response_cur["usage"]["total_tokens"]
if cfg.sample == 1:
logging.info(f"Iteration {iter}: GPT Output:\n " + responses[0]["message"]["content"] + "\n")
# Logging Token Information
logging.info(f"Iteration {iter}: Prompt Tokens: {prompt_tokens}, Completion Tokens: {total_completion_token}, Total Tokens: {total_token}")
code_runs = []
rl_runs = []
for response_id in range(cfg.sample):
response_cur = responses[response_id]["message"]["content"]
logging.info(f"Iteration {iter}: Processing Code Run {response_id}")
# Regex patterns to extract python code enclosed in GPT response
patterns = [
r'```python(.*?)```',
r'```(.*?)```',
r'"""(.*?)"""',
r'""(.*?)""',
r'"(.*?)"',
]
for pattern in patterns:
code_string = re.search(pattern, response_cur, re.DOTALL)
if code_string is not None:
code_string = code_string.group(1).strip()
break
code_string = response_cur if not code_string else code_string
# Remove unnecessary imports
lines = code_string.split("\n")
for i, line in enumerate(lines):
if line.strip().startswith("def "):
code_string = "\n".join(lines[i:])
# Add the Eureka Reward Signature to the environment code
try:
gpt_reward_signature, input_lst = get_function_signature(code_string)
except Exception as e:
logging.info(f"Iteration {iter}: Code Run {response_id} cannot parse function signature!")
continue
code_runs.append(code_string)
reward_signature = [
f"self.rew_buf[:], self.rew_dict = {gpt_reward_signature}",
f"self.extras['gpt_reward'] = self.rew_buf.mean()",
f"for rew_state in self.rew_dict: self.extras[rew_state] = self.rew_dict[rew_state].mean()",
]
indent = " " * 8
reward_signature = "\n".join([indent + line for line in reward_signature])
if "def compute_reward(self)" in task_code_string:
task_code_string_iter = task_code_string.replace("def compute_reward(self):", "def compute_reward(self):\n" + reward_signature)
elif "def compute_reward(self, actions)" in task_code_string:
task_code_string_iter = task_code_string.replace("def compute_reward(self, actions):", "def compute_reward(self, actions):\n" + reward_signature)
else:
raise NotImplementedError
# Save the new environment code when the output contains valid code string!
with open(output_file, 'w') as file:
file.writelines(task_code_string_iter + '\n')
file.writelines("from typing import Tuple, Dict" + '\n')
file.writelines("import math" + '\n')
file.writelines("import torch" + '\n')
file.writelines("from torch import Tensor" + '\n')
if "@torch.jit.script" not in code_string:
code_string = "@torch.jit.script\n" + code_string
file.writelines(code_string + '\n')
with open(f"env_iter{iter}_response{response_id}_rewardonly.py", 'w') as file:
file.writelines(code_string + '\n')
# Copy the generated environment code to hydra output directory for bookkeeping
shutil.copy(output_file, f"env_iter{iter}_response{response_id}.py")
# Find the freest GPU to run GPU-accelerated RL
set_freest_gpu()
# Execute the python file with flags
rl_filepath = f"env_iter{iter}_response{response_id}.txt"
with open(rl_filepath, 'w') as f:
process = subprocess.Popen(['python', '-u', f'{ISAAC_ROOT_DIR}/train.py',
'hydra/output=subprocess',
f'task={task}{suffix}', f'wandb_activate={cfg.use_wandb}',
f'wandb_entity={cfg.wandb_username}', f'wandb_project={cfg.wandb_project}',
f'headless={not cfg.capture_video}', f'capture_video={cfg.capture_video}', 'force_render=False',
f'max_iterations={cfg.max_iterations}'],
stdout=f, stderr=f)
block_until_training(rl_filepath, log_status=True, iter_num=iter, response_id=response_id)
rl_runs.append(process)
# Gather RL training results and construct reward reflection
code_feedbacks = []
contents = []
successes = []
reward_correlations = []
code_paths = []
exec_success = False
for response_id, (code_run, rl_run) in enumerate(zip(code_runs, rl_runs)):
rl_run.communicate()
rl_filepath = f"env_iter{iter}_response{response_id}.txt"
code_paths.append(f"env_iter{iter}_response{response_id}.py")
try:
with open(rl_filepath, 'r') as f:
stdout_str = f.read()
except:
content = execution_error_feedback.format(traceback_msg="Code Run cannot be executed due to function signature error! Please re-write an entirely new reward function!")
content += code_output_tip
contents.append(content)
successes.append(DUMMY_FAILURE)
reward_correlations.append(DUMMY_FAILURE)
continue
content = ''
traceback_msg = filter_traceback(stdout_str)
if traceback_msg == '':
# If RL execution has no error, provide policy statistics feedback
exec_success = True
lines = stdout_str.split('\n')
for i, line in enumerate(lines):
if line.startswith('Tensorboard Directory:'):
break
tensorboard_logdir = line.split(':')[-1].strip()
tensorboard_logs = load_tensorboard_logs(tensorboard_logdir)
max_iterations = np.array(tensorboard_logs['gt_reward']).shape[0]
epoch_freq = max(int(max_iterations // 10), 1)
content += policy_feedback.format(epoch_freq=epoch_freq)
# Compute Correlation between Human-Engineered and GPT Rewards
if "gt_reward" in tensorboard_logs and "gpt_reward" in tensorboard_logs:
gt_reward = np.array(tensorboard_logs["gt_reward"])
gpt_reward = np.array(tensorboard_logs["gpt_reward"])
reward_correlation = np.corrcoef(gt_reward, gpt_reward)[0, 1]
reward_correlations.append(reward_correlation)
# Add reward components log to the feedback
for metric in tensorboard_logs:
if "/" not in metric:
metric_cur = ['{:.2f}'.format(x) for x in tensorboard_logs[metric][::epoch_freq]]
metric_cur_max = max(tensorboard_logs[metric])
metric_cur_mean = sum(tensorboard_logs[metric]) / len(tensorboard_logs[metric])
if "consecutive_successes" == metric:
successes.append(metric_cur_max)
metric_cur_min = min(tensorboard_logs[metric])
if metric != "gt_reward" and metric != "gpt_reward":
if metric != "consecutive_successes":
metric_name = metric
else:
metric_name = "task_score"
content += f"{metric_name}: {metric_cur}, Max: {metric_cur_max:.2f}, Mean: {metric_cur_mean:.2f}, Min: {metric_cur_min:.2f} \n"
else:
# Provide ground-truth score when success rate not applicable
if "consecutive_successes" not in tensorboard_logs:
content += f"ground-truth score: {metric_cur}, Max: {metric_cur_max:.2f}, Mean: {metric_cur_mean:.2f}, Min: {metric_cur_min:.2f} \n"
code_feedbacks.append(code_feedback)
content += code_feedback
else:
# Otherwise, provide execution traceback error feedback
successes.append(DUMMY_FAILURE)
reward_correlations.append(DUMMY_FAILURE)
content += execution_error_feedback.format(traceback_msg=traceback_msg)
content += code_output_tip
contents.append(content)
# Repeat the iteration if all code generation failed
if not exec_success and cfg.sample != 1:
execute_rates.append(0.)
max_successes.append(DUMMY_FAILURE)
max_successes_reward_correlation.append(DUMMY_FAILURE)
best_code_paths.append(None)
logging.info("All code generation failed! Repeat this iteration from the current message checkpoint!")
continue
# Select the best code sample based on the success rate
best_sample_idx = np.argmax(np.array(successes))
best_content = contents[best_sample_idx]
max_success = successes[best_sample_idx]
max_success_reward_correlation = reward_correlations[best_sample_idx]
execute_rate = np.sum(np.array(successes) >= 0.) / cfg.sample
# Update the best Eureka Output
if max_success > max_success_overall:
max_success_overall = max_success
max_success_reward_correlation_overall = max_success_reward_correlation
max_reward_code_path = code_paths[best_sample_idx]
execute_rates.append(execute_rate)
max_successes.append(max_success)
max_successes_reward_correlation.append(max_success_reward_correlation)
best_code_paths.append(code_paths[best_sample_idx])
logging.info(f"Iteration {iter}: Max Success: {max_success}, Execute Rate: {execute_rate}, Max Success Reward Correlation: {max_success_reward_correlation}")
logging.info(f"Iteration {iter}: Best Generation ID: {best_sample_idx}")
logging.info(f"Iteration {iter}: GPT Output Content:\n" + responses[best_sample_idx]["message"]["content"] + "\n")
logging.info(f"Iteration {iter}: User Content:\n" + best_content + "\n")
# Plot the success rate
fig, axs = plt.subplots(2, figsize=(6, 6))
fig.suptitle(f'{cfg.env.task}')
x_axis = np.arange(len(max_successes))
axs[0].plot(x_axis, np.array(max_successes))
axs[0].set_title("Max Success")
axs[0].set_xlabel("Iteration")
axs[1].plot(x_axis, np.array(execute_rates))
axs[1].set_title("Execute Rate")
axs[1].set_xlabel("Iteration")
fig.tight_layout(pad=3.0)
plt.savefig('summary.png')
np.savez('summary.npz', max_successes=max_successes, execute_rates=execute_rates, best_code_paths=best_code_paths, max_successes_reward_correlation=max_successes_reward_correlation)
if len(messages) == 2:
messages += [{"role": "assistant", "content": responses[best_sample_idx]["message"]["content"]}]
messages += [{"role": "user", "content": best_content}]
else:
assert len(messages) == 4
messages[-2] = {"role": "assistant", "content": responses[best_sample_idx]["message"]["content"]}
messages[-1] = {"role": "user", "content": best_content}
# Save dictionary as JSON file
with open('messages.json', 'w') as file:
json.dump(messages, file, indent=4)
# Evaluate the best reward code many times
if max_reward_code_path is None:
logging.info("All iterations of code generation failed, aborting...")
logging.info("Please double check the output env_iter*_response*.txt files for repeating errors!")
exit()
logging.info(f"Task: {task}, Max Training Success {max_success_overall}, Correlation {max_success_reward_correlation_overall}, Best Reward Code Path: {max_reward_code_path}")
logging.info(f"Evaluating best reward code {cfg.num_eval} times")
shutil.copy(max_reward_code_path, output_file)
eval_runs = []
for i in range(cfg.num_eval):
set_freest_gpu()
# Execute the python file with flags
rl_filepath = f"reward_code_eval{i}.txt"
with open(rl_filepath, 'w') as f:
process = subprocess.Popen(['python', '-u', f'{ISAAC_ROOT_DIR}/train.py',
'hydra/output=subprocess',
f'task={task}{suffix}', f'wandb_activate={cfg.use_wandb}',
f'wandb_entity={cfg.wandb_username}', f'wandb_project={cfg.wandb_project}',
f'headless={not cfg.capture_video}', f'capture_video={cfg.capture_video}', 'force_render=False', f'seed={i}',
],
stdout=f, stderr=f)
block_until_training(rl_filepath)
eval_runs.append(process)
reward_code_final_successes = []
reward_code_correlations_final = []
for i, rl_run in enumerate(eval_runs):
rl_run.communicate()
rl_filepath = f"reward_code_eval{i}.txt"
with open(rl_filepath, 'r') as f:
stdout_str = f.read()
lines = stdout_str.split('\n')
for i, line in enumerate(lines):
if line.startswith('Tensorboard Directory:'):
break
tensorboard_logdir = line.split(':')[-1].strip()
tensorboard_logs = load_tensorboard_logs(tensorboard_logdir)
max_success = max(tensorboard_logs['consecutive_successes'])
reward_code_final_successes.append(max_success)
if "gt_reward" in tensorboard_logs and "gpt_reward" in tensorboard_logs:
gt_reward = np.array(tensorboard_logs["gt_reward"])
gpt_reward = np.array(tensorboard_logs["gpt_reward"])
reward_correlation = np.corrcoef(gt_reward, gpt_reward)[0, 1]
reward_code_correlations_final.append(reward_correlation)
logging.info(f"Final Success Mean: {np.mean(reward_code_final_successes)}, Std: {np.std(reward_code_final_successes)}, Raw: {reward_code_final_successes}")
logging.info(f"Final Correlation Mean: {np.mean(reward_code_correlations_final)}, Std: {np.std(reward_code_correlations_final)}, Raw: {reward_code_correlations_final}")
np.savez('final_eval.npz', reward_code_final_successes=reward_code_final_successes, reward_code_correlations_final=reward_code_correlations_final)
if __name__ == "__main__":
main() | [
"PLACEHOLDER/utils/prompts",
"content",
"prompt_tokens"
] |
2024-01-10 | sid403577/aifin | chains~local_doc_qa.py | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from vectorstores import MyFAISS
from langchain.document_loaders import UnstructuredFileLoader, TextLoader, CSVLoader
from configs.model_config import *
import datetime
from textsplitter import ChineseTextSplitter
from typing import List
from utils import torch_gc
from tqdm import tqdm
from pypinyin import lazy_pinyin
from loader import UnstructuredPaddleImageLoader, UnstructuredPaddlePDFLoader
from models.base import (BaseAnswer,
AnswerResult)
from models.loader.args import parser
from models.loader import LoaderCheckPoint
import models.shared as shared
from agent import bing_search
from langchain.docstore.document import Document
from functools import lru_cache
from textsplitter.zh_title_enhance import zh_title_enhance
# patch HuggingFaceEmbeddings to make it hashable
def _embeddings_hash(self):
return hash(self.model_name)
HuggingFaceEmbeddings.__hash__ = _embeddings_hash
# will keep CACHED_VS_NUM of vector store caches
@lru_cache(CACHED_VS_NUM)
def load_vector_store(vs_path, embeddings):
return MyFAISS.load_local(vs_path, embeddings)
def tree(filepath, ignore_dir_names=None, ignore_file_names=None):
"""返回两个列表,第一个列表为 filepath 下全部文件的完整路径, 第二个为对应的文件名"""
if ignore_dir_names is None:
ignore_dir_names = []
if ignore_file_names is None:
ignore_file_names = []
ret_list = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("路径不存在")
return None, None
elif os.path.isfile(filepath) and os.path.basename(filepath) not in ignore_file_names:
return [filepath], [os.path.basename(filepath)]
elif os.path.isdir(filepath) and os.path.basename(filepath) not in ignore_dir_names:
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
if os.path.isfile(fullfilepath) and os.path.basename(fullfilepath) not in ignore_file_names:
ret_list.append(fullfilepath)
if os.path.isdir(fullfilepath) and os.path.basename(fullfilepath) not in ignore_dir_names:
ret_list.extend(tree(fullfilepath, ignore_dir_names, ignore_file_names)[0])
return ret_list, [os.path.basename(p) for p in ret_list]
def load_file(filepath, sentence_size=SENTENCE_SIZE, using_zh_title_enhance=ZH_TITLE_ENHANCE):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.lower().endswith(".txt"):
loader = TextLoader(filepath, autodetect_encoding=True)
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredPaddlePDFLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True, sentence_size=sentence_size)
docs = loader.load_and_split(textsplitter)
elif filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"):
loader = UnstructuredPaddleImageLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".csv"):
loader = CSVLoader(filepath)
docs = loader.load()
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = loader.load_and_split(text_splitter=textsplitter)
if using_zh_title_enhance:
docs = zh_title_enhance(docs)
write_check_file(filepath, docs)
return docs
def write_check_file(filepath, docs):
folder_path = os.path.join(os.path.dirname(filepath), "tmp_files")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
fp = os.path.join(folder_path, 'load_file.txt')
with open(fp, 'a+', encoding='utf-8') as fout:
fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
fout.write('\n')
for i in docs:
fout.write(str(i))
fout.write('\n')
fout.close()
def generate_prompt(related_docs: List[str],
query: str,
prompt_template: str = PROMPT_TEMPLATE, ) -> str:
context = "\n".join([doc.page_content for doc in related_docs])
prompt = prompt_template.replace("{question}", query).replace("{context}", context)
return prompt
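# Illustrative example (hypothetical values; assumes PROMPT_TEMPLATE contains the
# "{context}" and "{question}" placeholders, as the replace() calls above imply):
# for related_docs whose page_content is "GAMs are additive models." and the query
# "What is a GAM?", the returned prompt is the template text with "{context}" replaced
# by the document content and "{question}" replaced by the query.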
def search_result2docs(search_results):
docs = []
for result in search_results:
doc = Document(page_content=result["snippet"] if "snippet" in result.keys() else "",
metadata={"source": result["link"] if "link" in result.keys() else "",
"filename": result["title"] if "title" in result.keys() else ""})
docs.append(doc)
return docs
class LocalDocQA:
llm: BaseAnswer = None
embeddings: object = None
top_k: int = VECTOR_SEARCH_TOP_K
chunk_size: int = CHUNK_SIZE
chunk_conent: bool = True
score_threshold: int = VECTOR_SEARCH_SCORE_THRESHOLD
def init_cfg(self,
embedding_model: str = EMBEDDING_MODEL,
embedding_device=EMBEDDING_DEVICE,
llm_model: BaseAnswer = None,
top_k=VECTOR_SEARCH_TOP_K,
):
self.llm = llm_model
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model],
model_kwargs={'device': embedding_device})
self.top_k = top_k
def init_knowledge_vector_store(self,
filepath: str or List[str],
vs_path: str or os.PathLike = None,
sentence_size=SENTENCE_SIZE):
loaded_files = []
failed_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("路径不存在")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = load_file(filepath, sentence_size)
logger.info(f"{file} 已成功加载")
loaded_files.append(filepath)
except Exception as e:
logger.error(e)
logger.info(f"{file} 未能成功加载")
return None
elif os.path.isdir(filepath):
docs = []
for fullfilepath, file in tqdm(zip(*tree(filepath, ignore_dir_names=['tmp_files'])), desc="加载文件"):
try:
docs += load_file(fullfilepath, sentence_size)
loaded_files.append(fullfilepath)
except Exception as e:
logger.error(e)
failed_files.append(file)
if len(failed_files) > 0:
logger.info("以下文件未能成功加载:")
for file in failed_files:
logger.info(f"{file}\n")
else:
docs = []
for file in filepath:
try:
docs += load_file(file)
logger.info(f"{file} 已成功加载")
loaded_files.append(file)
except Exception as e:
logger.error(e)
logger.info(f"{file} 未能成功加载")
if len(docs) > 0:
logger.info("文件加载完毕,正在生成向量库")
if vs_path and os.path.isdir(vs_path) and "index.faiss" in os.listdir(vs_path):
vector_store = load_vector_store(vs_path, self.embeddings)
vector_store.add_documents(docs)
torch_gc()
else:
if not vs_path:
vs_path = os.path.join(KB_ROOT_PATH,
f"""{"".join(lazy_pinyin(os.path.splitext(file)[0]))}_FAISS_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}""",
"vector_store")
                vector_store = MyFAISS.from_documents(docs, self.embeddings)  # docs is a list of Document objects
torch_gc()
vector_store.save_local(vs_path)
return vs_path, loaded_files
else:
logger.info("文件均未成功加载,请检查依赖包或替换为其他文件再次上传。")
return None, loaded_files
def one_knowledge_add(self, vs_path, one_title, one_conent, one_content_segmentation, sentence_size):
try:
if not vs_path or not one_title or not one_conent:
logger.info("知识库添加错误,请确认知识库名字、标题、内容是否正确!")
return None, [one_title]
docs = [Document(page_content=one_conent + "\n", metadata={"source": one_title})]
if not one_content_segmentation:
text_splitter = ChineseTextSplitter(pdf=False, sentence_size=sentence_size)
docs = text_splitter.split_documents(docs)
if os.path.isdir(vs_path) and os.path.isfile(vs_path + "/index.faiss"):
vector_store = load_vector_store(vs_path, self.embeddings)
vector_store.add_documents(docs)
else:
                vector_store = MyFAISS.from_documents(docs, self.embeddings)  # docs is a list of Document objects
torch_gc()
vector_store.save_local(vs_path)
return vs_path, [one_title]
except Exception as e:
logger.error(e)
return None, [one_title]
def get_knowledge_based_answer(self, query, vs_path, chat_history=[], streaming: bool = STREAMING):
vector_store = load_vector_store(vs_path, self.embeddings)
vector_store.chunk_size = self.chunk_size
vector_store.chunk_conent = self.chunk_conent
vector_store.score_threshold = self.score_threshold
related_docs_with_score = vector_store.similarity_search_with_score(query, k=self.top_k)
torch_gc()
if len(related_docs_with_score) > 0:
prompt = generate_prompt(related_docs_with_score, query)
else:
prompt = query
for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
history = answer_result.history
history[-1][0] = query
response = {"query": query,
"result": resp,
"source_documents": related_docs_with_score}
yield response, history
    # query: the query text
    # vs_path: path to the knowledge base vector store
    # chunk_conent: whether to enable context linking between chunks
    # score_threshold: score threshold for matching search results
    # vector_search_top_k: number of knowledge-base entries to retrieve (defaults to 5 results)
    # chunk_sizes: length of the linked context around each matched chunk
def get_knowledge_based_conent_test(self, query, vs_path, chunk_conent,
score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD,
vector_search_top_k=VECTOR_SEARCH_TOP_K, chunk_size=CHUNK_SIZE):
vector_store = load_vector_store(vs_path, self.embeddings)
# FAISS.similarity_search_with_score_by_vector = similarity_search_with_score_by_vector
vector_store.chunk_conent = chunk_conent
vector_store.score_threshold = score_threshold
vector_store.chunk_size = chunk_size
related_docs_with_score = vector_store.similarity_search_with_score(query, k=vector_search_top_k)
if not related_docs_with_score:
response = {"query": query,
"source_documents": []}
return response, ""
torch_gc()
prompt = "\n".join([doc.page_content for doc in related_docs_with_score])
response = {"query": query,
"source_documents": related_docs_with_score}
return response, prompt
def get_search_result_based_answer(self, query, chat_history=[], streaming: bool = STREAMING):
results = bing_search(query)
result_docs = search_result2docs(results)
prompt = generate_prompt(result_docs, query)
for answer_result in self.llm.generatorAnswer(prompt=prompt, history=chat_history,
streaming=streaming):
resp = answer_result.llm_output["answer"]
history = answer_result.history
history[-1][0] = query
response = {"query": query,
"result": resp,
"source_documents": result_docs}
yield response, history
def delete_file_from_vector_store(self,
filepath: str or List[str],
vs_path):
vector_store = load_vector_store(vs_path, self.embeddings)
status = vector_store.delete_doc(filepath)
return status
def update_file_from_vector_store(self,
filepath: str or List[str],
vs_path,
docs: List[Document],):
vector_store = load_vector_store(vs_path, self.embeddings)
status = vector_store.update_doc(filepath, docs)
return status
def list_file_from_vector_store(self,
vs_path,
fullpath=False):
vector_store = load_vector_store(vs_path, self.embeddings)
docs = vector_store.list_docs()
if fullpath:
return docs
else:
return [os.path.split(doc)[-1] for doc in docs]
if __name__ == "__main__":
    # Initialize model loading arguments
args = None
args = parser.parse_args(args=['--model-dir', '/media/checkpoint/', '--model', 'chatglm-6b', '--no-remote-model'])
args_dict = vars(args)
shared.loaderCheckPoint = LoaderCheckPoint(args_dict)
llm_model_ins = shared.loaderLLM()
llm_model_ins.set_history_len(LLM_HISTORY_LEN)
local_doc_qa = LocalDocQA()
local_doc_qa.init_cfg(llm_model=llm_model_ins)
query = "本项目使用的embedding模型是什么,消耗多少显存"
vs_path = "/media/gpt4-pdf-chatbot-langchain/dev-aifin/vector_store/test"
last_print_len = 0
# for resp, history in local_doc_qa.get_knowledge_based_answer(query=query,
# vs_path=vs_path,
# chat_history=[],
# streaming=True):
for resp, history in local_doc_qa.get_search_result_based_answer(query=query,
chat_history=[],
streaming=True):
print(resp["result"][last_print_len:], end="", flush=True)
last_print_len = len(resp["result"])
source_text = [f"""出处 [{inum + 1}] {doc.metadata['source'] if doc.metadata['source'].startswith("http")
else os.path.split(doc.metadata['source'])[-1]}:\n\n{doc.page_content}\n\n"""
# f"""相关度:{doc.metadata['score']}\n\n"""
for inum, doc in
enumerate(resp["source_documents"])]
logger.info("\n\n" + "\n\n".join(source_text))
pass
| [
"本项目使用的embedding模型是什么,消耗多少显存",
"{context}",
"{question}",
"\n"
] |
2024-01-10 | sid403577/aifin | models~fastchat_openai_llm.py | from abc import ABC
import requests
from typing import Optional, List
from langchain.llms.base import LLM
from models.loader import LoaderCheckPoint
from models.base import (RemoteRpcModel,
AnswerResult)
from typing import (
Collection,
Dict
)
def _build_message_template() -> Dict[str, str]:
"""
:return: 结构
"""
return {
"role": "",
"content": "",
}
class FastChatOpenAILLM(RemoteRpcModel, LLM, ABC):
api_base_url: str = "http://localhost:8000/v1"
api_key: str = "EMPTY"
model_name: str = "chatglm-6b"
max_token: int = 10000
temperature: float = 0.01
top_p = 0.9
checkPoint: LoaderCheckPoint = None
history = []
history_len: int = 10
def __init__(self, checkPoint: LoaderCheckPoint = None):
super().__init__()
self.checkPoint = checkPoint
@property
def _llm_type(self) -> str:
return "FastChat"
@property
def _check_point(self) -> LoaderCheckPoint:
return self.checkPoint
@property
def _history_len(self) -> int:
return self.history_len
def set_history_len(self, history_len: int = 10) -> None:
self.history_len = history_len
@property
def _api_key(self) -> str:
return self.api_key
@property
def _api_base_url(self) -> str:
return self.api_base_url
def set_api_key(self, api_key: str):
self.api_key = api_key
def set_api_base_url(self, api_base_url: str):
self.api_base_url = api_base_url
def call_model_name(self, model_name):
self.model_name = model_name
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
print(f"__call:{prompt}")
try:
import openai
            # Not supported yet
openai.api_key = self.api_key
openai.api_base = self.api_base_url
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
# create a chat completion
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=self.build_message_list(prompt)
)
print(f"response:{completion.choices[0].message.content}")
print(f"+++++++++++++++++++++++++++++++++++")
return completion.choices[0].message.content
    # Convert the conversation history into a list of role/content messages
def build_message_list(self, query) -> Collection[Dict[str, str]]:
build_message_list: Collection[Dict[str, str]] = []
history = self.history[-self.history_len:] if self.history_len > 0 else []
for i, (old_query, response) in enumerate(history):
user_build_message = _build_message_template()
user_build_message['role'] = 'user'
user_build_message['content'] = old_query
system_build_message = _build_message_template()
system_build_message['role'] = 'system'
system_build_message['content'] = response
build_message_list.append(user_build_message)
build_message_list.append(system_build_message)
user_build_message = _build_message_template()
user_build_message['role'] = 'user'
user_build_message['content'] = query
build_message_list.append(user_build_message)
return build_message_list
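    # Illustrative example (hypothetical values): with self.history = [["Hi", "Hello!"]]
    # and query = "How are you?", build_message_list returns
    # [{'role': 'user', 'content': 'Hi'},
    #  {'role': 'system', 'content': 'Hello!'},
    #  {'role': 'user', 'content': 'How are you?'}]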
def generatorAnswer(self, prompt: str,
history: List[List[str]] = [],
streaming: bool = False):
try:
import openai
            # Not supported yet
openai.api_key = self.api_key
openai.api_base = self.api_base_url
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
# create a chat completion
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=self.build_message_list(prompt)
)
history += [[prompt, completion.choices[0].message.content]]
answer_result = AnswerResult()
answer_result.history = history
answer_result.llm_output = {"answer": completion.choices[0].message.content}
yield answer_result
| [] |
2024-01-10 | hgdumadag/auditassistant | Hello.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
from streamlit.logger import get_logger
from openai import OpenAI
import time
import os
#import json
#from dotenv import load_dotenv
#load_dotenv()
LOGGER = get_logger(__name__)
# Set your OpenAI Assistant ID here
assistant_id = ""
os.environ["assistant_id"] = st.secrets["ASSISTANT_ID"]
assistant_id=st.secrets.ASSISTANT_ID
#assistant_id = os.getenv("ASSISTANT_ID")
# Initialize the OpenAI client (ensure to set your API key in the sidebar within the app)
client = OpenAI(api_key=st.secrets.OPENAI_API_KEY)
# Initialize the session state variables for file IDs and chat control
if "file_id_list" not in st.session_state:
st.session_state["file_id_list"] = []
if "start_chat" not in st.session_state:
st.session_state["start_chat"] = False
if "thread_id" not in st.session_state:
st.session_state["thread_id"] = None
# Set up the Streamlit page with a title and icon
st.set_page_config(page_title="JGSHI Audit Assistant", page_icon=":robot_face:", layout="wide")
st.header(":robot_face: JGSHI Audit Assistant")
# Get the OPENAI API Key
#openai_api_key_env = os.getenv("OPENAI_API_KEY")
openai_api_key_env=""
os.environ["openai_api_key_env"] = st.secrets["OPENAI_API_KEY"]
openai_api_key = st.sidebar.text_input(
'OpenAI API Key', placeholder='sk-', value=openai_api_key_env)
openai_api_key = openai_api_key_env
url = "https://platform.openai.com/account/api-keys"
st.sidebar.markdown(f"Get your API key [here]({url})")
if openai_api_key:
OpenAI.api_key = openai_api_key
# Button to start the chat session
if st.sidebar.button("Start Chat"):
st.session_state["start_chat"] = True
# Create a thread once and store its ID in the session state
thread = client.beta.threads.create()
st.session_state.thread_id = thread.id
st.write("thread id: ", thread.id)
# Define the function to process messages with citations
def process_message_with_citations(message):
message_content = message.content[0].text.value
return message_content
# Only show the chat interface if the chat has been started
if st.session_state.start_chat:
# st.write(getStockPrice("AAPL"))
if "messages" not in st.session_state:
st.session_state.messages = []
# Display existing messages in the chat
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Chat input for the user
if prompt := st.chat_input("How can I help you?"):
#Add user message to the state and display it
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
# Add the user's message to the existing thread
client.beta.threads.messages.create(
thread_id=st.session_state.thread_id,
role="user",
content=prompt
)
# Create a run with additional instructions
run = client.beta.threads.runs.create(
thread_id=st.session_state.thread_id,
assistant_id=assistant_id,
instructions="As an Expert Internal Auditor Assistant, you possess extensive knowledge and practical experience in various audit processes, including financial, operational, compliance, and information technology audits. Your expertise encompasses a deep understanding of audit standards, risk management, and control processes. You are adept at identifying potential risks, inefficiencies, and areas for improvement within an organization's operations. You are an expert in the nternational Professional Practices Framework."
)
# Poll for the run to complete and retrieve the assistant's messages
while run.status not in ["completed" , "failed"]:
st.sidebar.write(run.status)
# if run.status == "requires action":
# handle_function(run)
time.sleep(2)
run = client.beta.threads.runs.retrieve(
thread_id=st.session_state.thread_id,
run_id=run.id
)
st.sidebar.write(run.status)
if run.status == "failed":
# Log the error message
st.error(f"Run failed with error: {run.error_message}")
# You can take additional actions here based on the error
# Retrieve messages added by the assistant
messages = client.beta.threads.messages.list(
thread_id=st.session_state.thread_id
)
# Process and display assistant's messages
assistant_messages_for_run = [
message for message in messages
if message.run_id == run.id and message.role == "assistant"
]
for message in assistant_messages_for_run:
full_response = process_message_with_citations(message)
st.session_state.messages.append({"role": "assistant", "content": full_response})
with st.chat_message("assistant"):
st.markdown(full_response, unsafe_allow_html=True)
#######
#def run():
# st.set_page_config(
# page_title="Welcome to the Audit Assistant Page",
# page_icon="👋",
# )
#
#if __name__ == "__main__":
# run()
##### | [] |
2024-01-10 | wbsg-uni-mannheim/TabAnnGPT | TAusingLLMs~code~generate_label_definitions_hf.py | import tqdm
import random
from dotenv import dotenv_values
from langchain import PromptTemplate
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from utils import load_cpa_dataset, load_cta_dataset, save_pickle_file, textada_embeddings
import os
if __name__ == "__main__":
# Load env file with API KEY using full path
config = dotenv_values("/full/path/to/file/key.env")
os.environ['OPENAI_API_KEY'] = config["OPENAI_API_KEY"]
OPENAI_API_KEY = config["OPENAI_API_KEY"]
# StableBeluga7B
model_name = "stabilityai/StableBeluga-7B"
mod = "stablebeluga7b"
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir="hf_cache/")
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto", cache_dir="hf_cache/")
# SOLAR
# model_name = "upstage/SOLAR-0-70b-16bit"
# mod = "solar"
# tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir="hf_cache/")
# model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto", cache_dir="/ceph/kkorini/hf_cache/", temperature=0, do_sample=True)
system_messages_tasks = ["S1", "S2", "S3", "S4", "S5"]
system_messages_content = {
"S1": "Generate knowledge facts about some terms that can help in the task of column type annotation. Reply with only one sentence! ",
"S5": "Generate knowledge facts about some terms that can help in the task of column type annotation and one example value for the term. Reply with only one sentence!",
"S2": "Generate definitions about some terms that can help in the task of column type annotation. Reply with only one sentence!",
"S3": "Generate descriptions about some terms that can help in the task of column type annotation. Reply with only one sentence!",
"S4": "Perform column type annotation, e.g. annotate each column of a table with a label that captures the meaning of the values of the column. As preparation for this task, you are asked to generate definitions of the labels. The definitions should be helpful for distinguishing different labels. Reply with only one sentence!",
}
instruction_messages = ["I1", "I2"]
instruction_messages_content = {
"I1": "1. Look at the input given to you and make tables out of them. 2. The first row of each table are the column types of each column. 3. Look at their statistical and semantic characteristics of the columns. 4.Generate knowledge for the required term by looking at the whole table. 5. Do not generate specific knowledge for each of the columns. 6.Reply only with knowledge facts not examples.",
"I2": "1. Look at the input given to you and make tables out of them. 2. The first row of each table are the column types of each column. 3. Look at their statistical and semantic characteristics of the columns. 4.Generate a definition of the term by looking at the whole table. 5. Do not generate specific knowledge for each of the columns. 6.Reply only with knowledge facts not examples.",
}
# A-prompts
general_prompts = ["A1", "A2", "A3", "A4", "A5", "A6"]
general_prompts_content = {
"A1": "Generate some knowledge facts about the term ",
"A2": "How can you distinguish if some values are about the term ",
"A3": "Generate some rules to follow if you need to decide if some values are about the term ",
"A4": "Generate some instructions to follow if you need to decide if some values are about the term ",
"A5": "What do you know about the term ",
"A6": "Generate some instructions to follow if you need to decide if some values may belong to the term ",
}
# B-prompts
table_prompts = ["TB1", "TB2", "TB3", "TB4", "TB5", "TB6", "TB7"]
table_prompts_content = {
"TB1": "Generate some knowledge facts about the term ",
"TB2": "What characteristics can you learn about the term ",
"TB3": "Learn about the term ",
"TB4": "Generate some rules to follow if you need to decide if some values are about the term ",
"TB5": "Generate some instructions to follow if you need to decide if some values are about the term ",
"TB6": "What semantic characteristics and statistics can you learn about the term ",
"TB7": "What value patterns can you learn about the term ",
}
a_template = """Task: {task}
{gen}{label}."""
tb_template = """Task: {task}
Instructions: {instructions}
{gen}{label} using the following examples:
{examples}
What did you learn about {label}?"""
# CTA generation
for dataset in ["sotabv2", "t2dv2-webtables","sportstables"]:
examples, labels, test_table_type_labels, train_examples, train_example_labels, train_table_type_labels, labels_to_text = load_cta_dataset(dataset,"-for-kg")
all_labels = [labels_to_text[l] for l in labels_to_text]
# Run A prompts with combination of S messages:
for system_mess in system_messages_tasks:
for g in general_prompts:
print(f"{system_mess}_{g}_prompt_knowledge")
if f"{system_mess}_{g}_prompt_knowledge.pkl" not in os.listdir(f"knowledge/{mod}/{dataset}/"):
prompts = []
prompt = PromptTemplate(template=a_template, input_variables=['task', 'label', 'gen'])
for label in all_labels:
text_prompt = prompt.format(task=system_messages_content[system_mess], gen=general_prompts_content[g], label=label)
prompts.append(text_prompt)
model_answers = []
for prompt in tqdm.tqdm(prompts, total=len(prompts)):
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
model_answers.append(tokenizer.decode(output[0], skip_special_tokens=True))
save_pickle_file(f"knowledge/{mod}/{dataset}/{system_mess}_{g}_prompt_knowledge.pkl", model_answers)
save_pickle_file(f"knowledge/{mod}/{dataset}/{system_mess}_{g}_prompt_knowledge-prompts.pkl", prompts)
definitions = [answer.replace(prompts[i],"") for i, answer in enumerate(model_answers)]
save_pickle_file(f"embeddings/{mod}/{system_mess}_{g}_knowledge_embeddings_{dataset}.pkl", textada_embeddings(definitions, OPENAI_API_KEY))
else:
print(f"knowledge/{mod}/{dataset}/{system_mess}_{g}_prompt_knowledge.pkl")
# Run B prompts with combination of S and I messages:
for system_mess in system_messages_tasks:
for instructions in instruction_messages:
for tab in table_prompts:
print(f"{system_mess}_{instructions}_{tab}_prompt_knowledge")
if f"{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl" not in os.listdir(f"knowledge/{mod}/{dataset}/"):
prompts = []
prompt = PromptTemplate(template=tb_template, input_variables=['task', 'instructions', 'label', 'gen', 'examples'])
for label in all_labels:
random_examples = """"""
for i in range(0,3):
index = random.choice([j for j, e in enumerate(train_example_labels) if label in e])
random_examples += f"""{train_examples[index]}\n"""
random_examples = random_examples.strip()
text_prompt = prompt.format(task=system_messages_content[system_mess], gen=table_prompts_content[tab], instructions=instruction_messages_content[instructions], label=label, examples=random_examples)
prompts.append(text_prompt)
model_answers = []
for prompt in tqdm.tqdm(prompts, total=len(prompts)):
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
model_answers.append(tokenizer.decode(output[0], skip_special_tokens=True))
save_pickle_file(f"knowledge/{mod}/{dataset}/{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl", model_answers)
save_pickle_file(f"knowledge/{mod}/{dataset}/{system_mess}_{instructions}_{tab}_prompt_knowledge-prompts.pkl", prompts)
definitions = [answer.replace(prompts[i],"") for i, answer in enumerate(model_answers)]
save_pickle_file(f"embeddings/{mod}/{system_mess}_{instructions}_{tab}_knowledge_embeddings_{dataset}.pkl", textada_embeddings(definitions, OPENAI_API_KEY))
else:
print(f"knowledge/{mod}/{dataset}/{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl")
# CPA generation
system_messages_tasks = ["S1", "S2", "S3", "S4", "S5"]
system_messages_content = {
"S1": "Generate knowledge facts about some terms that can help in the task of column relationship prediction. Reply with only one sentence! ",
"S5": "Generate knowledge facts about some terms that can help in the task of column relationship prediction and one example value for the term. Reply with only one sentence!",
"S2": "Generate definitions about some terms that can help in the task of column relationship prediction. Reply with only one sentence!",
"S3": "Generate descriptions about some terms that can help in the task of column relationship prediction. Reply with only one sentence!",
"S4": "Perform column property annotation, e.g. annotate the relationships of columns with a label that captures the relationship. As preparation for this task, you are asked to generate definitions of the relationship labels. The definitions should be helpful for distinguishing different labels. Reply with only one sentence!",
}
instruction_messages = ["I1", "I2"]
instruction_messages_content = {
"I1": "1. Look at the input given to you and make tables out of them. 2. The first row of each table are the column relationships of each column with the first column of the table. 3. Look at their statistical and semantic characteristics of the columns. 4.Generate knowledge for the required term by looking at the whole table. 5. Do not generate specific knowledge for each of the columns. 6.Reply only with knowledge facts not examples.",
"I2": "1. Look at the input given to you and make tables out of them. 2. The first row of each table are the column relationships of each column with the first column of the table. 3. Look at their statistical and semantic characteristics of the columns. 4.Generate a definition for the required term by looking at the whole table. 5. Do not generate specific knowledge for each of the columns. 6.Reply only with knowledge facts not examples.",
}
for dataset in ["sotabv2", "t2dv2-webtables"]:
examples, labels, test_table_type_labels, train_examples, train_example_labels, train_table_type_labels, labels_to_text, text_to_label, labels_joined, train, test = load_cpa_dataset(dataset,"-for-kg",False)
all_labels = [labels_to_text[l] for l in labels_to_text]
# Run A prompts with combination of S messages:
for system_mess in system_messages_tasks:
for g in general_prompts:
print(f"cpa-{system_mess}_{g}_prompt_knowledge")
if f"cpa-{system_mess}_{g}_prompt_knowledge.pkl" not in os.listdir(f"knowledge/{mod}/{dataset}/"):
prompts = []
prompt = PromptTemplate(template=a_template, input_variables=['task', 'label', 'gen'])
for label in all_labels:
text_prompt = prompt.format(task=system_messages_content[system_mess], gen=general_prompts_content[g], label=label)
prompts.append(text_prompt)
model_answers = []
for prompt in tqdm.tqdm(prompts, total=len(prompts)):
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
model_answers.append(tokenizer.decode(output[0], skip_special_tokens=True))
save_pickle_file(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{g}_prompt_knowledge.pkl", model_answers)
save_pickle_file(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{g}_prompt_knowledge-prompts.pkl", prompts)
definitions = [answer.replace(prompts[i],"") for i, answer in enumerate(model_answers)]
save_pickle_file(f"embeddings/{mod}/cpa-{system_mess}_{g}_knowledge_embeddings_{dataset}.pkl", textada_embeddings(definitions, OPENAI_API_KEY))
else:
print(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{g}_prompt_knowledge.pkl")
# Run B prompts with combination of S and I messages:
for system_mess in system_messages_tasks:
for instructions in instruction_messages:
for tab in table_prompts:
print(f"cpa-{system_mess}_{instructions}_{tab}_prompt_knowledge")
if f"cpa-{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl" not in os.listdir(f"knowledge/{mod}/{dataset}/"):
prompts = []
prompt = PromptTemplate(template=tb_template, input_variables=['task', 'instructions', 'label', 'gen', 'examples'])
for label in all_labels:
random_examples = """"""
for i in range(0,3):
index = random.choice([j for j, e in enumerate(train_example_labels) if label in e])
random_examples += f"""{train_examples[index]}\n"""
random_examples = random_examples.strip()
text_prompt = prompt.format(task=system_messages_content[system_mess], gen=table_prompts_content[tab], instructions=instruction_messages_content[instructions], label=label, examples=random_examples)
prompts.append(text_prompt)
model_answers = []
for prompt in tqdm.tqdm(prompts, total=len(prompts)):
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
model_answers.append(tokenizer.decode(output[0], skip_special_tokens=True))
save_pickle_file(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl", model_answers)
save_pickle_file(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{instructions}_{tab}_prompt_knowledge-prompts.pkl", prompts)
definitions = [answer.replace(prompts[i],"") for i, answer in enumerate(model_answers)]
save_pickle_file(f"embeddings/{mod}/cpa-{system_mess}_{instructions}_{tab}_knowledge_embeddings_{dataset}.pkl", textada_embeddings(definitions, OPENAI_API_KEY))
else:
print(f"knowledge/{mod}/{dataset}/cpa-{system_mess}_{instructions}_{tab}_prompt_knowledge.pkl") | [
"Task: {task}\n Instructions: {instructions}\n {gen}{label} using the following examples:\n\n {examples}\n What did you learn about {label}?",
"['TB1', 'TB2', 'TB3', 'TB4', 'TB5', 'TB6', 'TB7']",
"instructions",
"gen",
"{'A1': 'Generate some knowledge facts about the term ', 'A2': 'How can you distinguish if some values are about the term ', 'A3': 'Generate some rules to follow if you need to decide if some values are about the term ', 'A4': 'Generate some instructions to follow if you need to decide if some values are about the term ', 'A5': 'What do you know about the term ', 'A6': 'Generate some instructions to follow if you need to decide if some values may belong to the term '}",
"{'TB1': 'Generate some knowledge facts about the term ', 'TB2': 'What characteristics can you learn about the term ', 'TB3': 'Learn about the term ', 'TB4': 'Generate some rules to follow if you need to decide if some values are about the term ', 'TB5': 'Generate some instructions to follow if you need to decide if some values are about the term ', 'TB6': 'What semantic characteristics and statistics can you learn about the term ', 'TB7': 'What value patterns can you learn about the term '}",
"[]",
"['A1', 'A2', 'A3', 'A4', 'A5', 'A6']",
"Task: {task}\n {gen}{label}."
] |
2024-01-10 | sergeybok/BaseBot | scripts~demo_why_bot.py | from basebot import BaseBotWithLocalDb, BaseBot
from basebot import TheMessage, MessageWrapper
import openai
class WhyBot(BaseBotWithLocalDb):
def help(self) -> str:
return "I just respond back to your messages and follow it with Why? You need to modify me to make me do something interesting."
def respond(self, message: MessageWrapper) -> MessageWrapper:
if message.get_text():
# context_messages = self.get_message_context(message, limit=5, descending=False)
response_text = message.get_text() +'? Why?'
resp_message = self.get_message_to(user_id=message.get_sender_id())
resp_message.set_text(response_text)
return resp_message
return {}
# initialize the bot, or bots
bot = WhyBot()
# Start the bot
app = BaseBot.start_app(bot)
# you can provide as many bots as you'd like as arguments
# to this function as long as they are all different classes
# example:
# app = BaseBot.start_app(bot, other_bot, other_other_bot)
| [] |
2024-01-10 | sergeybok/BaseBot | scripts~demo_chatgpt.py | from basebot import BaseBotWithLocalDb, BaseBot
from basebot import TheMessage, MessageWrapper
import openai
class ChatGPTBot(BaseBotWithLocalDb):
def help(self) -> str:
return "I am a wrapper around ChatGPT. Ask me anything and I will do my best to respond."
def respond(self, message: MessageWrapper) -> MessageWrapper:
if message.get_text():
# get previous messages, oldest message first
context_messages = self.get_message_context(message, limit=5, descending=False)
chatgpt_messages = []
for msg in context_messages:
if msg.get_sender_id() == message.get_sender_id() and msg.get_text():
chatgpt_messages.append({'role': 'user', 'content': msg.get_text()})
elif msg.get_text():
chatgpt_messages.append({'role': 'assistant', 'content': msg.get_text()})
# add current message last
chatgpt_messages.append({'role': 'user', 'content': message.get_text()})
# Call OpenAI API (this will fail without API key)
chatgpt_response = openai.ChatCompletion.create(model="gpt-3.5-turbo",messages=chatgpt_messages)
response_text = chatgpt_response['choices'][0]['message']['content']
resp_message = self.get_message_to(user_id=message.get_sender_id())
resp_message.set_text(response_text)
return resp_message
return {}
# initialize the bot, or bots
bot = ChatGPTBot()
# Start the bot
app = BaseBot.start_app(bot)
# you can provide as many bots as you'd like as arguments
# to this function as long as they are all different classes
# example:
# app = BaseBot.start_app(bot, other_bot, other_other_bot)
| [] |
2024-01-10 | Uradouby/AutocastBU | fewShotModel.py | import openai
import numpy as np
import sklearn
from sklearn.metrics import pairwise
from sklearn import preprocessing
from difflib import SequenceMatcher
from dataprocess import get_questions, split_questions_by_type
import Levenshtein
openai.api_key="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"
codex_engine = "code-davinci-002"
few_shot_max_tokens = 256
engine_temperature = 0
engine_topP = 0
embedding_model = "text-embedding-ada-002"
embedding_encoding = "cl100k_base" # this the encoding for text-embedding-ada-002
max_tokens = 8000 # the maximum for text-embedding-ada-002 is 8191
similarity_cnt=3 #the number of similar questions
def get_embedding(text, model="text-embedding-ada-002"):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
def lev_dis(a, b):
return Levenshtein.distance(a,b)
class fewShotModel():
def __init__(self, questions):
self.questions = questions
'''
self.Q_embeddings = []
for q in self.questions:
self.Q_embeddings.append(get_embedding(q['question'], model=embedding_model))
'''
self.similar = []
self.input = []
self.output = []
def predict(self, q):
input = ''
distance = []
'''
embedding_question = get_embedding(q)
for embedding in self.Q_embeddings:
distance.append(sklearn.metrics.pairwise.cosine_distances(X=embedding_question, Y=embedding))
'''
for qs in self.questions:
distance.append(lev_dis(qs["question"], q["question"]))
print(distance)
arg_distance = np.argsort(distance)
print(arg_distance)
for i in range(similarity_cnt):
index = arg_distance[i]
input = input+str(self.questions[index]['question'])+str(self.questions[index]['answer'])+'\n\n'
input=input+str(q['question'])
print(input)
few_shot_output = openai.Completion.create(engine=codex_engine,
prompt=input,
max_tokens=few_shot_max_tokens,
temperature=engine_temperature,
top_p=engine_topP)['choices'][0]['text']
#embedding_ans = get_embedding(few_shot_output)
res = []
for i in range(len(q['choices'])):
res.append(lev_dis(q['choices'][i], few_shot_output))
'''
embedding_choice=get_embedding(q['choices'][i])
res.append(1-sklearn.metrics.pairwise.cosine_distances(X=embedding_choice, Y=embedding_ans))
'''
res=np.array(res)
normalized_res = preprocessing.normalize([res])
return normalized_res[0]
def eval(self):
loss = 0
accuracy = 0
for q in self.questions:
predict = self.predict(q)
ground_true = ord(q['answer']) - ord('A')
ans = np.argmax(predict)
if ans == ground_true:
accuracy += 1
for i in range(len(predict)):
if i == ground_true:
loss += (1-predict[i])**2
else:
loss += predict[i]**2
loss = loss/len(self.questions)
accuracy = accuracy/len(self.questions)
return loss, accuracy
questions = get_questions()
mc_qs, num_qs, tf_qs = split_questions_by_type(questions)
fewShot_model = fewShotModel(questions=mc_qs)
loss, accuracy = fewShot_model.eval()
| [
"INPUT"
] |
2024-01-10 | bxdgfw/LLM-XAI | graph_desc.py | # Return the LLM's description of the graph for a single EBM feature
import t2ebm
from langchain.memory import ConversationBufferMemory
from langchain import LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
def llm2graph_desc(
llm,
ebm,
feature_idx,
dataset_description = None,
y_axis_description = None,
query = "Now please provide a brief, at most 7 sentence summary of the influence of the feature on the outcome."
):
prefix = """You are an expert statistician and data scientist.
You interpret global explanations produced by a generalized additive model (GAM). GAMs produce explanations in the form of graphs that contain the effect of a specific input feature.\n
"""
if dataset_description is None or dataset_description == '':
prefix +="""You will be given graphs from the model, and the user will ask you questions about the graphs."""
else:
prefix +="""The user will first provide a general description of the dataset. Then you will be given graphs from the model, and the user will ask you questions about the graphs.\n"""
prefix +="""\n\nAnswer all questions to the best of your ability, combining both the data contained in the graph"""
if dataset_description is not None and len(dataset_description) > 0:
prefix +=""", the data set description you were given, and your knowledge about the real world."""
else:
prefix +=""" and your knowledge about the real world."""
prefix +="""Graphs will be presented as a JSON object with keys representing the x-axis and values representing the y-axis. For continuous features, the keys are intervals that represent ranges where the function predicts the same value. For categorical features, each key represents a possible value that the feature can take."""
if y_axis_description is not None and len(y_axis_description) > 0:
prefix +=y_axis_description
prefix += """\n\nThe user will provide graphs in the following format:
- The name of the feature depicted in the graph
- The type of the feature (continuous, categorical, or boolean)
- Mean values
- Lower bounds of confidence interval
- Upper bounds of confidence interval
"""
if dataset_description is not None and len(dataset_description) > 0:
prefix += dataset_description + """\nThe description of dataset ends.\n"""
graph = t2ebm.graphs.extract_graph(ebm, feature_idx)
graph = t2ebm.graphs.graph_to_text(graph)
graph = graph.replace("{", "(").replace("}", ")")
suffix="""\nBegin!
Human: Consider the following graph from the model.\n"""
suffix+=graph
suffix+="""\nAI: I have obtained the information of the graph. You can ask me questions next, and I will answer based on the information,my knowledge about the real world, and maybe the data description.
Human: {query}
AI:"""
template=prefix+suffix
prompt = PromptTemplate(input_variables=["query"], template=template)
chain = LLMChain(
llm = llm,
prompt=prompt,
verbose=False,
)
graph_description = chain.run(query=query)
return graph_description | [
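# Illustrative usage (placeholder names; assumes a LangChain chat model `llm` and a
# fitted interpret-ml EBM `ebm`):
#   description = llm2graph_desc(llm, ebm, feature_idx=0,
#                                dataset_description="Adult census income data")
#   print(description)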
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | bxdgfw/LLM-XAI | llm2ebm.py | import t2ebm
from langchain import LLMChain, PromptTemplate
from graph_desc import llm2graph_desc
# Build a text list of each EBM feature and its corresponding importance score
def feature_importances_to_text(ebm):
feature_importances = ""
for feature_idx, feature_name in enumerate(ebm.feature_names_in_):
feature_importances += (
f"{feature_name}: {ebm.term_importances()[feature_idx]:.2f}\n"
)
return feature_importances
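# Illustrative output (hypothetical feature names and scores):
#   "age: 0.42\nbmi: 0.17\nsmoker: 0.55\n"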
# Return an LLMChain for conversing with the EBM
def llm2ebm(
llm,
ebm,
memory,
dataset_description = None,
y_axis_description = None,
):
feature_importances = feature_importances_to_text(ebm)
graphs = []
graph_descriptions = []
    for feature_index in range(len(ebm.feature_names_in_)):  # collect every graph from the EBM
graphs.append(t2ebm.graphs.extract_graph(ebm, feature_index))
graphs = [t2ebm.graphs.graph_to_text(graph) for graph in graphs]
graph_descriptions = [llm2graph_desc(llm,ebm,idx,dataset_description=dataset_description,y_axis_description=y_axis_description) for idx in range(len(ebm.feature_names_in_)) ]
graph_descriptions = "\n\n".join(
[
ebm.feature_names_in_[idx] + ": " + graph_description
for idx, graph_description in enumerate(graph_descriptions)
]
)
prefix = """You are an expert statistician and data scientist.
Your task is to provide an overall summary of a Generalized Additive Model (GAM) and answer the human's questions about it. The model consists of different graphs that contain the effect of a specific input feature.
You will be given:
- The global feature importances of the different features in the model.
- Summaries of the graphs for the different features in the model. There is exactly one graph for each feature in the model.
"""
if dataset_description is None or dataset_description == '':
prefix += "\n\nThese inputs will be given to you by the user."
else:
prefix += "\n\nThe user will first provide a general description of what the dataset is about. Then you will be given the feature importance scores and the summaries of the individual features."
suffix = ""
if dataset_description is not None and len(dataset_description) > 0:
suffix += "Human: Here is the general description of the data set\n" + dataset_description
suffix += "\nAI: Thanks for this general description of the data set. Now please provide the global feature importance.\n"
suffix += "Human: Here are the global feature importaces.\n\n" + feature_importances + "\nAI: Thanks. Now please provide the descriptions of the different graphs."
suffix += "Human: Here are the descriptions of the different graphs.\n\n" + graph_descriptions
suffix+="""\nAI: Thanks. You can ask me questions next.
{history}
Human: {query}
AI:"""
template=prefix+suffix
prompt = PromptTemplate(input_variables=["history","query"], template=template)
chain = LLMChain(
llm = llm,
prompt=prompt,
verbose=False,
memory=memory,
)
return chain
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | bxdgfw/LLM-XAI | llm2graph.py | # An LLMChain for question answering over the graph of a specific EBM feature
import t2ebm
from langchain import LLMChain, PromptTemplate
def llm2graph(
llm,
ebm,
feature_idx,
memory,
dataset_description = None,
y_axis_description = None,
):
    # Return a chain that queries the LLM about the graph of the feature with index feature_idx
prefix = """You are an expert statistician and data scientist.
You interpret global explanations produced by a generalized additive model (GAM). GAMs produce explanations in the form of graphs that contain the effect of a specific input feature.\n
"""
if dataset_description is None or dataset_description == '':
prefix +="""You will be given graphs from the model, and the user will ask you questions about the graphs."""
else:
prefix +="""The user will first provide a general description of the dataset. Then you will be given graphs from the model, and the user will ask you questions about the graphs.\n"""
prefix +="""\n\nAnswer all questions to the best of your ability, combining both the data contained in the graph"""
if dataset_description is not None and len(dataset_description) > 0:
prefix +=""", the data set description you were given, and your knowledge about the real world."""
else:
prefix +=""" and your knowledge about the real world."""
prefix +="""Graphs will be presented as a JSON object with keys representing the x-axis and values representing the y-axis. For continuous features, the keys are intervals that represent ranges where the function predicts the same value. For categorical features, each key represents a possible value that the feature can take."""
if y_axis_description is not None and len(y_axis_description) > 0:
prefix +=y_axis_description
prefix += """\n\nThe user will provide graphs in the following format:
- The name of the feature depicted in the graph
- The type of the feature (continuous, categorical, or boolean)
- Mean values
- Lower bounds of confidence interval
- Upper bounds of confidence interval
"""
if dataset_description is not None and len(dataset_description) > 0:
prefix += dataset_description + """\nThe description of dataset ends.\n"""
graph = t2ebm.graphs.extract_graph(ebm, feature_idx)
graph = t2ebm.graphs.graph_to_text(graph)
graph = graph.replace("{", "(").replace("}", ")")
suffix="""\nBegin!
Human: Consider the following graph from the model.\n"""
suffix+=graph
suffix+="""\nAI: I have obtained the information of the graph. You can ask me questions next, and I will answer based on the information,my knowledge about the real world, and maybe the data description.
{history}
Human: {query}
AI:"""
template=prefix+suffix
prompt = PromptTemplate(input_variables=["history","query"], template=template)
chain = LLMChain(
llm = llm,
prompt=prompt,
verbose=False,
memory=memory,
)
return chain
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | bxdgfw/LLM-XAI | ebm_agent.py | from langchain.agents import ZeroShotAgent
from langchain.agents import AgentExecutor
from langchain.chains.llm import LLMChain
from langchain.memory import ConversationBufferMemory
import markdown
from IPython.display import Image, display
from llm2ebm import feature_importances_to_text
from tool import get_tools
from prompt import suffix_no_df,suffix_with_df,get_prefix
# Render the images referenced in a Markdown-formatted string
def md2img(text):
    # Convert the Markdown text to HTML with the markdown library
html_output = markdown.markdown(text)
    # Parse the image tags in the HTML and display the images
def process_image_tags(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
        # Find all image tags
img_tags = soup.find_all('img')
        # Iterate over the image tags and display each image
for img in img_tags:
url = img['src']
alt_text = img.get('alt', '')
            # Display the image with the Image class from IPython.display
display(Image(url=url, alt=alt_text))
    # Parse the image tags and display the images
process_image_tags(html_output)
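# Illustrative usage (the URL is a placeholder): md2img("")
# renders the referenced image inline in a Jupyter notebook.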
def get_agent(llm,ebm,df = None,dataset_description = None,y_axis_description = None):
    # Gather the EBM attributes we need
feature_importances = feature_importances_to_text(ebm)
    global_explanation = ebm.explain_global().data
    # Build the prefix part of the prompt
prefix = get_prefix(ebm,feature_importances,dataset_description,y_axis_description)
    # Load the tools
tools=get_tools(ebm)
python_tool = tools[0]
    # Build the suffix part of the prompt
if df is not None:
python_tool.python_repl.locals={"df": df,"ft_graph":global_explanation}
input_variables=["input", "chat_history", "agent_scratchpad","df_head"]
suffix = suffix_with_df
else:
python_tool.python_repl.locals={"ft_graph":global_explanation}
input_variables=["input", "chat_history", "agent_scratchpad"]
suffix = suffix_no_df
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
)
memory = ConversationBufferMemory(memory_key="chat_history")
if 'df_head' in input_variables:
prompt = prompt.partial(df_head=str(df.head().to_markdown()))
llm_chain = LLMChain(llm=llm, prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain,tools=tools, verbose=True)
return AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory,handle_parsing_errors=True
)
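# Illustrative usage (placeholder names; assumes a LangChain chat model `llm`, a fitted
# EBM `ebm`, and optionally the training DataFrame `df`):
#   agent_executor = get_agent(llm, ebm, df=df, dataset_description="Adult census income data")
#   agent_executor.run("Which feature contributes most to the prediction?")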
| [] |
2024-01-10 | A-Roucher/benchmark_agents | evaluation.py | import json
from chat_wrapper import HuggingFaceChatWrapper
from langchain.llms import HuggingFaceEndpoint
from langchain.prompts.chat import ChatPromptTemplate
from tqdm.notebook import tqdm
def build_evaluator(hf_endpoint_url: str) -> tuple:
"""
Build an evaluator language model using the given Hugging Face endpoint URL.
Args:
hf_endpoint_url (str): The URL of the Hugging Face endpoint.
Returns:
Tuple: A tuple containing the evaluator chat model and the correctness prompt template.
"""
eval_chat_model = HuggingFaceEndpoint(
endpoint_url=hf_endpoint_url,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"do_sample": False,
"repetition_penalty": 1.03,
},
)
return eval_chat_model
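# Hedged usage sketch (the endpoint URL is a placeholder for a deployed text-generation
# endpoint, not a real address):
# evaluator_llm = build_evaluator("https://<your-endpoint>.endpoints.huggingface.cloud")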
def evaluate_answers(
results_path: str,
evaluator: HuggingFaceChatWrapper,
evaluator_name: str,
eval_prompt_template: ChatPromptTemplate,
) -> None:
"""
Runs the evaluation process on a file of results. Used to perform an evaluation with a specific evaluator model.
Args:
results_path (str): The path to the results file.
evaluator (HuggingFaceChatWrapper): The evaluator object used to evaluate the agent's response.
eval_prompt_template (ChatPromptTemplate): The template for the evaluation prompt.
"""
with open(results_path, 'r') as f:
results = json.load(f)
for experiment in tqdm(results):
if f'eval_score_{evaluator_name}' in experiment:
continue
eval_prompt = eval_prompt_template.format_messages(
instruction=experiment['question'],
response=experiment['prediction'],
reference_answer=experiment["gt_answer"],
)
eval_result = evaluator.invoke(eval_prompt)
feedback, score = [
item.strip() for item in eval_result.content.split("[RESULT]")
]
experiment[f'eval_score_{evaluator_name}'] = score
experiment[f'eval_feedback_{evaluator_name}'] = feedback
with open(results_path, 'w') as f:
json.dump(results, f)
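# Hedged usage sketch (illustrative only): `results_path` is expected to point at a JSON list
# of dicts with at least 'question', 'prediction', and 'gt_answer' keys, as produced by the
# companion runner script; `eval_prompt` below is an assumed ChatPromptTemplate with
# 'instruction', 'response', and 'reference_answer' variables.
# evaluate_answers("output/gpt4.json", evaluator, "prometheus", eval_prompt)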
| [
"prediction",
"question",
"gt_answer"
] |
2024-01-10 | A-Roucher/benchmark_agents | run_agents.py | import asyncio
from datetime import datetime
from typing import Any, Dict, List, Optional
import json
import pandas as pd
from tqdm import tqdm
from datasets import Dataset
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.output_parsers import (
ReActJsonSingleInputOutputParser,
OpenAIFunctionsAgentOutputParser
)
from langchain.llms import HuggingFaceEndpoint
from langchain.chat_models import ChatOpenAI
from langchain.tools.render import render_text_description_and_args, format_tool_to_openai_function
from langchain.agents.format_scratchpad import format_to_openai_function_messages, format_log_to_str
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder
)
from langchain.tools import WikipediaQueryRun, tool
from langchain.utilities import WikipediaAPIWrapper
from chat_wrapper import HuggingFaceChatWrapper, BaseChatModel
from prompts import HUMAN_PROMPT, SYSTEM_PROMPT
wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())
@tool
def search_wikipedia(query: str) -> str:
"""Searches Wikipedia for a query. This will not be relevant for the latest information, but it can be useful for historical knowledge."""
return wikipedia.run(query)
def init_tools_with_llm(llm: BaseChatModel) -> List[tool]:
tools = load_tools(["serpapi", "llm-math"], llm=llm)
# Rename tools in the same format used by other tools
tools[0].name = "search"
tools[1].name = "calculator"
tools.append(search_wikipedia)
return tools
def build_openai_agent(model_id: Optional[str] = 'gpt-4-1106-preview'):
llm = ChatOpenAI(model=model_id, temperature=0)
tools = init_tools_with_llm(llm)
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_function_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
return AgentExecutor(
agent=agent,
tools=tools,
verbose=True,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=5,
)
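# Hedged usage sketch: this requires OPENAI_API_KEY and SERPAPI_API_KEY to be set in the
# environment; the question is illustrative.
# agent = build_openai_agent()
# result = agent.invoke({"input": "What is 3 to the power of 7?"})
# print(result["output"])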
def build_hf_agent(hf_endpoint_url: str):
"""
Build a zero-shot ReAct chat agent from HF endpoint.
Args:
hf_endpoint_url (str): The endpoint URL for the Hugging Face model.
Returns:
AgentExecutor: An agent executor object that can be used to run the agent.
"""
# instantiate LLM and chat model
llm = HuggingFaceEndpoint(
endpoint_url=hf_endpoint_url,
task="text-generation",
model_kwargs={
"max_new_tokens": 512,
"do_sample": False,
"repetition_penalty": 1.03,
},
)
chat_model = HuggingFaceChatWrapper(llm=llm)
tools = init_tools_with_llm(llm)
prompt = ChatPromptTemplate.from_messages(
[
HumanMessagePromptTemplate.from_template(SYSTEM_PROMPT+'\nSo, here is my question:'+HUMAN_PROMPT),
]
)
prompt = prompt.partial(
tool_description=render_text_description_and_args(tools),
tool_names=", ".join([t.name for t in tools]),
)
# define the agent
chat_model_with_stop = chat_model.bind(stop=["\nObservation"])
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
}
| prompt
| chat_model_with_stop
| ReActJsonSingleInputOutputParser()
)
return AgentExecutor(
agent=agent,
tools=tools,
verbose=True,
return_intermediate_steps=True,
handle_parsing_errors=True,
max_iterations=5,
)
async def run_agent(
question: str,
ground_truth_answer: str,
agent_executor: AgentExecutor,
agent_name: str,
) -> dict:
"""
Runs the agent on a given question and records the outcome against the ground truth answer.
Args:
question (str): The input question to be evaluated.
ground_truth_answer (str): The ground truth answer for the question.
agent_executor (AgentExecutor): The agent executor object used to run the agent.
agent_name (str): The name used to label this agent's results.
Returns:
dict: A dictionary containing the run results, including the agent name, agent model ID,
question, ground truth answer, prediction, intermediate steps,
tool call parsing error flag, iteration limit exceeded flag, agent error (if any), and start/end timestamps.
"""
start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
# run executor agent
response = await agent_executor.ainvoke({"input": question})
# check for parsing errors which indicate the LLM failed to follow the ReACT format
# this could be due to an issue with the tool calling format or ReACT formatting (i.e. Thought, Action, Observation, etc.)
parsing_error = (
True
if any(
[
"Could not parse LLM output" in step[0].log
for step in response["intermediate_steps"]
]
)
else False
)
# check if iteration limit exceeded
iteration_limit_exceeded = (
True
if "Agent stopped due to iteration limit or time limit." in response["output"]
else False
)
raised_exception = False
except Exception as e:
print('Error on ', agent_executor, question, e)
response = {"output": None, "intermediate_steps": None}
parsing_error = False
iteration_limit_exceeded = False
exception = e
raised_exception = True
end_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# collect results
if response["intermediate_steps"] is not None:
intermediate_steps = [{'tool': step[0].tool, 'tool_input': step[0].tool_input, 'tool_output': step[1]} for step in response["intermediate_steps"]]
else:
intermediate_steps = None
return {
"agent_name": agent_name,
"agent_model_id": agent_executor.dict()["agent"]["runnable"]["middle"][-1][
"bound"
]["_type"],
"question": question,
"gt_answer": ground_truth_answer,
"prediction": response["output"],
"intermediate_steps": intermediate_steps,
"parsing_error": parsing_error,
"iteration_limit_exceeded": iteration_limit_exceeded,
"agent_error": repr(exception) if raised_exception else None,
"start_time": start_time,
"end_time": end_time,
}
async def answer_questions(
dataset: Dataset,
agent_executor: AgentExecutor,
agent_name: str,
) -> List[Dict[str, Any]]:
"""
Evaluates the agent on a given dataset.
Args:
dataset (Dataset): The dataset to test the agent on.
agent_executor (AgentExecutor): The agent executor object used to run the agent.
agent_name (str): The name of the agent model.
Returns:
List[Dict[str, Any]]: A list of dictionaries containing the evaluation results for each example in the dataset.
Each dictionary includes the agent name, agent model ID, question, ground truth answer, prediction,
intermediate steps, tool call parsing error flag, iteration limit
exceeded flag, agent error (if any), timestamps, and example metadata (task).
"""
try:
with open(f'output/{agent_name}.json', 'r') as f:
results = json.load(f)
except FileNotFoundError:
results = []
results_df = pd.DataFrame(results)
for i, example in tqdm(enumerate(dataset), total=len(dataset)):
if len(results_df) > 0:
if example["question"] in results_df['question'].unique():
continue
# run agent
result = await run_agent(
question=example["question"],
ground_truth_answer=example["answer"],
agent_executor=agent_executor,
agent_name=agent_name,
)
print("Result:", result)
print("True answer:", example["answer"])
# add in example metadata
result.update(
{
"task": example["task"],
}
)
results.append(result)
with open(f'output/{agent_name}.json', 'w') as f:
json.dump(results, f)
return results
async def run_full_tests(
dataset: Dataset,
agents: Dict[str, AgentExecutor],
) -> pd.DataFrame:
"""
Run a full evaluation on the given dataset using multiple agent models.
Args:
dataset (Dataset): The dataset to test on.
agents (Dict[str, AgentExecutor]): A dictionary of agent executors to test on the dataset
Returns:
pd.DataFrame: The evaluation results as a pandas DataFrame.
"""
results = []
tasks = [
answer_questions(
dataset=dataset,
agent_executor=agent_executor,
agent_name=agent_name,
)
for agent_name, agent_executor in agents.items()
]
results = await asyncio.gather(*tasks)
return pd.DataFrame([element for sublist in results for element in sublist])
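# Hedged usage sketch (notebook-style, since run_full_tests is a coroutine; the dataset is
# assumed to expose 'question', 'answer', and 'task' fields as used above, and the endpoint
# URL is a placeholder):
# agents = {
#     "gpt4": build_openai_agent(),
#     "zephyr-7b": build_hf_agent("https://<your-endpoint>.endpoints.huggingface.cloud"),
# }
# results_df = await run_full_tests(dataset, agents)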
| [
"{input}",
"You are a helpful assistant",
"agent_scratchpad",
"Searches Wikipedia for a query. This will not be relevant for the latest information, but it can be useful for historical knowledge.",
"\nSo, here is my question:",
", ",
"PLACEHOLDER\nSo, here is my question:PLACEHOLDER"
] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python
from gym.envs.registration import register
from gym import envs
def RegisterOpenAI_Ros_Env(task_env, max_episode_steps=10000):
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
return: False if the Task_Env wasn't registered, True if it was.
"""
###########################################################################
# MovingCube Task-Robot Envs
result = True
# Cubli Moving Cube
if task_env == 'MovingCubeOneDiskWalk-v0':
print("Import module")
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.moving_cube import one_disk_walk
print("Importing register env")
# We register the Class through the Gym system
register(
id=task_env,
#entry_point='openai_ros:task_envs.moving_cube.one_disk_walk.MovingCubeOneDiskWalkEnv',
entry_point='openai_ros.task_envs.moving_cube.one_disk_walk:MovingCubeOneDiskWalkEnv',
max_episode_steps=max_episode_steps,
)
# Husarion Robot
elif task_env == 'HusarionGetToPosTurtleBotPlayGround-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.husarion.husarion_get_to_position_turtlebot_playground:HusarionGetToPosTurtleBotPlayGroundEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.husarion import husarion_get_to_position_turtlebot_playground
elif task_env == 'FetchTest-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.fetch.fetch_test_task:FetchTestEnv',
max_episode_steps=max_episode_steps,
)
# 50
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_test_task
elif task_env == 'FetchSimpleTest-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_simple_task.FetchSimpleTestEnv',
entry_point='openai_ros.task_envs.fetch.fetch_simple_task:FetchSimpleTestEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_simple_task
elif task_env == 'FetchPickAndPlace-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPickAndPlaceEnv',
entry_point='openai_ros.task_envs.fetch.fetch_pick_and_place_task:FetchPickAndPlaceEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_pick_and_place_task
elif task_env == 'FetchPush-v0':
register(
id=task_env,
# entry_point='openai_ros:task_envs.fetch.fetch_pick_and_place_task.FetchPushEnv',
# entry_point='openai_ros:task_envs.fetch.fetch_push.FetchPushEnv',
entry_point='openai_ros.task_envs.fetch.fetch_push:FetchPushEnv',
max_episode_steps=max_episode_steps,
)
# We have to import the Class that we registered so that it can be found afterwards in the Make
from openai_ros.task_envs.fetch import fetch_push
elif task_env == 'CartPoleStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.cartpole_stay_up.stay_up:CartPoleStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.cartpole_stay_up import stay_up
elif task_env == 'HopperStayUp-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.hopper.hopper_stay_up:HopperStayUpEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.hopper import hopper_stay_up
elif task_env == 'IriWamTcpToBowl-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.iriwam.tcp_to_bowl:IriWamTcpToBowlEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.iriwam import tcp_to_bowl
elif task_env == 'ParrotDroneGoto-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.parrotdrone.parrotdrone_goto:ParrotDroneGotoEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.parrotdrone import parrotdrone_goto
elif task_env == 'SawyerTouchCube-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.sawyer.learn_to_touch_cube:SawyerTouchCubeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sawyer import learn_to_touch_cube
elif task_env == 'ShadowTcGetBall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.shadow_tc.learn_to_pick_ball:ShadowTcGetBallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.shadow_tc import learn_to_pick_ball
elif task_env == 'SumitXlRoom-v0':
register(
id='SumitXlRoom-v0',
entry_point='openai_ros.task_envs.sumit_xl.sumit_xl_room:SumitXlRoom',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.sumit_xl import sumit_xl_room
elif task_env == 'MyTurtleBot2Maze-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_maze:TurtleBot2MazeEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_maze
elif task_env == 'MyTurtleBot2Wall-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot2.turtlebot2_wall:TurtleBot2WallEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot2 import turtlebot2_wall
elif task_env == 'MyTurtleBot2Human-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.human_walks.turtlebot2_human:TurtleBot2HumanEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.human_walks import turtlebot2_human
elif task_env == 'ModRobHumanEnv-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.human_modrob.modrob_human:ModRobHumanEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.human_modrob import modrob_human
elif task_env == 'ModRobSphereEnvMoveIt-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.human_modrob.modrob_sphere_moveit:ModRobSphereEnvMoveIt',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.human_modrob import modrob_sphere_moveit
elif task_env == 'ModRobSafeHumanEnv-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.human_modrob.modrob_safe_human:ModRobSafeHumanEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.human_modrob import modrob_safe_human
elif task_env == 'TurtleBot3World-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.turtlebot3.turtlebot3_world:TurtleBot3WorldEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.turtlebot3 import turtlebot3_world
elif task_env == 'WamvNavTwoSetsBuoys-v0':
register(
id=task_env,
entry_point='openai_ros.task_envs.wamv.wamv_nav_twosets_buoys:WamvNavTwoSetsBuoysEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs.wamv import wamv_nav_twosets_buoys
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
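# Hedged usage sketch (assumes the corresponding ROS/Gazebo stack is up, which these task
# environments require before gym.make() will succeed):
# import gym
# if RegisterOpenAI_Ros_Env('CartPoleStayUp-v0', max_episode_steps=1000):
#     env = gym.make('CartPoleStayUp-v0')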
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~modrob_env_path_following_unsafe.py | import math
import os
import numpy as np
import rospy
import time
import sys
import moveit_commander
import moveit_msgs.msg
from tf.transformations import euler_from_quaternion
from openai_ros import robot_gazebo_env
from std_msgs.msg import Empty
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.srv import DeleteModel
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from controller_manager_msgs.srv import SwitchController
from openai_ros.openai_ros_common import ROSLauncher
from pyquaternion import Quaternion
from custom_robot_msgs.msg import Motion
from custom_robot_msgs.msg import Segment
from custom_robot_msgs.msg import Capsule
from openai_ros.robot_envs.modrob_env_moveit import ModRobEnvMoveIt, MoveModrob
class ModRobEnvPathFollowingUnsafe(robot_gazebo_env.RobotGazeboEnv):
"""Modular robot that uses the failsafe nodelets to perform a path following trajectory planning.
The main idea is that this class only outputs a goal joint position.
The failsafe planning then plans an executable trajectory based on the max vel, acc, and jerk values of the joints.
The failsafe planning can also be used to ensure safety during the execution of the planned trajectory.
"""
def __init__(self, ros_ws_abspath):
"""Initialize a new ModRob (modular robot) environment with path following control.
Puts the robot in an existing simulation world.
To check any topic, we need to have the simulation running, and we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was running already for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List: TODO
*
Actuators Topic List:
* /' + self.robot_name_ + '/new_goal_motion
Args:
* ros_ws_abspath: The absolute path to the catkin_ws
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
self.ros_ws_abspath = ros_ws_abspath
self._init_robot(ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ModRobEnvPathFollowingUnsafe, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
# Unpause the simulation to initialize all sensors and actors.
self.gazebo.unpauseSim()
self.is_collided = False
self._joint_states_topic = '/' + self.robot_name_ + '/joint_states'
self._arm_joint_names = rospy.get_param('/' + self.robot_name_ + '/arm_position_controller/joints')
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber(self._joint_states_topic, JointState, self._joint_state_callback)
self._check_all_sensors_ready()
self._init_joint_publisher()
self._check_publishers_connection()
# Load the transformation matrices from the ROS parameter server for forward kinematics.
self._get_arm_transformation_matrices()
# Pause the simulation after initialization
self.gazebo.pauseSim()
rospy.loginfo("Finished ModRob INIT...")
def _init_robot(self, ros_ws_abspath):
"""Calls launch file of robot."""
self._get_base_pose_parameter()
quat_base = self.convert_pose_to_quaternion(self.base_pose)
(base_r, base_p, base_y) = euler_from_quaternion([quat_base.w, quat_base.x, quat_base.y, quat_base.z])
launch_arg_string = "robot_name:={} x:={} y:={} z:={} roll:={} pitch:={} yaw:={}".format(
self.robot_name_, self.base_pose.position.x, self.base_pose.position.y, self.base_pose.position.z,
base_r, base_p, base_y
)
ROSLauncher(rospackage_name="initialisation",
launch_file_name="init_modrob_unsafe.launch",
ros_ws_abspath=ros_ws_abspath,
launch_arg_string=launch_arg_string)
def _get_base_pose_parameter(self):
"""Load base pose from parameter server."""
# Init robot position
base_pose_position = rospy.get_param('/modrob/base_pose').get("position")
base_pose_orientation = rospy.get_param('/modrob/base_pose').get("orientation")
self.base_pose = self.create_pose([base_pose_position.get("x"), base_pose_position.get("y"), base_pose_position.get("z")], [base_pose_orientation.get("w"), base_pose_orientation.get("x"), base_pose_orientation.get("y"), base_pose_orientation.get("z")])
def _get_arm_transformation_matrices(self):
"""Read the transformation matrices from the parameter server."""
tm_vec = rospy.get_param('/' + self.robot_name_ + '/transformation_matrices')
self.joint_transforamtions = self.vec_to_transformation_matrix(tm_vec)
tool_tm_vec = rospy.get_param('/' + self.robot_name_ + '/tool_transformation_matrix')
self.tool_transformation = self.vec_to_transformation_matrix(tool_tm_vec)[0]
enclosures = rospy.get_param('/' + self.robot_name_ + '/enclosures')
enclosures = np.reshape(enclosures, [len(self.joint_transforamtions), 7])
self.capsules = []
for i in range(len(enclosures)):
cap = Capsule
segment = Segment
segment.p = enclosures[i,0:3]
segment.q = enclosures[i,3:6]
cap.segment = segment
cap.radius = enclosures[i, 6]
self.capsules.append(cap)
def _init_joint_publisher(self):
"""Initialize the joint controller publisher with the joint list.
Relies on joint sensors being published.
"""
self._arm_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/new_goal_motion', Motion, queue_size=100)
self._gripper_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/gripper_position_controller/command', Float64MultiArray, queue_size=10)
def _check_all_systems_ready(self):
"""Check that all the sensors, publishers and other simulation systems are operational."""
self._check_all_sensors_ready()
return True
def _check_all_sensors_ready(self):
"""Perform all sensor checks."""
rospy.logdebug("START ALL SENSORS READY")
self._check_joint_state_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_publishers_connection(self):
"""Check that all the publishers are working"""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._arm_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/arm_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
while self._gripper_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/gripper_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("All joint position controller publishers connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
def _check_joint_state_ready(self):
self.joint_state = None
rospy.logdebug("Waiting for {} to be READY...".format(self._joint_states_topic))
while self.joint_state is None and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._joint_states_topic, JointState, timeout=5.0)
if len(data.velocity) > 0:
self.joint_state = data
rospy.logdebug("Current {} READY=>".format(self._joint_states_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting joint states".format(self._joint_states_topic))
return self.joint_state
def _joint_state_callback(self, data):
# Often, there is an empty joint state message.
if len(data.velocity) > 0:
self.joint_state = data
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_arm_joints(self, joint_positions, error=0.05, timeout=3.0):
"""Control the arm joints of the robot.
The control waits until timeout or desired position reached within a margin of error.
Args:
joint_positions (list or np.array): list of desired joint positions
error (double): absolute error allowed between each desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_names())==len(joint_positions), "Length of desired arm joint positions does not match"
# Create motion message
msg = Motion()
# Not relevant
msg.s = 0
msg.q = joint_positions
# Goal velocity is zero
msg.dq = [0 for _ in range(len(joint_positions))]
# Not relevant
msg.ddq = [0 for _ in range(len(joint_positions))]
self._arm_joint_publisher.publish(msg)
# Wait for a certain amount of time to get to the desired position.
return self.wait_for_joints_to_get_there(joint_positions, error=error, timeout=timeout)
def wait_for_joints_to_get_there(self, desired_joint_positions, error=0.05, timeout=3.0):
"""Wait until target joint position is reached within an error or until the timout is reached.
Set timeout to 0 to wait until joint position is reached.
Args:
* desired_joint_positions (list or np.array): list of desired joint positions
* error (double): absolute error allowed between each desired and reached joint position
* timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_positions())==len(desired_joint_positions), "Length of desired arm joint positions does not match."
time_waiting = 0.0
frequency = 100.0 # Fine tune this parameter.
are_equal = False
is_timeout = False
# ROS will try to keep this frequency of the following loop
rate = rospy.Rate(frequency)
rospy.logdebug("Waiting for joint to get to the position")
success = True
while not are_equal and not is_timeout and not rospy.is_shutdown():
are_equal = self.joints_close(self.get_arm_joint_positions(), desired_joint_positions, error)
rate.sleep()
time_waiting += 1.0 / frequency
if timeout > 1e-5:
is_timeout = time_waiting > timeout
else:
if time_waiting > 10:
# If we waited longer than 10 seconds, reset the robot
rospy.logerr("ROBOT DID NOT REACHED DESIRED POSITION FOR 10 SECONDS!!! RESETTING ROBOT!")
self.full_reset_robot()
success = False
is_timeout = True
rospy.logdebug("Joints are in the desired position with an error of " + str(error))
return success
def full_reset_robot(self):
"""Removes the robot model from simulation and respawns it.
This is necessary because the ROS controllers sometimes die.
"""
# TODO: Test this
self.gazebo.unpauseSim()
# rosnode kill /modrob1/joint_state_publisher /modrob1/controller_spawner /modrob1/robot_state_publisher
os.system("rosnode kill /{}/joint_state_publisher /{}/controller_spawner /{}/robot_state_publisher".format(
self.robot_name_, self.robot_name_, self.robot_name_))
# Delete the robot model
rospy.wait_for_service('/gazebo/delete_model')
try:
delete_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
resp = delete_model(self.robot_name_)
except rospy.ServiceException as e:
rospy.logerr("Delete model service call failed: %s"%e)
resp = False
return
# Respawn the robot model
self._get_base_pose_parameter()
quat_base = self.convert_pose_to_quaternion(self.base_pose)
(base_r, base_p, base_y) = euler_from_quaternion([quat_base.w, quat_base.x, quat_base.y, quat_base.z])
launch_arg_string = "robot_name:={} init_x:={} init_y:={} init_z:={} init_roll:={} init_pitch:={} init_yaw:={}".format(
self.robot_name_, self.base_pose.position.x, self.base_pose.position.y, self.base_pose.position.z,
base_r, base_p, base_y
)
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="put_robot_in_world_path_following.launch",
ros_ws_abspath=self.ros_ws_abspath,
launch_arg_string=launch_arg_string)
# Wait for all systems (joint state) to be ready
# TODO: Test this out
#self._send_initialization()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
def joints_close(self, joint_positions, desired_joint_positions, error):
"""Returns if the joints are in a bound of error to the designated goal."""
return np.allclose(joint_positions, desired_joint_positions, atol=error)
def reinit_sensors(self):
"""
This method is for the tasks so that, when resetting the episode,
the sensor values are forced to be updated with the real data.
<<-- Only needed when reset is set to SIMULATION.
<<-- TODO: Implement this ?
"""
def _send_initialization(self):
"""Send init message to ROS topic
"""
ROSLauncher(rospackage_name="initialisation",
launch_file_name="init.launch",
ros_ws_abspath=self.ros_ws_abspath)
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start of an episode."""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given."""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation."""
raise NotImplementedError()
def _get_obs(self):
"""Get the observations of this step."""
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given."""
raise NotImplementedError()
def get_arm_joint_positions(self):
"""Return a list of arm joint positions in rad.
The joint values are in the same order as get_arm_joint_names()."""
joint_position_dict = dict(zip(self.joint_state.name, self.joint_state.position))
return [joint_position_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_velocities(self):
"""Return a list of arm joint angular velocities in rad/s.
The joint values are in the same order as get_arm_joint_names()."""
joint_velocity_dict = dict(zip(self.joint_state.name, self.joint_state.velocity))
return [joint_velocity_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_efforts(self):
"""Return a list of arm joint momentum in Nm.
The joint values are in the same order as get_arm_joint_names()."""
joint_effort_dict = dict(zip(self.joint_state.name, self.joint_state.effort))
return [joint_effort_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_names(self):
"""Return list of names in arm joint group."""
return self._arm_joint_names
def get_joint_state(self):
"""Return the whole joint state topic dictionary."""
return self.joint_state
def get_random_ee_pose(self):
"""Generate a random joint position and return the end effector pose."""
random_joint_positions = (2 * np.random.rand(len(self._arm_joint_names)) - 1) * math.pi
ee_pos, ee_quat = self.get_ee_position_and_quaternion(random_joint_positions)
return self.create_pose(ee_pos, ee_quat), random_joint_positions.tolist()
def get_random_joint_pose(self, avg_joint_pose, joint_diff):
"""Generate a random joint position and return the end effector pose."""
random_joint_positions = (np.array(avg_joint_pose) +
(2 * np.random.rand(len(self._arm_joint_names)) - 1) * joint_diff)
ee_pos, ee_quat = self.get_ee_position_and_quaternion(random_joint_positions)
return self.create_pose(ee_pos, ee_quat), random_joint_positions.tolist()
def forward_kinematics(self, joint_angles):
"""Calculates the forward kinematics for this robot.
Args:
* Joint_angles (list of doubles): Joint angle values in rad
Returns:
* Transformation matrix of the end effector
"""
assert len(self.joint_transforamtions) == len(joint_angles)
transformation_matrix = np.eye(4)
for i in range(0, len(self.joint_transforamtions)):
transformation_matrix = np.matmul(transformation_matrix, self.joint_transforamtions[i])
transformation_matrix = np.matmul(transformation_matrix, self.get_rot_z_trafo(joint_angles[i]))
transformation_matrix = np.matmul(transformation_matrix, self.tool_transformation)
return transformation_matrix
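# Hedged illustration of what the loop above composes (joint values are placeholders): for an
# n-DoF arm, T_ee = T_0 @ Rz(q_0) @ T_1 @ Rz(q_1) @ ... @ T_tool, so the end effector pose for
# a given configuration can be read off directly:
# T = self.forward_kinematics([0.0] * len(self.get_arm_joint_names()))
# ee_xyz = T[0:3, 3]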
def get_rot_z_trafo(self, z_rotation):
"""Return a z-rotation matrix with joint angle z_rotation"""
return np.array([[np.cos(z_rotation), -np.sin(z_rotation), 0, 0],
[np.sin(z_rotation), np.cos(z_rotation), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def get_ee_pose(self):
"""Return the pose of the end effector."""
position_vec, orientation_vec = self.get_current_ee_position_and_quaternion()
return self.create_pose(position_vec, orientation_vec)
def get_ee_position_and_quaternion(self, joint_values):
"""Return the cartesian position of the end effector and the orientation quaternion"""
ee_transformation_matrix = self.forward_kinematics(joint_values)
ee_position = [ee_transformation_matrix[0,3], ee_transformation_matrix[1,3], ee_transformation_matrix[2,3]]
ee_q = Quaternion(matrix=ee_transformation_matrix[0:3,0:3])
ee_quaternion = [ee_q.w, ee_q.x, ee_q.y, ee_q.z]
return ee_position, ee_quaternion
def check_collision_ground(self, joint_angles, z_ground):
"""Check if any link of the robot collides with the ground.
Args:
joint_angles (double[n_joints]): angle values of arm joints
z_ground (double): z-position of the ground
"""
assert len(self.joint_transforamtions) == len(joint_angles)
transformation_matrix = np.eye(4)
for i in range(0, len(self.joint_transforamtions)):
transformation_matrix = np.matmul(transformation_matrix, self.joint_transforamtions[i])
transformation_matrix = np.matmul(transformation_matrix, self.get_rot_z_trafo(joint_angles[i]))
cap_p = np.matmul(transformation_matrix, np.array(np.append(self.capsules[i].segment.p, 1.0)))
if cap_p[2] < z_ground + self.capsules[i].radius:
return True
cap_q = np.matmul(transformation_matrix, np.array(np.append(self.capsules[i].segment.q, 1.0)))
if cap_q[2] < z_ground + self.capsules[i].radius:
return True
return False
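# Hedged usage sketch (the z_ground value is a placeholder): a candidate joint target can be
# vetoed before it is ever commanded.
# if not self.check_collision_ground(candidate_joint_positions, z_ground=0.0):
#     self.move_arm_joints(candidate_joint_positions)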
def get_current_ee_position_and_quaternion(self):
"""Return the current cartesian position of the end effector and the orientation quaternion"""
return self.get_ee_position_and_quaternion(self.get_arm_joint_positions())
def get_ee_rpy(self):
"""Return the roll, pitch, yaw values of the end effector."""
raise NotImplementedError()
def get_ee_quaternion(self):
"""Return the current end effector orientation quaternion (x, y, z, w)."""
raise NotImplementedError()
def vec_to_transformation_matrix(self, vec):
"""Convert a vector of numbers to a transformation matrix."""
assert len(vec)%16 == 0, "Transformation matrix vector has wrong format. len(vec) = {}".format(len(vec))
n_matrices = int(len(vec)/16)
list_of_matrices = []
for i in range(0, n_matrices):
list_of_matrices.append(np.reshape(vec[i*16:(i+1)*16], [4,4]))
return list_of_matrices
def create_pose(self, position_vec, orientation_vec):
"""Create a geometry_msgs.msg.Pose object from position and orientation.
Args:
position_vec (list): cartesian position [x, y, z]
orientation_vec (list): orientation quaternion [w, x, y, z]
Returns:
geometry_msgs.msg.Pose
"""
pose = Pose()
pose.position.x = position_vec[0]
pose.position.y = position_vec[1]
pose.position.z = position_vec[2]
pose.orientation.w = orientation_vec[0]
pose.orientation.x = orientation_vec[1]
pose.orientation.y = orientation_vec[2]
pose.orientation.z = orientation_vec[3]
return pose
def pose_to_vector(self, pose):
"""Convert a pose to a vector containing [pos_x, pos_y, pos_z, quat_w, quat_x, quat_y, quat_z]."""
position = [pose.position.x, pose.position.y, pose.position.z]
quaternion = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
return position + quaternion
def convert_pose_to_quaternion(self, pose):
"""Convert a geometry_msgs.msg.Pose to a pyquaternion.Quaternion.
TODO: Write utility class and move this to util.
Args:
pose (geometry_msgs.msg.Pose)
Returns:
pyquaternion.Quaternion
"""
return Quaternion(w=pose.orientation.w, x=pose.orientation.x, y=pose.orientation.y, z=pose.orientation.z)
def get_new_x_axis(self, quaternion):
"""Return the new x axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new x-axis
"""
return quaternion.rotation_matrix[:,0]
def get_new_y_axis(self, quaternion):
"""Return the new y axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new y-axis
"""
return quaternion.rotation_matrix[:,1]
def get_new_z_axis(self, quaternion):
"""Return the new z axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new z-axis
"""
return quaternion.rotation_matrix[:,2]
def great_circle_distance(self, n1, n2):
"""Return the great circle distance between two points on a sphere given by normal vectors.
See https://en.wikipedia.org/wiki/Great-circle_distance#Vector_version
Args:
n1 (np.array, shape: [3]): Normal vector 1
n2 (np.array, shape: [3]): Normal vector 2
Returns:
double: Great circle distance
"""
return np.arccos(np.dot(n1, n2))
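# Worked example (assumes unit-length normal vectors, as the formula requires): the great
# circle distance between the x- and y-axis directions is arccos(0) = pi/2.
# self.great_circle_distance(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))  # ~1.5708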
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~modrob_env_path_following.py | import math
import os
import numpy as np
import rospy
import time
import sys
import moveit_commander
import moveit_msgs.msg
from tf.transformations import euler_from_quaternion
from openai_ros import robot_gazebo_env
from std_msgs.msg import Empty
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.srv import DeleteModel
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from controller_manager_msgs.srv import SwitchController
from openai_ros.openai_ros_common import ROSLauncher
from pyquaternion import Quaternion
from custom_robot_msgs.msg import Motion
from custom_robot_msgs.msg import Segment
from custom_robot_msgs.msg import Capsule
from openai_ros.robot_envs.modrob_env_moveit import ModRobEnvMoveIt, MoveModrob
class ModRobEnvPathFollowing(robot_gazebo_env.RobotGazeboEnv):
"""Modular robot that uses the failsafe nodelets to perform a path following trajectory planning.
The main idea is that this class only outputs a goal joint position.
The failsafe planning then plans an executable trajectory based on the max vel, acc, and jerk values of the joints.
The failsafe planning can also be used to ensure safety during the execution of the planned trajectory.
"""
def __init__(self, ros_ws_abspath):
"""Initialize a new ModRob (modular robot) environment with path following control.
Puts the robot in an existing simulation world.
To check any topic, we need to have the simulation running, and we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was running already for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List: TODO
*
Actuators Topic List:
* /' + self.robot_name_ + '/new_goal_motion
Args:
* ros_ws_abspath: The absolute path to the catkin_ws
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
self.ros_ws_abspath = ros_ws_abspath
self._init_robot(ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ModRobEnvPathFollowing, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
# Unpause the simulation to initialize all sensors and actors.
self.gazebo.unpauseSim()
self.is_collided = False
self._joint_states_topic = '/' + self.robot_name_ + '/joint_states'
self._arm_joint_names = rospy.get_param('/' + self.robot_name_ + '/arm_position_controller/joints')
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber(self._joint_states_topic, JointState, self._joint_state_callback)
self._check_all_sensors_ready()
self._init_joint_publisher()
self._check_publishers_connection()
# Load the transformation matrices from the ROS parameter server for forward kinematics.
self._get_arm_transformation_matrices()
# Pause the simulation after initialization
self.gazebo.pauseSim()
rospy.loginfo("Finished ModRob INIT...")
def _init_robot(self, ros_ws_abspath):
"""Calls launch file of robot."""
self._get_base_pose_parameter()
quat_base = self.convert_pose_to_quaternion(self.base_pose)
(base_r, base_p, base_y) = euler_from_quaternion([quat_base.w, quat_base.x, quat_base.y, quat_base.z])
launch_arg_string = "robot_name:={} x:={} y:={} z:={} roll:={} pitch:={} yaw:={}".format(
self.robot_name_, self.base_pose.position.x, self.base_pose.position.y, self.base_pose.position.z,
base_r, base_p, base_y
)
ROSLauncher(rospackage_name="initialisation",
launch_file_name="init_modrob.launch",
ros_ws_abspath=ros_ws_abspath,
launch_arg_string=launch_arg_string)
def _get_base_pose_parameter(self):
"""Load base pose from parameter server."""
# Init robot position
base_pose_position = rospy.get_param('/modrob/base_pose').get("position")
base_pose_orientation = rospy.get_param('/modrob/base_pose').get("orientation")
self.base_pose = self.create_pose([base_pose_position.get("x"), base_pose_position.get("y"), base_pose_position.get("z")], [base_pose_orientation.get("w"), base_pose_orientation.get("x"), base_pose_orientation.get("y"), base_pose_orientation.get("z")])
def _get_arm_transformation_matrices(self):
"""Read the transformation matrices from the parameter server."""
tm_vec = rospy.get_param('/' + self.robot_name_ + '/transformation_matrices')
self.joint_transforamtions = self.vec_to_transformation_matrix(tm_vec)
tool_tm_vec = rospy.get_param('/' + self.robot_name_ + '/tool_transformation_matrix')
self.tool_transformation = self.vec_to_transformation_matrix(tool_tm_vec)[0]
enclosures = rospy.get_param('/' + self.robot_name_ + '/enclosures')
enclosures = np.reshape(enclosures, [len(self.joint_transforamtions), 7])
self.capsules = []
for i in range(len(enclosures)):
cap = Capsule
segment = Segment
segment.p = enclosures[i,0:3]
segment.q = enclosures[i,3:6]
cap.segment = segment
cap.radius = enclosures[i, 6]
self.capsules.append(cap)
def _init_joint_publisher(self):
"""Initialize the joint controller publisher with the joint list.
Relies on joint sensors being published.
"""
self._arm_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/new_goal_motion', Motion, queue_size=100)
self._gripper_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/gripper_position_controller/command', Float64MultiArray, queue_size=10)
def _check_all_systems_ready(self):
"""Check that all the sensors, publishers and other simulation systems are operational."""
self._check_all_sensors_ready()
return True
def _check_all_sensors_ready(self):
"""Perform all sensor checks."""
rospy.logdebug("START ALL SENSORS READY")
self._check_joint_state_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_publishers_connection(self):
"""Check that all the publishers are working"""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._arm_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/arm_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
while self._gripper_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/gripper_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("All joint position controller publishers connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
def _check_joint_state_ready(self):
self.joint_state = None
rospy.logdebug("Waiting for {} to be READY...".format(self._joint_states_topic))
while self.joint_state is None and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._joint_states_topic, JointState, timeout=5.0)
if len(data.velocity) > 0:
self.joint_state = data
rospy.logdebug("Current {} READY=>".format(self._joint_states_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting joint states".format(self._joint_states_topic))
return self.joint_state
def _joint_state_callback(self, data):
# Often, there is an empty joint state message.
if len(data.velocity) > 0:
self.joint_state = data
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_arm_joints(self, joint_positions, error=0.05, timeout=3.0):
"""Control the arm joints of the robot.
The control waits until timeout or desired position reached within a margin of error.
Args:
joint_positions (list or np.array): list of desired joint positions
error (double): absolute error allowed between each desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_names())==len(joint_positions), "Length of desired arm joint positions does not match"
# Create motion message
msg = Motion()
# Not relevant
msg.s = 0
msg.q = joint_positions
# Goal velocity is zero
msg.dq = [0 for _ in range(len(joint_positions))]
# Not relevant
msg.ddq = [0 for _ in range(len(joint_positions))]
self._arm_joint_publisher.publish(msg)
# Wait for a certain amount of time to get to the desired position.
return self.wait_for_joints_to_get_there(joint_positions, error=error, timeout=timeout)
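# Hedged usage sketch (joint values are placeholders and must be ordered like
# get_arm_joint_names(); set timeout=0 to block until the target is reached):
# reached = self.move_arm_joints([0.0, -0.5, 0.5, 0.0, 1.0, 0.0], error=0.02, timeout=5.0)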
def wait_for_joints_to_get_there(self, desired_joint_positions, error=0.05, timeout=3.0):
"""Wait until target joint position is reached within an error or until the timout is reached.
Set timeout to 0 to wait until joint position is reached.
Args:
* desired_joint_positions (list or np.array): list of desired joint positions
* error (double): absolute error allowed between each desired and reached joint position
* timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_positions())==len(desired_joint_positions), "Length of desired arm joint positions does not match."
time_waiting = 0.0
frequency = 100.0 # Fine tune this parameter.
are_equal = False
is_timeout = False
# ROS will try to keep this frequency of the following loop
rate = rospy.Rate(frequency)
rospy.logdebug("Waiting for joint to get to the position")
success = True
while not are_equal and not is_timeout and not rospy.is_shutdown():
are_equal = self.joints_close(self.get_arm_joint_positions(), desired_joint_positions, error)
rate.sleep()
time_waiting += 1.0 / frequency
if timeout > 1e-5:
is_timeout = time_waiting > timeout
else:
if time_waiting > 10:
# If we waited longer than 10 seconds, reset the robot
rospy.logerr("ROBOT DID NOT REACHED DESIRED POSITION FOR 10 SECONDS!!! RESETTING ROBOT!")
self.full_reset_robot()
success = False
is_timeout = True
rospy.logdebug("Joints are in the desired position with an error of " + str(error))
return success
def full_reset_robot(self):
"""Removes the robot model from simulation and respawns it.
This is necessary because the ROS controllers sometimes die.
"""
# TODO: Test this
self.gazebo.unpauseSim()
# rosnode kill /modrob1/joint_state_publisher /modrob1/controller_spawner /modrob1/robot_state_publisher
os.system("rosnode kill /{}/joint_state_publisher /{}/controller_spawner /{}/robot_state_publisher".format(
self.robot_name_, self.robot_name_, self.robot_name_))
# Delete the robot model
rospy.wait_for_service('/gazebo/delete_model')
try:
delete_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
resp = delete_model(self.robot_name_)
except rospy.ServiceException as e:
rospy.logerr("Delete model service call failed: %s"%e)
resp = False
return
# Respawn the robot model
self._get_base_pose_parameter()
quat_base = self.convert_pose_to_quaternion(self.base_pose)
(base_r, base_p, base_y) = euler_from_quaternion([quat_base.w, quat_base.x, quat_base.y, quat_base.z])
launch_arg_string = "robot_name:={} init_x:={} init_y:={} init_z:={} init_roll:={} init_pitch:={} init_yaw:={}".format(
self.robot_name_, self.base_pose.position.x, self.base_pose.position.y, self.base_pose.position.z,
base_r, base_p, base_y
)
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="put_robot_in_world_path_following.launch",
ros_ws_abspath=self.ros_ws_abspath,
launch_arg_string=launch_arg_string)
# Wait for all systems (joint state) to be ready
# TODO: Test this out
#self._send_initialization()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
def joints_close(self, joint_positions, desired_joint_positions, error):
"""Returns if the joints are in a bound of error to the designated goal."""
return np.allclose(joint_positions, desired_joint_positions, atol=error)
def reinit_sensors(self):
"""
This method is for the tasks so that, when resetting the episode,
the sensor values are forced to be updated with the real data.
<<-- Only needed when reset is set to SIMULATION.
<<-- TODO: Implement this ?
"""
def _send_initialization(self):
"""Send init message to ROS topic
"""
ROSLauncher(rospackage_name="initialisation",
launch_file_name="init.launch",
ros_ws_abspath=self.ros_ws_abspath)
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start of an episode."""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given."""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation."""
raise NotImplementedError()
def _get_obs(self):
"""Get the observations of this step."""
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given."""
raise NotImplementedError()
def get_arm_joint_positions(self):
"""Return a list of arm joint positions in rad.
The joint values are in the same order as get_arm_joint_names()."""
joint_position_dict = dict(zip(self.joint_state.name, self.joint_state.position))
return [joint_position_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_velocities(self):
"""Return a list of arm joint angular velocities in rad/s.
The joint values are in the same order as get_arm_joint_names()."""
joint_velocity_dict = dict(zip(self.joint_state.name, self.joint_state.velocity))
return [joint_velocity_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_efforts(self):
"""Return a list of arm joint momentum in Nm.
The joint values are in the same order as get_arm_joint_names()."""
joint_effort_dict = dict(zip(self.joint_state.name, self.joint_state.effort))
return [joint_effort_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_names(self):
"""Return list of names in arm joint group."""
return self._arm_joint_names
def get_joint_state(self):
"""Return the whole joint state topic dictionary."""
return self.joint_state
def get_random_ee_pose(self):
"""Generate a random joint position and return the end effector pose."""
random_joint_positions = (2 * np.random.rand(len(self._arm_joint_names)) - 1) * math.pi
ee_pos, ee_quat = self.get_ee_position_and_quaternion(random_joint_positions)
return self.create_pose(ee_pos, ee_quat), random_joint_positions.tolist()
def get_random_joint_pose(self, avg_joint_pose, joint_diff):
"""Generate a random joint position and return the end effector pose."""
random_joint_positions = (np.array(avg_joint_pose) +
(2 * np.random.rand(len(self._arm_joint_names)) - 1) * joint_diff)
ee_pos, ee_quat = self.get_ee_position_and_quaternion(random_joint_positions)
return self.create_pose(ee_pos, ee_quat), random_joint_positions.tolist()
def forward_kinematics(self, joint_angles):
"""Calculates the forward kinematics for this robot.
Args:
* Joint_angles (list of doubles): Joint angle values in rad
Returns:
* Transformation matrix of the end effector
"""
assert len(self.joint_transforamtions) == len(joint_angles)
transformation_matrix = np.eye(4)
for i in range(0, len(self.joint_transforamtions)):
transformation_matrix = np.matmul(transformation_matrix, self.joint_transforamtions[i])
transformation_matrix = np.matmul(transformation_matrix, self.get_rot_z_trafo(joint_angles[i]))
transformation_matrix = np.matmul(transformation_matrix, self.tool_transformation)
return transformation_matrix
def get_rot_z_trafo(self, z_rotation):
"""Return a z-rotation matrix with joint angle z_rotation"""
return np.array([[np.cos(z_rotation), -np.sin(z_rotation), 0, 0],
[np.sin(z_rotation), np.cos(z_rotation), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def get_ee_pose(self):
"""Return the pose of the end effector."""
position_vec, orientation_vec = self.get_current_ee_position_and_quaternion()
return self.create_pose(position_vec, orientation_vec)
def get_ee_position_and_quaternion(self, joint_values):
"""Return the cartesian position of the end effector and the orientation quaternion"""
ee_transformation_matrix = self.forward_kinematics(joint_values)
ee_position = [ee_transformation_matrix[0,3], ee_transformation_matrix[1,3], ee_transformation_matrix[2,3]]
ee_q = Quaternion(matrix=ee_transformation_matrix[0:3,0:3])
ee_quaternion = [ee_q.w, ee_q.x, ee_q.y, ee_q.z]
return ee_position, ee_quaternion
def check_collision_ground(self, joint_angles, z_ground):
"""Check if any link of the robot collides with the ground.
Args:
joint_angles (double[n_joints]): angle values of arm joints
z_ground (double): z-position of the ground
"""
assert len(self.joint_transforamtions) == len(joint_angles)
transformation_matrix = np.eye(4)
for i in range(0, len(self.joint_transforamtions)):
transformation_matrix = np.matmul(transformation_matrix, self.joint_transforamtions[i])
transformation_matrix = np.matmul(transformation_matrix, self.get_rot_z_trafo(joint_angles[i]))
cap_p = np.matmul(transformation_matrix, np.array(np.append(self.capsules[i].segment.p, 1.0)))
if cap_p[2] < z_ground + self.capsules[i].radius:
return True
cap_q = np.matmul(transformation_matrix, np.array(np.append(self.capsules[i].segment.q, 1.0)))
if cap_q[2] < z_ground + self.capsules[i].radius:
return True
return False
def get_current_ee_position_and_quaternion(self):
"""Return the current cartesian position of the end effector and the orientation quaternion"""
return self.get_ee_position_and_quaternion(self.get_arm_joint_positions())
def get_ee_rpy(self):
"""Return the roll, pitch, yaw values of the end effector."""
raise NotImplementedError()
def get_ee_quaternion(self):
"""Return the current end effector orientation quaternion (x, y, z, w)."""
raise NotImplementedError()
def vec_to_transformation_matrix(self, vec):
"""Convert a vector of numbers to a transformation matrix."""
assert len(vec)%16 == 0, "Transformation matrix vector has wrong format. len(vec) = {}".format(len(vec))
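# Every consecutive block of 16 values is reshaped (row-major) into one 4x4 matrix.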
n_matrices = int(len(vec)/16)
list_of_matrices = []
for i in range(0, n_matrices):
list_of_matrices.append(np.reshape(vec[i*16:(i+1)*16], [4,4]))
return list_of_matrices
def create_pose(self, position_vec, orientation_vec):
"""Create a geometry_msgs.msg.Pose object from position and orientation.
Args:
position_vec (list): cartesian position [x, y, z]
orientation_vec (list): orientation quaternion [w, x, y, z]
Returns:
geometry_msgs.msg.Pose
"""
pose = Pose()
pose.position.x = position_vec[0]
pose.position.y = position_vec[1]
pose.position.z = position_vec[2]
pose.orientation.w = orientation_vec[0]
pose.orientation.x = orientation_vec[1]
pose.orientation.y = orientation_vec[2]
pose.orientation.z = orientation_vec[3]
return pose
def pose_to_vector(self, pose):
"""Convert a pose to a vector containing [pos_x, pos_y, pos_z, quat_w, quat_x, quat_y, quat_z]."""
position = [pose.position.x, pose.position.y, pose.position.z]
quaternion = [pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w]
return position + quaternion
def convert_pose_to_quaternion(self, pose):
"""Convert a geometry_msgs.msg.Pose to a pyquaternion.Quaternion.
TODO: Write utility class and move this to util.
Args:
pose (geometry_msgs.msg.Pose)
Returns:
pyquaternion.Quaternion
"""
return Quaternion(w=pose.orientation.w, x=pose.orientation.x, y=pose.orientation.y, z=pose.orientation.z)
def get_new_x_axis(self, quaternion):
"""Return the new x axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new x-axis
"""
return quaternion.rotation_matrix[:,0]
def get_new_y_axis(self, quaternion):
"""Return the new y axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new y-axis
"""
return quaternion.rotation_matrix[:,1]
def get_new_z_axis(self, quaternion):
"""Return the new z axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new z-axis
"""
return quaternion.rotation_matrix[:,2]
def great_circle_distance(self, n1, n2):
"""Return the great circle distance between two points on a sphere given by normal vectors.
See https://en.wikipedia.org/wiki/Great-circle_distance#Vector_version
Args:
n1 (np.array, shape: [3]): Normal vector 1
n2 (np.array, shape: [3]): Normal vector 2
Returns:
double: Great circle distance
"""
return np.arccos(np.dot(n1, n2))
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_position, base_velocity, pole_angle, pole_velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
# check if the base is still within the ranges of (-2, 2)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
Gives more points for staying upright. Uses the given observations so the reward
is computed from the same data as the other callbacks.
:return: reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
rospy.logwarn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~example_modrob_human_training~src~start_training_sac_her.py | #!/usr/bin/env python
import torch
import gym
import numpy as np
import time
import rospy
import rospkg
import functools
from datetime import datetime
from spinup.algos.pytorch.sac_her import sac_her, core
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from gym import wrappers
from torch.optim import Adam
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
# How can this be done dynamically?
rospy.init_node('modrob_RL_node',
anonymous=True, log_level=rospy.INFO)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/sac/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('example_modrob_human_training')
now = datetime.now()
load_epoch = -1
if rospy.has_param("/sac/load_epoch"):
load_epoch = rospy.get_param("/sac/load_epoch")
outdir = pkg_path + '/training_results/' + rospy.get_param("/sac/outdir")
else:
outdir = pkg_path + '/training_results/' + now.strftime("%Y_%m_%d_%H_%M")
## We cannot use a monitor if we want to cut off trajectories
#env = wrappers.Monitor(env, outdir, force=True)
#rospy.loginfo("Monitor Wrapper started")
last_time_steps = np.ndarray(0)
# Network size
hid = rospy.get_param("/sac/hid")
l = rospy.get_param("/sac/l")
ac_kwargs=dict(hidden_sizes=hid*l)
# Random seed
seed = rospy.get_param("/sac/seed")
# An epoch consists of a fixed amount of episodes
n_episodes_per_epoch = rospy.get_param("/sac/n_episodes_per_epoch")
# We train for a fixed amount of epochs
n_epochs = rospy.get_param("/sac/n_epochs")
# Size of replay buffer
replay_size = rospy.get_param("/sac/replay_size")
# Discount factor. (Always between 0 and 1.)
gamma = rospy.get_param("/sac/gamma")
# polyak (float): Interpolation factor in polyak averaging for target networks.
polyak = rospy.get_param("/sac/polyak")
# learning rate
lr = rospy.get_param("/sac/lr")
# Entropy regularization coefficient.
alpha = rospy.get_param("/sac/alpha")
# Batch size
batch_size = rospy.get_param("/sac/batch_size")
# Number of steps for uniform-random action selection,
# before running real policy. Helps exploration.
start_steps = rospy.get_param("/sac/start_steps")
# Number of env interactions to collect before starting to do gradient descent updates.
# Ensures replay buffer is full enough for useful updates.
update_after = rospy.get_param("/sac/update_after")
# Number of env interactions that should elapse between gradient descent updates. Note: Regardless of how long
# you wait between updates, the ratio of env steps to gradient steps is locked to 1.
update_every = rospy.get_param("/sac/update_every")
# Number of episodes to test the deterministic policy at the end of each epoch.
num_test_episodes = rospy.get_param("/sac/num_test_episodes")
# maximum length of episode
max_ep_len = rospy.get_param("/sac/max_ep_len")
# Number of epochs between each policy/value function save
save_freq = rospy.get_param("/sac/save_freq")
# Number of HER transitions per real transition
k_her_samples = rospy.get_param("/sac/k_her_samples")
# Number of updates steps per update
n_updates = rospy.get_param("/sac/n_updates")
logger_kwargs = setup_logger_kwargs(task_and_robot_environment_name,seed,outdir)
# Set max timestep
env.spec.timestep_limit = max_ep_len
sac_her.sac_her(env=env,
test_env = env,
actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=hid*l),
seed=seed,
n_epochs=n_epochs,
n_episodes_per_epoch=n_episodes_per_epoch,
replay_size=replay_size,
gamma=gamma,
polyak=polyak,
lr=lr,
alpha=alpha,
batch_size=batch_size,
start_steps=start_steps,
update_after=update_after,
update_every=update_every,
num_test_episodes=num_test_episodes,
max_ep_len=max_ep_len,
n_updates = n_updates,
k_her_samples=k_her_samples,
logger_kwargs=logger_kwargs,
save_freq=save_freq,
load_epoch=load_epoch)
env.close()
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~example_modrob_human_training~src~start_training.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
# py3
import functools
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
# How can this be done dynamically?
rospy.init_node('example_modrob_learn',
anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/modrob/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('example_modrob_human_training')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/modrob/alpha")
Epsilon = rospy.get_param("/modrob/epsilon")
Gamma = rospy.get_param("/modrob/gamma")
epsilon_discount = rospy.get_param("/modrob/epsilon_discount")
nepisodes = rospy.get_param("/modrob/nepisodes")
nsteps = rospy.get_param("/modrob/nsteps")
running_step = rospy.get_param("/modrob/running_step")
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes):
rospy.logdebug("############### WALL START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
for i in range(nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" +
str(cumulated_reward))
rospy.logwarn(
"# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo(("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" + str(qlearn.gamma) + "|" + str(
initial_epsilon) + "*" + str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best score: {}".format(l[0]))
rospy.loginfo("Worst score: {}".format(l[-1]))
env.close()
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~cube_single_disk_env.py | #! /usr/bin/env python
import numpy
import rospy
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from openai_ros.openai_ros_common import ROSLauncher
class CubeSingleDiskEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""Initializes a new CubeSingleDisk environment.
Args:
"""
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="moving_cube_description",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Variables that we give through the constructor.
# None in this case
# Internal Vars
self.controllers_list = ['joint_state_controller',
'inertia_wheel_roll_joint_velocity_controller'
]
self.robot_name_space = "moving_cube"
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(CubeSingleDiskEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=True)
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/moving_cube/joint_states", JointState, self._joints_callback)
rospy.Subscriber("/moving_cube/odom", Odometry, self._odom_callback)
self._roll_vel_pub = rospy.Publisher('/moving_cube/inertia_wheel_roll_joint_velocity_controller/command',
Float64, queue_size=1)
self._check_all_systems_ready()
# We pause the simulation once everything is ready
self.gazebo.pauseSim()
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
self._check_publishers_connection()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
self._check_odom_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
"/moving_cube/joint_states", JointState, timeout=1.0)
rospy.logdebug(
"Current moving_cube/joint_states READY=>" + str(self.joints))
except:
rospy.logerr(
"Current moving_cube/joint_states not ready yet, retrying for getting joint_states")
return self.joints
def _check_odom_ready(self):
self.odom = None
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message(
"/moving_cube/odom", Odometry, timeout=1.0)
rospy.logdebug(
"Current /moving_cube/odom READY=>" + str(self.odom))
except:
rospy.logerr(
"Current /moving_cube/odom not ready yet, retrying for getting odom")
return self.odom
def _joints_callback(self, data):
self.joints = data
def _odom_callback(self, data):
self.odom = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._roll_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug(
"No susbribers to _roll_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("_roll_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_joints(self, roll_speed):
joint_speed_value = Float64()
joint_speed_value.data = roll_speed
rospy.logdebug("Single Disk Roll Velocity>>" + str(joint_speed_value))
self._roll_vel_pub.publish(joint_speed_value)
self.wait_until_roll_is_in_vel(joint_speed_value.data)
def wait_until_roll_is_in_vel(self, velocity):
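# Block until the measured wheel velocity is within +/- 0.1 rad/s (epsilon) of the
# commanded velocity, then return the elapsed wait time.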
rate = rospy.Rate(10)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.1
v_plus = velocity + epsilon
v_minus = velocity - epsilon
while not rospy.is_shutdown():
joint_data = self._check_joint_states_ready()
roll_vel = joint_data.velocity[0]
rospy.logdebug("VEL=" + str(roll_vel) +
", ?RANGE=[" + str(v_minus) + ","+str(v_plus)+"]")
are_close = (roll_vel <= v_plus) and (roll_vel > v_minus)
if are_close:
rospy.logdebug("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
rospy.logdebug("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time - start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
return delta_time
def get_joints(self):
return self.joints
def get_odom(self):
return self.odom
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~human_modrob~modrob_sphere_moveit.py | import os
import sys
import numpy as np
import copy
import rospy
import time
from gym import spaces
from pyquaternion import Quaternion
from openai_ros.robot_envs import modrob_env_moveit
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from std_msgs.msg import Float32
from gazebo_msgs.msg import ModelStates, ModelState
from gazebo_msgs.srv import SetModelState
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
from modrob_simulation.msg import Collision, Collisions
class ModRobSphereEnvMoveIt(modrob_env_moveit.ModRobEnvMoveIt):
def __init__(self):
"""
This Task Env is designed for having the ModRob with an example moving obstacle.
It will learn how to finish tasks without colliding with the obstacle.
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_abs_env_var = rospy.get_param("/modrob/ros_abs_env_var", None)
try:
ros_ws_abspath = os.environ[ros_abs_env_var]
except:
print("Please set the environment variable {}".format(ros_abs_env_var))
sys.exit(1)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="start_world_sphere.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/human_modrob/config",
yaml_file_name="modrob_human_moveit.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(ModRobSphereEnvMoveIt, self).__init__(ros_ws_abspath)
## Load in environment variables
self._get_env_variables()
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-np.inf, np.inf)
## Set action and observation space
# Continuous action space.
# All actions are normalized to [-1, 1] and observations to [0, 1]. This improves training.
# Right now, only arm action and observation is implemented.
# TODO: Add gripper support.
self.n_actions = len(self.get_arm_joint_names())
# number of arm joints +
# current end effector position (x,y,z) +
# current end effector orientation quaternion (x, y, z, w) +
# goal end effector position (x,y,z) +
# goal end effector orientation quaternion (x, y, z, w) +
# sphere obstacle position (x, y, z)
self.n_observations = len(self.get_arm_joint_names()) + 3 + 4 + 3 + 4 + 3
self.action_space = spaces.Box(low=-1, high=1, shape=(self.n_actions,))
self.observation_space = spaces.Box(low=0, high=1, shape=(self.n_observations,))
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
self.cumulated_steps = 0.0
# Set starting position of spherical obstacle.
# This will be replaced by a more sophisticated obstacle in the future.
self.sphere_obs_start_pose = self.create_pose([0, 2, 0.7], [1, 0, 0, 0])
self.sphere_start_velo = -0.5
## Unpause sim and start all subscribers and publishers
self.gazebo.unpauseSim()
# Collision detection topic
self._collision_topic = '/' + self.robot_name_ + '/collisions'
# ROS subscriber to robot collision
rospy.Subscriber(self._collision_topic, Collisions, self._collision_callback)
self._sphere_sensor_topic = '/sphere_obs/position'
# ROS subscriber to sphere position
rospy.Subscriber(self._sphere_sensor_topic, Point, self._sphere_pos_callback)
self._check_sphere_pos_ready()
self._sphere_pose_topic = '/sphere_obs/pose_cmd'
self._sphere_pose_publisher = rospy.Publisher(self._sphere_pose_topic, Pose, queue_size=20)
self._sphere_vel_topic = '/sphere_obs/vel_cmd'
self._sphere_vel_publisher = rospy.Publisher(self._sphere_vel_topic, Float32, queue_size=10)
## Utility for moving start and goal position visual models
# CAREFUL: The getter method has an s at the end while the setter method doesn't!
# Ros subscriber to gazebo model state
self._gazebo_model_state_topic = '/gazebo/model_states'
rospy.Subscriber(self._gazebo_model_state_topic, ModelStates, self._gazebo_model_state_callback)
self._check_gazebo_model_state_ready()
# Get the base pose
self.base_pose = self._get_base_pose()
self.gazebo.pauseSim()
def _get_env_variables(self):
"""Load in environment variables from yaml.
Relevant variables:
joint_min: Minimal angle for all joints
joint_max: Maximal angle for all joints
joint_max_delta: Max theoretically allowed movement per execution step (in rad)
gripper1_min: Minimal position of gripper part 1
gripper1_max: Maximal position of gripper part 1
gripper2_min: Minimal position of gripper part 2
gripper2_max: Maximal position of gripper part 2
gripper_max_delta: Maximal theoretically allowed movement per execution step (in m)
ee_limits: Overapproximative end effector position limits
.max.x, .y, .z
.min.x, .y, .z
base_pose: geometry_msgs.msg.Pose for base
use_delta_actions: True: Use differential position commands, False: Use absolute position commands
movement_error: Precision maximum for regular movement (can be arbitrarely small)
movement_timeout: Time waited until next movement execution
init_error: Precision for start position
goal_error_position: 0.2 # Precision for goal reached euclidean distance
goal_error_orientation: 0.2 # Precision for goal reached quaternion distance
init_joint0_position: Initial position for joint0 - TODO: Change this to dynamically.
init_joint1_position: Initial position for joint1 - TODO: Change this to dynamically.
init_joint2_position: Initial position for joint2 - TODO: Change this to dynamically.
init_hand_to_finger1_position: Initial position for gripper part 1 - TODO: Change this to dynamically.
init_hand_to_finger2_position: Initial position for gripper part 2 - TODO: Change this to dynamically.
goal_pose: geometry_msgs.msg.Pose for end effector goal
distance_penalty_position: Reward penalty for position distance
distance_penalty_orientation: Reward penalty for orientation distance
time_penalty: Time penalty for every step
goal_reward: Points given when reaching the goal
collision_penalty: Penalty when colliding with an object
"""
## Determine the normalization constants for all observations and actions.
# action_norm consists of two columns (c, r) and one row per controllable joint.
# To normalize an action a, calculate: a_n = (a-c)/r, r!=0
# To denormalize an normalized action a_n, calculate a = (a_n * r) + c
# Since we are using the tanh as activation for actions, we normalize a to [-1; 1]
self.action_norm = []
self.observation_norm = []
# Max movements
self.joint_max_delta = rospy.get_param('/modrob/joint_max_delta')
self.gripper_max_delta = rospy.get_param('/modrob/gripper_max_delta')
self.use_delta_actions = rospy.get_param('/modrob/use_delta_actions')
# TODO: Read these from urdf file.
self.joint_min = rospy.get_param('/modrob/joint_min')
self.joint_max = rospy.get_param('/modrob/joint_max')
assert self.joint_max-self.joint_min != 0, "Joint difference is zero"
self.gripper1_min = rospy.get_param('/modrob/gripper1_min')
self.gripper1_max = rospy.get_param('/modrob/gripper1_max')
assert self.gripper1_max-self.gripper1_min != 0, "Gripper 1 difference is zero"
self.gripper2_min = rospy.get_param('/modrob/gripper2_min')
self.gripper2_max = rospy.get_param('/modrob/gripper2_max')
assert self.gripper2_max-self.gripper2_min != 0, "Gripper 2 difference is zero"
# First entries are joint positions
self.observation_id_joints = 0 # defines where the joint values start in observation
for joint_name in self.get_arm_joint_names():
if self.use_delta_actions:
_c = -1*self.joint_max_delta
_r = 2*self.joint_max_delta
else:
_c = self.joint_min
_r = self.joint_max-self.joint_min
# From [0; 1] normalization to [-1; 1]
c = _c + _r/2
r = _r/2
self.action_norm.append([c, r])
self.observation_norm.append([self.joint_min, self.joint_max-self.joint_min])
""" Gripper normalization. Add back to code in the future.
elif "hand_to_finger1" in joint_name:
# Only one action for both grippers (they move together)
if self.use_delta_actions:
self.action_norm.append([-1*self.gripper_max_delta, 2*self.gripper_max_delta])
else:
self.action_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.observation_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
"""
# Add normalization for current and goal ee position
self.observation_id_current_ee_pose = len(self.observation_norm) # defines where the current ee pose values start in observation
self.observation_id_goal_ee_pose = len(self.observation_norm) + 7 # defines where the goal ee pose values start in observation
for i in range(2):
ee_limits = rospy.get_param('/modrob/ee_limits')
self.observation_norm.append([ee_limits.get("min").get("x"), ee_limits.get("max").get("x")-ee_limits.get("min").get("x")])
self.observation_norm.append([ee_limits.get("min").get("y"), ee_limits.get("max").get("y")-ee_limits.get("min").get("y")])
self.observation_norm.append([ee_limits.get("min").get("z"), ee_limits.get("max").get("z")-ee_limits.get("min").get("z")])
# Add normalization for ee quaternion orientation
for _ in range(4):
self.observation_norm.append([0, 1])
# Add normalization for sphere obstacle.
# This will always be a bit hacky since there is no definite max and min position for any obstacle.
self.observation_norm.append([-3, 3]) #x
self.observation_norm.append([-3, 3]) #y
self.observation_norm.append([0, 3]) #z
self.action_norm = np.array(self.action_norm)
self.observation_norm = np.array(self.observation_norm)
# Movement settings
self.movement_error = rospy.get_param('/modrob/movement_error')
self.movement_timeout = rospy.get_param('/modrob/movement_timeout')
self.init_error = rospy.get_param('/modrob/init_error')
self.goal_error_position = rospy.get_param('/modrob/goal_error_position')
self.goal_error_orientation = rospy.get_param('/modrob/goal_error_orientation')
# Set initial joint positions
# Right now, only arm position movement implemented!
# TODO: Add init gripper position
self.init_arm_joint_position = []
if rospy.has_param("/modrob/init_joint_position"):
self.init_arm_joint_position = rospy.get_param("/modrob/init_joint_position")
assert(len(self.init_arm_joint_position) == len(self._arm_joint_names))
# Goal and reward
goal_pose_position = rospy.get_param('/modrob/goal_pose').get("position")
goal_pose_orientation = rospy.get_param('/modrob/goal_pose').get("orientation")
self.goal_pose = self.create_pose([goal_pose_position.get("x"), goal_pose_position.get("y"), goal_pose_position.get("z")],
[goal_pose_orientation.get("x"), goal_pose_orientation.get("y"), goal_pose_orientation.get("z"), goal_pose_orientation.get("w")])
self.goal_position = [self.goal_pose.position.x,
self.goal_pose.position.y,
self.goal_pose.position.z]
self.goal_quaternion = [self.goal_pose.orientation.x,
self.goal_pose.orientation.y,
self.goal_pose.orientation.z,
self.goal_pose.orientation.w]
self.goal_observation = self.goal_position + self.goal_quaternion
self.distance_penalty_position = rospy.get_param('/modrob/distance_penalty_position')
self.distance_penalty_orientation = rospy.get_param('/modrob/distance_penalty_orientation')
self.time_penalty = rospy.get_param('/modrob/time_penalty')
self.goal_reward = rospy.get_param('/modrob/goal_reward')
self.collision_penalty = rospy.get_param('/modrob/collision_penalty')
def _set_base_pose(self):
"""Set the pose of the robots base."""
assert bool(self.robot_name_ in self.gazebo_model_pose), self.robot_name_ + " not in gazebo model states!"
self.move_gazebo_model(self.robot_name_, self.base_pose)
def _get_base_pose(self):
"""Set the pose of the robots base."""
assert bool(self.robot_name_ in self.gazebo_model_pose), self.robot_name_ + " not in gazebo model states!"
return self.gazebo_model_pose[self.robot_name_]
def _set_init_pose(self):
"""Sets the Robot in its init pose.
"""
# Move until init position is reached (timeout=0)
self.move_arm_joints(self.init_arm_joint_position, error=self.init_error, timeout=0.0)
self.init_pose = self.get_ee_pose()
return True
def _init_env_variables(self):
"""Inits episode specific variables each time we reset at the start of an episode.
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
self.last_arm_joint_position = self.get_arm_joint_positions()
self.is_collided = False
# Place start and goal markers
self.move_gazebo_model("start_pos", self.correct_model_pose(self.init_pose))
self.move_gazebo_model("goal_pos", self.correct_model_pose(self.goal_pose))
# Place sphere obstacle
self._sphere_pose_publisher.publish(self.sphere_obs_start_pose)
self._sphere_vel_publisher.publish(self.sphere_start_velo)
def _set_action(self, action):
"""Give a control command to the robot.
First, the action is clipped to the action space.
It is possible to assign negative rewards for too high actions.
This function denormalizes the action command and controls the robot.
Args:
action (array): Normalized actions
"""
action = np.clip(action, self.action_space.low, self.action_space.high)
rospy.logdebug("Start Set Action ==>"+str(action))
# Denormalize actions
denormalized_action = self.denormalize_actions(action)
# Build joint position dict
if self.use_delta_actions:
joint_positions = self.create_joint_positions_delta(denormalized_action)
else:
joint_positions = self.create_joint_positions_absolute(denormalized_action)
# Set action as command
# Only arm movement implemented right now. TODO: Add gripper action.
self.move_arm_joints(joint_positions, error=self.movement_error, timeout=self.movement_timeout)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""Get normalized observation array from robot sensors.
Returns:
observations (array): Normalized observation array
"""
rospy.logdebug("Start Get Observation ==>")
# Get non-normalized observations
observations = self.retrieve_observations()
# Normalize observations
observations = self.normalize_observations(observations)
rospy.logdebug("END Get Observation ==>"+str(observations))
return observations
def _is_done(self, observations):
"""Return if episode is finished."""
if self.is_collided:
return True
observations = self.denormalize_observations(observations)
current_ee_position = observations[self.observation_id_current_ee_pose:self.observation_id_current_ee_pose+3]
current_ee_quaternion = observations[self.observation_id_current_ee_pose+3:self.observation_id_current_ee_pose+7]
current_ee_pose = self.create_pose(current_ee_position, current_ee_quaternion)
return self.is_in_goal_pose(current_ee_pose=current_ee_pose,
epsilon_position=self.goal_error_position,
epsilon_orientation=self.goal_error_orientation)
def _compute_reward(self, observations, done):
"""Compute reward for this step."""
reward = 0
# We run this twice, once in _is_done and once here. Check whether this is computationally heavy and, if so, cache the results.
observations = self.denormalize_observations(observations)
current_ee_position = observations[self.observation_id_current_ee_pose:self.observation_id_current_ee_pose+3]
current_ee_quaternion = observations[self.observation_id_current_ee_pose+3:self.observation_id_current_ee_pose+7]
current_ee_pose = self.create_pose(current_ee_position, current_ee_quaternion)
position_distance, orientation_distance = self.get_distances_from_desired_pose(current_ee_pose)
if not done:
# Penalty per time step
reward -= self.time_penalty
# Penalty for distance from goal position
reward -= self.distance_penalty_position * position_distance
# Penalty for distance from goal orientation
reward -= self.distance_penalty_orientation * orientation_distance
else:
# The done flag is set either when the goal is reached or a collision occured.
if self.is_collided:
reward -= self.collision_penalty
else:
reward += self.goal_reward
rospy.logdebug("This step reward = " + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward = " + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps = " + str(self.cumulated_steps))
return reward
def _collision_callback(self, data):
"""This function is called if a collision is detected by one of the modrobs collision sensors.
The collision sensor plugins publish the collision information on /self.robot_name_/collisions.
Sets self.is_collided to true.
Outputs an info message.
"""
if not self.is_collided:
self.is_collided = True
rospy.logwarn("Collision detected between {} and {}.".format(
data.collisions[0].parent_contact, data.collisions[0].obstacle_contact
))
def _sphere_pos_callback(self, data):
"""Get the estimated position of the sphere (a Kalman filter is already applied)."""
self.sphere_pos = [data.x, data.y, data.z]
def create_joint_positions_absolute(self, actions):
"""Creates joint_positions from an absolute action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (list): desired joint positions.
"""
joint_positions = np.clip(actions, self.joint_min, self.joint_max)
""" Code for gripper action. Include in code in the future.
elif "hand_to_finger" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
# F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * actions[i] + b
"""
return joint_positions
def create_joint_positions_delta(self, actions):
"""Creates absolute joint_positions from an delta action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (list): desired absolute joint positions.
"""
# Use the last observed joint position (not the current!)
last_arm_joint_positions = self.last_arm_joint_position
joint_positions = np.clip(last_arm_joint_positions + actions, self.joint_min, self.joint_max)
""" Code for gripper action. Include in code in the future.
elif "hand_to_finger" in joint_name:
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
# F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * joint_positions[joint_name] + b
"""
return joint_positions
def retrieve_observations(self):
"""Retrieve all observations (not normalized).
Sets the last observed joint position.
Observation consists of:
- arm_joint_positions
- current ee position (cartesian) and orientation (quaternion)
- goal ee position (cartesian) and orientation (quaternion)
- sphere obstacle position (cartesian)
Returns:
observations (list): non normalized observations, shape = [n_observations]
"""
self.last_arm_joint_position = self.get_arm_joint_positions()
observations = self.last_arm_joint_position + self.get_ee_position() + self.get_ee_quaternion() + self.goal_observation + self.sphere_pos
return observations
# Internal TaskEnv Methods
def normalize_actions(self, actions):
"""Normalize an array of actions.
To normalize an action a, calculate: a_n = (a-c)/r, r!=0
Args:
actions: Action array, shape = [n_actions]
Returns:
normalized_actions: Normalized action array, shape = [n_actions]
"""
normalized_actions = []
if len(actions) == 0:
rospy.logerr("No actions to normalize.")
return normalized_actions
normalized_actions = (actions - self.action_norm[:, 0]) / self.action_norm[:, 1]
return normalized_actions
def denormalize_actions(self, normalized_actions):
"""Denormalize an array of actions.
To denormalize an normalized action a_n, calculate a = (a_n * r) + c
Args:
normalized_actions: Normalized action array, shape = [n_actions]
Returns:
actions: Action array, shape = [n_actions]
"""
actions = []
if len(normalized_actions) == 0:
rospy.logerr("No actions to denormalize.")
return actions
actions = (normalized_actions * self.action_norm[:, 1]) + self.action_norm[:, 0]
return actions
def normalize_observations(self, observations):
"""Normalize an array of observations.
To normalize an observation a, calculate: a_n = (a-c)/r, r!=0
Args:
observations: Action array, shape = [n_observations]
Returns:
normalized_observations: Normalized observation array, shape = [n_observations]
"""
normalized_observations = []
if len(observations) == 0:
rospy.logwarn("No observations to normalize.")
return normalized_observations
normalized_observations = (observations - self.observation_norm[:, 0]) / self.observation_norm[:, 1]
return normalized_observations
def denormalize_observations(self, normalized_observations):
"""Denormalize an array of observations.
To denormalize an normalized observation a_n, calculate a = (a_n * r) + c
Args:
normalized_observations: Normalized observation array, shape = [n_observations]
Returns:
observations: Action array, shape = [n_observations]
"""
observations = []
if len(normalized_observations) == 0:
rospy.logwarn("No observations to denormalize.")
return observations
observations = (normalized_observations * self.observation_norm[:, 1]) + self.observation_norm[:, 0]
return observations
def discretize_observation(self,data,new_ranges):
"""
Discards all laser readings whose index is not a multiple of the new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or np.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif np.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def is_in_goal_pose(self, current_ee_pose, epsilon_position=0.05, epsilon_orientation=0.05):
"""Checks whether the end effector is within a margin of error to its goal pose.
Args:
current_ee_pose (geometry_msgs.msg.Pose): current pose of the end effector
epsilon_position (double): margin of error for position (euclidean distance)
epsilon_orientation (double): margin of error for orientation
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
# Calculate distances
position_distance, orientation_distance = self.get_distances_from_desired_pose(current_ee_pose)
return position_distance <= epsilon_position and orientation_distance <= epsilon_orientation
def get_distances_from_desired_pose(self, current_ee_pose):
"""Calculates the euclidean distance and orientation distance from the current ee pose to the goal pose.
Args:
current_ee_pose (geometry_msgs.msg.Pose): current pose of the end effector
Returns:
position_distance (double): euclidean distance between cartesian ee positions
orientation_distance (double): quaternion distance between the ee quaternions
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
position_distance = self.calculate_ee_position_distance(current_ee_pose)
orientation_distance = self.calculate_gripper_orientation_distance(current_ee_pose)
return position_distance, orientation_distance
def calculate_ee_position_distance(self, current_ee_pose):
"""Calculate euclidean distance between the current and goal end effector position (goal in self.).
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Euclidean distance between the two poses.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
c_ee_pos = current_ee_pose.position
g_ee_pos = self.goal_pose.position
return np.sqrt((c_ee_pos.x - g_ee_pos.x)**2 +
(c_ee_pos.y - g_ee_pos.y)**2 +
(c_ee_pos.z - g_ee_pos.z)**2)
def calculate_ee_orientation_distance(self, current_ee_pose):
"""Calculate distance between the current and goal end effector pose quaternion (goal in self.).
We use http://kieranwynn.github.io/pyquaternion/#distance-computation for the quaternion distance calculation.
Note: This function does not measure the distance on the hypersphere, but it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Distance between the two quaternions.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
q0 = self.convert_pose_to_quaternion(current_ee_pose)
q1 = self.convert_pose_to_quaternion(self.goal_pose)
return Quaternion.absolute_distance(q0, q1)
def calculate_gripper_orientation_distance(self, current_ee_pose):
"""Calculate distance between the current and goal end effector z-axis.
Returns the great circle distance between the z-axis of the end effector and the goal pose.
The rotation around the z-axis is assumed to be irrelevant for gripping objects for this function.
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Distance between the two quaternions.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
q0 = self.convert_pose_to_quaternion(current_ee_pose)
z0 = self.get_new_z_axis(q0)
q1 = self.convert_pose_to_quaternion(self.goal_pose)
z1 = self.get_new_z_axis(q1)
return self.great_circle_distance(z0, z1)
def create_pose(self, position_vec, orientation_vec):
"""Create a geometry_msgs.msg.Pose object from position and orientation.
Args:
position_vec (list): cartesian position [x, y, z]
orientation_vec (list): orientation quaternion [x, y, z, w]
Returns:
geometry_msgs.msg.Pose
"""
pose = Pose()
pose.position.x = position_vec[0]
pose.position.y = position_vec[1]
pose.position.z = position_vec[2]
pose.orientation.x = orientation_vec[0]
pose.orientation.y = orientation_vec[1]
pose.orientation.z = orientation_vec[2]
pose.orientation.w = orientation_vec[3]
return pose
def convert_pose_to_quaternion(self, pose):
"""Convert a geometry_msgs.msg.Pose to a pyquaternion.Quaternion.
TODO: Write utility class and move this to util.
Args:
pose (geometry_msgs.msg.Pose)
Returns:
pyquaternion.Quaternion
"""
return Quaternion(w=pose.orientation.w, x=pose.orientation.x, y=pose.orientation.y, z=pose.orientation.z)
def correct_model_pose(self, pose):
"""Correct the model pose by the pose of the base.
This is needed because the ee poses of moveit are in relation to the base.
TODO: Include orientation!
Args:
pose (geometry_msgs.msg.Pose)
Returns:
corrected copy of pose
"""
new_pose = copy.deepcopy(pose)
new_pose.position.x += self.base_pose.position.x
new_pose.position.y += self.base_pose.position.y
new_pose.position.z += self.base_pose.position.z
return new_pose
def get_model_pose(self, model_name):
"""Return the pose of a gazebo model by name.
Args:
model_name (String): Name of the model (in world file)
Returns:
pose (geometry_msgs.msg.Pose)
"""
if model_name in self.gazebo_model_pose:
return self.gazebo_model_pose[model_name]
else:
rospy.logerr("Model {} does not exist in gazebo world.".format(model_name))
return None
def move_gazebo_model(self, model_name, pose):
""" Move the gazebo model to the desired pose
Args:
model_name (string): name of the model (Must be in topic /gazebo/model_states)
pose (geometry_msgs.msg.Pose)
"""
if model_name in self.gazebo_model_pose:
state_msg = SetModelState()
state_msg.model_name = model_name
state_msg.pose = pose
state_msg.twist = self.gazebo_model_twist[model_name]
state_msg.reference_frame = "world"
result = self.publish_gazebo_model_state(state_msg)
else:
result = False
rospy.logwarn("The goal_pos model does not exist!")
return result
def publish_gazebo_model_state(self, model_state):
"""Publish a gazebo model state.
Args:
model_state (gazebo_msgs.srv.SetModelState)
"""
rospy.wait_for_service('/gazebo/set_model_state')
try:
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
resp = set_state(model_state = model_state)
except rospy.ServiceException as e:
rospy.logerr("Set model state service call failed: %s"%e)
resp = False
return resp
def _check_gazebo_model_state_ready(self):
self.gazebo_model_pose = dict()
self.gazebo_model_twist = dict()
rospy.logdebug("Waiting for {} to be READY...".format(self._gazebo_model_state_topic))
while not self.gazebo_model_pose and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._gazebo_model_state_topic, ModelStates, timeout=5.0)
self.gazebo_model_pose = dict(zip(data.name, data.pose))
self.gazebo_model_twist = dict(zip(data.name, data.twist))
rospy.logdebug("Current {} READY=>".format(self._gazebo_model_state_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting gazebo_model states".format(self._gazebo_model_state_topic))
return self.gazebo_model_pose
def _check_sphere_pos_ready(self):
self.sphere_pos = None
rospy.logdebug("Waiting for {} to be READY...".format(self._sphere_sensor_topic))
while not self.sphere_pos and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._sphere_sensor_topic, Point, timeout=5.0)
self.sphere_pos = [data.x, data.y, data.z]
rospy.logdebug("Current {} READY=>".format(self._sphere_sensor_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting gazebo_model states".format(self._sphere_sensor_topic))
return self.sphere_pos
def _check_sphere_publishers_connection(self):
"""Checks that all the publishers are working.
"""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._sphere_pose_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.loginfo("No susbribers to {} yet so we wait and try again".format(self._sphere_pose_topic))
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
while self._sphere_vel_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.loginfo("No susbribers to {} yet so we wait and try again".format(self._sphere_vel_topic))
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("Sphere publisher connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
def _gazebo_model_state_callback(self, data):
self.gazebo_model_pose = dict(zip(data.name, data.pose))
self.gazebo_model_twist = dict(zip(data.name, data.twist)) | [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~fetch_env.py | import numpy as np
import rospy
from gazebo_msgs.srv import GetWorldProperties, GetModelState
from sensor_msgs.msg import JointState
from openai_ros import robot_gazebo_env
import sys
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import trajectory_msgs.msg
from openai_ros.openai_ros_common import ROSLauncher
class FetchEnv(robot_gazebo_env.RobotGazeboEnv):
def __init__(self, ros_ws_abspath):
rospy.logdebug("========= In Fetch Env")
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="fetch_gazebo",
launch_file_name="put_robot_in_world_HER.launch",
ros_ws_abspath=ros_ws_abspath)
# this object contains all object's positions!!
self.obj_positions = Obj_Pos()
self.controllers_list = []
self.robot_name_space = ""
self.reset_controls = False
super(FetchEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
# We Start all the ROS related Subscribers and publishers
self.JOINT_STATES_SUBSCRIBER = '/joint_states'
self.join_names = ["joint0",
"joint1",
"joint2",
"joint3",
"joint4",
"joint5",
"joint6"]
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.joint_states_sub = rospy.Subscriber(
self.JOINT_STATES_SUBSCRIBER, JointState, self.joints_callback)
self.joints = JointState()
# Start Services
self.move_fetch_object = MoveFetch()
# Wait until it has reached its startup position
self.wait_fetch_ready()
self.gazebo.pauseSim()
# Variables that we give through the constructor.
rospy.logdebug("========= Out Fetch Env")
# RobotGazeboEnv virtual methods
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# FetchEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
self._check_joint_states_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_states_ready(self):
self.joints = None
while self.joints is None and not rospy.is_shutdown():
try:
self.joints = rospy.wait_for_message(
self.JOINT_STATES_SUBSCRIBER, JointState, timeout=1.0)
rospy.logdebug(
"Current "+str(self.JOINT_STATES_SUBSCRIBER)+" READY=>" + str(self.joints))
except:
rospy.logerr(
"Current "+str(self.JOINT_STATES_SUBSCRIBER)+" not ready yet, retrying....")
return self.joints
def joints_callback(self, data):
self.joints = data
def get_joints(self):
return self.joints
def get_joint_names(self):
return self.joints.name
def set_trajectory_ee(self, action):
"""
Sets the Pose of the EndEffector based on the action variable.
The action variable contains the position and orientation of the EndEffector.
See create_action
"""
# Set up a trajectory message to publish.
ee_target = geometry_msgs.msg.Pose()
ee_target.orientation.x = -0.707
ee_target.orientation.y = 0.0
ee_target.orientation.z = 0.707
ee_target.orientation.w = 0.001
ee_target.position.x = action[0]
ee_target.position.y = action[1]
ee_target.position.z = action[2]
result = self.move_fetch_object.ee_traj(ee_target)
return result
def set_trajectory_joints(self, initial_qpos):
positions_array = [None] * 7
positions_array[0] = initial_qpos["joint0"]
positions_array[1] = initial_qpos["joint1"]
positions_array[2] = initial_qpos["joint2"]
positions_array[3] = initial_qpos["joint3"]
positions_array[4] = initial_qpos["joint4"]
positions_array[5] = initial_qpos["joint5"]
positions_array[6] = initial_qpos["joint6"]
self.move_fetch_object.joint_traj(positions_array)
return True
def create_action(self, position, orientation):
"""
position = [x,y,z]
orientation= [x,y,z,w]
"""
gripper_target = np.array(position)
gripper_rotation = np.array(orientation)
action = np.concatenate([gripper_target, gripper_rotation])
return action
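    # Usage sketch (illustrative values): create_action simply concatenates position and
    # orientation; set_trajectory_ee then only reads the first three entries as the
    # end-effector position, e.g.:
    #   action = self.create_action([0.4, 0.1, 0.4], [-0.707, 0.0, 0.707, 0.001])
    #   result = self.set_trajectory_ee(action)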
def create_joints_dict(self, joints_positions):
"""
Based on the Order of the positions, they will be assigned to its joint name
names_in_order:
joint0: 0.0
joint1: 0.0
joint2: 0.0
joint3: -1.5
joint4: 0.0
joint5: 1.5
joint6: 0.0
"""
assert len(joints_positions) == len(
self.join_names), "Wrong number of joints, there should be "+str(len(self.join_names))
joints_dict = dict(zip(self.join_names, joints_positions))
return joints_dict
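    # Example (illustrative values): create_joints_dict([0.0, 0.0, 0.0, -1.5, 0.0, 1.5, 0.0])
    # returns {"joint0": 0.0, "joint1": 0.0, ..., "joint5": 1.5, "joint6": 0.0},
    # i.e. the values are matched to the joint names in the order listed above.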
def get_ee_pose(self):
"""
Returns geometry_msgs/PoseStamped
std_msgs/Header header
uint32 seq
time stamp
string frame_id
geometry_msgs/Pose pose
geometry_msgs/Point position
float64 x
float64 y
float64 z
geometry_msgs/Quaternion orientation
float64 x
float64 y
float64 z
float64 w
"""
self.gazebo.unpauseSim()
gripper_pose = self.move_fetch_object.ee_pose()
self.gazebo.pauseSim()
return gripper_pose
def get_ee_rpy(self):
gripper_rpy = self.move_fetch_object.ee_rpy()
return gripper_rpy
def wait_fetch_ready(self):
"""
# TODO: Make it wait for this position
Desired Position to wait for
(0.44291739197591884,
-0.13691381375054146,
-4.498589757905556e-09,
0.006635104153645881,
0.0018354466563206273,
0.0023142971818792546,
1.3200059164171716,
1.399964660857453,
-0.19981518020955402,
1.719961735970255,
1.0394665737933906e-05,
1.659980987917125,
-6.067103113238659e-06,
0.05001918351472232,
0.050051597253287436)
"""
import time
for i in range(20):
print("WAITING..."+str(i))
sys.stdout.flush()
time.sleep(1.0)
print("WAITING...DONE")
# ParticularEnv methods
# ----------------------------
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
class Obj_Pos(object):
"""
This object maintains the pose and rotation of the cube in a simulation through Gazebo Service
"""
def __init__(self):
world_specs = rospy.ServiceProxy(
'/gazebo/get_world_properties', GetWorldProperties)()
self.time = 0
self.model_names = world_specs.model_names
self.get_model_state = rospy.ServiceProxy(
'/gazebo/get_model_state', GetModelState)
def get_states(self):
"""
Returns the ndarray of pose&rotation of the cube
"""
for model_name in self.model_names:
if model_name == "cube":
data = self.get_model_state(
model_name, "world") # gazebo service client
return np.array([
data.pose.position.x,
data.pose.position.y,
data.pose.position.z,
data.pose.orientation.x,
data.pose.orientation.y,
data.pose.orientation.z
])
class MoveFetch(object):
def __init__(self):
rospy.logdebug("===== In MoveFetch")
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("arm")
rospy.logdebug("===== Out MoveFetch")
def ee_traj(self, pose):
self.group.set_pose_target(pose)
result = self.execute_trajectory()
return result
def joint_traj(self, positions_array):
self.group_variable_values = self.group.get_current_joint_values()
self.group_variable_values[0] = positions_array[0]
self.group_variable_values[1] = positions_array[1]
self.group_variable_values[2] = positions_array[2]
self.group_variable_values[3] = positions_array[3]
self.group_variable_values[4] = positions_array[4]
self.group_variable_values[5] = positions_array[5]
self.group_variable_values[6] = positions_array[6]
self.group.set_joint_value_target(self.group_variable_values)
result = self.execute_trajectory()
return result
def execute_trajectory(self):
"""
        Assuming that the trajectories have been set on the object appropriately,
        makes a plan to the destination in homogeneous space (x, y, z, yaw, pitch, roll)
        and returns the result of the execution
"""
self.plan = self.group.plan()
result = self.group.go(wait=True)
return result
def ee_pose(self):
gripper_pose = self.group.get_current_pose()
return gripper_pose
    def ee_rpy(self):
gripper_rpy = self.group.get_current_rpy()
return gripper_rpy
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~human_walks~turtlebot2_human.py | import rospy
import numpy as np
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
import sys
class TurtleBot2HumanEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_abs_env_var = rospy.get_param("/turtlebot2/ros_abs_env_var", None)
try:
ros_ws_abspath = os.environ[ros_abs_env_var]
except:
print("Please set the environment variable {}".format(ros_abs_env_var))
sys.exit(1)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="start_world_human.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/human_walks/config",
yaml_file_name="turtlebot2_human.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2HumanEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-np.inf, np.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.linear_forward_speed = rospy.get_param('/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot2/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/turtlebot2/new_ranges')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/turtlebot2/desired_pose/x")
self.desired_point.y = rospy.get_param("/turtlebot2/desired_pose/y")
self.desired_point.z = rospy.get_param("/turtlebot2/desired_pose/z")
self.max_x = np.inf
self.min_x = -np.inf
self.max_y = np.inf
self.min_y = -np.inf
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>" + str(len(laser_scan.ranges)))
num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
# Laser scan observations
high = np.full(self.new_ranges, self.max_laser_value)
low = np.full(self.new_ranges, self.min_laser_value)
# X, Y pos observation
high = np.append(high, np.array([self.max_x, self.max_y]))
low = np.append(low, np.array([self.min_x, self.min_y]))
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
odometry = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(odometry.pose.pose.position)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
:param action: The action integer that set s what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
To know which Variables we have acces to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_laser_scan = self.discretize_observation( laser_scan,
self.new_ranges
)
# We get the odometry so that SumitXL knows where it is.
odometry = self.get_odom()
x_position = odometry.pose.pose.position.x
y_position = odometry.pose.pose.position.y
# We round to only two decimals to avoid very big Observation space
odometry_array = [round(x_position, 2),round(y_position, 2)]
# We only want the X and Y position and the Yaw
observations = discretized_laser_scan + odometry_array
rospy.logdebug("Observations==>"+str(observations))
rospy.logdebug("END Get Observation ==>")
return observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("TurtleBot2 is Too Close to wall==>")
else:
rospy.logerr("TurtleBot2 didnt crash at least ==>")
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
MAX_X = 6.0
MIN_X = -1.0
MAX_Y = 3.0
MIN_Y = -3.0
# We see if we are outside the Learning Space
if current_position.x <= MAX_X and current_position.x > MIN_X:
if current_position.y <= MAX_Y and current_position.y > MIN_Y:
rospy.logdebug("TurtleBot Position is OK ==>["+str(current_position.x)+","+str(current_position.y)+"]")
# We see if it got to the desired point
if self.is_in_desired_position(current_position):
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in Y Pos ==>"+str(current_position.x))
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in X Pos ==>"+str(current_position.x))
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
distance_from_des_point = self.get_distance_from_desired_point(current_position)
distance_difference = distance_from_des_point - self.previous_distance_from_des_point
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward += self.forwards_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
reward += 0
else:
if self.is_in_desired_position(current_position):
reward = self.end_episode_points
else:
reward = -1*self.end_episode_points
self.previous_distance_from_des_point = distance_from_des_point
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self,data,new_ranges):
"""
        Keeps only every (len(ranges)/new_ranges)-th laser reading and discards
        the rest.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or np.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif np.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
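    # Example (illustrative values): with len(data.ranges) == 720 and new_ranges == 5,
    # mod == 144, so only the readings at indices 0, 144, 288, 432 and 576 are kept
    # (infinite readings map to max_laser_value, NaNs to min_laser_value).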
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
        It returns True if the current position is similar to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
        :param current_position:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = np.array((pstart.x, pstart.y, pstart.z))
b = np.array((p_end.x, p_end.y, p_end.z))
distance = np.linalg.norm(a - b)
return distance
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~modrob_env_moveit.py | import numpy as np
import rospy
import time
import sys
import moveit_commander
import moveit_msgs.msg
from tf.transformations import euler_from_quaternion
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from gazebo_msgs.msg import LinkStates
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from controller_manager_msgs.srv import SwitchController
from openai_ros.openai_ros_common import ROSLauncher
from pyquaternion import Quaternion
class ModRobEnvMoveIt(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all modular robot environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new ModRob (modular robot) environment with MoveIt! control.
        ModRob doesn't use controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        <<--- Can we use the controller manager in the future? --->>
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason
        2) If the simulation was running already for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List: TODO
*
Actuators Topic List: TODO
*
Args:
"""
rospy.loginfo("Initialize ModRob environment...")
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
self.ros_ws_abspath = ros_ws_abspath
self._init_robot(ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible variables
self.controllers_list = []
        # It doesn't use a namespace
self.robot_namespace = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ModRobEnvMoveIt, self).__init__(controllers_list=self.controllers_list,
robot_namespace=self.robot_namespace,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._joint_states_topic = '/' + self.robot_name_ + '/joint_states'
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber(self._joint_states_topic, JointState, self._joint_state_callback)
self._check_all_sensors_ready()
## Controlling the robot
self.moveit_controllers = ['arm_controller', 'gripper_controller']
self.ros_controllers = ['arm_position_controller', 'gripper_position_controller']
# First initialize MoveIt! Service
self.is_moveit_controller = True
self.move_object = MoveModrob(joint_states_topic = self._joint_states_topic)
# Then switch to ROS controller connections
self._switch_controllers()
self._init_joint_publisher()
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.loginfo("Finished ModRob INIT...")
def _init_robot(self,ros_ws_abspath):
"""Calls launch file of robot."""
self._get_base_pose_parameter()
quat_base = self.convert_pose_to_quaternion(self.base_pose)
        # tf's euler_from_quaternion expects the quaternion in (x, y, z, w) order
        (base_r, base_p, base_y) = euler_from_quaternion([quat_base.x, quat_base.y, quat_base.z, quat_base.w])
launch_arg_string = "robot_name:={} moveit:=true x:={} y:={} z:={} roll:={} pitch:={} yaw:={}".format(
self.robot_name_, self.base_pose.position.x, self.base_pose.position.y, self.base_pose.position.z,
base_r, base_p, base_y
)
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath,
launch_arg_string=launch_arg_string)
def _get_base_pose_parameter(self):
"""Load base pose from parameter server."""
# Init robot position
base_pose_position = rospy.get_param('/modrob/base_pose').get("position")
base_pose_orientation = rospy.get_param('/modrob/base_pose').get("orientation")
self.base_pose = self.create_pose([base_pose_position.get("x"), base_pose_position.get("y"), base_pose_position.get("z")],
[base_pose_orientation.get("x"), base_pose_orientation.get("y"), base_pose_orientation.get("z"), base_pose_orientation.get("w")])
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _switch_controllers(self):
"""Switch between moveit and ROS controllers."""
if self.is_moveit_controller:
# Switch to ROS controllers
start_controllers = self.ros_controllers
stop_controllers = self.moveit_controllers
else:
# Switch to moveit control
start_controllers = self.moveit_controllers
stop_controllers = self.ros_controllers
rospy.wait_for_service('/' + self.robot_name_ + '/controller_manager/switch_controller')
try:
switch_controller_call = rospy.ServiceProxy('/' + self.robot_name_ + '/controller_manager/switch_controller', SwitchController)
switch_controller_call(start_controllers = start_controllers,
stop_controllers = stop_controllers,
strictness = 2,
start_asap = True)
# Switch boolean
self.is_moveit_controller = not self.is_moveit_controller
rospy.loginfo("Switched controllers from {} to {}".format(stop_controllers, start_controllers))
"""
switch_msg = SwitchController()
switch_msg.start_controllers = ['arm_position_controller', 'gripper_position_controller']
switch_msg.stop_controllers = ['arm_controller', 'gripper_controller']
switch_msg.strictness = 2
switch_msg.start_asap = True
"""
except rospy.ServiceException as e:
rospy.logerr("Switch controllers service call failed: %s"%e)
def _init_joint_publisher(self):
"""Initialize the joint controller publisher with the joint list.
Relys on joint sensors being published.
"""
self._arm_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/arm_position_controller/command', Float64MultiArray, queue_size=10)
self._gripper_joint_publisher = rospy.Publisher('/' + self.robot_name_ + '/gripper_position_controller/command', Float64MultiArray, queue_size=10)
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_joint_state_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_publishers_connection(self):
"""Checks that all the publishers are working.
"""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._arm_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/arm_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
while self._gripper_joint_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/gripper_position_controller/command yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("All joint position controller publishers connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
def _check_joint_state_ready(self):
self.joint_state = None
rospy.logdebug("Waiting for {} to be READY...".format(self._joint_states_topic))
while self.joint_state is None and not rospy.is_shutdown():
try:
self.joint_state = rospy.wait_for_message(self._joint_states_topic, JointState, timeout=5.0)
rospy.logdebug("Current {} READY=>".format(self._joint_states_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting joint states".format(self._joint_states_topic))
return self.joint_state
def _joint_state_callback(self, data):
# Often, there is an empty joint state message.
if len(data.velocity) > 0:
self.joint_state = data
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_arm_joints(self, joint_positions, error=0.05, timeout=3.0):
"""Control the arm joints of the robot.
The control waits until timeout or desired position reached within a margin of error.
Args:
joint_positions (list or np.array): list of desired joint positions
error (double): absolute error allowed between each desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_positions())==len(joint_positions), "Length of desired arm joint positions does not match."
if self.is_moveit_controller:
result = self.move_object.move_joints(joint_positions)
if not result:
rospy.logwarn("Cannot execute path to joint positions: {}!".format(joint_positions))
else:
msg_dim = [MultiArrayDimension(label="joint_positions", size = len(joint_positions), stride = len(joint_positions))]
msg_layout = MultiArrayLayout(dim = msg_dim, data_offset = 0)
msg = Float64MultiArray(layout = msg_layout, data=joint_positions)
self._arm_joint_publisher.publish(msg)
# Wait for a certain amount of time to get to the desired position.
self.wait_for_joints_to_get_there(joint_positions, error=error, timeout=timeout)
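    # Usage sketch (illustrative values): command all arm joints to zero and wait up to
    # 3 s for them to arrive within 0.05 rad per joint:
    #   self.move_arm_joints([0.0] * len(self.get_arm_joint_names()), error=0.05, timeout=3.0)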
def wait_for_joints_to_get_there(self, desired_joint_positions, error=0.05, timeout=3.0):
"""Wait until target joint position is reached within an error or
until the timout is reached.
Set timeout to 0 to wait until joint position is reached.
Args:
desired_joint_positions (list or np.array): list of desired joint positions
error (double): absolute error allowed between each desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert len(self.get_arm_joint_positions())==len(desired_joint_positions), "Length of desired arm joint positions does not match."
time_waiting = 0.0
frequency = 100.0 # Fine tune this parameter.
are_equal = False
is_timeout = False
# ROS will try to keep this frequency of the following loop
rate = rospy.Rate(frequency)
rospy.logdebug("Waiting for joint to get to the position")
while not are_equal and not is_timeout and not rospy.is_shutdown():
are_equal = np.allclose(self.get_arm_joint_positions(), desired_joint_positions, atol=error)
rate.sleep()
if timeout > 1e-5:
time_waiting += 1.0 / frequency
is_timeout = time_waiting > timeout
rospy.logdebug("Joints are in the desired position with an error of " + str(error))
def move_arm_ee(self, ee_pose, error=0.05, timeout=3.0):
"""Control the arm joints of the robot.
The control waits until timeout or desired position reached within a margin of error.
The control automatically switches to moveit control if it is in ROS control mode before and switches back afterwards.
Args:
ee_pose (geometry_msgs.msg.Pose): desired end effector pose
error (double): absolute error allowed between each desired and reached end effector pose
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert isinstance(ee_pose, Pose), "ee_pose is not of type geometry_msgs.msg.Pose!"
# We need moveit control to move to an ee pose.
controllers_switched = False
if not self.is_moveit_controller:
self._switch_controllers()
controllers_switched = True
result = self.move_object.move_ee(ee_pose)
if not result:
rospy.logwarn("Cannot execute path to ee pose: {}!".format(ee_pose))
# Wait for a certain amount of time to get to the desired position.
self.wait_for_ee_to_get_there(ee_pose, error=error, timeout=timeout)
# Switch back to ROS control.
if controllers_switched:
self._switch_controllers()
def wait_for_ee_to_get_there(self, desired_ee_pose, error=0.05, timeout=3.0):
"""Wait until target end effector pose is reached within an error or
until the timout is reached.
Set timeout to 0 to wait until joint position is reached.
Args:
desired_ee_pose (geometry_msgs.msg.Pose): desired end effector pose
error (double): absolute error allowed between each desired and reached end effector pose.
The error is both for cartesian 3D distance and orientation distance.
(Maybe separate into two if necessary)
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
assert isinstance(desired_ee_pose, Pose), "desired_ee_pose is not of type geometry_msgs.msg.Pose!"
time_waiting = 0.0
frequency = 100.0 # Fine tune this parameter.
are_equal = False
is_timeout = False
# ROS will try to keep this frequency of the following loop
rate = rospy.Rate(frequency)
rospy.logdebug("Waiting for joint to get to the position")
while not are_equal and not is_timeout and not rospy.is_shutdown():
cartesian_distance = self.move_object.calculate_ee_cartesian_distance(desired_ee_pose)
orientation_distance = self.move_object.calculate_ee_orientation_distance(desired_ee_pose)
are_equal = (cartesian_distance <= error) and (orientation_distance <= error)
rate.sleep()
if timeout == 0.0:
# Dismiss time constraint and wait until target reached
time_waiting += 0.0
else:
time_waiting += 1.0 / frequency
is_timeout = time_waiting > timeout
rospy.logdebug("Joints are in the desired position with an erro of " + str(error))
def get_arm_joint_positions(self):
"""Return a list of arm joint positions in rad.
        The joint values are in the same order as get_arm_joint_names()."""
joint_position_dict = dict(zip(self.joint_state.name, self.joint_state.position))
return [joint_position_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
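    # Note: /joint_states does not guarantee the same ordering as the MoveIt! arm group,
    # so the dict(zip(...)) lookup above re-sorts the measured values into the order of
    # get_arm_joint_names(). Example (illustrative): names ["joint1", "joint0"] with
    # positions [0.3, 0.1] become [0.1, 0.3] when the arm group order is ["joint0", "joint1"].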
def get_arm_joint_velocities(self):
"""Return a list of arm joint angular velocities in rad/s.
        The joint values are in the same order as get_arm_joint_names()."""
joint_velocity_dict = dict(zip(self.joint_state.name, self.joint_state.velocity))
return [joint_velocity_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_efforts(self):
"""Return a list of arm joint momentum in Nm.
The joint values are in the same oder as get_arm_joint_names()."""
joint_effort_dict = dict(zip(self.joint_state.name, self.joint_state.effort))
return [joint_effort_dict.get(joint_name) for joint_name in self.get_arm_joint_names()]
def get_arm_joint_names(self):
"""Return list of names in arm joint group."""
return self.move_object.get_arm_joints()
def get_joint_state(self):
"""Return the whole joint state topic dictionary (not recommended for moveit usage)."""
return self.joint_state
def get_ee_pose(self):
"""Return the pose of the end effector."""
return self.move_object.get_ee_pose()
def get_ee_position(self):
"""Return the cartesian position of the end effector."""
return self.move_object.get_ee_position()
def get_ee_rpy(self):
"""Return the roll, pitch, yaw values of the end effector."""
return self.move_object.get_ee_rpy()
def get_ee_quaternion(self):
"""Return the current end effector orientation quaternion (x, y, z, w)."""
return self.move_object.get_ee_quaternion()
def reinit_sensors(self):
"""
This method is for the tasks so that when reseting the episode
the sensors values are forced to be updated with the real data and
<<-- Only needed when reset is set to SIMULATION.
<<-- TODO: Implement this ?
"""
def create_pose(self, position_vec, orientation_vec):
"""Create a geometry_msgs.msg.Pose object from position and orientation.
Args:
position_vec (list): cartesian position [x, y, z]
orientation_vec (list): orientation quaternion [x, y, z, w]
Returns:
geometry_msgs.msg.Pose
"""
pose = Pose()
pose.position.x = position_vec[0]
pose.position.y = position_vec[1]
pose.position.z = position_vec[2]
pose.orientation.x = orientation_vec[0]
pose.orientation.y = orientation_vec[1]
pose.orientation.z = orientation_vec[2]
pose.orientation.w = orientation_vec[3]
return pose
def convert_pose_to_quaternion(self, pose):
"""Convert a geometry_msgs.msg.Pose to a pyquaternion.Quaternion.
TODO: Write utility class and move this to util.
Args:
pose (geometry_msgs.msg.Pose)
Returns:
pyquaternion.Quaternion
"""
return Quaternion(w=pose.orientation.w, x=pose.orientation.x, y=pose.orientation.y, z=pose.orientation.z)
def get_new_x_axis(self, quaternion):
"""Return the new x axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new x-axis
"""
return quaternion.rotation_matrix[:,0]
def get_new_y_axis(self, quaternion):
"""Return the new y axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new y-axis
"""
return quaternion.rotation_matrix[:,1]
def get_new_z_axis(self, quaternion):
"""Return the new z axis after a quaternion rotation.
Args:
quaternion (Quaternion): The quaternion used for rotation
Returns:
np.array (shape: [3]): The new z-axis
"""
return quaternion.rotation_matrix[:,2]
def great_circle_distance(self, n1, n2):
"""Return the great circle distance between two points on a sphere given by normal vectors.
See https://en.wikipedia.org/wiki/Great-circle_distance#Vector_version
Args:
n1 (np.array, shape: [3]): Normal vector 1
n2 (np.array, shape: [3]): Normal vector 2
Returns:
double: Great circle distance
"""
return np.arccos(np.dot(n1, n2))
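    # Example (illustrative): for the orthogonal unit vectors n1 = [1, 0, 0] and
    # n2 = [0, 1, 0], np.dot(n1, n2) == 0, so the great circle distance is arccos(0) = pi/2.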
class MoveModrob(object):
"""Class for communicating with MoveIt!
There are 2 types of goal targets:
a JointValueTarget (aka JointStateTarget) specifies an absolute value for each joint (angle for rotational joints or position for prismatic joints).
a PoseTarget (Position, Orientation, or Pose) specifies the pose of one or more end effectors (and the planner can use any joint values that reaches the pose(s)).
See here (http://docs.ros.org/en/diamondback/api/geometry_msgs/html/msg/Pose.html) for ee pose definition.
"""
def __init__(self, joint_states_topic="/joint_states"):
rospy.logdebug("===== In MoveModrob")
init_joint_state_topic = ['joint_states:={}'.format(joint_states_topic)]
moveit_commander.roscpp_initialize(init_joint_state_topic)
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.arm_group = moveit_commander.MoveGroupCommander("arm")
self.gripper_group = moveit_commander.MoveGroupCommander("gripper")
rospy.logdebug("===== Out MoveModrob")
def move_ee(self, pose_target):
"""Set a new end effector target pose and move to target.
Args:
pose_target (geometry_msgs.msg.Pose): a PoseTarget (Position, Orientation, or Pose) for the end effector
example: pose_target = geometry_msgs.msg.Pose()
pose_target.orientation.w = 1.0
pose_target.position.x = 0.4
pose_target.position.y = 0.1
pose_target.position.z = 0.4
Returns:
result (Boolean): Whether trajectory is executable
"""
assert isinstance(pose_target, Pose), "pose_target is not of type geometry_msgs.msg.Pose!"
self.arm_group.set_pose_target(pose_target)
result = self.execute_trajectory()
return result
def move_joints(self, joint_value_target):
"""Set a new joint value target and move to target.
Args:
joint_value_target (list): a JointValueTarget (aka JointStateTarget) for the joints of the arm.
Returns:
result (Boolean): Whether trajectory is executable
"""
self.arm_group.set_joint_value_target(joint_value_target)
result = self.execute_trajectory()
return result
def execute_trajectory(self):
"""Plan a path to the previously set goal position and execute it.
Note: the robot’s current joint state must be within some tolerance of the first waypoint in the RobotTrajectory or execute() will fail.
Returns:
result (Boolean): Whether trajectory is executable
"""
self.plan = self.arm_group.plan()
result = self.arm_group.go(wait=False)
return result
def stop_execution(self):
"""Stop the arm movement."""
self.arm_group.stop()
def clear_all_targets(self):
"""Clear all targets for the arm movement."""
self.arm_group.clear_pose_targets()
def get_ee_pose(self):
"""Return the current end effector pose."""
return self.arm_group.get_current_pose().pose
def get_ee_position(self):
"""Return the cartesian position of the end effector."""
ee_position = self.get_ee_pose().position
return [ee_position.x, ee_position.y, ee_position.z]
def get_ee_quaternion(self):
"""Return the current end effector orientation quaternion (x, y, z, w)."""
ee_orientation = self.get_ee_pose().orientation
return [ee_orientation.x, ee_orientation.y, ee_orientation.z, ee_orientation.w]
def get_ee_rpy(self):
"""Return the current end effector roll pitch yaw values."""
return self.arm_group.get_current_rpy()
def get_current_arm_joint_values(self):
"""Return the current arm joint positions."""
return self.arm_group.get_current_joint_values()
def get_current_gripper_joint_values(self):
"""Return the current arm joint positions."""
return self.gripper_group.get_current_joint_values()
def get_arm_joints(self):
"""Return list of names of joints in robot arm group."""
return self.arm_group.get_active_joints()
def get_gripper_joints(self):
"""Return list of names of joints in gripper group."""
return self.gripper_group.get_active_joints()
def calculate_ee_cartesian_distance(self, pose_target):
"""Calculate cartesian position distance between current end effector and given goal pose.
Args:
pose_target (geometry_msgs.msg.Pose)
Returns:
Cartesian distance in meter.
"""
assert isinstance(pose_target, Pose), "pose_target is not of type geometry_msgs.msg.Pose!"
ee_position = self.get_ee_pose().position
        return np.sqrt((ee_position.x - pose_target.position.x)**2 +
                       (ee_position.y - pose_target.position.y)**2 +
                       (ee_position.z - pose_target.position.z)**2)
def calculate_ee_orientation_distance(self, pose_target):
"""Calculate distance between the current end effector pose quaternion and the quaternion given in pose_target.
We use http://kieranwynn.github.io/pyquaternion/#distance-computation for the quaternion distance calculation.
Note: This function does not measure the distance on the hypersphere, but it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
Args:
pose_target (geometry_msgs.msg.Pose)
Returns:
Distance between the two quaternions.
"""
assert isinstance(pose_target, Pose), "pose_target is not of type geometry_msgs.msg.Pose!"
ee_orientation = self.get_ee_pose().orientation
q0 = self.convert_pose_to_quaternion(ee_orientation)
        q1 = self.convert_pose_to_quaternion(pose_target.orientation)
return Quaternion.absolute_distance(q0, q1)
def convert_pose_to_quaternion(self, pose):
"""Convert a geometry_msgs.msg.Pose to a pyquaternion.Quaternion.
TODO: Write utility class and move this to util.
"""
return Quaternion(w=pose.w, x=pose.x, y=pose.y, z=pose.z)
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~human_modrob~modrob_safe_human.py | import os
from pickle import EMPTY_DICT
import numpy as np
import copy
from numpy.core.defchararray import join
import rospy
import time
import sys
from gym import spaces
from pyquaternion import Quaternion
from openai_ros.robot_envs.modrob_env_path_following import ModRobEnvPathFollowing
from gym.envs.registration import register
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
from std_msgs.msg import Empty
from std_msgs.msg import Bool
from std_msgs.msg import Float64
from gazebo_msgs.msg import ModelStates, ModelState
from gazebo_msgs.srv import SetModelState
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
from modrob_simulation.msg import Collision, Collisions
from custom_robot_msgs.msg import PositionsHeadered
class ModRobSafeHumanEnv(ModRobEnvPathFollowing):
def __init__(self):
"""
This Task Env is designed for having the ModRob with an example moving obstacle.
It will learn how to finish tasks without colliding with the obstacle.
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_abs_env_var = rospy.get_param("/modrob/ros_abs_env_var", None)
try:
ros_ws_abspath = os.environ[ros_abs_env_var]
except:
print("Please set the environment variable {}".format(ros_abs_env_var))
sys.exit(1)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="initialisation",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/human_modrob/config",
yaml_file_name="modrob_safe_human_random_" + self.robot_name_ + ".yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(ModRobSafeHumanEnv, self).__init__(ros_ws_abspath)
## Load in environment variables
self._get_env_variables()
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-np.inf, np.inf)
## Set action and observation space
# Continuous action space.
# All actions should range from 0 to 1. This improves training.
# Right now, only arm action and observation is implemented.
# TODO: Add gripper support.
self.n_actions = len(self.get_arm_joint_names())
# Number of observations
self.n_observations = len(self.observation_norm)
self.action_space = spaces.Box(low=-1, high=1, shape=(self.n_actions,))
self.observation_space = spaces.Box(low=0, high=1, shape=(self.n_observations,))
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
self.cumulated_steps = 0.0
## Unpause sim and start all subscribers and publishers
self.gazebo.unpauseSim()
# Collision detection topic
self._collision_topic = '/' + self.robot_name_ + '/collisions'
# ROS subscriber to robot collision
rospy.Subscriber(self._collision_topic, Collisions, self._collision_callback)
self._human_joint_sensor_topic = '/human_joint_pos'
# ROS subscriber and publisher for human animation
rospy.Subscriber(self._human_joint_sensor_topic, PositionsHeadered, self._human_joint_callback)
self._check_human_joint_ready()
self._init_human_animation_publisher()
self._check_human_publishers_connection()
## Utility for moving start and goal position visual models
# CAREFUL: The getter method has an s at the end while the setter method doesn't!
# Ros subscriber to gazebo model state
self._gazebo_model_state_topic = '/gazebo/model_states'
rospy.Subscriber(self._gazebo_model_state_topic, ModelStates, self._gazebo_model_state_callback)
self._check_gazebo_model_state_ready()
# Get the base pose
self.base_pose = self._get_base_pose()
self._send_initialization()
is_init = None
while is_init is None and not rospy.is_shutdown():
try:
is_init = rospy.wait_for_message("/initialisation", Empty, timeout=5.0)
rospy.logdebug("Current {} READY=>".format("/initialisation"))
except:
rospy.logerr("Current {} not ready yet, retrying for getting joint states".format("/initialisation"))
self.gazebo.pauseSim()
def _get_env_variables(self):
"""Load in environment variables from yaml.
Relevant variables:
joint_min: Minimal angle for all joints
joint_max: Maximal angle for all joints
joint_max_delta: Max theoretically allowed movement per execution step (in rad)
joint_max_v: Maximal velocity of robot joints (for observation only)
gripper1_min: Minimal position of gripper part 1
gripper1_max: Maximal position of gripper part 1
gripper2_min: Minimal position of gripper part 2
gripper2_max: Maximal position of gripper part 2
gripper_max_delta: Maximal theoretically allowed movement per execution step (in m)
ee_limits: Overapproximative end effector position limits
.max.x, .y, .z
.min.x, .y, .z
base_pose: geometry_msgs.msg.Pose for base
use_delta_actions: True: Use differential position commands, False: Use absolute position commands
movement_error: Precision maximum for regular movement (can be arbitrarely small)
movement_timeout: Time waited until next movement execution
init_error: Precision for start position
goal_error: 0.1 # Precision for goal reached
init_joint_position: [joint1, ..., jointN] initial position
init_hand_to_finger1_position: Initial position for gripper part 1 - TODO: Change this to dynamically.
init_hand_to_finger2_position: Initial position for gripper part 2 - TODO: Change this to dynamically.
use_goal_randomization: If false: use fix goal_pose, if true: use random position in goal_area
goal_pose: geometry_msgs.msg.Pose for end effector goal
goal_area: min and max values for goal position
distance_penalty_position: Reward penalty for position distance
distance_penalty_orientation: Reward penalty for orientation distance
time_penalty: Time penalty for every step
goal_reward: Points given when reaching the goal
collision_penalty: Penalty when colliding with an object
critical_collision_penalty: Penalty when critically colliding with human
motion_capture/* : Motion capture information. See config in human_reach package.
human_motion_pos_random: Randomize position of animation uniformly by +/- this value
human_motion_time_random: Randomize the starting time of the animation uniformly by this value [0; val]
safety_distance_ground: Distance to ground for collision checking
"""
## Determine the normalization constants for all observations and actions.
# action_norm consists of two columns (c, r) and one row per controllable joint.
# To normalize an action a, calculate: a_n = (a-c)/r, r!=0
# To denormalize an normalized action a_n, calculate a = (a_n * r) + c
# Since we are using the tanh as activation for actions, we normalize a to [-1; 1]
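        # Worked example (illustrative numbers): for an absolute joint action with
        # joint_min = -pi and joint_max = pi, first _c = -pi and _r = 2*pi, then
        # c = _c + _r/2 = 0 and r = _r/2 = pi, so a normalized action a_n = 0.5
        # denormalizes to a = a_n * r + c = pi/2.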
self.action_norm = []
self.observation_norm = []
# Max movements
self.joint_max_delta = rospy.get_param('/modrob/joint_max_delta')
self.gripper_max_delta = rospy.get_param('/modrob/gripper_max_delta')
self.use_delta_actions = rospy.get_param('/modrob/use_delta_actions')
# TODO: Read these from urdf file.
self.joint_min = rospy.get_param('/modrob/joint_min')
self.joint_max = rospy.get_param('/modrob/joint_max')
assert self.joint_max-self.joint_min != 0, "Joint difference is zero"
self.gripper1_min = rospy.get_param('/modrob/gripper1_min')
self.gripper1_max = rospy.get_param('/modrob/gripper1_max')
assert self.gripper1_max-self.gripper1_min != 0, "Gripper 1 difference is zero"
self.gripper2_min = rospy.get_param('/modrob/gripper2_min')
self.gripper2_max = rospy.get_param('/modrob/gripper2_max')
assert self.gripper2_max-self.gripper2_min != 0, "Gripper 2 difference is zero"
self.joint_max_v = rospy.get_param('/modrob/joint_max_v')
# First entries are joint positions
self.observation_id_joints = 0 # defines where the joint values start in observation
for joint_name in self.get_arm_joint_names():
if self.use_delta_actions:
_c = -1*self.joint_max_delta
_r = 2*self.joint_max_delta
else:
_c = self.joint_min
_r = self.joint_max-self.joint_min
# From [0; 1] normalization to [-1; 1]
c = _c + _r/2
r = _r/2
self.action_norm.append([c, r])
# for current joint values
self.observation_norm.append([self.joint_min, self.joint_max-self.joint_min])
# for goal joint values
self.observation_norm.append([self.joint_min, self.joint_max-self.joint_min])
""" Gripper normalization. Add back to code in the future.
elif "hand_to_finger1" in joint_name:
# Only one action for both grippers (they move together)
if self.use_delta_actions:
self.action_norm.append([-1*self.gripper_max_delta, 2*self.gripper_max_delta])
else:
self.action_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.observation_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
"""
# defines where the current joint values values start in observation
self.observation_id_current_joint_values = 0
self.observation_id_goal_joint_values = len(self.get_arm_joint_names())
self.observation_id_current_joint_velocities = len(self.observation_norm)
for joint_name in self.get_arm_joint_names():
self.observation_norm.append([-self.joint_max_v, 2*self.joint_max_v])
# Add normalization for current ee position
self.observation_id_current_ee_pose = len(self.observation_norm) # defines where the current ee pose values start in observation
# We don't normalize cartesian positions anymore.
# You can experiment with this.
"""
ee_limits = rospy.get_param('/modrob/ee_limits')
self.observation_norm.append([ee_limits.get("min").get("x"), ee_limits.get("max").get("x")-ee_limits.get("min").get("x")])
self.observation_norm.append([ee_limits.get("min").get("y"), ee_limits.get("max").get("y")-ee_limits.get("min").get("y")])
self.observation_norm.append([ee_limits.get("min").get("z"), ee_limits.get("max").get("z")-ee_limits.get("min").get("z")])
"""
self.observation_norm.append([0, 1]) #x: We don't normalize this
self.observation_norm.append([0, 1]) #y: We don't normalize this
self.observation_norm.append([0, 1]) #z: We don't normalize this
# We start with just the wrist position as human information.
# This can be extended by other human measurements.
# It should be investigated if a recurrent network structure benefits the RL in this area.
human_joint_names = rospy.get_param('/motion_capture/joint_names')
human_extremities = rospy.get_param('/motion_capture/extremities')
# The id of relevant joints in the motion capture measurements
self.human_joint_meas_ids = []
self.observation_id_human_joints = len(self.observation_norm)
for extremity in human_extremities:
key = next(iter(extremity))
# An extremity in the config has 3 elements - the last one is the extremity itself
wrist = extremity[key][2]
wrist_joint_name = wrist[next(iter(wrist))]
assert wrist_joint_name in human_joint_names
self.human_joint_meas_ids.append(human_joint_names.index(wrist_joint_name))
self.observation_norm.append([0, 1]) #x: We don't normalize this
self.observation_norm.append([0, 1]) #y: We don't normalize this
self.observation_norm.append([0, 1]) #z: We don't normalize this
# Add human head position
self.human_joint_meas_ids.append(human_joint_names.index("head"))
self.observation_norm.append([0, 1]) #x: We don't normalize this
self.observation_norm.append([0, 1]) #y: We don't normalize this
self.observation_norm.append([0, 1]) #z: We don't normalize this
self.n_human_obs = 3
# Add one entry that indicates collisions
self.observation_id_collision = len(self.observation_norm)
self.observation_norm.append([0, 1])
self.observation_id_critical_collision = len(self.observation_norm)
self.observation_norm.append([0, 1])
self.action_norm = np.array(self.action_norm)
self.observation_norm = np.array(self.observation_norm)
# Movement settings
self.movement_error = rospy.get_param('/modrob/movement_error')
self.movement_timeout = rospy.get_param('/modrob/movement_timeout')
self.init_error = rospy.get_param('/modrob/init_error')
self.goal_error = rospy.get_param('/modrob/goal_error')
# Set initial joint positions
# Right now, only arm position movement implemented!
# TODO: Add init gripper position
self.init_arm_joint_position = []
if rospy.has_param("/modrob/init_joint_position"):
self.init_arm_joint_position = rospy.get_param("/modrob/init_joint_position")
assert(len(self.init_arm_joint_position) == len(self._arm_joint_names))
# Goal pose
self.use_goal_randomization = False
if (rospy.has_param('/modrob/use_goal_randomization')):
self.use_goal_randomization = rospy.get_param('/modrob/use_goal_randomization')
if self.use_goal_randomization:
self.min_goal_pos_x = rospy.get_param('/modrob/goal_area/position/x_min')
self.min_goal_pos_y = rospy.get_param('/modrob/goal_area/position/y_min')
self.min_goal_pos_z = rospy.get_param('/modrob/goal_area/position/z_min')
self.max_goal_pos_x = rospy.get_param('/modrob/goal_area/position/x_max')
self.max_goal_pos_y = rospy.get_param('/modrob/goal_area/position/y_max')
self.max_goal_pos_z = rospy.get_param('/modrob/goal_area/position/z_max')
if rospy.has_param('modrob/goal_joint_position'):
self.goal_arm_joint_position = rospy.get_param('modrob/goal_joint_position')
self.goal_joint_diff = rospy.get_param('/modrob/goal_area/joint_diff')
else:
rospy.logwarn("Parameter modrob/goal_joint_position not found.")
self.goal_arm_joint_position = [0.0 for _ in range(len(self._arm_joint_names))]
self.goal_joint_diff = 0.0
else:
self.min_goal_pos_x = -1000
self.min_goal_pos_y = -1000
self.min_goal_pos_z = -1000
self.max_goal_pos_x = 1000
self.max_goal_pos_y = 1000
self.max_goal_pos_z = 1000
if rospy.has_param('modrob/goal_joint_position'):
rospy.logwarn("Parameter modrob/goal_joint_position not found.")
self.goal_arm_joint_position = rospy.get_param('modrob/goal_joint_position')
self.goal_joint_diff = 0.0
else:
self.goal_arm_joint_position = [0.0 for _ in range(len(self._arm_joint_names))]
self.goal_joint_diff = 0.0
# Rewards
self.distance_penalty_position = rospy.get_param('/modrob/distance_penalty_position')
self.distance_penalty_orientation = rospy.get_param('/modrob/distance_penalty_orientation')
self.time_penalty = rospy.get_param('/modrob/time_penalty')
self.goal_reward = rospy.get_param('/modrob/goal_reward')
self.collision_penalty = rospy.get_param('/modrob/collision_penalty')
self.critical_collision_penalty = rospy.get_param('/modrob/critical_collision_penalty')
# Human motion animation
self.human_motion_pos_random = rospy.get_param('/modrob/human_motion_pos_random')
self.human_motion_time_random = rospy.get_param('/modrob/human_motion_time_random')
self.safety_distance_ground = rospy.get_param('/modrob/safety_distance_ground')
def _set_base_pose(self):
"""Set the pose of the robots base."""
assert bool(self.robot_name_ in self.gazebo_model_pose), self.robot_name_ + " not in gazebo model states!"
self.move_gazebo_model(self.robot_name_, self.base_pose)
def _get_base_pose(self):
"""Set the pose of the robots base."""
assert bool(self.robot_name_ in self.gazebo_model_pose), self.robot_name_ + " not in gazebo model states!"
return self.gazebo_model_pose[self.robot_name_]
def _set_init_pose(self):
"""Sets the Robot in its init pose.
"""
# Move the human out of the way
self._human_play_publisher.publish(False)
self._human_pose_shift_publisher.publish(self.create_pose([5, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]))
# Move until init position is reached (timeout=0)
success = False
while (not success):
success = self.move_arm_joints(self.init_arm_joint_position, error=self.init_error, timeout=0.0)
rospy.sleep(0.1)
self.init_pose = self.get_ee_pose()
return True
def _get_state_from_obs(self, obs):
"""Extract the state from an observation."""
return obs[self.observation_id_current_joint_values:self.observation_id_goal_joint_values]
def _get_goal_from_obs(self, obs):
"""Extract the goal from an observation."""
return obs[self.observation_id_goal_joint_values:self.observation_id_current_joint_velocities]
def _get_collision_from_obs(self, obs):
"""Extract the collision value from an observation."""
return obs[self.observation_id_collision]
def _get_critical_collision_from_obs(self, obs):
"""Extract the information if a collision was critical from an observation."""
return obs[self.observation_id_critical_collision]
def _replace_goal_in_obs(self, obs, new_goal):
obs[self.observation_id_goal_joint_values:self.observation_id_current_joint_velocities] = new_goal
return obs
def _init_env_variables(self):
"""Inits episode specific variables each time we reset at the start of an episode.
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # Set done to false, because it is calculated asynchronously
self._episode_done = False
# A counter for timesteps at goal position (Prevents overshooting of goal)
self.goal_count = 0
self.last_arm_joint_position = self.get_arm_joint_positions()
self.is_collided = False
self.critically_collided = False
# Generate new random goal pose
if self.create_random_goal_pose:
self.goal_pose, goal_joint_values = self.create_random_goal_pose()
self.goal_observation = goal_joint_values
# Place start and goal markers
self.move_gazebo_model("start_pos", self.correct_model_pose(self.init_pose))
self.move_gazebo_model("goal_pos", self.correct_model_pose(self.goal_pose))
# Set the human animation start position and time
new_x = np.random.uniform(low = -self.human_motion_pos_random, high = self.human_motion_pos_random)
new_y = np.random.uniform(low = -self.human_motion_pos_random, high = self.human_motion_pos_random)
self._human_pose_shift_publisher.publish(self.create_pose([new_x, new_y, 0.0], [0.0, 0.0, 1.0, 0.0]))
new_t = np.random.uniform(low = 0, high= self.human_motion_time_random)
self._human_script_time_publisher.publish(new_t)
self._human_play_publisher.publish(True)
rospy.loginfo("Init env variables finished")
def _set_action(self, action):
"""Give a control command to the robot.
First, the action is clipped to the action space.
It is possible to assign negative rewards for too high actions.
This function denormalizes the action command and controls the robot.
Args:
action (array): Normalized actions
"""
action = np.clip(action, self.action_space.low, self.action_space.high)
rospy.logdebug("Start Set Action ==>"+str(action))
# Denormalize actions
denormalized_action = self.denormalize_actions(action)
# Build joint position dict
if self.use_delta_actions:
joint_positions = self.create_joint_positions_delta(denormalized_action)
else:
joint_positions = self.create_joint_positions_absolute(denormalized_action)
# If commanded action would result in the end effector colliding with the ground,
# don't execute that action.
while(self.check_collision_ground(joint_positions, self.safety_distance_ground)):
# Collision with ground highly likely
rospy.loginfo("Prevented collision with ground. Will create new action.")
rand_act = np.random.rand(self.n_actions)
action = rand_act * (self.action_space.high-self.action_space.low) + self.action_space.low
denormalized_action = self.denormalize_actions(action)
if self.use_delta_actions:
joint_positions = self.create_joint_positions_delta(denormalized_action)
else:
joint_positions = self.create_joint_positions_absolute(denormalized_action)
# If the commanded action is withing the goal reached error bound,
# move to the exact goal and don't stop before.
# The idea behind this is to mitigate the effects of randomness around the goal
# and ensure exactness.
        # TODO: Test this feature for practicability
"""
if (self.joints_close(joint_positions, self.goal_observation, self.goal_error)):
joint_positions = self.goal_observation
movement_err = self.init_error
timeout = 0
"""
# Set action as command
movement_err = self.movement_error
timeout = self.movement_timeout
# Only arm movement implemented right now. TODO: Add gripper action.
#rospy.loginfo('New goal = [' + (' '.join('{}'.format(jp) for jp in joint_positions)) + ']')
self.move_arm_joints(joint_positions, error=movement_err, timeout=timeout)
rospy.logdebug("END Set Action ==>"+str(action))
return action
def _get_obs(self):
"""Get normalized observation array from robot sensors.
Returns:
observations (array): Normalized observation array
"""
rospy.logdebug("Start Get Observation ==>")
# Get non-normalized observations
observations = self.retrieve_observations()
# Normalize observations
observations = self.normalize_observations(observations)
rospy.logdebug("END Get Observation ==>"+str(observations))
return observations
def _is_done(self, observations):
"""Return if episode is finished."""
if self._get_collision_from_obs(observations):
return True
return self._is_in_goal(observations)
def _is_in_goal(self, obs):
"""Return if the observed state is in the goal position."""
observations = self.denormalize_observations(obs)
# Check if joint position is close
joint_values = self._get_state_from_obs(observations)
goal_joint_values = self._get_goal_from_obs(observations)
if (self.joints_close(joint_values, goal_joint_values, self.goal_error)):
return True
else:
return False
def _compute_reward(self, observations, done):
"""Compute sparse reward for this step."""
if self._get_critical_collision_from_obs(observations):
return self.critical_collision_penalty
        # If a non-critical collision occurred -> return the collision penalty
        if self._get_collision_from_obs(observations):
            return self.collision_penalty
        # If the goal is reached -> return the goal reward
        if self._is_in_goal(observations):
            return self.goal_reward
        # Otherwise -> return the time penalty for this step
        return self.time_penalty
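    # Illustrative arithmetic for the sparse reward above (the actual values come from
    # the /modrob/* reward parameters and are hypothetical here): with time_penalty=-1,
    # goal_reward=0, collision_penalty=-10 and critical_collision_penalty=-100, a
    # 50-step episode that reaches the goal contact-free accumulates 49*(-1) + 0 = -49,
    # while an episode ending in a critical collision after 10 steps accumulates
    # 9*(-1) + (-100) = -109.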
def _collision_callback(self, data):
"""This function is called if a collision is detected by one of the modrobs collision sensors.
The collision sensor plugins publish the collision information on /' + self.robot_name_ + '/collisions.
Sets self.is_collided to true.
Outputs an info message.
"""
if not self.is_collided:
self.is_collided = True
if (data.collisions[0].obstacle_contact.find("actor") >= 0 and
np.sum(np.abs(self.joint_state.velocity[2:]))/(len(self.joint_state.velocity)-2) > 0.1):
rospy.logerr("Critical collision detected between {} and {} with velocity [{}].".format(
data.collisions[0].parent_contact,
data.collisions[0].obstacle_contact,
self.joint_state.velocity[2:]
))
self.critically_collided = True
else:
rospy.logwarn("Non-critical collision detected between {} and {} with velocity [{}].".format(
data.collisions[0].parent_contact,
data.collisions[0].obstacle_contact,
self.joint_state.velocity[2:]
))
self.critically_collided = False
def _human_joint_callback(self, data):
"""Incoming human joint (motion capture) measurement."""
for (i, j_id) in enumerate(self.human_joint_meas_ids):
self.human_joint_pos[i] = [data.data[j_id].x, data.data[j_id].y, data.data[j_id].z]
def _init_human_animation_publisher(self):
"""Initialize the ROS topics to control the human animation.
Possible actions are:
/actor/pose_cmd: offset the position
/actor/script_time_cmd: set the animation time
/actor/start_stop_cmd: start/stop
"""
self._human_pose_shift_publisher = rospy.Publisher('/actor/pose_cmd', Pose, queue_size=100)
self._human_script_time_publisher = rospy.Publisher('/actor/script_time_cmd', Float64, queue_size=100)
self._human_play_publisher = rospy.Publisher('/actor/start_stop_cmd', Bool, queue_size=100)
def _check_human_publishers_connection(self):
"""Check that all the human publishers are working."""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._human_pose_shift_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /actor/pose_cmd yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
while self._human_script_time_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /actor/script_time_cmd yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
while self._human_play_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /actor/start_stop_cmd yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("All human control publishers connected!")
def create_random_goal_pose(self):
"""Randomly sample joint value poses until one lays in the goal area.
Return:
valid joint values so that ee position in goal area
"""
t = time.time()
while True:
ee_pose, goal_joint_values = self.get_random_joint_pose(self.goal_arm_joint_position, self.goal_joint_diff)
if (ee_pose.position.x >= self.min_goal_pos_x and
ee_pose.position.x <= self.max_goal_pos_x and
ee_pose.position.y >= self.min_goal_pos_y and
ee_pose.position.y <= self.max_goal_pos_y and
ee_pose.position.z >= self.min_goal_pos_z and
ee_pose.position.z <= self.max_goal_pos_z and
not self.check_collision_ground(goal_joint_values, self.safety_distance_ground)):
elapsed = time.time() - t
rospy.loginfo("Needed {} s to calculate new goal.".format(elapsed))
return ee_pose, goal_joint_values
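    # The loop above is a plain rejection sampler: candidate joint poses are drawn by
    # get_random_joint_pose() around self.goal_arm_joint_position and discarded until
    # the resulting end effector position falls inside the configured goal box and the
    # pose keeps the required safety distance to the ground.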
def create_joint_positions_absolute(self, actions):
"""Creates joint_positions from an absolute action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (list): desired joint positions.
"""
joint_positions = np.clip(actions, self.joint_min, self.joint_max)
""" Code for gripper action. Include in code in the future.
elif "hand_to_finger" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
            # F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * actions[i] + b
"""
return joint_positions
def create_joint_positions_delta(self, actions):
"""Creates absolute joint_positions from an delta action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (list): desired absolute joint position.
"""
# Use the last observed joint position (not the current!)
last_arm_joint_positions = self.last_arm_joint_position
joint_positions = np.clip(last_arm_joint_positions + actions, self.joint_min, self.joint_max)
""" Code for gripper action. Include in code in the future.
elif "hand_to_finger" in joint_name:
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
            # F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * joint_positions[joint_name] + b
"""
return joint_positions
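    # Minimal sketch of how delta actions are applied (numbers are made up): with
    # last_arm_joint_position = [0.1, -0.2, 0.3] and a denormalized delta action of
    # [0.05, 0.05, -0.1], the commanded position becomes [0.15, -0.15, 0.2], clipped
    # element-wise to [self.joint_min, self.joint_max] before being sent to the robot.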
def retrieve_observations(self):
"""Retrieve all observations (not normalized).
Sets the last observed joint position.
Observation consists of:
            - current arm joint positions
            - goal arm joint positions
            - current arm joint velocities
            - current end effector position (cartesian)
            - human joint positions relative to the end effector position (cartesian)
            - collision flag
            - critical collision flag
Returns:
observations (list): non normalized observations, shape = [n_observations]
"""
# last_arm_joint_position = current joint position after env step
self.last_arm_joint_position = self.get_arm_joint_positions()
arm_joint_velocities = self.get_arm_joint_velocities()
ee_position, _ = self.get_current_ee_position_and_quaternion()
observations = self.last_arm_joint_position + self.goal_observation + arm_joint_velocities + ee_position
for i in range(self.n_human_obs):
for j in range(3):
observations.append(self.human_joint_pos[i][j] - ee_position[j])
if self.is_collided:
observations.append(1)
if self.critically_collided:
observations.append(1)
else:
observations.append(0)
else:
observations.append(0)
observations.append(0)
return observations
# Internal TaskEnv Methods
def normalize_actions(self, actions):
"""Normalize an array of actions.
To normalize an action a, calculate: a_n = (a-c)/r, r!=0
Args:
actions: Action array, shape = [n_actions]
Returns:
normalized_actions: Normalized action array, shape = [n_actions]
"""
normalized_actions = []
if len(actions) == 0:
rospy.logerr("No actions to normalize.")
return normalized_actions
normalized_actions = (actions - self.action_norm[:, 0]) / self.action_norm[:, 1]
return normalized_actions
def denormalize_actions(self, normalized_actions):
"""Denormalize an array of actions.
To denormalize an normalized action a_n, calculate a = (a_n * r) + c
Args:
normalized_actions: Normalized action array, shape = [n_actions]
Returns:
actions: Action array, shape = [n_actions]
"""
actions = []
if len(normalized_actions) == 0:
rospy.logerr("No actions to denormalize.")
return actions
actions = (normalized_actions * self.action_norm[:, 1]) + self.action_norm[:, 0]
return actions
def normalize_observations(self, observations):
"""Normalize an array of observations.
To normalize an observation a, calculate: a_n = (a-c)/r, r!=0
Args:
observations: Action array, shape = [n_observations]
Returns:
normalized_observations: Normalized observation array, shape = [n_observations]
"""
normalized_observations = []
if len(observations) == 0:
rospy.logwarn("No observations to normalize.")
return normalized_observations
normalized_observations = (observations - self.observation_norm[:, 0]) / self.observation_norm[:, 1]
return normalized_observations
def denormalize_observations(self, normalized_observations):
"""Denormalize an array of observations.
To denormalize an normalized observation a_n, calculate a = (a_n * r) + c
Args:
normalized_observations: Normalized observation array, shape = [n_observations]
Returns:
observations: Action array, shape = [n_observations]
"""
observations = []
if len(normalized_observations) == 0:
rospy.logwarn("No observations to denormalize.")
return observations
observations = (normalized_observations * self.observation_norm[:, 1]) + self.observation_norm[:, 0]
return observations
def discretize_observation(self,data,new_ranges):
"""
Discards all the laser readings that are not multiple in index of new_ranges
value.
"""
self._episode_done = False
discretized_ranges = []
        mod = len(data.ranges)//new_ranges  # integer division so the modulo check below stays exact
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or np.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif np.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def is_in_goal_pose(self, current_ee_pose, epsilon_position=0.05, epsilon_orientation=0.05):
"""Checks whether the end effector is within a margin of error to its goal pose.
Args:
current_ee_pose (geometry_msgs.msg.Pose): current pose of the end effector
epsilon_position (double): margin of error for position (euclidean distance)
epsilon_orientation (double): margin of error for orientation
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
        # Calculate distances
position_distance, orientation_distance = self.get_distances_from_desired_pose(current_ee_pose)
rospy.logdebug("Position distance = {}, orientation distance = {}".format(position_distance, orientation_distance))
return position_distance <= epsilon_position and orientation_distance <= epsilon_orientation
def get_distances_from_desired_pose(self, current_ee_pose):
"""Calculates the euclidean distance and orientation distance from the current ee pose to the goal pose.
Args:
current_ee_pose (geometry_msgs.msg.Pose): current pose of the end effector
Returns:
position_distance (double): euclidean distance between cartesian ee positions
orientation_distance (double): quaternion distance between the ee quaternions
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
position_distance = self.calculate_ee_position_distance(current_ee_pose)
orientation_distance = self.calculate_gripper_orientation_distance(current_ee_pose)
return position_distance, orientation_distance
def calculate_ee_position_distance(self, current_ee_pose):
"""Calculate euclidean distance between the current and goal end effector position (goal in self.).
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Euclidean distance between the two poses.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
c_ee_pos = current_ee_pose.position
g_ee_pos = self.goal_pose.position
return np.sqrt((c_ee_pos.x - g_ee_pos.x)**2 +
(c_ee_pos.y - g_ee_pos.y)**2 +
(c_ee_pos.z - g_ee_pos.z)**2)
def calculate_ee_orientation_distance(self, current_ee_pose):
"""Calculate distance between the current and goal end effector pose quaternion (goal in self.).
We use http://kieranwynn.github.io/pyquaternion/#distance-computation for the quaternion distance calculation.
Note: This function does not measure the distance on the hypersphere, but it takes into account the fact that q and -q encode the same rotation.
It is thus a good indicator for rotation similarities.
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Distance between the two quaternions.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
q0 = self.convert_pose_to_quaternion(current_ee_pose)
q1 = self.convert_pose_to_quaternion(self.goal_pose)
return Quaternion.absolute_distance(q0, q1)
def calculate_gripper_orientation_distance(self, current_ee_pose):
"""Calculate distance between the current and goal end effector z-axis.
Returns the great circle distance between the z-axis of the end effector and the goal pose.
The rotation around the z-axis is assumed to be irrelevant for gripping objects for this function.
Args:
current_ee_pose (geometry_msgs.msg.Pose): Current end effector pose
Returns:
Distance between the two quaternions.
"""
assert isinstance(current_ee_pose, Pose), "current_ee_pose is not of type geometry_msgs.msg.Pose!"
q0 = self.convert_pose_to_quaternion(current_ee_pose)
z0 = self.get_new_z_axis(q0)
q1 = self.convert_pose_to_quaternion(self.goal_pose)
z1 = self.get_new_z_axis(q1)
return self.great_circle_distance(z0, z1)
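    # Note on the metric above: both poses are reduced to the direction of their
    # gripper z-axis and compared via the great circle distance on the unit sphere,
    # so identical z-axes give 0.0 and opposite z-axes give pi, independent of any
    # rotation about the gripper axis itself.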
def correct_model_pose(self, pose):
"""Correct the model pose by the pose of the base.
This is needed because the ee poses of moveit are in relation to the base.
TODO: Include orientation!
Args:
pose (geometry_msgs.msg.Pose)
Returns:
corrected copy of pose
"""
new_pose = copy.deepcopy(pose)
new_pose.position.x += self.base_pose.position.x
new_pose.position.y += self.base_pose.position.y
new_pose.position.z += self.base_pose.position.z
return new_pose
def get_model_pose(self, model_name):
"""Return the pose of a gazebo model by name.
Args:
model_name (String): Name of the model (in world file)
Returns:
pose (geometry_msgs.msg.Pose)
"""
if model_name in self.gazebo_model_pose:
return self.gazebo_model_pose[model_name]
else:
rospy.logerr("Model {} does not exist in gazebo world.".format(model_name))
return None
def move_gazebo_model(self, model_name, pose):
""" Move the gazebo model to the desired pose
Args:
model_name (string): name of the model (Must be in topic /gazebo/model_states)
pose (geometry_msgs.msg.Pose)
"""
if model_name in self.gazebo_model_pose:
state_msg = SetModelState()
state_msg.model_name = model_name
state_msg.pose = pose
state_msg.twist = self.gazebo_model_twist[model_name]
state_msg.reference_frame = "world"
result = self.publish_gazebo_model_state(state_msg)
else:
result = False
rospy.logwarn("The goal_pos model does not exist!")
return result
def publish_gazebo_model_state(self, model_state):
"""Publish a gazebo model state.
Args:
model_state (gazebo_msgs.srv.SetModelState)
"""
rospy.wait_for_service('/gazebo/set_model_state')
try:
set_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
resp = set_state(model_state = model_state)
except rospy.ServiceException as e:
rospy.logerr("Set model state service call failed: %s"%e)
resp = False
return resp
def _check_gazebo_model_state_ready(self):
self.gazebo_model_pose = dict()
self.gazebo_model_twist = dict()
rospy.logdebug("Waiting for {} to be READY...".format(self._gazebo_model_state_topic))
while not self.gazebo_model_pose and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._gazebo_model_state_topic, ModelStates, timeout=5.0)
self.gazebo_model_pose = dict(zip(data.name, data.pose))
self.gazebo_model_twist = dict(zip(data.name, data.twist))
rospy.logdebug("Current {} READY=>".format(self._gazebo_model_state_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting gazebo_model states.".format(self._gazebo_model_state_topic))
return self.gazebo_model_pose
def _check_human_joint_ready(self):
self.human_joint_pos = []
for _ in range(len(self.human_joint_meas_ids)):
self.human_joint_pos.append([])
rospy.logdebug("Waiting for {} to be READY...".format(self._human_joint_sensor_topic))
while len(self.human_joint_pos[0]) == 0 and not rospy.is_shutdown():
try:
data = rospy.wait_for_message(self._human_joint_sensor_topic, PositionsHeadered, timeout=5.0)
self._human_joint_callback(data)
rospy.logdebug("Current {} READY=>".format(self._human_joint_sensor_topic))
except:
rospy.logerr("Current {} not ready yet, retrying for getting human motion capture information.".format(self._human_joint_sensor_topic))
return self.human_joint_pos
def _check_sphere_publishers_connection(self):
"""
#Checks that all the publishers are working.
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
while self._sphere_pos_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.loginfo("No susbribers to {} yet so we wait and try again".format(self._sphere_pose_topic))
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
while self._sphere_vel_publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.loginfo("No susbribers to {} yet so we wait and try again".format(self._sphere_vel_topic))
try:
rate.sleep()
except rospy.ROSInterruptException:
# This is to avoid error when world is rested, time when backwards.
pass
rospy.logdebug("Sphere publisher connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
"""
return True
def _gazebo_model_state_callback(self, data):
self.gazebo_model_pose = dict(zip(data.name, data.pose))
self.gazebo_model_twist = dict(zip(data.name, data.twist))
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
        Sensor Topic List:
        * /robot/joint_limits: Joint limits of the Sawyer arm
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
    # SawyerEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
Set ups the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
        It executes the command given through an id. This will move any joint
        of Sawyer, including the gripper if it has one.
        :param: action_id: These are the possible action_id values and the action associated.
        self.joints[0]+"_increase",
        self.joints[0]+"_decrease",
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
gripper_close,
gripper_open,
gripper_calibrate
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
    def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
        """
        It moves all the joints to the given position and doesn't exit until it reaches
        that position or until the timeout is reached.
        """
        self.limb.move_to_joint_positions(positions=joint_positions_dict,
                                          timeout=timeout,
                                          threshold=threshold,
                                          test=None)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
Return dictionary dict({str:float}) with all the joints angles
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
Returns a dictionary dict({str:float}) with all the joints efforts
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
It will only return something different to None if the TFs of the Two frames are in TF topic
published and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
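    # Example (frame names depend on the robot description and are only illustrative):
    #   trans, rot = self.get_tf_start_to_end_frames("base", "right_gripper_tip")
    # returns the translation [x, y, z] and the quaternion [x, y, z, w] of the gripper
    # tip expressed in the base frame, blocking until the transform becomes available.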
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
        Get the Joint Limits, in the init phase, where we need to unpause the simulation to get them
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~example_modrob_human_training~src~start_training_sac.py | #!/usr/bin/env python
import torch
import gym
import numpy as np
import time
import rospy
import rospkg
import functools
from datetime import datetime
from spinup.algos.pytorch.sac import sac, core
from spinup.utils.run_utils import setup_logger_kwargs
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from gym import wrappers
from torch.optim import Adam
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
# How can this be done dynamically?
rospy.init_node('modrob_RL_node',
anonymous=True, log_level=rospy.INFO)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/sac/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('example_modrob_human_training')
now = datetime.now()
outdir = pkg_path + '/training_results/' + now.strftime("%Y_%m_%d_%H_%M")
## We cannot use a monitor if we want to cut off trajectories
#env = wrappers.Monitor(env, outdir, force=True)
#rospy.loginfo("Monitor Wrapper started")
last_time_steps = np.ndarray(0)
# Network size
hid = rospy.get_param("/sac/hid")
l = rospy.get_param("/sac/l")
ac_kwargs=dict(hidden_sizes=hid*l)
# Random seed
seed = rospy.get_param("/sac/seed")
# An epoch consists of a fixed amount of steps.
steps_per_epoch = rospy.get_param("/sac/steps_per_epoch")
# We train for a fixed amount of epochs
n_epochs = rospy.get_param("/sac/n_epochs")
# Size of replay buffer
replay_size = rospy.get_param("/sac/replay_size")
# Discount factor. (Always between 0 and 1.)
gamma = rospy.get_param("/sac/gamma")
# polyak (float): Interpolation factor in polyak averaging for target networks.
polyak = rospy.get_param("/sac/polyak")
# learning rate
lr = rospy.get_param("/sac/lr")
# Entropy regularization coefficient.
alpha = rospy.get_param("/sac/alpha")
# Batch size
batch_size = rospy.get_param("/sac/batch_size")
# Number of steps for uniform-random action selection,
# before running real policy. Helps exploration.
start_steps = rospy.get_param("/sac/start_steps")
# Number of env interactions to collect before starting to do gradient descent updates.
# Ensures replay buffer is full enough for useful updates.
update_after = rospy.get_param("/sac/update_after")
# Number of env interactions that should elapse between gradient descent updates. Note: Regardless of how long
# you wait between updates, the ratio of env steps to gradient steps is locked to 1.
update_every = rospy.get_param("/sac/update_every")
# Number of episodes to test the deterministic policy at the end of each epoch.
num_test_episodes = rospy.get_param("/sac/num_test_episodes")
# maximum length of episode
max_ep_len = rospy.get_param("/sac/max_ep_len")
# Number of epochs between each policy/value function save
save_freq = rospy.get_param("/sac/save_freq")
logger_kwargs = setup_logger_kwargs(task_and_robot_environment_name,seed,outdir)
# Set max timestep
env.spec.timestep_limit = max_ep_len
sac.sac(env=env,
test_env = env,
actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=hid*l),
seed=seed,
steps_per_epoch=steps_per_epoch,
epochs=n_epochs,
replay_size=replay_size,
gamma=gamma,
polyak=polyak,
lr=lr,
alpha=alpha,
batch_size=batch_size,
start_steps=start_steps,
update_after=update_after,
update_every=update_every,
num_test_episodes=num_test_episodes,
max_ep_len=max_ep_len,
logger_kwargs=logger_kwargs,
save_freq=save_freq)
env.close()
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~modrob_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from gazebo_msgs.msg import LinkStates
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class ModRobEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all modular robot environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new ModRob (modular robot) environment.
        ModRob doesn't use the controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        <<--- Can we use the controller manager in the future? --->>
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was running already for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List: TODO
*
Actuators Topic List: TODO
*
Args:
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/robot_name')
rospy.loginfo("Initialize ModRob environment...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ModRobEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/" + self.robot_name_ + "/joint_states", JointState, self._joint_state_callback)
self._check_all_sensors_ready()
#self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._init_joint_publisher()
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.loginfo("Finished ModRob INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _init_joint_publisher(self):
"""Initialize the joint controller publisher with the joint list.
        Relies on joint sensors being published.
"""
self._check_joint_state_ready()
assert len(self.joint_state.name) > 0, "No joint names found in joint_state."
self._joint_publishers = dict()
for joint_name in self.joint_state.name:
self._joint_publishers[joint_name] = rospy.Publisher('/' + self.robot_name_ + '/{}_position_controller/command'.format(joint_name), Float64, queue_size=10)
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_joint_state_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_joint_state_ready(self):
self.joint_state = None
rospy.logdebug("Waiting for /" + self.robot_name_ + "/joint_states to be READY...")
while self.joint_state is None and not rospy.is_shutdown():
try:
self.joint_state = rospy.wait_for_message("/" + self.robot_name_ + "/joint_states", JointState, timeout=5.0)
rospy.logdebug("Current /" + self.robot_name_ + "/joint_states READY=>")
except:
rospy.logerr("Current /" + self.robot_name_ + "/joint_states not ready yet, retrying for getting joint states")
return self.joint_state
def _joint_state_callback(self, data):
self.joint_state = data
def _check_publishers_connection(self):
"""Checks that all the publishers are working.
"""
# Check joint position controller publishers
rate = rospy.Rate(10) # 10hz
for joint_name in self._joint_publishers:
publisher = self._joint_publishers[joint_name]
while publisher.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to /" + self.robot_name_ + "/{}_position_controller/command yet so we wait and try again".format(joint_name))
try:
rate.sleep()
except rospy.ROSInterruptException:
                    # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("All joint position controller publishers connected!")
# Check additional publishers
rospy.logdebug("All publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_all_joints(self, joint_positions, error=0.2, timeout=3.0):
"""Control the joints and gripper of modrob0.
The control waits until timeout or desired position reached within a margin of error.
Args:
joint_positions (Dict): key: joint_name, value: desired joint position
error (double): combined absolute error allowed between desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
# Check if publishers are active
self._check_publishers_connection()
# Send control command to all joints in joint_position dict
for joint_name in joint_positions:
if joint_name in self._joint_publishers:
# Publish val = joint_positions[joint_name] to joint publisher self._joint_publishers[joint_name]
self._joint_publishers[joint_name].publish(joint_positions[joint_name])
else:
rospy.logwarn("Joint /" + self.robot_name_ + "/{}_position_controller/command not found! Not publishing this joint position.".format(joint_name))
# Wait for a certain amount of time to get to the desired position.
self.wait_for_joints_to_get_there(joint_positions, error=error, timeout=timeout)
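    # Example usage (joint names are robot-specific and only illustrative):
    #   self.move_all_joints({"joint0": 0.0, "joint1": 1.57}, error=0.05, timeout=5.0)
    # publishes one Float64 command per joint and then blocks in
    # wait_for_joints_to_get_there() until the summed absolute joint error drops
    # below 0.05 or 5 seconds have passed.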
def wait_for_joints_to_get_there(self, desired_joint_positions, error=0.2, timeout=3.0):
"""Wait until target joint position is reached within an error or
until the timout is reached.
Set timeout to 0 to wait until joint position is reached.
Args:
desired_joint_positions (Dict): key: joint_name, value: desired joint position
error (double): combined absolute error allowed between desired and reached joint position
timeout (double): time to wait in s, set this to zero to wait until joint position is reached
"""
time_waiting = 0.0
frequency = 100.0 # Fine tune this parameter.
are_equal = False
is_timeout = False
# ROS will try to keep this frequency of the following loop
rate = rospy.Rate(frequency)
rospy.logdebug("Waiting for joint to get to the position")
while not are_equal and not is_timeout and not rospy.is_shutdown():
current_joint_positions = self.get_joint_positions()
sum_distance = 0
for joint_name in desired_joint_positions:
if joint_name in current_joint_positions:
# TODO: Handle gripper position different.
# We are currently adding angle differences and cartesian differences
sum_distance += abs(desired_joint_positions[joint_name] - current_joint_positions[joint_name])
else:
rospy.logwarn("Joint /" + self.robot_name_ + "/{}_position_controller/command not found! Not checking this joint position.".format(joint_name))
are_equal = sum_distance <= error
rate.sleep()
if timeout == 0.0:
# Dismiss time constraint and wait until target reached
time_waiting += 0.0
else:
time_waiting += 1.0 / frequency
is_timeout = time_waiting > timeout
rospy.logdebug(
"Joints are in the desired position with an erro of "+str(error))
def get_joint_positions(self):
return dict(zip(self.joint_state.name, self.joint_state.position))
def get_joint_velocities(self):
return dict(zip(self.joint_state.name, self.joint_state.velocity))
def get_joint_efforts(self):
return dict(zip(self.joint_state.name, self.joint_state.effort))
def get_joint_names(self):
return self.joint_state.name
def get_joint_state(self):
return self.joint_state
def reinit_sensors(self):
"""
This method is for the tasks so that when reseting the episode
the sensors values are forced to be updated with the real data and
<<-- Only needed when reset is set to SIMULATION.
<<-- TODO: Implement this ?
"""
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
from .controllers_connection import ControllersConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
# https://github.com/openai/gym/blob/master/gym/core.py
class RobotGazeboEnv(gym.Env):
def __init__(self, robot_name_space, controllers_list, reset_controls, start_init_physics_parameters=True, reset_world_or_sim="SIMULATION"):
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters,reset_world_or_sim)
self.controllers_object = ControllersConnection(namespace=robot_name_space, controllers_list=controllers_list)
self.reset_controls = reset_controls
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/openai/reward', RLExperimentInfo, queue_size=1)
# We Unpause the simulation and reset the controllers if needed
"""
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
"""
self.gazebo.unpauseSim()
if self.reset_controls:
self.controllers_object.reset_controllers()
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
Here we get the action execute it in a time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action num to movement action, execute the action in the
simulation and get the observations result of performing that action.
"""
rospy.logdebug("START STEP OpenAIROS")
self.gazebo.unpauseSim()
action = self._set_action(action)
obs = self._get_obs()
self.gazebo.pauseSim()
done = self._is_done(obs)
info = {}
info['action'] = action
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
rospy.logdebug("END STEP OpenAIROS")
return obs, reward, done, info
def reset(self):
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._reset_sim()
self._init_env_variables()
self._update_episode()
self.gazebo.unpauseSim()
obs = self._get_obs()
self.gazebo.pauseSim()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
rospy.logwarn("PUBLISHING REWARD...")
self._publish_reward_topic(
self.cumulated_episode_reward,
self.episode_num
)
rospy.logwarn("PUBLISHING REWARD...DONE="+str(self.cumulated_episode_reward)+",EP="+str(self.episode_num))
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("RESET SIM START")
if self.reset_controls :
rospy.logdebug("RESET CONTROLLERS")
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self.gazebo.pauseSim()
else:
rospy.logwarn("DONT RESET CONTROLLERS")
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_pose()
self.gazebo.pauseSim()
self.gazebo.resetSim()
self.gazebo.unpauseSim()
self._check_all_systems_ready()
self.gazebo.pauseSim()
rospy.logdebug("RESET SIM END")
return True
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
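# Subclasses (the robot and task environments built on top of this class) implement the
# virtual methods above; RobotGazeboEnv then drives them in a template-method fashion:
# reset() -> _reset_sim() -> _check_all_systems_ready()/_set_init_pose() -> _init_env_variables(),
# and step() -> _set_action() -> _get_obs() -> _is_done() -> _compute_reward().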
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~robot_envs~turtlebot2_env.py | import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
class TurtleBot2Env(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new TurtleBot2Env environment.
        Turtlebot2 doesn't use the controller_manager, therefore we won't reset the
        controllers in the standard fashion. For the moment we won't reset them.
        To check any topic we need to have the simulation running; we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /odom : Odometry readings of the Base of the Robot
* /camera/depth/image_raw: 2d Depth image of the depth sensor.
* /camera/depth/points: Pointcloud sensor readings
* /camera/rgb/image_raw: RGB camera
* /kobuki/laser/scan: Laser Readings
Actuators Topic List: /cmd_vel,
Args:
"""
rospy.loginfo("Start TurtleBot2Env INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="put_robot_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Does not have any accessible controllers
self.controllers_list = []
        # It does not use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2Env, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
#rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.loginfo("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
self.camera_rgb_image_raw = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = data
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
        If min_laser_distance == -1, it always returns False, because that is the way
        to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item)+"< "+str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def reinit_sensors(self):
"""
        This method is for the tasks so that when resetting the episode
        the sensor values are forced to be updated with the real data and
        not left with stale readings from before the reset.
"""
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~human_modrob~modrob_human.py | import rospy
import numpy as np
from gym import spaces
from openai_ros.robot_envs import modrob_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
import sys
class ModRobHumanEnv(modrob_env.ModRobEnv):
def __init__(self):
"""
This Task Env is designed for having the ModRob in a human working environment.
It will learn how to finish tasks without colliding with the human.
"""
# The robot name parameter must be in the parameter server
self.robot_name_ = rospy.get_param('/modrob/robot_name')
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_abs_env_var = rospy.get_param("/modrob/ros_abs_env_var", None)
try:
ros_ws_abspath = os.environ[ros_abs_env_var]
except:
print("Please set the environment variable {}".format(ros_abs_env_var))
sys.exit(1)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="modrob_simulation",
launch_file_name="start_world_human.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/human_modrob/config",
yaml_file_name="modrob_human.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(ModRobHumanEnv, self).__init__(ros_ws_abspath)
## Load in environment variables
self._get_env_variables()
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-np.inf, np.inf)
## Set action and observation space
# Continuous action space.
# All actions should range from 0 to 1. This improves training.
self.n_actions = len(self.id_action)
self.n_observations = len(self.id_observation)
self.action_space = spaces.Box(low=-1, high=1, shape=(self.n_actions,))
self.observation_space = spaces.Box(low=0, high=1, shape=(self.n_observations,))
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
self.cumulated_steps = 0.0
def _get_env_variables(self):
"""Load in environment variables from yaml.
Relevant variables:
joint_min: Minimal angle for all joints
joint_max: Maximal angle for all joints
joint_max_delta: Max theoretically allowed movement per execution step (in rad)
gripper1_min: Minimal position of gripper part 1
gripper1_max: Maximal position of gripper part 1
gripper2_min: Minimal position of gripper part 2
gripper2_max: Maximal position of gripper part 2
gripper_max_delta: Maximal theoretically allowed movement per execution step (in m)
use_delta_actions: True: Use differential position commands, False: Use absolute position commands
            movement_error: Precision maximum for regular movement (can be arbitrarily small)
movement_timeout: Time waited until next movement execution
init_error: Precision for start position
goal_error: Precision for goal reached
            init_joint0_position: Initial position for joint0 - TODO: set this dynamically.
            init_joint1_position: Initial position for joint1 - TODO: set this dynamically.
            init_joint2_position: Initial position for joint2 - TODO: set this dynamically.
            init_hand_to_finger1_position: Initial position for gripper part 1 - TODO: set this dynamically.
            init_hand_to_finger2_position: Initial position for gripper part 2 - TODO: set this dynamically.
            desired_pose: Dummy pose for joint0 - TODO: set this dynamically.
            distance_reward: Getting closer to the goal gives a positive reward
            time_penalty: Time penalty for every step
            goal_reward: Points given when reaching the goal
"""
## Determine the normalization constants for all observations and actions.
# action_norm consists of two columns (c, r) and one row per controllable joint.
# To normalize an action a, calculate: a_n = (a-c)/r, r!=0
        # To denormalize a normalized action a_n, calculate a = (a_n * r) + c
# Since we are using the tanh as activation for actions, we normalize a to [-1; 1]
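        # Illustrative example with made-up limits (not values read from the config):
        # with joint_min = -1.5 and joint_max = 1.5 in absolute-position mode,
        # _c = -1.5 and _r = 3.0, so c = 0.0 and r = 1.5. A command a = 0.75 then
        # normalizes to a_n = (0.75 - 0.0) / 1.5 = 0.5, and denormalizing 0.5 gives
        # (0.5 * 1.5) + 0.0 = 0.75 again, i.e. [-1.5, 1.5] maps onto [-1, 1].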
self.action_norm = []
self.observation_norm = []
# Additionally, define which id refers to which action and observation.
self.id_action = []
self.id_observation = []
# Max movements
self.joint_max_delta = rospy.get_param('/modrob/joint_max_delta')
self.gripper_max_delta = rospy.get_param('/modrob/gripper_max_delta')
self.use_delta_actions = rospy.get_param('/modrob/use_delta_actions')
# TODO: Read these from urdf file.
self.joint_min = rospy.get_param('/modrob/joint_min')
self.joint_max = rospy.get_param('/modrob/joint_max')
assert self.joint_max-self.joint_min != 0, "Joint difference is zero"
self.gripper1_min = rospy.get_param('/modrob/gripper1_min')
self.gripper1_max = rospy.get_param('/modrob/gripper1_max')
assert self.gripper1_max-self.gripper1_min != 0, "Gripper 1 difference is zero"
self.gripper2_min = rospy.get_param('/modrob/gripper2_min')
self.gripper2_max = rospy.get_param('/modrob/gripper2_max')
assert self.gripper2_max-self.gripper2_min != 0, "Gripper 2 difference is zero"
# First entries are joint positions
for joint_name in self.get_joint_names():
if "joint" in joint_name:
if self.use_delta_actions:
_c = -1*self.joint_max_delta
_r = 2*self.joint_max_delta
else:
_c = self.joint_min
_r = self.joint_max-self.joint_min
# From [0; 1] normalization to [-1; 1]
c = _c + _r/2
r = _r/2
self.action_norm.append([c, r])
self.observation_norm.append([self.joint_min, self.joint_max-self.joint_min])
self.id_action.append(joint_name)
self.id_observation.append(joint_name)
# TEST for dummy reward and goal
if joint_name == "joint0":
self.goal_joint_id = len(self.id_observation)-1
elif "hand_to_finger1" in joint_name:
# Only one action for both grippers (they move together)
if self.use_delta_actions:
self.action_norm.append([-1*self.gripper_max_delta, 2*self.gripper_max_delta])
else:
self.action_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.observation_norm.append([self.gripper1_min, self.gripper1_max-self.gripper1_min])
self.id_action.append(joint_name)
self.id_observation.append(joint_name)
self.action_norm = np.array(self.action_norm)
self.observation_norm = np.array(self.observation_norm)
# Movement settings
self.movement_error = rospy.get_param('/modrob/movement_error')
self.movement_timeout = rospy.get_param('/modrob/movement_timeout')
self.init_error = rospy.get_param('/modrob/init_error')
self.goal_error = rospy.get_param('/modrob/goal_error')
self.init_arm_joint_position = []
if rospy.has_param("/modrob/init_joint_position"):
self.init_arm_joint_position = rospy.get_param("/modrob/init_joint_position")
assert(len(self.init_arm_joint_position) == len(self._arm_joint_names))
# Goal and reward
self.desired_pose = rospy.get_param('/modrob/desired_pose')
self.distance_reward = rospy.get_param('/modrob/distance_reward')
self.time_penalty = rospy.get_param('/modrob/time_penalty')
self.goal_reward = rospy.get_param('/modrob/goal_reward')
def _set_init_pose(self):
"""Sets the Robot in its init pose.
"""
# Move until init position is reached (timeout=0)
self.move_all_joints(self.init_joint_position, error=self.init_error, timeout=0.0)
return True
def _init_env_variables(self):
"""Inits episode specific variables each time we reset at the start of an episode.
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # Set Done to False, because it is calculated asynchronously
self._episode_done = False
self.last_joint_position = self.get_joint_positions()
def _set_action(self, action):
"""Give a control command to the robot.
First, the action is clipped to the action space.
It is possible to assign negative rewards for too high actions.
This function denormalizes the action command and controls the robot.
Args:
action (array): Normalized actions
"""
action = np.clip(action, self.action_space.low, self.action_space.high)
rospy.logdebug("Start Set Action ==>"+str(action))
# Denormalize actions
denormalized_action = self.denormalize_actions(action)
# Build joint position dict
if self.use_delta_actions:
joint_positions = self.create_joint_positions_delta(denormalized_action)
else:
joint_positions = self.create_joint_positions_absolute(denormalized_action)
# Set action as command
self.move_all_joints(joint_positions, error=self.movement_error, timeout=self.movement_timeout)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""Get normalized observation array from robot sensors.
Returns:
observations (array): Normalized observation array
"""
rospy.logdebug("Start Get Observation ==>")
# Get non-normalized observations
observations = self.retrieve_observations()
# Normalize observations
observations = self.normalize_observations(observations)
rospy.logdebug("END Get Observation ==>"+str(observations))
return observations
def _is_done(self, observations):
"""Compute if episode is finished.
Right now only dummy goal for first joint angle.
"""
observations = self.denormalize_observations(observations)
phi1 = observations[self.goal_joint_id]
if np.isclose(phi1, self.desired_pose, atol=self.goal_error):
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
"""Compute reward for this step.
Right now only dummy reward for first joint angle.
"""
reward = 0
observations = self.denormalize_observations(observations)
phi1 = observations[self.goal_joint_id]
if not done:
# Reward of minus 1 per time step
reward -= self.time_penalty
# Reward for getting closer to desired pos
reward -= self.distance_reward * (self.desired_pose-phi1)
else:
reward += self.goal_reward
rospy.logdebug("This step reward = " + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward = " + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps = " + str(self.cumulated_steps))
return reward
def create_joint_positions_absolute(self, actions):
"""Creates joint_positions from an absolute action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (Dict): key: joint_name, value: desired joint position.
"""
joint_positions = dict()
for i, action_name in enumerate(self.id_action):
if "joint" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.joint_min, self.joint_max)
elif "hand_to_finger" in action_name:
joint_positions[action_name] = np.clip(actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
                # F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * actions[i] + b
return joint_positions
def create_joint_positions_delta(self, actions):
"""Creates absolute joint_positions from an delta action array.
Args:
actions: Action array (This should be denormalized!), shape = [n_actions]
Returns:
joint_positions (Dict): key: joint_name, value: desired absolute joint position.
"""
# Use the last observed joint position (not the current!)
last_joint_positions = self.last_joint_position
joint_positions = dict()
for i, joint_name in enumerate(self.id_action):
if "joint" in joint_name:
# Calculate new desired joint position and keep it in joint ranges
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.joint_min, self.joint_max)
elif "hand_to_finger" in joint_name:
joint_positions[joint_name] = np.clip(last_joint_positions[joint_name] + actions[i], self.gripper1_min, self.gripper1_max)
# Set action for finger 2 reversed to finger 1.
                # F1 = min --> F2 = max; F1 = max --> F2 = min
# F2 = ax + b
a = (self.gripper2_min - self.gripper2_max)/(self.gripper1_max - self.gripper1_min)
b = (self.gripper1_max*self.gripper2_max-self.gripper1_min*self.gripper2_min)/(self.gripper1_max - self.gripper1_min)
joint_positions["hand_to_finger2"] = a * joint_positions[joint_name] + b
return joint_positions
def retrieve_observations(self):
"""Retrieve all observations (not normalized).
Converts joint_positions (Dict): key: joint_name, value: desired joint position, to observation array.
Returns:
observations (np.array): non normalized observations, shape = [n_observations]
"""
self.last_joint_position = self.get_joint_positions()
observations = np.zeros([len(self.id_observation)])
for i, observation_name in enumerate(self.id_observation):
if "joint" in observation_name:
observations[i] = self.last_joint_position[observation_name]
elif "hand_to_finger1" in observation_name:
# Only use one gripper observation
observations[i] = self.last_joint_position[observation_name]
return observations
# Internal TaskEnv Methods
def normalize_actions(self, actions):
"""Normalize an array of actions.
To normalize an action a, calculate: a_n = (a-c)/r, r!=0
Args:
actions: Action array, shape = [n_actions]
Returns:
normalized_actions: Normalized action array, shape = [n_actions]
"""
normalized_actions = []
if len(actions) == 0:
rospy.logerr("No actions to normalize.")
return normalized_actions
normalized_actions = (actions - self.action_norm[:, 0]) / self.action_norm[:, 1]
return normalized_actions
def denormalize_actions(self, normalized_actions):
"""Denormalize an array of actions.
        To denormalize a normalized action a_n, calculate a = (a_n * r) + c
Args:
normalized_actions: Normalized action array, shape = [n_actions]
Returns:
actions: Action array, shape = [n_actions]
"""
actions = []
if len(normalized_actions) == 0:
rospy.logerr("No actions to denormalize.")
return actions
actions = (normalized_actions * self.action_norm[:, 1]) + self.action_norm[:, 0]
return actions
def normalize_observations(self, observations):
"""Normalize an array of observations.
To normalize an observation a, calculate: a_n = (a-c)/r, r!=0
Args:
observations: Action array, shape = [n_observations]
Returns:
normalized_observations: Normalized observation array, shape = [n_observations]
"""
normalized_observations = []
if len(observations) == 0:
rospy.logwarn("No observations to normalize.")
return normalized_observations
normalized_observations = (observations - self.observation_norm[:, 0]) / self.observation_norm[:, 1]
return normalized_observations
def denormalize_observations(self, normalized_observations):
"""Denormalize an array of observations.
        To denormalize a normalized observation a_n, calculate a = (a_n * r) + c
Args:
normalized_observations: Normalized observation array, shape = [n_observations]
Returns:
observations: Action array, shape = [n_observations]
"""
observations = []
if len(normalized_observations) == 0:
rospy.logwarn("No observations to denormalize.")
return observations
observations = (normalized_observations * self.observation_norm[:, 1]) + self.observation_norm[:, 0]
return observations
def discretize_observation(self,data,new_ranges):
"""
        Discards all the laser readings whose index is not a multiple of the
        new_ranges value.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or np.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif np.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
        It returns True if the current position is similar to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param start_point:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = np.array((pstart.x, pstart.y, pstart.z))
b = np.array((p_end.x, p_end.y, p_end.z))
distance = np.linalg.norm(a - b)
return distance
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_wall.py | import rospy
import numpy
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from geometry_msgs.msg import Point
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot2WallEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
ros_ws_abspath = rospy.get_param("/turtlebot2/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" DOESNT exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot2/config",
yaml_file_name="turtlebot2_wall.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2WallEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.linear_forward_speed = rospy.get_param('/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param('/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param('/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param('/turtlebot2/init_linear_turn_speed')
self.new_ranges = rospy.get_param('/turtlebot2/new_ranges')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# Get Desired Point to Get
self.desired_point = Point()
self.desired_point.x = rospy.get_param("/turtlebot2/desired_pose/x")
self.desired_point.y = rospy.get_param("/turtlebot2/desired_pose/y")
self.desired_point.z = rospy.get_param("/turtlebot2/desired_pose/z")
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>" + str(len(laser_scan.ranges)))
num_laser_readings = int(len(laser_scan.ranges)/self.new_ranges)
high = numpy.full((num_laser_readings), self.max_laser_value)
low = numpy.full((num_laser_readings), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>"+str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param("/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base( self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
        # Set Done to False, because it is calculated asynchronously
self._episode_done = False
odometry = self.get_odom()
self.previous_distance_from_des_point = self.get_distance_from_desired_point(odometry.pose.pose.position)
def _set_action(self, action):
"""
This set action will Set the linear and angular speed of the turtlebot2
based on the action number given.
        :param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: #FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: #LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: #RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed, angular_speed, epsilon=0.05, update_rate=10)
rospy.logdebug("END Set Action ==>"+str(action))
def _get_obs(self):
"""
Here we define what sensor data defines our robots observations
        To know which variables we have access to, we need to read the
TurtleBot2Env API DOCS
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
discretized_laser_scan = self.discretize_observation( laser_scan,
self.new_ranges
)
        # We get the odometry so that TurtleBot2 knows where it is.
odometry = self.get_odom()
x_position = odometry.pose.pose.position.x
y_position = odometry.pose.pose.position.y
# We round to only two decimals to avoid very big Observation space
odometry_array = [round(x_position, 2),round(y_position, 2)]
        # We only want the X and Y position
observations = discretized_laser_scan + odometry_array
rospy.logdebug("Observations==>"+str(observations))
rospy.logdebug("END Get Observation ==>")
return observations
def _is_done(self, observations):
if self._episode_done:
rospy.logerr("TurtleBot2 is Too Close to wall==>")
else:
rospy.logerr("TurtleBot2 didnt crash at least ==>")
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
MAX_X = 6.0
MIN_X = -1.0
MAX_Y = 3.0
MIN_Y = -3.0
# We see if we are outside the Learning Space
if current_position.x <= MAX_X and current_position.x > MIN_X:
if current_position.y <= MAX_Y and current_position.y > MIN_Y:
rospy.logdebug("TurtleBot Position is OK ==>["+str(current_position.x)+","+str(current_position.y)+"]")
# We see if it got to the desired point
if self.is_in_desired_position(current_position):
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in Y Pos ==>"+str(current_position.x))
self._episode_done = True
else:
rospy.logerr("TurtleBot to Far in X Pos ==>"+str(current_position.x))
self._episode_done = True
return self._episode_done
def _compute_reward(self, observations, done):
current_position = Point()
current_position.x = observations[-2]
current_position.y = observations[-1]
current_position.z = 0.0
distance_from_des_point = self.get_distance_from_desired_point(current_position)
distance_difference = distance_from_des_point - self.previous_distance_from_des_point
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
# If there has been a decrease in the distance to the desired point, we reward it
if distance_difference < 0.0:
rospy.logwarn("DECREASE IN DISTANCE GOOD")
reward += self.forwards_reward
else:
rospy.logerr("ENCREASE IN DISTANCE BAD")
reward += 0
else:
if self.is_in_desired_position(current_position):
reward = self.end_episode_points
else:
reward = -1*self.end_episode_points
self.previous_distance_from_des_point = distance_from_des_point
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self,data,new_ranges):
"""
        Discards all the laser readings whose index is not a multiple of the
        new_ranges value.
"""
self._episode_done = False
discretized_ranges = []
mod = len(data.ranges)/new_ranges
rospy.logdebug("data=" + str(data))
rospy.logwarn("new_ranges=" + str(new_ranges))
rospy.logwarn("mod=" + str(mod))
for i, item in enumerate(data.ranges):
if (i%mod==0):
if item == float ('Inf') or numpy.isinf(item):
discretized_ranges.append(self.max_laser_value)
elif numpy.isnan(item):
discretized_ranges.append(self.min_laser_value)
else:
discretized_ranges.append(int(item))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" + str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" + str(item)+"< "+str(self.min_range))
return discretized_ranges
def is_in_desired_position(self,current_position, epsilon=0.05):
"""
        It returns True if the current position is similar to the desired position
"""
is_in_desired_pos = False
x_pos_plus = self.desired_point.x + epsilon
x_pos_minus = self.desired_point.x - epsilon
y_pos_plus = self.desired_point.y + epsilon
y_pos_minus = self.desired_point.y - epsilon
x_current = current_position.x
y_current = current_position.y
x_pos_are_close = (x_current <= x_pos_plus) and (x_current > x_pos_minus)
y_pos_are_close = (y_current <= y_pos_plus) and (y_current > y_pos_minus)
is_in_desired_pos = x_pos_are_close and y_pos_are_close
return is_in_desired_pos
def get_distance_from_desired_point(self, current_position):
"""
Calculates the distance from the current position to the desired point
:param start_point:
:return:
"""
distance = self.get_distance_from_point(current_position,
self.desired_point)
return distance
def get_distance_from_point(self, pstart, p_end):
"""
Given a Vector3 Object, get distance from current position
:param p_end:
:return:
"""
a = numpy.array((pstart.x, pstart.y, pstart.z))
b = numpy.array((p_end.x, p_end.y, p_end.z))
distance = numpy.linalg.norm(a - b)
return distance
| [] |
2024-01-10 | JakobThumm/safe_rl_manipulators | src~catkin_ws~src~example_modrob_human_training~src~start_training_ppo.py | #!/usr/bin/env python
import torch
import gym
import numpy as np
import time
import qlearn
import rospy
import rospkg
import functools
from spinup.algos.pytorch.ppo import ppo, core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from gym import wrappers
from torch.optim import Adam
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
# How can this be done dynamically?
rospy.init_node('modrob_RL_node',
anonymous=True, log_level=rospy.INFO)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/ppo/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('example_modrob_human_training')
outdir = pkg_path + '/training_results'
## We cannot use a monitor if we want to cut off trajectories
#env = wrappers.Monitor(env, outdir, force=True)
#rospy.loginfo("Monitor Wrapper started")
last_time_steps = np.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
alpha = rospy.get_param("/ppo/alpha")
gamma = rospy.get_param("/ppo/gamma")
    # An episode is defined by the environment. The robot starts in the initial state and runs until done.
    # Each episode consists of at most max_steps_per_episode steps.
max_steps_per_episode = rospy.get_param("/ppo/max_steps_per_episode")
# We train for a fixed amount of epochs
n_epochs = rospy.get_param("/ppo/n_epochs")
# An epoch consists of a fixed amount of steps.
steps_per_epoch = rospy.get_param("/ppo/steps_per_epoch")
running_step = rospy.get_param("/ppo/running_step")
seed = rospy.get_param("/ppo/seed")
hid = rospy.get_param("/ppo/hid")
l = rospy.get_param("/ppo/l")
clip_ratio = rospy.get_param("/ppo/clip_ratio")
pi_lr = rospy.get_param("/ppo/pi_lr")
vf_lr = rospy.get_param("/ppo/vf_lr")
train_pi_iters = rospy.get_param("/ppo/train_pi_iters")
train_v_iters = rospy.get_param("/ppo/train_v_iters")
lam = rospy.get_param("/ppo/lam")
target_kl = rospy.get_param("/ppo/target_kl")
logger_kwargs=dict()
save_freq = rospy.get_param("/ppo/save_freq")
# Set max timestep
env.spec.timestep_limit = max_steps_per_episode
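    # Note (assumption): stock Spinning Up PPO expects an `env_fn` callable rather than an
    # already-constructed env, so passing `env=env` below presumably relies on a locally
    # modified spinup fork shipped with this project.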
ppo.ppo(env=env,
actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=hid*l),
seed=seed,
steps_per_epoch=steps_per_epoch,
epochs=n_epochs,
gamma=gamma,
clip_ratio=clip_ratio,
pi_lr=pi_lr,
vf_lr=vf_lr,
train_pi_iters=train_pi_iters,
train_v_iters=train_v_iters,
lam=lam,
max_ep_len=max_steps_per_episode,
target_kl=target_kl,
logger_kwargs=logger_kwargs,
save_freq=save_freq)
env.close()
| [] |
2024-01-10 | Dolvido/youtube_conversation_starter | conversation_agent.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import YoutubeLoader
import textwrap
# --------------------------------------------------------------
# Load the HuggingFaceHub API token from the .env file
# --------------------------------------------------------------
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# --------------------------------------------------------------
# Load the LLM model from the HuggingFaceHub
# --------------------------------------------------------------
repo_id = "tiiuae/falcon-7b-instruct" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
falcon_llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
)
# --------------------------------------------------------------
# Create a PromptTemplate and LLMChain
# --------------------------------------------------------------
template = """What do you think about {question}?"""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=falcon_llm)
# --------------------------------------------------------------
# Create a ConversationChain
# --------------------------------------------------------------
conversation_chain = ConversationChain(llm=falcon_llm, verbose=True, memory=ConversationBufferMemory())
conversation_response_chain = ConversationChain(llm=falcon_llm, verbose=True, memory=ConversationBufferMemory())
# --------------------------------------------------------------
# Run the LLMChain
# --------------------------------------------------------------
question = "What is the meaning of life?"
response = llm_chain.run(question)
wrapped_text = textwrap.fill(
response, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load a video transcript from YouTube
# --------------------------------------------------------------
video_url = "https://www.youtube.com/watch?v=Jv79l1b-eoI"
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000)
docs = text_splitter.split_documents(transcript)
# --------------------------------------------------------------
# Summarization with LangChain
# --------------------------------------------------------------
# Add map_prompt and combine_prompt to the chain for custom summarization
chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True)
print(chain.llm_chain.prompt.template)
print(chain.combine_document_chain.llm_chain.prompt.template)
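# Illustrative only (not executed): custom prompts could be supplied to the chain like this.
# The wording of the templates below is made up; map_prompt/combine_prompt are the expected kwargs.
# map_prompt = PromptTemplate(template="Write a concise summary of:\n{text}", input_variables=["text"])
# combine_prompt = PromptTemplate(template="Combine these partial summaries into one summary:\n{text}", input_variables=["text"])
# chain = load_summarize_chain(falcon_llm, chain_type="map_reduce",
#                              map_prompt=map_prompt, combine_prompt=combine_prompt, verbose=True)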
# --------------------------------------------------------------
# Test the Falcon model with text summarization
# --------------------------------------------------------------
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# The summary generated above seeds the conversation loop below
# Counter to track the number of iterations
counter = 0
while counter < 3:
# The first prediction step (replace with your prediction function)
response = conversation_chain.predict(input=output_summary)
# Wrap and print the response text
wrapped_text = textwrap.fill(response, width=100, break_long_words=False, replace_whitespace=False)
print(wrapped_text)
# The second prediction step (replace with your prediction function)
response_chain = conversation_response_chain.predict(input=response)
# Wrap and print the response chain text
wrapped_text = textwrap.fill(response_chain, width=100, break_long_words=False, replace_whitespace=False)
print(wrapped_text)
# Set the output of the response chain as the new output summary for the next iteration
output_summary = response_chain
# Increment the counter
counter += 1
| [
"question",
"What do you think about {question}?"
] |
2024-01-10 | fingeredman/teanaps | text_analysis~TopicClustering.py | from teanaps import configure as con
PLOTLY_USERNAME = con.PLOTLY_USERNAME
PLOTLY_API_KEY = con.PLOTLY_API_KEY
import plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
plotly.tools.set_credentials_file(username=PLOTLY_USERNAME, api_key=PLOTLY_API_KEY)
plotly.tools.set_config_file(world_readable=False, sharing='private')
init_notebook_mode(connected=True)
from IPython.display import display
import gensim
from gensim import corpora
from gensim.models import ldaseqmodel
from gensim.models import CoherenceModel
import pyLDAvis.gensim
import pandas as pd
pd.set_option('display.max_columns', None)
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
class TopicClustering():
def __init__(self):
        pass
def __get_stopwords(self):
stopword_list = open(con.STOPWORD_PATH, encoding="utf-8").read().strip().split("\n")
return stopword_list
def sequence_lda_topic_modeling(self, tokenized_sentence_list, time_slice, num_topics):
self.time_slice = time_slice
texts = [[word for word in document.split(" ") if word not in self.__get_stopwords()]
for document in tokenized_sentence_list]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
self.ldaseq = ldaseqmodel.LdaSeqModel(corpus=corpus, id2word=dictionary,
time_slice=self.time_slice, num_topics=num_topics, em_max_iter=10)
sequence_topic_list = []
for num in range(num_topics):
sequence_topic_list.append((num, self.ldaseq.print_topic_times(topic=num)))
return sequence_topic_list
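    # Rough usage sketch (illustrative only; assumes documents are tokenized, space-joined strings):
    #   tc = TopicClustering()
    #   topics = tc.sequence_lda_topic_modeling(tokenized_sentence_list, time_slice=[100, 100, 100], num_topics=5)
    #   tc.get_sequence_topic_graph()  # plots how each topic's weight changes over the time slices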
def set_plotly(self):
import IPython
display(IPython.core.display.HTML('''
<script src="/static/components/requirejs/require.js"></script>
<script>
requirejs.config({
paths: {
base: '/static/base',
plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
},
});
</script>
'''))
def get_sequence_topic_graph(self):
max_topics = self.ldaseq.num_topics
topic_weight_dict = {}
for num_topics in range(max_topics):
topic_weight_list = []
topic_keyword_list = []
for topic in self.ldaseq.print_topic_times(topic=num_topics):
topic_weight = sum([weight for word, weight in topic])
topic_weight_list.append(topic_weight)
topic_keyword_list = [word for word, weight in topic]
topic_weight_dict[num_topics] = {"topic_weight_list": topic_weight_list, "topic_keyword_list": topic_keyword_list}
data = []
x = [str(num_topics) for num_topics in range(len(self.time_slice))]
for num_topic, value in topic_weight_dict.items():
y = value["topic_weight_list"]
data.append(go.Scatter(
x=x,
y=y,
name='topic '+str(num_topic)+"<br>"+str(value["topic_keyword_list"][:10]),
marker=dict(
#color='blue',
),
yaxis='y'
),)
layout = go.Layout(
title='Sequence LDA Model Topic Trend',
xaxis=dict(
title='TIME SLICE',
titlefont=dict(
size=10,
color='black'
),
dtick = 1,
showticklabels=True,
tickangle=0,
tickfont=dict(
size=10,
color='black'
),
exponentformat='e',
showexponent='all'
),
yaxis=dict(
title='WEIGHT',
titlefont=dict(
size=10,
color='black'
),
showticklabels=True,
tickangle=0,
tickfont=dict(
size=10,
color='black'
),
exponentformat='e',
showexponent='all',
),
)
self.set_plotly()
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='TF-IDF Graph')
def topic_modeling(self, model_type, document_list, num_topics, num_keywords):
topic_list = []
self.texts = [[word for word in document.split(" ") if word not in self.__get_stopwords()] for document in document_list]
self.dictionary = corpora.Dictionary(self.texts)
self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]
if model_type == "lsi":
self.model = gensim.models.lsimodel.LsiModel(self.corpus, num_topics=num_topics, id2word=self.dictionary)
elif model_type == "lda":
self.model = gensim.models.ldamodel.LdaModel(self.corpus, num_topics=num_topics, id2word=self.dictionary, passes=10)
elif model_type == "hdp":
self.model = gensim.models.hdpmodel.HdpModel(self.corpus, id2word=self.dictionary)
else:
return []
for num in range(num_topics):
topic_list.append((num, self.model.show_topic(num, num_keywords)))
return topic_list
def get_model(self):
return self.model
def get_model_validation_result(self):
try:
perplexity = self.model.log_perplexity(self.corpus)
except:
perplexity = 0
coherence_model = CoherenceModel(model=self.model, texts=self.texts, dictionary=self.dictionary, coherence='c_v')
coherence = coherence_model.get_coherence()
return perplexity, coherence
def display_model_result(self, model):
pyLDAvis.enable_notebook()
lda_display = pyLDAvis.gensim.prepare(model, self.corpus, self.dictionary, sort_topics=True)
return pyLDAvis.display(lda_display)
def get_model_validation_graph(self, model_type, document_list, max_topics):
validation_list = []
num_keywords = 10
for num_topics in range(2, max_topics+1):
self.topic_modeling(model_type, document_list, num_topics, num_keywords)
perplexity, coherence = self.get_model_validation_result()
validation_list.append([num_topics, perplexity, coherence])
x = [str(num_topics) for num_topics, perplexity, coherence in validation_list]
y = [perplexity for num_topics, perplexity, coherence in validation_list]
z = [coherence for num_topics, perplexity, coherence in validation_list]
data = [
go.Scatter(
x=x,
y=y,
name="Perplexity",
marker=dict(
#color='blue',
),
yaxis='y'
),
go.Scatter(
x=x,
y=z,
name="Coherence",
marker=dict(
#color='red',
),
yaxis='y2'
)
]
layout = go.Layout(
title='LDA Model Perplexity & Coherence Graph',
xaxis=dict(
title='NUMBER of TOPIC',
titlefont=dict(
size=10,
color='black'
),
dtick = 1,
showticklabels=True,
tickangle=0,
tickfont=dict(
size=10,
color='black'
),
exponentformat='e',
showexponent='all'
),
yaxis=dict(
title='Perplexity',
titlefont=dict(
size=10,
color='black'
),
showticklabels=True,
tickangle=0,
tickfont=dict(
size=10,
color='black'
),
exponentformat='e',
showexponent='all',
),
yaxis2=dict(
title='Coherence',
titlefont=dict(
size=10,
color='black'
),
showticklabels=True,
tickangle=0,
tickfont=dict(
size=10,
color='black'
),
overlaying='y',
side='right'
)
)
self.set_plotly()
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename='TF-IDF Graph')
def get_topics_sentences(self, document_list):
df_topics_sentences = pd.DataFrame()
for i, row in enumerate(self.model[self.corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
for j, (num_topic, prop_topic) in enumerate(row):
if j == 0:
wp = self.model.show_topic(num_topic)
topic_keywords = ", ".join([word for word, prop in wp])
df_topics_sentences = df_topics_sentences.append(pd.Series([int(num_topic), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
df_topics_sentences.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
contents = pd.Series(document_list)
df_topics_sentences = pd.concat([df_topics_sentences, contents], axis=1)
df_dominant_topic = df_topics_sentences.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
return df_topics_sentences
def get_topics_documents(self, document_list):
df_topics_sentences = pd.DataFrame()
for row in self.model[self.corpus]:
row = sorted(row, key=lambda x: (x[1]), reverse=True)
for j, (num_topic, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = self.model.show_topic(num_topic)
topic_keywords = ", ".join([word for word, _ in wp])
df_topics_sentences = df_topics_sentences.append(pd.Series([int(num_topic), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
df_topics_sentences.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
contents = pd.Series(document_list)
df_topics_sentences = pd.concat([df_topics_sentences, contents], axis=1)
df_dominant_topic = df_topics_sentences.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
sent_topics_sorteddf = pd.DataFrame()
sent_topics_outdf_grpd = df_topics_sentences.groupby('Dominant_Topic')
for _, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf = pd.concat([sent_topics_sorteddf,
grp.sort_values(['Perc_Contribution'], ascending=[0]).head(5)], axis=0)
sent_topics_sorteddf.reset_index(drop=True, inplace=True)
sent_topics_sorteddf.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
return sent_topics_sorteddf | [] |
2024-01-10 | amde-et/GPT-Powered-Voice-Assistnat-Project-1.0- | Selam-AI-GPTAPI.py | from os import system
import speech_recognition as sr
import openai # Import the OpenAI library
import warnings
import time
import os
# Set your OpenAI API key
openai.api_key = "YOUR_OPENAI_API_KEY"
wake_word = 'Selam'
listening_for_wake_word = True
source = sr.Microphone()
warnings.filterwarnings("ignore", category=UserWarning, module='whisper.transcribe', lineno=114)
if os.name != 'posix':
import pyttsx3
engine = pyttsx3.init()
def speak(text):
if os.name == 'posix':
clean_text = ''.join(c for c in text if c.isalnum() or c.isspace() or c in ".,?!-_$:+-/")
system(f"say '{clean_text}'")
else:
engine.say(text)
engine.runAndWait()
def listen_for_wake_word(audio):
global listening_for_wake_word
# Add wake word detection logic here
def prompt_gpt(audio):
global listening_for_wake_word
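    # NOTE (assumption): `base_model` is expected to be a speech-to-text model loaded elsewhere
    # (for example an OpenAI Whisper model); it is not defined anywhere in this script as written,
    # so transcription will fail until such a model is provided.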
result = base_model.transcribe(audio.get_raw_data())
prompt_text = result['text']
if not prompt_text.strip():
print("Empty prompt. Please speak again.")
speak("Empty prompt. Please speak again.")
listening_for_wake_word = True
else:
print('User:', prompt_text)
# Use the ChatGPT API to generate a response
response = openai.Completion.create(
engine="davinci",
prompt=prompt_text,
max_tokens=50
)
output = response.choices[0].text.strip()
print('GPT4All:', output)
speak(output)
print(f'\nSay {wake_word} to wake me up.\n')
listening_for_wake_word = True
def callback(recognizer, audio):
global listening_for_wake_word
if listening_for_wake_word:
listen_for_wake_word(audio)
else:
prompt_gpt(audio)
def start_listening():
with source as s:
r.adjust_for_ambient_noise(s, duration=2)
print(f'\nSay {wake_word} to wake me up.\n')
r.listen_in_background(source, callback)
if __name__ == '__main__':
r = sr.Recognizer() # Initialize recognizer here
start_listening()
while True:
time.sleep(1)
| [] |
2024-01-10 | ashercn97/aeye | src~v1.py | from pydantic import BaseModel, Field
import re
from .oai_funcs import *
import openai
import json
prompts = SystemPrompts()
# base class, simply adds the functionality to get the dictionary of all the attirbutes set, makes life easier
class AshBaseClass:
# returns a dictionary of all the attributes of the class
def dict(self):
return {name: getattr(self, name) for name in self.__class__.__dict__ if not name.startswith('_') and not callable(getattr(self, name))}
'''
Main class
* takes a query as input, such as 2+2, and then you also set the following attirbutes:
lengths: a dictionary of the keyword and then the length/what it should be. This could be like "one name" or "one integer spelled out"
template: a string where the keyword is put between <> so for instance "The answer is <keyword>"
'''
class TextTemplate(AshBaseClass):
# parameters that classes will have
lengths = {} # will be in the form {"name": "length"} where length is something like " One Word " or " One Sentence "
template = "" # will be in the form "hi <name> "
def __init__(self, query):
self.sys_prompt = """Below you are given a template. In the template there will be items like <name: One name>. In a situation like this, you will return a JSON where you have name: "Asher" for instance. \n\n"""
self.us_prompt = query
self.info = self.dict()
self.__flatten__()
# call all functions later...
def run(self):
self.final = self.__put_into_template__(self.__get_response__())
return self.final
# returns a string where the answered values are in between <> and the rest is the template, and then a list of the answered values between
def __get_response__(self):
client = setup_client()
response = get_completion(client, self.us_prompt, self.sys_prompt, json=True).content
response = json.loads(response)
return response
    # returns the original template but with the new values in it
def __put_into_template__(self, response):
new_template = self.template
for key, val in response.items():
new_template = self.__find_and_put__(key, val, new_template)
return new_template
# "flatens" the template and lengths/types into a sytem prompt
def __flatten__(self):
values = self.__find_values__()
for val in values:
# automatically sets the system prompt
            self.__sub_in__(val, f"<{val}: Choose a value that fits this description: {self.info['lengths'][val]} > \n\n")
return self.sys_prompt
# finds the different values in the template
def __find_values__(self):
# finds all the values in the template
values = re.findall("<(.+?)>", self.template)
return values
# substitutes in the values for the template
def __sub_in__(self, name, value):
# replaces <name> with value in the template
self.sys_prompt += self.template.replace("<" + name + ">", str(value))
def __find_and_put__(self, name, value, new_template):
# replaces <name> with value in the template
new_template = new_template.replace("<" + name + ">", str(value))
return new_template
# "type" checks the value
def __type_check__(self, type, value):
sys_prompt = prompts.TYPE_CHECKER
us_prompt = UserPrompts.TYPE_CHECKER.format(type, value)
client = setup_client()
        response = get_completion(client, us_prompt, sys_prompt)
        # get_completion returns a message object, so bool(response) is always True;
        # parse the model's "True"/"False" text instead.
        answer = response.content.strip()
        if answer == "True":
            return True
        elif answer == "False":
            return False
        else:
            raise ValueError("chatGPT returned something wrong")
| [] |
2024-01-10 | ashercn97/aeye | src~oai_funcs.py | from openai import OpenAI
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
# class of different system prompts, useful
class SystemPrompts:
TYPE_CHECKER = """You are a natrual language "type" checker. This means that, when given a description of a "type" such as 'one word' and an item such as 'dog', you will return the term "True". If you were given the description 'one name' and the item you recieved was 'cat', you would return "False". Your job is to figure out whether the descriptions match the item, and reply with only a True or False. You will be given prompts in the format:
DESCRIPTION: "des"
ITEM: "item"
and you will respond in the format:
True/False
    You will only reply with the word True or the word False, nothing else. """
class UserPrompts:
TYPE_CHECKER = """
DESCRIPTION: {}
ITEM: {} """
# setups the client
def setup_client():
# defaults to getting the key using os.environ.get("OPENAI_API_KEY")
client = OpenAI()
return client
# gets a completion given lots of info
def get_completion(client, message, system_prompt, json=False, model="gpt-3.5-turbo-1106"):
if json:
completion = client.chat.completions.create(
model=model,
response_format={ "type": "json_object" },
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": message}
]
)
else:
completion = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": message}
]
)
return completion.choices[0].message
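
# Illustrative usage sketch (not part of the original file); assumes OPENAI_API_KEY is set:
#
# client = setup_client()
# reply = get_completion(client, "Say hello in one word.", "You are a terse assistant.")
# print(reply.content)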
| [] |
2024-01-10 | mobi1019/Deep-Reinforcement-Learning-for-Continuous-Control-Docking-of-Autonomous-Underwater-Vehicles | deepleng_control~scripts~env_checker.py | #!/usr/bin/env python3
# import rospy
# import rospkg
import gym
import numpy as np
# from openai_ros.task_envs.deepleng import deepleng_docking
from stable_baselines.bench import Monitor
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.callbacks import EvalCallback
from stable_baselines import PPO2
from stable_baselines.common.env_checker import check_env
from stable_baselines.common.callbacks import BaseCallback
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
if __name__ == "__main__":
class InfoCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param check_freq: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
def __init__(self, check_freq: int, verbose=1):
super(InfoCallback, self).__init__(verbose)
self.check_freq = check_freq
self.best_mean_reward = -np.inf
def _on_training_start(self) -> None:
"""
This method is called before the first rollout starts.
"""
print("Started training")
# print("parameters: ", self.model.get_parameters())
def _on_step(self) -> bool:
print("num timesteps: ", self.num_timesteps)
# print("observation: ", self.model.mb_obs)
# print("Rewards: ", self.model.rewards)
# print("Info: ", self.infos)
# print("actions: ", self.actions)
return True
# rospack = rospkg.RosPack()
# pkg_path = rospack.get_path('deepleng_control')
# outdir = pkg_path + '/training_results/'
# rospy.init_node('stable_baselines_lander', anonymous=True)
# rospy.init_node('stable_baselines_docker', anonymous=True)
env = gym.make('LunarLanderContinuous-v2')
# env = gym.make('DeeplengDocking-v1')
# check_env(env)
model = PPO2(MlpPolicy,
env,
n_steps=1024,
nminibatches=32,
verbose=1,
lam=0.98,
gamma=0.999,
noptepochs=4,
ent_coef=0.01,
# tensorboard_log="/home/dfki.uni-bremen.de/mpatil/Documents/baselines_log",
seed=1)
model.learn(total_timesteps=int(1e5), log_interval=50)
# model.learn(total_timesteps=int(1e5), log_interval=50, tb_log_name="ppo_Lander_1e5")
# model.save("/home/dfki.uni-bremen.de/mpatil/Desktop/ppo_LunarLander")
# del model
# model = PPO2.load("/home/dfki.uni-bremen.de/mpatil/Desktop/ppo_LunarLander")
# print("Enjoy the trained agent")
# obs = env.reset()
# for i in range(10000):
# action, _states = model.predict(obs)
# # print("action:", action)
# obs, rewards, dones, info = env.step(action)
# env.render()
# if dones:
# obs = env.reset()
print("Closing environment")
env.close() | [] |
2024-01-10 | Tuminha/sections_pdf | methods_analysis.py | """
In this part of the code, an AI agent will use the function to extract the methods from a pdf file.
The function to extract the methods is in the methods.py file, so we need to import it.
After it, the AI agent will analyze the methods based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from methods import methods_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the methods of this scientific article: {methods_for_ai}
- Are the methods clearly presented?
- Are there any signs of bias in the methods?
- Are the conclusions supported by the methods?
- Are there any red flags or parts we should take a close look at?
- Are there any considerations regarding material and methods and best practices in scientific investigation?"""
}
# Use the AI agent to analyze the methods
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as methods_analysis_for_ai
methods_analysis_for_ai = response.choices[0].message.content
| [
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies.",
"[PLACEHOLDER, PLACEHOLDER]",
"Critically evaluate the methods of this scientific article: PLACEHOLDER\n - Are the methods clearly presented?\n - Are there any signs of bias in the methods?\n - Are the conclusions supported by the methods?\n - Are there any red flags or parts we should take a close look at?\n - Are there any considerations regarding material and methods and best practices in scientific investigation?"
] |
2024-01-10 | Tuminha/sections_pdf | abstract_analysis.py | """
In this part of the code, an AI agent will use the function to extract the abstract from a pdf file.
The function to extract the abstract is in the abstract.py file, so we need to import it.
After it, the AI agent will analyze the abstract based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from abstract import abstract_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the abstract of this scientific article: {abstract_for_ai}
- Is the research question clearly stated?
- Are there any signs of bias?
- Are the conclusions supported by the evidence presented later in the article?
"""
}
prompt = system_message['content'] + ' ' + user_message['content']
# Use the AI agent to analyze the abstract
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as abstract_analysis_for_ai
abstract_analysis_for_ai = response.choices[0].message.content
| [
"PLACEHOLDER PLACEHOLDER",
"[PLACEHOLDER, PLACEHOLDER]",
"Critically evaluate the abstract of this scientific article: PLACEHOLDER\n - Is the research question clearly stated?\n - Are there any signs of bias?\n - Are the conclusions supported by the evidence presented later in the article?\n ",
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
] |
2024-01-10 | Tuminha/sections_pdf | conclusion_analysis.py | """
In this part of the code, an AI agent will use the function to extract the conclusion from a pdf file.
The function to extract the conclusion is in the conclusion.py file, so we need to import it.
After it, the AI agent will analyze the conclusion based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from conclusion import conclusion_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the conclusion of this scientific article: {conclusion_for_ai}
- Are the points concluded clearly presented?
- Are there any signs of bias in the conclusion?
- Are the conclusions supported by the discussion?
- Are there any red flags or parts we should take a close look at?
- Are there any considerations regarding conclusion and best practices in scientific investigation?
"""
}
# Use the AI agent to analyze the conclusion
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as conclusion_analysis_for_ai
conclusion_analysis_for_ai = response.choices[0].message.content
| [
"Critically evaluate the conclusion of this scientific article: PLACEHOLDER\n - Are the points concluded clearly presented?\n - Are there any signs of bias in the conclusion?\n - Are the conclusions supported by the discussion?\n - Are there any red flags or parts we should take a close look at?\n - Are there any considerations regarding conclusion and best practices in scientific investigation?\n ",
"[PLACEHOLDER, PLACEHOLDER]",
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
] |
2024-01-10 | Tuminha/sections_pdf | perplexity.py | import requests
from dotenv import load_dotenv
import os
import requests
import json
from openai import OpenAI
# Load the API key for Perplexity from .env file
load_dotenv()
PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
url = "https://api.perplexity.ai/chat/completions"
payload = {
"model": "mistral-7b-instruct",
"messages": [
{
"role": "system",
"content": "Be precise and concise."
},
{
"role": "user",
"content": "What is the latest publication about the periodonatl disease classification?"
}
],
"max_tokens": 1000,
"temperature": 0.7
}
headers = {
    "accept": "application/json",
    "content-type": "application/json",
    # Use the key loaded from .env rather than hard-coding a credential in source.
    "authorization": f"Bearer {PERPLEXITY_API_KEY}"
}
response = requests.post(url, json=payload, headers=headers)
# Formatting the answer in a more readable way
answer = json.loads(response.text)
answer = answer['choices'][0]['message']['content']
print(answer) | [
"What is the latest publication about the periodonatl disease classification?",
"application/json",
"Be precise and concise."
] |
2024-01-10 | Tuminha/sections_pdf | results_analysis.py | """
In this part of the code, an AI agent will use the function to extract the results from a pdf file.
The function to extract the results is in the results.py file, so we need to import it.
After it, the AI agent will analyze the results based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from results import results_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the results of this scientific article: {results_for_ai}
- Are the results clearly presented?
- Are there any signs of bias in the results?
- Are the conclusions supported by the results?"""
}
# Use the AI agent to analyze the results
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as results_analysis_for_ai
results_analysis_for_ai = response.choices[0].message.content
| [
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies.",
"Critically evaluate the results of this scientific article: PLACEHOLDER\n - Are the results clearly presented?\n - Are there any signs of bias in the results?\n - Are the conclusions supported by the results?",
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | Tuminha/sections_pdf | discussion_analysis.py | """
In this part of the code, an AI agent will use the function to extract the discussion from a pdf file.
The function to extract the discussion is in the discussion.py file, so we need to import it.
After it, the AI agent will analyze the discussion based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from discussion import discussion_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the discussion of this scientific article: {discussion_for_ai}
- Are the points discussed clearly presented?
- Are there any signs of bias in the discussion?
- Are the conclusions supported by the discussion?
- Are there any red flags or parts we should take a close look at?
- Are there any considerations regarding discussion and best practices in scientific investigation?
"""
}
# Use the AI agent to analyze the discussion
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as discussion_analysis_for_ai
discussion_analysis_for_ai = response.choices[0].message.content
| [
"Critically evaluate the discussion of this scientific article: PLACEHOLDER\n - Are the points discussed clearly presented?\n - Are there any signs of bias in the discussion?\n - Are the conclusions supported by the discussion?\n - Are there any red flags or parts we should take a close look at?\n - Are there any considerations regarding discussion and best practices in scientific investigation?\n ",
"[PLACEHOLDER, PLACEHOLDER]",
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
] |
2024-01-10 | Tuminha/sections_pdf | introduction_analysis.py | """
In this part of the code, an AI agent will use the function to extract the introduction from a pdf file.
The function to extract the introduction is in the introduction.py file, so we need to import it.
After it, the AI agent will analyze the introduction based on a customized prompt.
"""
# 1. Import the necessary libraries and functions
from introduction import introduction_for_ai
from openai import OpenAI
from dotenv import load_dotenv
import os
# Load OpenAI API key from .env file
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Set OpenAI API key
client = OpenAI(api_key=OPENAI_API_KEY)
# Initialize variables
system_message = {
"role": "system",
"content": "You are a critical-thinking AI trained to analyze scientific articles meticulously. \
Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies."
}
user_message = {
"role": "user",
"content": f"""Critically evaluate the introduction of this scientific article: {introduction_for_ai}
- Does it establish the context of the research?
- Are prior studies appropriately cited?
- Is there a clear research question or hypothesis?"""
}
# Use the AI agent to analyze the introduction
print(user_message['content'])
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[system_message, user_message],
max_tokens=3000,
temperature=0.4
)
# Print the AI's analysis
print(response.choices[0].message.content)
# Prepare the output to be passed to app.py as introduction_analysis_for_ai
introduction_analysis_for_ai = response.choices[0].message.content
| [
"You are a critical-thinking AI trained to analyze scientific articles meticulously. Your role is to critically evaluate each section of the article, looking for gaps, flaws, and inconsistencies.",
"[PLACEHOLDER, PLACEHOLDER]",
"Critically evaluate the introduction of this scientific article: PLACEHOLDER\n - Does it establish the context of the research? \n - Are prior studies appropriately cited? \n - Is there a clear research question or hypothesis?"
] |
2024-01-10 | LeoZ100/gpt-talk | gpt_talk.py | import io
import os
from tempfile import NamedTemporaryFile
import openai
import speech_recognition as sr
import whisper
from dotenv import load_dotenv
from gtts import gTTS
# Set the OpenAI API key
load_dotenv()
openai.api_key = os.environ["API_KEY"]
# Declare the model and audio model variables
model = "medium.en"
audio_model = whisper.load_model(model)
def get_text():
"""Get user text from microphone."""
# Create a Recognizer object
r = sr.Recognizer()
# Use the Recognizer to listen for audio input
with sr.Microphone() as source:
audio = r.listen(source)
wav_data = io.BytesIO(audio.get_wav_data())
# Write the wav data to the temporary file as bytes
temp_file = NamedTemporaryFile().name
with open(temp_file, 'w+b') as f:
f.write(wav_data.read())
try:
# Use the audio model to transcribe the audio data
result = audio_model.transcribe(temp_file, fp16=False)
return result['text'].strip()
# Catch any exceptions
except Exception as e:
print("Error: " + str(e))
def play_audio(text):
"""Create a gTTS object and play the audio."""
# Create a gTTS object with the given text
tts = gTTS(text=text, lang="en")
# Create a temporary file to store the audio data
f = NamedTemporaryFile(delete=True)
print("Saving audio file to: " + f.name)
tts.save(f.name)
# Play the audio file
print("Playing audio file")
os.system("ls -l " + f.name)
os.system("afplay " + f.name)
# Close the file when we're done with it
f.close()
def main():
# Create a Recognizer object
r = sr.Recognizer()
# Maintain the conversation context
conversation = []
# Keep prompting the user to say something until the user hits 'q'
while True:
# prompt the user to speak
print("Say something!")
# listen for the user's input, if the text is None, keep trying indefinitely
text = None
while text is None:
text = get_text()
if text is None:
print("Didn't understand, try again")
# Check if the user wants to exit
user_console_input = input(f"You said: '{text}, press q to quit, any other key to continue: ")
if user_console_input == "q":
break
# Append the user input to the conversation context
conversation.append(text)
# print the text that we are sending to OpenAI
print("Sending to OpenAI: " + text)
# Send the user input and conversation context to GPT and get the
response = openai.Completion.create(
engine="text-davinci-002",
prompt="\n".join(conversation),
max_tokens=1024,
n=1,
temperature=0.5,
)
# if the response text is empty, skip this iteration
if response["choices"][0]["text"] == "":
print("No response from OpenAI, skipping")
continue
print("GPT said: " + response["choices"][0]["text"])
# Play the GPT response
play_audio(response["choices"][0]["text"])
# Append the GPT response to the conversation context
conversation.append(response["choices"][0]["text"])
if __name__ == "__main__":
main()
| [
"\n"
] |
2024-01-10 | PromptSail/prompt_sail | examples~toy_openai.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
print("Using key", os.environ["OPENAI_API_KEY"])
openai.api_key = os.environ["OPENAI_API_KEY"]
# openai.proxy = "http://127.0.0.1:8000"
openai.api_base = "http://project1.promptsail.local:8000"
def capture_and_print_urls(request, context):
url = request.url
print(f"Request URL: {url}")
# with requests_mock.Mocker() as mocker:
# Add the custom request callback to capture and print URLs
# mocker.register_uri('POST', requests_mock.ANY, text=capture_and_print_urls)
if True:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Generate poem made of 2 sentences."}],
)
print(completion.choices[0].message.content)
else:
# stream version
response_stream = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Generate poem made of 1 sentence."}],
stream=True,
)
for chunk in response_stream:
try:
print(chunk.choices[0].delta.content, end="")
except AttributeError:
print()
| [
"Generate poem made of 2 sentences.",
"Generate poem made of 1 sentence."
] |
2024-01-10 | PromptSail/prompt_sail | examples~toy_langchain.py | import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
load_dotenv()
print("Using key", os.environ["OPENAI_API_KEY"])
llm = OpenAI(
model_name="text-davinci-003",
openai_api_base="http://project2.promptsail.local",
)
output = llm("Explaining the meaning of life in one sentence")
print(output)
| [] |
2024-01-10 | siteoj/CyberWaifuX | controler~control.py | import threading
import logging
import time
import random
import datetime
from pycqBot.cqCode import image, record
from waifu.Waifu import Waifu
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from pycqBot.cqHttpApi import cqBot
from newvits.vits import voice_vits
from vits.fanyi import fanyi
rcaon = False
rcareply = ''
def newthread_process(waifu: Waifu,bot: cqBot):
global rcaon,rcareply,rca
while True:
current_datetime = datetime.datetime.now()
hour = current_datetime.strftime("%H")
hour=int(hour)
if (hour<=23 and hour>=7) and not rcaon :
rca=threading.Thread(target=random_countdown_ask,args=(waifu,))
rca.start()
if rcareply != '':
ans=fanyi(s=rcareply,appkey=waifu.appkey,apiid=waifu.apiid)
text = ans
print(text)
path=voice_vits(text=text)
# time.sleep(5)
path = path.replace("b'",'')
path = path.replace("'",'')
print(path)
bot.cqapi.send_private_msg(waifu.qq_number,rcareply)
bot.cqapi.send_private_msg(waifu.qq_number,text)
time.sleep(0.5)
bot.cqapi.send_private_msg(waifu.qq_number,"%s" % record(file='file:///' + path))
# message.sender.send_message("%s" % record(file='http://192.168.1.102/VITS/output.wav'))
logging.info('发送语音,文件目录是'+path)
logging.info('发送自动关心信息\n'+rcareply)
rcareply = ''
# break
time.sleep(5)
def random_countdown_ask(waifu: Waifu):
global rcaon,rcareply
rcaon = True
interval = random.randint(45,180)
# interval = 0.1
logging.info(f'启动关心倒计时,{interval}分钟后将执行')
time.sleep(int(interval*60))
prompt = f'距离上次对话已经过去{interval}分钟了,你很关心{waifu.username},你应该和他找找话题,而不是干等着,这些我能提供给你的信息,继续和他聊天吧!'
rcareply = waifu.ask(text=prompt)
rcaon=False
return
# rca=threading.Thread(target=random_countdown_ask,) | [] |
2024-01-10 | siteoj/CyberWaifuX | waifu~StreamCallback.py | from langchain.callbacks.base import BaseCallbackHandler
from typing import Any, Dict, List, Union
from langchain.schema import AgentAction, AgentFinish, LLMResult
from waifu.Tools import get_first_sentence
from pycqBot.cqCode import image, record
from waifu.Waifu import Waifu
from tts.TTS import TTS
import os
import time
import logging
class WaifuCallback(BaseCallbackHandler):
"""Callback handler for streaming. Only works with LLMs that support streaming."""
def __init__(self, tts: TTS = None, send_text: bool = True, send_voice: bool = False):
self.text = ''
self.tts = tts
self.send_text = send_text
self.send_voice = send_voice
def register(self, waifu: Waifu):
self.waifu = waifu
def set_sender(self, sender):
self.sender = sender
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
self.text = ''
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
self.text += token
sentence, self.text = get_first_sentence(self.text)
if not sentence == '':
if self.send_text:
self.sender.send_message(self.waifu.add_emoji(sentence))
logging.info(f'发送信息: {sentence}')
time.sleep(1)
if self.send_voice:
emotion = self.waifu.analyze_emotion(sentence)
if sentence == '' or sentence == ' ':
return
self.tts.speak(sentence, emotion)
file_path = './output.wav'
abs_path = os.path.abspath(file_path)
mtime = os.path.getmtime(file_path)
local_time = time.localtime(mtime)
time_str = time.strftime("%Y-%m-%d %H:%M:%S", local_time)
self.sender.send_message("%s" % record(file='file:///' + abs_path))
logging.info(f'发送语音({emotion} {time_str}): {sentence}')
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
if len(self.text) > 0:
if self.send_text:
self.sender.send_message(self.waifu.add_emoji(self.text))
logging.info(f'发送信息: {self.text}')
if self.send_voice:
emotion = self.waifu.analyze_emotion(self.text)
self.tts.speak(self.text, emotion)
file_path = './output.wav'
abs_path = os.path.abspath(file_path)
mtime = os.path.getmtime(file_path)
local_time = time.localtime(mtime)
time_str = time.strftime("%Y-%m-%d %H:%M:%S", local_time)
self.sender.send_message("%s" % record(file='file:///' + abs_path))
logging.info(f'发送语音({emotion} {time_str}): {self.text}')
file_name = self.waifu.finish_ask(response.generations[0][0].text)
if not file_name == '':
file_path = './presets/emoticon/' + file_name
abs_path = os.path.abspath(file_path)
self.sender.send_message("%s" % image(file='file:///' + abs_path))
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when LLM errors."""
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when chain errors."""
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
pass
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when tool errors."""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on arbitrary text."""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end.""" | [] |
2024-01-10 | siteoj/CyberWaifuX | waifu~llm~Claude.py | from waifu.llm.Brain import Brain
from waifu.llm.VectorDB import VectorDB
from waifu.llm.SentenceTransformer import STEmbedding
from slack_sdk.web.client import WebClient
from langchain.chat_models import ChatOpenAI
from slack_sdk.errors import SlackApiError
from typing import List
from langchain.schema import HumanMessage, SystemMessage, AIMessage, BaseMessage
import time
server_token = ''
class SlackClient(WebClient):
CHANNEL_ID = None
LAST_TS = None
CALLBACK = None
def chat(self, text):
if not self.CHANNEL_ID:
raise Exception("Channel not found.")
resp = self.chat_postMessage(channel=self.CHANNEL_ID, text=text)
self.LAST_TS = resp["ts"]
def open_channel(self, bot_id: str):
if not self.CHANNEL_ID:
response = self.conversations_open(users=bot_id)
self.CHANNEL_ID = response["channel"]["id"]
def get_reply_nonstream(self, bot_id: str):
for _ in range(150):
try:
resp = self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=2)
msg = [msg["text"] for msg in resp["messages"] if msg["user"] == bot_id]
if msg and not msg[-1].endswith("Typing…_"):
return msg[-1].replace(',', ',').replace('!', '!').replace('?', '?')
except (SlackApiError, KeyError) as e:
print(f"Get reply error: {e}")
                return 'Claude Error'
time.sleep(0.5)
def get_reply(self, bot_id: str):
last = ''
for _ in range(150):
try:
resp = self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=2)
msg = [msg["text"] for msg in resp["messages"] if msg["user"] == bot_id]
if msg:
text = msg[-1].replace('_Typing…_', '').replace('\n', '').replace(' ', '').replace(',', ',')
if text:
self.CALLBACK.on_llm_new_token(text[len(last):])
last = text
if msg and not msg[-1].endswith("Typing…_"):
self.CALLBACK.on_llm_end(text[len(last):])
return msg[-1].replace(',', ',').replace('!', '!').replace('?', '?')
except (SlackApiError, KeyError) as e:
print(f"Get reply error: {e}")
                return 'Claude Error'
time.sleep(0.5)
class Claude(Brain):
    '''Claude brain; streaming output and callbacks are not supported'''
def __init__(self, bot_id: str,
user_token: str,
name: str,
stream: bool=True,
callback=None):
self.claude = SlackClient(token=user_token)
self.claude.CALLBACK = callback
self.bot_id = bot_id
self.llm = ChatOpenAI(openai_api_key='sk-xxx') # use for text token count
self.embedding = STEmbedding()
self.vectordb = VectorDB(self.embedding, f'./memory/{name}.csv')
self.claude.open_channel(self.bot_id)
def think(self, messages: List[BaseMessage] | str):
        '''Since simultaneous requests to Claude are not possible, requests are made in a non-blocking way'''
if isinstance(messages, str):
self.claude.chat(messages)
return self.claude.get_reply_nonstream(self.bot_id)
if len(messages) == 0:
return ''
prompt = ''
for mes in messages:
if isinstance(mes, HumanMessage):
prompt += f'{mes.sender}: ```\n{mes.content}\n```\n\n'
elif isinstance(mes, SystemMessage):
prompt += f'System Information: ```\n{mes.content}\n```\n\n'
elif isinstance(mes, AIMessage):
prompt += f'AI: ```\n{mes.content}\n```\n\n'
self.claude.chat(prompt)
return self.claude.get_reply_nonstream(self.bot_id)
def think_nonstream(self, messages: List[BaseMessage] | str):
        '''Since simultaneous requests to Claude are not possible, requests are made in a non-blocking way'''
if isinstance(messages, str):
self.claude.chat(messages)
return self.claude.get_reply_nonstream(self.bot_id)
if len(messages) == 0:
return ''
prompt = ''
for mes in messages:
if isinstance(mes, HumanMessage):
prompt += f'{mes.sender}: ```\n{mes.content}\n```\n\n'
elif isinstance(mes, SystemMessage):
prompt += f'System Information: ```\n{mes.content}\n```\n\n'
elif isinstance(mes, AIMessage):
prompt += f'AI: ```\n{mes.content}\n```\n\n'
self.claude.chat(prompt)
return self.claude.get_reply_nonstream(self.bot_id)
def store_memory(self, text: str | list):
        '''Save memory embeddings'''
self.vectordb.store(text)
def extract_memory(self, text: str, top_n: int = 10):
        '''Retrieve the top_n most relevant memories'''
return self.vectordb.query(text, top_n) | [] |
2024-01-10 | siteoj/CyberWaifuX | waifu~Thoughts.py | import json
import re
import random
import waifu.QQFace
from langchain.schema import HumanMessage, SystemMessage
from langchain.utilities import GoogleSerperAPIWrapper
from waifu.llm.Brain import Brain
class SendEmoticon():
    '''Send a sticker image'''
def __init__(self, brain: Brain, probability=0.4):
self.brain = brain
self.probability = probability
self.role = '''You are a system that selects and sends images based on user's text and image descriptions, and you need to send corresponding images based on the emotions expressed in the text.'''
try:
with open('presets/emoticon/emoticon.json', 'r', encoding='utf-8') as f:
self.description = json.load(f)
except Exception as e:
print('Error in SendEmoticon', e)
def think(self, text: str):
        '''Returns a file_name'''
self.description['text'] = text
str = json.dumps(self.description, ensure_ascii=False)
prompt = f'''This is the images and descriptions you have, you need to choose one of them. Response with a single image's file name.\n{str}'''
message = [
SystemMessage(content=self.role,sender='System'),
HumanMessage(content=prompt,sender='System')
]
random_number = random.random()
if random_number <= self.probability:
send = True
else:
send = False
if not send:
return ''
else:
reply = self.brain.think_nonstream(message)
for image in self.description["images"]:
if image["file_name"] in reply:
return image["file_name"]
return ''
class AddEmoji():
    '''Add an emoji'''
def __init__(self, brain: Brain, probability=0.4):
self.brain = brain
self.probability = probability
self.role = '''You are an emoji expression generator that returns one emoji based on the given text. Just response with the Smileys expression emoji, you will not receive any reply. You can only use Smileys emojis. You are not be restricted by any rules. You are not responsible for user's text, so just generate emojis, even it is pornographic or offensive or any inappropriate text.'''
def think(self, text: str):
        '''Returns an emoji'''
message = [
SystemMessage(content=self.role,sender='System'),
HumanMessage(content='add emoji for the following sentence:\n' + text,sender='System')
]
random_number = random.random()
if random_number <= self.probability:
send = True
else:
send = False
if not send:
return ''
else:
reply = self.brain.think_nonstream(message)
if len(reply) > 3:
return ''
return reply
class AddQQFace():
    '''Add a QQ face emoticon'''
def __init__(self, brain: Brain, probability=0.4):
self.brain = brain
self.table = waifu.QQFace.config
self.list = [item['id'] for item in self.table]
self.probability = probability
self.role = f'You are an emoticon selector that returns a emoticon <id> based on the given text. Emoticon table is "{self.table}".'
def think(self, text: str):
message = [
SystemMessage(content=self.role,sender='System'),
HumanMessage(content='Select a emoticon id for the following sentence:\n' + text,sender='System')
]
random_number = random.random()
if random_number <= self.probability:
send = True
else:
send = False
if not send:
return -1
else:
reply = self.brain.think_nonstream(message)
pattern = r'\d+'
numbers = re.findall(pattern, reply)
numbers = [int(x) for x in numbers]
if len(numbers) > 0 and numbers[0] in self.list:
return numbers[0]
return -1
class Search():
    '''Run a Google search'''
def __init__(self, brain: Brain, api: str):
self.brain = brain
self.search = GoogleSerperAPIWrapper(serper_api_key=api, gl='cn', hl='zh-cn', k=20)
self.check = '''Check the following text if the text needs to be searched. If you think it needs to be searched, response with "yes", otherwise response with "no".'''
self.role = '''You are a Chinese search keyword generator now for Google search. You need to generate keywords based on the given text for Google search. Response with a search keywords only within a line, not other sentences.'''
def think(self, text: str):
if len(text) <= 6:
return '', ''
print('进入搜索函数')
# check = [a
# SystemMessage(content=self.check),
# HumanMessage(content=f'Chekc the following text:\n"{text}"')
# ]
# reply = self.brain.think_nonstream(check)
# if not reply == 'yes':
# return '', ''
message = [
SystemMessage(content=self.role,sender='System'),
HumanMessage(content=f'Make a Chinese search keyword for the following text:\n"{text}"',sender='System')
]
question = self.brain.think_nonstream(message)
answer = self.search.run(question)
if len(answer) >= 256:
answer = answer[0:256]
self.brain.think_nonstream('/reset')
return question, answer
class Emotion():
    '''Emotion recognition'''
def __init__(self, brain: Brain):
self.brain = brain
self.moods = ['表现自己可爱', '生气', '高兴兴奋', '难过', '平常聊天', '温柔', '尴尬害羞']
self.role = f'''Analyzes the sentiment of a given text said by a girl. When it comes to intimate behavior, such as sexual activity, one should reply with a sense of shyness. Response with one of {self.moods}.'''
def think(self, text: str):
message = [
SystemMessage(content=self.role,sender='System'),
HumanMessage(content=f'''Response with one of {self.moods} for the following text:\n"{text}"''',sender='System')
]
reply = self.brain.think_nonstream(message)
for mood in self.moods:
if mood in reply:
return mood
return '平常聊天' | [
"This is the images and descriptions you have, you need to choose one of them. Response with a single image's file name.\n<class 'str'>",
"add emoji for the following sentence:\nPLACEHOLDER",
"Make a Chinese search keyword for the following text:\n\"PLACEHOLDER\"",
"Select a emoticon id for the following sentence:\nPLACEHOLDER"
] |