Commit 70c7f04 · Parent(s): 8b82e6e
think definitions draft

Files changed:
- lib/grapher.py +25 -22
- lib/me.py +92 -3
- lib/memory.py +11 -0
lib/grapher.py CHANGED

@@ -18,7 +18,7 @@ class APIRequester:
         else:
             return None
 
-class JSONParser:
+class Grapher:
     def __init__(self, memoria_nlp, threshold=70):
         self.threshold = threshold
         self.graph = nx.Graph()

@@ -89,27 +89,30 @@ class JSONParser:
         for vecino in otro_grafo.neighbors(nodo):
             self.graph.add_edge(nodo, vecino)
 
-# Example usage
-memoria_nlp = MemoriaRobotNLP(max_size=100)
-json_parser = JSONParser(memoria_nlp)
-
-url = "https://jsonplaceholder.typicode.com/posts"
-data = api_requester.make_request(url)
-
-json_parser
+
+if __name__ == "__main__":
+    # Example usage
+    memoria_nlp = MemoriaRobotNLP(max_size=100)
+    json_parser = JSONParser(memoria_nlp)
+
+    api_requester = APIRequester()
+    url = "https://jsonplaceholder.typicode.com/posts"
+    data = api_requester.make_request(url)
+
+    if data:
+        json_parser.parse_json(data)
+        json_parser.draw_graph()
+
+        otro_parser = JSONParser(MemoriaRobotNLP(max_size=100))
+        otro_parser.parse_json({"id": 101, "title": "New Title", "userId": 11})
+
+        print("Uniendo los grafos...")
+        json_parser.unir_grafos(otro_parser.graph, umbral=80)
+        print("Grafo unido:")
+        json_parser.draw_graph()
+
+        json_parser.guardar_en_memoria()
+    else:
+        print("Error al realizar la solicitud a la API.")
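The new __main__ block drives the graph build through APIRequester.make_request, of which the diff only shows the else: return None branch. A minimal sketch of the behaviour the block appears to assume (fetch the URL and return decoded JSON, or None on failure), written against the requests library; this is an assumption, not the class as committed:

    import requests

    class APIRequester:
        """Illustrative stand-in: GET a URL and return the decoded JSON, or None on failure."""
        def make_request(self, url):
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                return response.json()
            else:
                return None

With that behaviour, data holds the list of posts from jsonplaceholder.typicode.com and the if data: branch hands it to JSONParser.parse_json.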
lib/me.py CHANGED

@@ -1,10 +1,19 @@
+from lib.files import *
 from lib.memory import *
 from lib.grapher import *
 from lib.pipes import *
 from lib.entropy import *
+from lib.sonsofstars import *
+import internetarchive
+
+longMem = TextFinder("resources")
+coreAi = AIAssistant()
+memory = MemoriaRobotNLP(max_size=200000)
+grapher = Grapher(memory)
+sensor_request = APIRequester()
 
 class I:
-    def __init__(self, frases_yo, preferencias, propiedades_persona):
+    def __init__(self, prompt, frases_yo, preferencias, propiedades_persona):
         self.frases_yo = frases_yo
         self.preferencias = preferencias
         self.propiedades_persona = propiedades_persona
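TextFinder, AIAssistant and APIRequester are not shown in this diff, so the tagger and sentiment calls used below (entity_pos_tagger, grammatical_pos_tagger, sentiment_tags) are assumed interfaces. A rough, illustrative stand-in for those three calls, sketched with spaCy and transformers purely for orientation (not part of this commit):

    import spacy
    from transformers import pipeline

    class AIAssistant:
        """Illustrative stand-in for the assumed AIAssistant interface."""
        def __init__(self):
            self.nlp = spacy.load("en_core_web_sm")          # small English pipeline
            self.sentiment = pipeline("sentiment-analysis")  # default HF sentiment model

        def entity_pos_tagger(self, text):
            # named entities, e.g. [("Archive", "ORG"), ...]
            return [(ent.text, ent.label_) for ent in self.nlp(text).ents]

        def grammatical_pos_tagger(self, text):
            # coarse part-of-speech tags, e.g. [("book", "NOUN"), ...]
            return [(tok.text, tok.pos_) for tok in self.nlp(text)]

        def sentiment_tags(self, text):
            # e.g. [{"label": "POSITIVE", "score": 0.99}]
            return self.sentiment(text)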
@@ -21,15 +30,95 @@
 
         pass
 
+    ## create questions from internet archive
+    def crear_preguntas(self, txt):
+        search = internetarchive.search_items(sys.argv[1])
+        res = []
+        for result in search:
+            print(result['identifier'])
+            idc = result["identifier"]
+
+            headers = {"accept": "application/json"}
+
+            ## get book pages
+            req2 = requests.get("https://archive.org/stream/" + idc + "/" + idc + "_djvu.txt", headers=headers)
+            # print(req2.text)
+            try:
+                txt = req2.text.split("<pre>")[1].split("</pre>")[0].split(" <!--")[0]
+
+                for x in txt.split("\n"):
+                    if "?" in x:
+                        res.append(x)
+
+            except:
+                pass
+
+        return res
+
+    # generate thoughts and questions over the prompt data, compare with our own datasets, return matches with sentiment analysis
+    def think_gen(self, txt):
+
+        think_about = longMem.find_matches(txt)
+        for T in think_about:
+            ## get subject by entropy or POS tagger
+            subjects = coreAi.entity_pos_tagger(txt)
+            ## get common nouns, filtering on grammatical tags
+            subjects_low = coreAi.grammatical_pos_tagger(txt)
+            ## generate questions
+            questions = []
+            ## create questions from internet archive books
+            for sub in subjects:
+                questions.append(self.crear_preguntas(txt))
+
+            ## fast checks from gematria similarity
+            ## questions_togem =
+            ## gematria_search =
+
+            questions_subj = []
+            for q in questions:
+                questions_subj.append(coreAi.entity_pos_tagger(q))
+
+            memoryShortTags = memory.buscar_conceptos_patron(subjects)
+
+            ## get tags of subject
+            subj_tags = coreAi.entity_pos_tagger(T)
+
+            for sub in subjects:
+                memory.agregar_concepto(sub, ",".join(questions_subj) + ",".join(memoryShortTags))
+                memory.agregar_concepto(sub, T + ",".join(memoryShortTags))
+
+            ## check whether something needs to be added to our own datasets
+            ## run sentiment analysis
+            ## check whether the dopamine prompt is true or false over the information
+            ## set a weight on the information depending on the generated dopamine
+            ## add dopamine weights to the dopamine concept dataset
+
+    def crear_path_grafo(self, text):
+        pos_tags = coreAi.grammatical_pos_tagger(text)
+        ner_results = coreAi.entity_pos_tagger(text)
+
     def crear_circuito_logico(self):
         # Function to create a logic circuit with a specific algorithm
         pass
 
-    def tomar_decision_sentimiento(self):
+    def tomar_decision_sentimiento(self, sentimiento):
+
+        sentiments = coreAi.sentiment_tags(sentimiento)
         # Function to make a boolean decision from a sentiment analysis
-        pass
+        similarity = coreAi.similarity_tag(self, sentenceA, sentenceB)
+        ## check by similarity over memory tag paths
+
+        return sentiments
 
     def hacer_predicciones_texto(self, texto):
+
         # Function to make predictions of future text by similarity
         pass
 
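crear_preguntas mines question sentences from the plain-text dumps of Internet Archive items, but as committed it searches on sys.argv[1] instead of the txt argument and relies on sys and requests without importing them in me.py. A self-contained sketch of the same idea (the function name and max_items cap are mine, not from the commit), assuming the internetarchive and requests packages:

    from itertools import islice

    import internetarchive
    import requests

    def mine_questions(query, max_items=3):
        """Collect lines containing '?' from the text dumps of matching archive.org items."""
        questions = []
        for result in islice(internetarchive.search_items(query), max_items):
            idc = result["identifier"]
            url = f"https://archive.org/stream/{idc}/{idc}_djvu.txt"
            resp = requests.get(url, timeout=30)
            try:
                body = resp.text.split("<pre>")[1].split("</pre>")[0]
            except IndexError:
                continue  # this item has no embedded text layer
            questions.extend(line.strip() for line in body.split("\n") if "?" in line)
        return questions

    # usage: questions = mine_questions("philosophy of mind")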
lib/memory.py CHANGED

@@ -24,6 +24,17 @@ class MemoriaRobotNLP:
         if concepto in self.memoria:
             self.memoria[concepto] = [(s, p) for s, p in self.memoria[concepto] if s != string]
 
+    def buscar_conceptos_patron(self, patron):
+        resultados = {}
+
+        for concepto, strings in self.memoria.items():
+            for string, _ in strings:
+                if re.search(patron, string):
+                    if concepto not in resultados:
+                        resultados[concepto] = []
+                    resultados[concepto].append(string)
+
+        return resultados
     def obtener_conceptos_acotados(self, espacio_disponible):
         memoria_ordenada = sorted(self.memoria.items(), key=lambda x: sum(prioridad for _, prioridad in x[1]), reverse=True)
         espacio_utilizado = 0