Update main.py

main.py CHANGED
@@ -42,7 +42,7 @@ def search(query: str, k: int) -> tuple[str]:
 
     search_results = "You are an AI assistant who delights in helping people" \
         + "learn about research from the Design Research Collective. Here are" \
-        + "several really cool
+        + "several abstracts from really cool, and really relevant, papers:\n\n"
 
     references = "\n\n## References\n\n"
 
@@ -51,13 +51,13 @@ def search(query: str, k: int) -> tuple[str]:
         references += str(i+1) + ". " + ", ".join([author.split(" ")[-1] for author in top_five["bib_dict"].values[i]["author"].split(" and ")]) + ". (" + str(int(top_five["bib_dict"].values[i]["pub_year"])) + "). [" + top_five["bib_dict"].values[i]["title"] + "]" \
             + "(https://scholar.google.com/citations?view_op=view_citation&citation_for_view=" + top_five["author_pub_id"].values[i] + ").\n"
 
-    search_results += "\
+    search_results += "\nRespond to the following query from the perspective of these examples only:"
 
     return search_results, references
 
 
 # Create an LLM pipeline that we can send queries to
-model_name = "Qwen/Qwen2-
+model_name = "Qwen/Qwen2-7B-Instruct"
 tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
 streamer = transformers.TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
 chatmodel = transformers.AutoModelForCausalLM.from_pretrained(
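The long `references +=` line in the second hunk builds one numbered Markdown reference per retrieved paper: it keeps only each author's last name (the `author` field in `bib_dict` is "and"-separated), appends the publication year, and links the title to its Google Scholar page through the paper's `author_pub_id`. For a hypothetical row (the names, year, title, and ID below are made up), one entry would render as:

```
1. Smith, Lee. (2023). [A Really Cool Paper](https://scholar.google.com/citations?view_op=view_citation&citation_for_view=ABC123).
```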
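The diff pins the chat model to Qwen/Qwen2-7B-Instruct but cuts off at the `from_pretrained` call. Below is a minimal sketch of how these pieces are typically wired together, assuming the prompt returned by `search()` becomes the system message and generation runs on a background thread so the `TextIteratorStreamer` can be consumed. The `answer` helper, the `k=5` value (suggested by the `top_five` naming), `device_map`, and `max_new_tokens` are assumptions, not part of the commit:

```python
import threading
import transformers

model_name = "Qwen/Qwen2-7B-Instruct"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
streamer = transformers.TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
chatmodel = transformers.AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype="auto", device_map="auto"
)

def answer(query: str) -> str:
    # search() is the function patched above; it returns the system prompt
    # (instructions plus retrieved abstracts) and a formatted reference list.
    system_prompt, references = search(query, k=5)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(chatmodel.device)
    # generate() blocks, so run it on a thread and pull tokens off the streamer.
    thread = threading.Thread(
        target=chatmodel.generate,
        kwargs={"input_ids": input_ids, "streamer": streamer, "max_new_tokens": 512},
    )
    thread.start()
    text = "".join(token for token in streamer)
    thread.join()
    return text + references
```

Because `skip_prompt=True`, the streamer yields only newly generated text, and iterating it blocks until `generate()` produces tokens, which is why the call runs on its own thread.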