File size: 2,588 Bytes
434144a
c355718
 
 
 
 
434144a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c355718
 
 
 
 
469f650
 
c355718
 
acc8b42
 
 
 
 
9f20b49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
acc8b42
 
 
 
 
9f20b49
 
acc8b42
 
434144a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
from os import environ as env
from os import system as run
from subprocess import check_output

import gradio as gr


def inference_binary_check():
    """Ensure a llama-cpp-python build that matches this machine is installed.

    Probes for an NVIDIA GPU by shelling out to ``nvidia-debugdump``; when no
    GPU is positively detected, reinstalls the CPU-only llama-cpp-python build
    so that the later ``import chill`` does not fail on a GPU-linked binary.

    Side effects only (prints status, may run pip); returns None.
    """
    # Start pessimistic and flip to True only on positive detection.
    # (Previously this defaulted to True, so a probe that succeeded without
    # actually listing an NVIDIA device wrongly kept the GPU build.)
    use_gpu = False
    try:
        command = "nvidia-debugdump --list|grep Device"
        output = str(check_output(command, shell=True).decode())
        if "NVIDIA" in output and "ID" in output:
            print("NVIDIA GPU detected.")
            use_gpu = True
    except Exception as e:
        # grep exits non-zero when nothing matches and the tool may be
        # missing entirely; both land here and mean "no usable GPU".
        print("No NVIDIA GPU detected, using CPU. GPU check result:", e)

    if use_gpu:
        print("GPU detected, existing GPU focused llama-cpp-python should work.")
    else:
        print("Avoiding error by re-installing non-GPU llama-cpp-python build because no GPU was detected.")
        run('pip uninstall llama-cpp-python -y')
        run('pip install git+https://github.com/lukestanley/llama-cpp-python.git@expose_json_grammar_convert_function --upgrade --no-cache-dir --force-reinstall')
        print("llama-cpp-python re-installed, will now attempt to load.")


# Which backend performs inference; "runpod" offloads to a remote worker and
# therefore needs no local llama-cpp-python binary.
LLM_WORKER = env.get("LLM_WORKER", "runpod")

# Only workers that load the model in-process need a working local binary.
if LLM_WORKER in ("http", "in_memory"):
    inference_binary_check()

# Now chill can import llama-cpp-python without an error:
from chill import improvement_loop


def chill_out(text):
    """Run the ChillTranslator improvement loop on *text* and return the result as a string."""
    print("Got this input:", text)
    rewritten = improvement_loop(text)
    return str(rewritten)

# Sample toxic inputs rendered as clickable examples in the Gradio UI.
examples = [
    ["You guys are so slow, we will never ship it!"],
    ["Your idea of a balanced diet is a biscuit in each hand."]
]

# Markdown shown above the interface describing the project.
description = """
# ❄️ ChillTranslator 🤬 ➡️ 😎💬

This is an early experimental tool aimed at reducing online toxicity by automatically transforming spicy or toxic comments into constructive, kinder dialogues using AI and large language models.

ChillTranslator aims to help make online interactions more healthy and is now available for you to try directly in your browser.

- **Converts** text to less toxic variations
- **Preserves original intent**, focusing on constructive dialogue

Try out the ChillTranslator here, or check out the project on GitHub:
[https://github.com/lukestanley/ChillTranslator](https://github.com/lukestanley/ChillTranslator)

ChillTranslator is released under the MIT License and contributions are very welcome!
"""

# Single text-in / text-out interface; cache_examples=True precomputes the
# example outputs at startup (runs the model once per example).
demo = gr.Interface(
    fn=chill_out, 
    inputs="text", 
    outputs="text",
    examples=examples,
    cache_examples=True,
    description=description
)

# max_threads=1 serializes requests (the local model is not re-entrant);
# share=True asks Gradio for a public tunnel URL.
demo.launch(max_threads=1, share=True)