{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4",
      "private_outputs": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "9DQbiOOZLTsM",
        "cellView": "form"
      },
      "outputs": [],
      "source": [
        "#@markdown #INSTALL\n",
        "##@markdown #SETTINGS\n",
        "\n",
        "##@markdown ⚙️ Save settings and pics to google-disk:\n",
        "##Images_and_Settings_on_google_disk = True #@param {type:\"boolean\"}\n",
        "##@markdown 📁 StableDiffusion dir at google-disk:\n",
        "##google_disk_path = \"llm_telegram_bot\" #@param {type:\"string\"}\n",
        "\n",
        "# SET OUTPUT_DIR\n",
        "#OUTPUT_DIR = \"/content/llm_telegram_bot/\"\n",
        "#if Images_and_Settings_on_google_disk:\n",
        "#    from google.colab import drive\n",
        "#    drive.mount('/content/drive')\n",
        "#    OUTPUT_DIR = \"/content/drive/MyDrive/\" + google_disk_path\n",
        "\n",
        "#@markdown * Choose llm model:\n",
        "model = \"collectivecognition-v1-mistral-7b.Q4_K_M.gguf\" #@param [\"collectivecognition-v1-mistral-7b.Q4_K_M.gguf\", \"speechless-tora-code-7b-v1.0.Q4_K_M.gguf\", \"mistral-7b-v0.1.Q4_K_M.gguf\", \"xwin-lm-13b-v0.2.Q4_K_M.gguf\"]\n",
        "#@markdown * Choose if VM with GPU or not:\n",
        "is_GPU_VM = \"on\" #@param [\"on\", \"off\"]\n",
        "#@markdown * Choose num of layers on GPU (0 if no GPU support):\n",
        "num_GPU_layers = \"35\" #@param {type:\"string\"}\n",
        "\n",
        "# INSTALL A1111 SD\n",
        "!git clone https://github.com/innightwolfsleep/text-generation-webui-telegram_bot llm_telegram_bot\n",
        "!pip install -r /content/llm_telegram_bot/requirements_ext.txt\n",
        "# CPU usage mode\n",
        "#!pip install llama-cpp-python\n",
        "# GPU usage mode\n",
        "!CMAKE_ARGS=\"-DLLAMA_CUBLAS=$is_GPU_VM\" pip install llama-cpp-python\n",
        "!pip install -q wget\n",
        "\n",
        "# LOAD MODEL\n",
        "!pip install -q wget\n",
        "import wget\n",
        "import os\n",
        "def load_sd_model(url,):\n",
        "  if not os.path.exists(\"/content/llm_telegram_bot/models/\" + url.split(\"/\")[-1]):\n",
        "    print(\"Start downloading model: \" + url.split(\"/\")[-1])\n",
        "    wget.download(url, \"/content/llm_telegram_bot/models/\" + url.split(\"/\")[-1])\n",
        "    print(\"Download completed: \" + url.split(\"/\")[-1])\n",
        "  else:\n",
        "    print(\"Already downloaded model: \" + url.split(\"/\")[-1])\n",
        "\n",
        "# Prepare temp config file and set model path\n",
        "!cp /content/llm_telegram_bot/configs/collab_config.json /content/llm_telegram_bot/configs/temp_collab_config.json\n",
        "!sed -i 's/model_to_load_name/'$model'/g' /content/llm_telegram_bot/configs/temp_collab_config.json\n",
        "!cp /content/llm_telegram_bot/configs/generator_params.json /content/llm_telegram_bot/configs/temp_generator_params.json\n",
        "!sed -i 's/\"n_gpu_layers\": 0/\"n_gpu_layers\": '$num_GPU_layers'/g' /content/llm_telegram_bot/configs/temp_generator_params.json\n",
        "\n",
        "# Load model\n",
        "model_URL = \"Not implemented yet\"\n",
        "if model_URL.startswith(\"http\"):\n",
        "  load_sd_model(model_URL)\n",
        "else:\n",
        "  if model==\"collectivecognition-v1-mistral-7b.Q4_K_M.gguf\": load_sd_model(\"https://huggingface.co/TheBloke/CollectiveCognition-v1-Mistral-7B-GGUF/resolve/main/collectivecognition-v1-mistral-7b.Q4_K_M.gguf\")\n",
        "  if model==\"speechless-tora-code-7b-v1.0.Q4_K_M.gguf\": load_sd_model(\"https://huggingface.co/TheBloke/speechless-tora-code-7B-v1.0-GGUF/resolve/main/speechless-tora-code-7b-v1.0.Q4_K_M.gguf\")\n",
        "  if model==\"mistral-7b-v0.1.Q4_K_M.gguf\": load_sd_model(\"https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_K_M.gguf\")\n",
        "  if model==\"xwin-lm-13b-v0.2.Q4_K_M.gguf\": load_sd_model(\"https://huggingface.co/TheBloke/Xwin-LM-13B-v0.2-GGUF/resolve/main/xwin-lm-13b-v0.2.Q4_K_M.gguf\")\n",
        "\n",
        "#@markdown * Press ▶️ to install bot.\n",
        "#@markdown * Wait 5-10 min until instalation finished message\n",
        "!echo !!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",
        "!echo !!! INSTALATION FINISHED !!!\n",
        "!echo !!!!!!!!!!!!!!!!!!!!!!!!!!!!"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "#@markdown #LAUNCH\n",
        "#@markdown * set your telegram bot token.\n",
        "telegram_token = \"\" #@param {type:\"string\"}\n",
        "#@markdown * Press ▶️ left bottom to run bot.\n",
        "\n",
        "!python llm_telegram_bot/run.py $telegram_token llm_telegram_bot/configs/temp_collab_config.json"
      ],
      "metadata": {
        "cellView": "form",
        "id": "GnbSISEThboQ"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}