"""Test Self-hosted LLMs."""
import pickle
from typing import Any, List, Optional
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
model_reqs = ["pip:./", "transformers", "torch"]
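# NOTE (assumption): the reqs above are the packages Runhouse installs into the
# remote environment before running the model; "pip:./" presumably installs the
# local package from source so the remote env matches this checkout.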


def get_remote_instance() -> Any:
    """Get remote instance for testing."""
    import runhouse as rh

    return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
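

# NOTE (assumption): the tests below are integration tests that need live cloud
# credentials; rh.cluster should provision (or reuse) an on-demand single-A100
# instance named "rh-a10x", with use_spot=False requesting non-preemptible
# capacity.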


def test_self_hosted_huggingface_pipeline_text_generation() -> None:
    """Test valid call to self-hosted HuggingFace text generation model."""
    gpu = get_remote_instance()
    llm = SelfHostedHuggingFaceLLM(
        model_id="gpt2",
        task="text-generation",
        model_kwargs={"n_positions": 1024},
        hardware=gpu,
        model_reqs=model_reqs,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
    """Test valid call to self-hosted HuggingFace text2text generation model."""
    gpu = get_remote_instance()
    llm = SelfHostedHuggingFaceLLM(
        model_id="google/flan-t5-small",
        task="text2text-generation",
        hardware=gpu,
        model_reqs=model_reqs,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)
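

# NOTE (assumption): load_pipeline is used two ways below: called locally so
# the resulting pipeline object can be shipped to the cluster (from_pipeline),
# or passed as model_load_fn so the pipeline is constructed on the remote
# hardware itself (test_init_with_pipeline_fn).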


def load_pipeline() -> Any:
    """Load pipeline for testing."""
    model_id = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    pipe = pipeline(
        "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
    )
    return pipe
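

# NOTE (assumption): SelfHostedPipeline ships inference_fn to the remote
# hardware and calls it there with the loaded pipeline; this test helper simply
# returns the first generation and ignores the stop tokens.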


def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
    """Inference function for testing."""
    return pipeline(prompt)[0]["generated_text"]


def test_init_with_local_pipeline() -> None:
    """Test initialization with a locally constructed HF pipeline object."""
    gpu = get_remote_instance()
    pipeline = load_pipeline()
    llm = SelfHostedPipeline.from_pipeline(
        pipeline=pipeline,
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)
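

# NOTE (assumption): the next test pickles the pipeline into a Runhouse blob,
# copies it onto the cluster, and hands from_pipeline the remote path instead
# of a live object, so the pipeline is unpickled on the hardware itself.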


def test_init_with_pipeline_path() -> None:
    """Test initialization with a path to a pickled HF pipeline on the cluster."""
    gpu = get_remote_instance()
    pipeline = load_pipeline()
    import runhouse as rh

    rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
        gpu, path="models"
    )
    llm = SelfHostedPipeline.from_pipeline(
        pipeline="models/pipeline.pkl",
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)


def test_init_with_pipeline_fn() -> None:
    """Test initialization with a function that loads the HF pipeline remotely."""
    gpu = get_remote_instance()
    llm = SelfHostedPipeline(
        model_load_fn=load_pipeline,
        hardware=gpu,
        model_reqs=model_reqs,
        inference_fn=inference_fn,
    )
    output = llm("Say foo:")  # type: ignore
    assert isinstance(output, str)