prateekbh committed
Commit 373b3b1 · verified · 1 Parent(s): 572b329

Update app.py

Files changed (1):
    app.py +21 -21
app.py CHANGED
@@ -38,30 +38,30 @@ class StopOnTokens(StoppingCriteria):
         return False
 
 def getProductDetails(history, image):
-    product_description=getImageDescription(image)
-    clients = InferenceClient("google/gemma-7b")
-    rand_val = random.randint(1, 1111111111111111)
+    # product_description=getImageDescription(image)
+    # clients = InferenceClient("google/gemma-7b")
+    # rand_val = random.randint(1, 1111111111111111)
     if not history:
         history = []
-    generate_kwargs = dict(
-        temperature=temp,
-        max_new_tokens=tokens,
-        top_p=top_p,
-        repetition_penalty=rep_p,
-        do_sample=True,
-        seed=seed,
-    )
-    system_prompt="you're a helpful e-commerce marketting assitant"
-    prompt="Write me a poem"
-    formatted_prompt = self.format_prompt(f"{system_prompt}, {prompt}", history)
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=stream_output, details=True, return_full_text=False)
-    output = ""
+    # generate_kwargs = dict(
+    #     temperature=temp,
+    #     max_new_tokens=tokens,
+    #     top_p=top_p,
+    #     repetition_penalty=rep_p,
+    #     do_sample=True,
+    #     seed=seed,
+    # )
+    # system_prompt="you're a helpful e-commerce marketting assitant"
+    # prompt="Write me a poem"
+    # formatted_prompt = self.format_prompt(f"{system_prompt}, {prompt}", history)
+    # stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=stream_output, details=True, return_full_text=False)
+    # output = ""
 
-    for response in stream:
-        output += response.token.text
-        yield [(prompt, output)]
-        gr.Info('Gemma:' + output)
-        history.append((prompt, output))
+    # for response in stream:
+    #     output += response.token.text
+    #     yield [(prompt, output)]
+    #     gr.Info('Gemma:' + output)
+    #     history.append((prompt, output))
     yield history
 
     @torch.no_grad()
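
For reference, the block being disabled follows the huggingface_hub streaming pattern: InferenceClient.text_generation with stream=True and details=True yields chunks whose token.text carries the newly generated text. Below is a minimal working sketch of that pattern, with the diff's undefined names (temp, tokens, rep_p, seed, stream_output, and the clients/client mismatch) replaced by explicit parameters; the sampling values and the stream_reply name are illustrative, not from the commit.

# Sketch only: assumes huggingface_hub is installed and an HF API token is
# configured; parameter defaults are placeholders, not from the commit.
from huggingface_hub import InferenceClient


def stream_reply(prompt, history=None, temperature=0.7, max_new_tokens=256,
                 top_p=0.95, repetition_penalty=1.2, seed=42):
    """Stream a model reply, yielding a growing (prompt, output) chat history."""
    history = history or []
    client = InferenceClient("google/gemma-7b")  # model named in the diff
    stream = client.text_generation(
        prompt,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
        stream=True,             # emit tokens as they are generated
        details=True,            # each chunk carries per-token metadata
        return_full_text=False,  # do not echo the prompt back
    )
    output = ""
    for chunk in stream:
        output += chunk.token.text          # accumulate the streamed tokens
        yield history + [(prompt, output)]  # shape a Gradio Chatbot expects

Yielding the accumulated history on every chunk is what lets a Gradio Chatbot re-render incrementally instead of waiting for the full completion.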