	sliders added
app.py CHANGED
@@ -48,7 +48,7 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
     pipe.to(device)
 
     messages = [
-        {"role": "user", "content": "You have to complete my given prompt into a complete description. The description should be heavily detailed.
+        {"role": "user", "content": "You have to complete my given prompt into a complete description. The description should be heavily detailed. The purpose of this description is to descibe a video generation. My Prompt: " + prompt},
     ]
 
     pipe_llm = pipeline(
@@ -58,9 +58,9 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
         device_map='auto'
     )
     generation_args = {
-        "max_new_tokens":
+        "max_new_tokens": 128,
         "return_full_text": False,
-        "temperature":
+        "temperature": 0.0,
         "do_sample": False,
     }
 
@@ -78,7 +78,7 @@ def generate_video(prompt,negative_prompt, guidance_scale, num_inference_steps,
         print(adapter_choices)
 
     output = pipe(
-        prompt=
+        prompt=output[0]['generated_text'],
         negative_prompt=negative_prompt,
         num_frames=16,
         guidance_scale=guidance_scale,
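
For context, a minimal, self-contained sketch of how the edited snippet appears to fit together after this commit: an LLM expands the user's short prompt into a detailed description, and that text is passed as the prompt of the video pipeline. The LLM model name, the AnimateDiff pipeline, the base model, and the GIF export below are assumptions chosen to make the sketch runnable, not code taken from this Space; only the message template, generation_args, and the pipe(...) keyword arguments mirror the diff above.

# Sketch under stated assumptions; not the Space's exact app.py.
import torch
from transformers import pipeline
from diffusers import AnimateDiffPipeline, MotionAdapter
from diffusers.utils import export_to_gif

device = "cuda" if torch.cuda.is_available() else "cpu"

def generate_video(prompt, negative_prompt, guidance_scale, num_inference_steps):
    # Step 1: expand the short user prompt into a detailed video description.
    messages = [
        {"role": "user", "content": "You have to complete my given prompt into a complete description. The description should be heavily detailed. The purpose of this description is to describe a video generation. My Prompt: " + prompt},
    ]
    pipe_llm = pipeline(
        "text-generation",
        model="microsoft/Phi-3-mini-4k-instruct",  # assumed model choice
        device_map="auto",
    )
    generation_args = {
        "max_new_tokens": 128,        # values hardcoded by this commit
        "return_full_text": False,
        "temperature": 0.0,
        "do_sample": False,
    }
    output = pipe_llm(messages, **generation_args)

    # Step 2: feed the expanded description into the video pipeline.
    adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")  # assumed adapter
    pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter)  # assumed base model
    pipe.to(device)
    result = pipe(
        prompt=output[0]["generated_text"],  # the LLM's detailed description
        negative_prompt=negative_prompt,
        num_frames=16,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )
    return export_to_gif(result.frames[0], "generated.gif")

With do_sample set to False and temperature 0.0, the prompt expansion is deterministic, so the same input prompt always yields the same expanded description before video generation.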