	Update app.py
app.py CHANGED
@@ -23,7 +23,7 @@ def load_model():
         )
 
     # Load the processor and model using the correct identifier
-    model_id = "google/paligemma2-
+    model_id = "google/paligemma2-28b-pt-896"
     processor = PaliGemmaProcessor.from_pretrained(model_id, use_auth_token=token)
     device = "cuda" if torch.cuda.is_available() else "cpu"
     model = PaliGemmaForConditionalGeneration.from_pretrained(
@@ -40,11 +40,8 @@ def process_image_and_text(image_pil, text_input):
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
     # Load the image using load_image
-    #
-
-    image_pil.save(buffered, format="JPEG")
-    image_bytes = buffered.getvalue()
-    image = load_image(image_bytes)
+    # We can pass the PIL image directly to load_image
+    image = load_image(image_pil)
 
     # Use the provided text input
     model_inputs = processor(text=text_input, images=image, return_tensors="pt").to(
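For context, a minimal sketch of how the updated code path could be exercised end to end. It mirrors the diff: the corrected model identifier and passing the PIL image straight to load_image, which accepts a PIL image (as well as a URL or file path), so the old JPEG/BytesIO round-trip is unnecessary. The file name, prompt, dtype, and generation settings below are illustrative assumptions, and the app's gated-model authentication (use_auth_token=token) is omitted here.

# Sketch only: model id and image handling follow the commit; everything else is assumed.
import torch
from PIL import Image
from transformers import PaliGemmaProcessor, PaliGemmaForConditionalGeneration
from transformers.image_utils import load_image

model_id = "google/paligemma2-28b-pt-896"
processor = PaliGemmaProcessor.from_pretrained(model_id)
device = "cuda" if torch.cuda.is_available() else "cpu"
model = PaliGemmaForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16
).to(device)

image_pil = Image.open("example.jpg")   # hypothetical input file
image = load_image(image_pil)           # a PIL image is accepted directly

# Depending on the transformers version, the prompt may need an explicit <image> token.
model_inputs = processor(
    text="<image>caption en", images=image, return_tensors="pt"
).to(device)

with torch.inference_mode():
    generated = model.generate(**model_inputs, max_new_tokens=50)
print(processor.decode(generated[0], skip_special_tokens=True))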