Update app.py
app.py CHANGED
@@ -13,8 +13,7 @@ subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENT
 
 model = AutoModelForCausalLM.from_pretrained(
     'PJMixers-Dev/Florence-2-base-danbooru2022-316k',
-    trust_remote_code=True
-    torch_dtype=torch.float32
+    trust_remote_code=True
 ).eval()
 processor = AutoProcessor.from_pretrained(
     'PJMixers-Dev/Florence-2-base-danbooru2022-316k',
@@ -34,7 +33,7 @@ def process_image(image):
     if image.mode != "RGB":
         image = image.convert("RGB")
 
-    inputs = processor(text="<CAPTION>", images=image, return_tensors="pt")
+    inputs = processor(text="<CAPTION>", images=image, return_tensors="pt")
     generated_ids = model.generate(
         input_ids=inputs["input_ids"],
         pixel_values=inputs["pixel_values"],
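
For context, a minimal sketch of the captioning path app.py ends up with after this change: the first hunk drops the torch_dtype=torch.float32 argument (the removed keyword pair had no separating comma), so the model now loads with trust_remote_code=True only; the second hunk keeps building <CAPTION> inputs from an RGB image before generation. The image path, the torch.no_grad() wrapper, and the max_new_tokens / num_beams settings below are assumptions for illustration and are not visible in this diff.

import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor

# Florence-2 checkpoints ship custom modeling code, hence trust_remote_code=True.
model = AutoModelForCausalLM.from_pretrained(
    'PJMixers-Dev/Florence-2-base-danbooru2022-316k',
    trust_remote_code=True,
).eval()
processor = AutoProcessor.from_pretrained(
    'PJMixers-Dev/Florence-2-base-danbooru2022-316k',
    trust_remote_code=True,
)

image = Image.open("example.jpg")  # assumed input path, not from the Space
if image.mode != "RGB":
    image = image.convert("RGB")

# Build text + pixel inputs for the <CAPTION> task prompt, as in the second hunk.
inputs = processor(text="<CAPTION>", images=image, return_tensors="pt")

with torch.no_grad():  # assumed; the diff does not show the generation kwargs
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        pixel_values=inputs["pixel_values"],
        max_new_tokens=256,  # assumed value
        num_beams=3,         # assumed value
    )

caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(caption)

With torch_dtype no longer passed, from_pretrained falls back to its default and the weights load in float32, which is the safe choice when the Space may run on CPU.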