davanstrien (HF Staff) committed on
Commit b4a7f49 · 1 Parent(s): 90312a8

Add Flash Attention 3 support with fallback to eager

Files changed (1)
  1. gpt_oss_transformers.py +25 -14
gpt_oss_transformers.py CHANGED
@@ -264,13 +264,16 @@ def main(
         logger.warning("Consider using --flavor a10g-large (48GB) or a100-large (80GB)")
 
     try:
+        # Try Flash Attention 3 first for better performance
+        logger.info("Attempting to load with Flash Attention 3 (optimized)")
         model = AutoModelForCausalLM.from_pretrained(
             model_id,
             torch_dtype=torch.bfloat16,
-            attn_implementation="eager",  # As per cookbook, avoid flash attention issues
+            attn_implementation="kernels-community/vllm-flash-attn3",  # Flash Attention with Sinks
             **model_kwargs,
         )
         model.eval()
+        logger.info("Successfully loaded with Flash Attention 3")
     except torch.cuda.OutOfMemoryError as e:
         logger.error(f"Out of memory error: {e}")
         logger.error("\nThe GPT OSS models require significant memory:")
@@ -278,19 +281,27 @@ def main(
         logger.error("- 120B model: ~240GB VRAM (use 4xa100 or 8xl40s)")
         sys.exit(1)
     except Exception as e:
-        logger.error(f"Failed to load model: {e}")
-        logger.error("Trying with fallback configuration...")
-        # Fallback to simpler loading
-        try:
-            model = AutoModelForCausalLM.from_pretrained(
-                model_id,
-                torch_dtype="auto",
-                device_map="auto",
-                attn_implementation="eager",
-            )
-            model.eval()
-        except Exception as fallback_error:
-            logger.error(f"Fallback also failed: {fallback_error}")
+        if "kernels-community" in str(e) or "flash" in str(e).lower() or "attn" in str(e).lower():
+            logger.warning(f"Flash Attention 3 not available: {type(e).__name__}")
+            logger.info("Falling back to eager attention (standard implementation)")
+            try:
+                model = AutoModelForCausalLM.from_pretrained(
+                    model_id,
+                    torch_dtype=torch.bfloat16,
+                    attn_implementation="eager",  # Fallback to eager attention
+                    **model_kwargs,
+                )
+                model.eval()
+                logger.info("Successfully loaded with eager attention")
+            except torch.cuda.OutOfMemoryError as oom_error:
+                logger.error(f"Out of memory with eager attention: {oom_error}")
+                logger.error("The model requires more GPU memory than available")
+                sys.exit(1)
+            except Exception as eager_error:
+                logger.error(f"Failed with eager attention: {eager_error}")
+                sys.exit(1)
+        else:
+            logger.error(f"Unexpected error loading model: {e}")
             sys.exit(1)
 
     # Generation configuration
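
For reference, the commit inlines the "try the optimized kernel, then fall back to eager" logic directly in main(). The same pattern can be factored into a small helper that loops over candidate attn_implementation values. The sketch below is illustrative only and is not part of this commit: the function name load_model_with_fallback and the candidate list are assumptions, not identifiers from gpt_oss_transformers.py.

import logging

import torch
from transformers import AutoModelForCausalLM

logger = logging.getLogger(__name__)


def load_model_with_fallback(model_id: str, **model_kwargs):
    """Try each attention implementation in order, returning the first model that loads."""
    candidates = [
        # Flash Attention 3 kernel from the Hub; assumed to need a compatible GPU
        # and the kernel-loading support used in the commit above.
        "kernels-community/vllm-flash-attn3",
        # Plain PyTorch attention, always available.
        "eager",
    ]
    last_error = None
    for attn in candidates:
        try:
            logger.info("Loading %s with attn_implementation=%s", model_id, attn)
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.bfloat16,
                attn_implementation=attn,
                **model_kwargs,
            )
            model.eval()
            return model
        except torch.cuda.OutOfMemoryError:
            # A different attention kernel will not fix an OOM, so re-raise immediately,
            # mirroring the commit's behaviour of exiting on OOM rather than retrying.
            raise
        except Exception as e:
            logger.warning("attn_implementation=%s failed: %s", attn, e)
            last_error = e
    raise RuntimeError("No attention implementation could be loaded") from last_error

Keeping the out-of-memory case separate matches the diff above, where OOM is treated as fatal rather than retried with a different attention implementation.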