davanstrien (HF Staff) committed
Commit b1ee428 · 1 Parent(s): b4a7f49

Add kernels dependency for Flash Attention 3
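Note: the commented dependency list edited by the first hunk below (the `# "accelerate",`, `# ]`, and `# ///` lines) is the script's inline metadata block (PEP 723), so the new dependency is declared in the script header itself. A minimal sketch of what that block looks like after this commit, assuming a `# /// script` opener and a `dependencies` list; only the entries visible in the diff are certain, and the `torch` entry is a guess:

# /// script
# dependencies = [
#     "torch",                 # assumed; not visible in the diff
#     "transformers>=4.45.0",
#     "tqdm",
#     "accelerate",
#     "kernels>=0.9.0",  # For Flash Attention 3 support (optional but recommended)
# ]
# ///

If the script is run with a PEP 723-aware tool (for example `uv run gpt_oss_transformers.py`), these dependencies are resolved automatically at launch.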

Files changed (1): gpt_oss_transformers.py (+4 -3)
gpt_oss_transformers.py CHANGED
@@ -7,6 +7,7 @@
 # "transformers>=4.45.0",
 # "tqdm",
 # "accelerate",
+# "kernels>=0.9.0",  # For Flash Attention 3 support (optional but recommended)
 # ]
 # ///
 """
@@ -281,9 +282,9 @@ def main(
         logger.error("- 120B model: ~240GB VRAM (use 4xa100 or 8xl40s)")
         sys.exit(1)
     except Exception as e:
-        if "kernels-community" in str(e) or "flash" in str(e).lower() or "attn" in str(e).lower():
-            logger.warning(f"Flash Attention 3 not available: {type(e).__name__}")
-            logger.info("Falling back to eager attention (standard implementation)")
+        if "kernels" in str(e) or "flash" in str(e).lower() or "attn" in str(e).lower():
+            logger.info(f"Flash Attention 3 not available (needs 'kernels' package)")
+            logger.info("Using eager attention instead (standard implementation)")
             try:
                 model = AutoModelForCausalLM.from_pretrained(
                     model_id,
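
For context, a minimal sketch of the load-with-fallback pattern the second hunk adjusts. Assumptions: the script requests Flash Attention 3 through the Hub kernel "kernels-community/vllm-flash-attn3" (suggested by the removed `"kernels-community"` check) and retries with eager attention on failure; `model_id`, the exception filter, and the log messages come from the diff, everything else is guessed.

import logging

from transformers import AutoModelForCausalLM

logger = logging.getLogger(__name__)

model_id = "openai/gpt-oss-20b"  # assumed; the diff only shows the variable name

try:
    # Fast path: Flash Attention 3 kernel fetched via the `kernels` package.
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype="auto",
        device_map="auto",
        attn_implementation="kernels-community/vllm-flash-attn3",
    )
except Exception as e:
    if "kernels" in str(e) or "flash" in str(e).lower() or "attn" in str(e).lower():
        logger.info("Flash Attention 3 not available (needs 'kernels' package)")
        logger.info("Using eager attention instead (standard implementation)")
        # Fallback: standard eager attention, no extra dependency required.
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype="auto",
            device_map="auto",
            attn_implementation="eager",
        )
    else:
        raise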