Update Transformers.js usage to v3

#43
by Xenova (HF staff) — opened
Files changed (1)
  1. README.md +2 -4
README.md CHANGED
@@ -2742,12 +2742,10 @@ The model natively supports scaling of the sequence length past 2048 tokens. To
2742
  ### Transformers.js
2743
 
2744
  ```js
2745
- import { pipeline, layer_norm } from '@xenova/transformers';
2746
 
2747
  // Create a feature extraction pipeline
2748
- const extractor = await pipeline('feature-extraction', 'nomic-ai/nomic-embed-text-v1.5', {
2749
- quantized: false, // Comment out this line to use the quantized version
2750
- });
2751
 
2752
  // Define sentences
2753
  const texts = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?'];
 
2742
  ### Transformers.js
2743
 
2744
  ```js
2745
+ import { pipeline, layer_norm } from '@huggingface/transformers';
2746
 
2747
  // Create a feature extraction pipeline
2748
+ const extractor = await pipeline('feature-extraction', 'nomic-ai/nomic-embed-text-v1.5');
 
 
2749
 
2750
  // Define sentences
2751
  const texts = ['search_query: What is TSNE?', 'search_query: Who is Laurens van der Maaten?'];