snippets.curl
• Const **curlSnippets**: `Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string, provider: SnippetInferenceProvider, providerModelId?: string, opts?: Record<string, unknown>) => InferenceSnippet[]>>`

Defined in: inference/src/snippets/curl.ts:129
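A minimal sketch of looking up a generator in this map. It assumes the module is re-exported as `snippets.curl` from `@huggingface/inference` and that `ModelDataMinimal` (assumed to come from `@huggingface/tasks`) needs at least `id` and `pipeline_tag`; the model id and token are placeholders:

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

// Illustrative model; cast because the exact required fields are not shown on this page.
const model = {
	id: "distilbert-base-uncased-finetuned-sst-2-english",
	pipeline_tag: "text-classification",
} as ModelDataMinimal;

// The record is Partial, so a given pipeline type may have no generator.
const generate = model.pipeline_tag && snippets.curl.curlSnippets[model.pipeline_tag];
if (generate) {
	const result = generate(model, "hf_xxx", "hf-inference");
	console.log(result[0]?.content);
}
```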
▸ **getCurlInferenceSnippet**(`model`, `accessToken`, `provider`, `providerModelId?`, `opts?`): `InferenceSnippet[]`

| Name | Type |
|---|---|
| `model` | `ModelDataMinimal` |
| `accessToken` | `string` |
| `provider` | `string` |
| `providerModelId?` | `string` |
| `opts?` | `Record<string, unknown>` |

Returns: `InferenceSnippet[]`

Defined in: inference/src/snippets/curl.ts:167
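Hypothetical usage of this top-level entry point, which dispatches on the model's `pipeline_tag` (same assumed imports as above; model id and token are placeholders):

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

const model = {
	id: "google-bert/bert-base-uncased",
	pipeline_tag: "fill-mask",
	mask_token: "[MASK]",
} as ModelDataMinimal;

// providerModelId and opts are optional and omitted here.
const [snippet] = snippets.curl.getCurlInferenceSnippet(model, "hf_xxx", "hf-inference");
// InferenceSnippet is assumed to expose the generated command as `content`.
console.log(snippet?.content);
```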
▸ **snippetBasic**(`model`, `accessToken`, `provider`): `InferenceSnippet[]`

| Name | Type |
|---|---|
| `model` | `ModelDataMinimal` |
| `accessToken` | `string` |
| `provider` | `string` |

Returns: `InferenceSnippet[]`

Defined in: inference/src/snippets/curl.ts:12
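A sketch of the simple JSON-input case, assuming the same re-export; the model and the commented payload shape are illustrative:

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

const [basic] = snippets.curl.snippetBasic(
	{
		id: "distilbert-base-uncased-finetuned-sst-2-english",
		pipeline_tag: "text-classification",
	} as ModelDataMinimal,
	"hf_xxx",
	"hf-inference"
);
// Expected shape (illustrative): a curl command that POSTs {"inputs": "..."}
// with an `Authorization: Bearer <token>` header to the provider's endpoint.
console.log(basic?.content);
```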
▸ **snippetFile**(`model`, `accessToken`, `provider`): `InferenceSnippet[]`

| Name | Type |
|---|---|
| `model` | `ModelDataMinimal` |
| `accessToken` | `string` |
| `provider` | `string` |

Returns: `InferenceSnippet[]`

Defined in: inference/src/snippets/curl.ts:110
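A sketch of the binary-input case, e.g. an audio or image task where the generated command is assumed to upload a local file instead of a JSON body (same assumed imports; model id is a placeholder):

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

const [fileSnippet] = snippets.curl.snippetFile(
	{
		id: "openai/whisper-large-v3",
		pipeline_tag: "automatic-speech-recognition",
	} as ModelDataMinimal,
	"hf_xxx",
	"hf-inference"
);
// Expected shape (illustrative): a curl command that streams a local file
// (e.g. via --data-binary '@sample.flac') rather than a JSON payload.
console.log(fileSnippet?.content);
```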
▸ **snippetTextGeneration**(`model`, `accessToken`, `provider`, `providerModelId?`, `opts?`): `InferenceSnippet[]`

| Name | Type |
|---|---|
| `model` | `ModelDataMinimal` |
| `accessToken` | `string` |
| `provider` | `string` |
| `providerModelId?` | `string` |
| `opts?` | `Object` |
| `opts.max_tokens?` | `unknown` |
| `opts.messages?` | `ChatCompletionInputMessage[]` |
| `opts.streaming?` | `boolean` |
| `opts.temperature?` | `number` |
| `opts.top_p?` | `number` |

Returns: `InferenceSnippet[]`

Defined in: inference/src/snippets/curl.ts:33
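A sketch of a chat-style call exercising the documented `opts`. The `messages` and `streaming` options suggest a chat-completion-flavored command for conversational models, but that is an assumption; the model, `conversational` tag, messages, and sampling values are placeholders:

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

const [chat] = snippets.curl.snippetTextGeneration(
	{
		id: "meta-llama/Llama-3.1-8B-Instruct",
		pipeline_tag: "text-generation",
		tags: ["conversational"], // assumed to select the chat-completion flavor
	} as ModelDataMinimal,
	"hf_xxx",
	"hf-inference",
	undefined, // providerModelId: fall back to the model id
	{
		messages: [{ role: "user", content: "What is the capital of France?" }],
		temperature: 0.7,
		max_tokens: 256,
		streaming: false,
	}
);
console.log(chat?.content);
```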
▸ **snippetZeroShotClassification**(`model`, `accessToken`, `provider`): `InferenceSnippet[]`

| Name | Type |
|---|---|
| `model` | `ModelDataMinimal` |
| `accessToken` | `string` |
| `provider` | `string` |

Returns: `InferenceSnippet[]`

Defined in: inference/src/snippets/curl.ts:90
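A sketch of the zero-shot case, where the generated request body is assumed to carry `parameters` such as `candidate_labels` alongside `inputs` (same assumed imports; model id is a placeholder):

```ts
import { snippets } from "@huggingface/inference"; // assumed re-export path
import type { ModelDataMinimal } from "@huggingface/tasks"; // assumed source of the type

const [zeroShot] = snippets.curl.snippetZeroShotClassification(
	{
		id: "facebook/bart-large-mnli",
		pipeline_tag: "zero-shot-classification",
	} as ModelDataMinimal,
	"hf_xxx",
	"hf-inference"
);
// Expected shape (illustrative): a curl command whose JSON body includes both
// "inputs" and "parameters" (e.g. candidate_labels); the exact payload comes
// from the library, not from this sketch.
console.log(zeroShot?.content);
```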