import { InferenceOutputError } from "../../lib/InferenceOutputError";
import type { BaseArgs, Options } from "../../types";
import { toArray } from "../../utils/toArray";
import { request } from "../custom/request";
export type TokenClassificationArgs = BaseArgs & {
	/**
	 * A string to be classified
	 */
	inputs: string;
	parameters?: {
		/**
		 * (Default: simple). There are several aggregation strategies:
		 *
		 * none: Every token gets classified without further aggregation.
		 *
		 * simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar).
		 *
		 * first: Same as the simple strategy except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity.
		 *
		 * average: Same as the simple strategy except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied.
		 *
		 * max: Same as the simple strategy except words cannot end up with different tags. Word entity will be the token with the maximum score.
		 */
		aggregation_strategy?: "none" | "simple" | "first" | "average" | "max";
	};
};
export interface TokenClassificationOutputValue {
	/**
	 * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times.
	 */
	end: number;
	/**
	 * The type for the entity being recognized (model specific).
	 */
	entity_group: string;
	/**
	 * How likely the entity was recognized.
	 */
	score: number;
	/**
	 * The offset stringwise where the answer is located. Useful to disambiguate if word occurs multiple times.
	 */
	start: number;
	/**
	 * The string that was captured
	 */
	word: string;
}
/**
 * List of recognized entities, one entry per entity (or per token, depending on the aggregation strategy).
 */
export type TokenClassificationOutput = TokenClassificationOutputValue[];
/**
 * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
 *
 * @param args - Model/input arguments; `inputs` is the text to classify.
 * @param options - Optional request options forwarded to the underlying `request` call.
 * @returns The list of recognized entities, validated at runtime.
 * @throws InferenceOutputError when the API response does not match the expected shape.
 */
export async function tokenClassification(
	args: TokenClassificationArgs,
	options?: Options
): Promise<TokenClassificationOutput> {
	// The API may return either a single entity object or an array of them;
	// normalize to an array before validating.
	const response = await request<TokenClassificationOutput[number] | TokenClassificationOutput>(args, {
		...options,
		taskHint: "token-classification",
	});
	const entities = toArray(response);

	// Runtime shape check of a single entity entry.
	const isEntity = (entry: TokenClassificationOutput[number]): boolean =>
		typeof entry.end === "number" &&
		typeof entry.entity_group === "string" &&
		typeof entry.score === "number" &&
		typeof entry.start === "number" &&
		typeof entry.word === "string";

	if (!Array.isArray(entities) || !entities.every(isEntity)) {
		throw new InferenceOutputError(
			"Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
		);
	}
	return entities;
}