diff --git a/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/pretrained_networks.py b/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/pretrained_networks.py deleted file mode 100644 index 077a24419364fdb5ae2f697f73e28615adae75a7..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/pretrained_networks.py +++ /dev/null @@ -1,181 +0,0 @@ -from collections import namedtuple -import torch -from torchvision import models as tv -from IPython import embed - -class squeezenet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(squeezenet, self).__init__() - pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.slice6 = torch.nn.Sequential() - self.slice7 = torch.nn.Sequential() - self.N_slices = 7 - for x in range(2): - self.slice1.add_module(str(x), pretrained_features[x]) - for x in range(2,5): - self.slice2.add_module(str(x), pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), pretrained_features[x]) - for x in range(10, 11): - self.slice5.add_module(str(x), pretrained_features[x]) - for x in range(11, 12): - self.slice6.add_module(str(x), pretrained_features[x]) - for x in range(12, 13): - self.slice7.add_module(str(x), pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - h = self.slice6(h) - h_relu6 = h - h = self.slice7(h) - h_relu7 = h - vgg_outputs = namedtuple("SqueezeOutputs", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7']) - out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7) - - return out - - -class alexnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): - super(alexnet, self).__init__() - alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(2): - self.slice1.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(2, 5): - self.slice2.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(5, 8): - self.slice3.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(8, 10): - self.slice4.add_module(str(x), alexnet_pretrained_features[x]) - for x in range(10, 12): - self.slice5.add_module(str(x), alexnet_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1 = h - h = self.slice2(h) - h_relu2 = h - h = self.slice3(h) - h_relu3 = h - h = self.slice4(h) - h_relu4 = h - h = self.slice5(h) - h_relu5 = h - alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5']) - out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5) - - return out - -class vgg16(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True): 
- super(vgg16, self).__init__() - vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - self.N_slices = 5 - for x in range(4): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(4, 9): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(9, 16): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(16, 23): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(23, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h = self.slice1(X) - h_relu1_2 = h - h = self.slice2(h) - h_relu2_2 = h - h = self.slice3(h) - h_relu3_3 = h - h = self.slice4(h) - h_relu4_3 = h - h = self.slice5(h) - h_relu5_3 = h - vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3']) - out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3) - - return out - - - -class resnet(torch.nn.Module): - def __init__(self, requires_grad=False, pretrained=True, num=18): - super(resnet, self).__init__() - if(num==18): - self.net = tv.resnet18(pretrained=pretrained) - elif(num==34): - self.net = tv.resnet34(pretrained=pretrained) - elif(num==50): - self.net = tv.resnet50(pretrained=pretrained) - elif(num==101): - self.net = tv.resnet101(pretrained=pretrained) - elif(num==152): - self.net = tv.resnet152(pretrained=pretrained) - self.N_slices = 5 - - self.conv1 = self.net.conv1 - self.bn1 = self.net.bn1 - self.relu = self.net.relu - self.maxpool = self.net.maxpool - self.layer1 = self.net.layer1 - self.layer2 = self.net.layer2 - self.layer3 = self.net.layer3 - self.layer4 = self.net.layer4 - - def forward(self, X): - h = self.conv1(X) - h = self.bn1(h) - h = self.relu(h) - h_relu1 = h - h = self.maxpool(h) - h = self.layer1(h) - h_conv2 = h - h = self.layer2(h) - h_conv3 = h - h = self.layer3(h) - h_conv4 = h - h = self.layer4(h) - h_conv5 = h - - outputs = namedtuple("Outputs", ['relu1','conv2','conv3','conv4','conv5']) - out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5) - - return out diff --git a/spaces/52Hz/SRMNet_thesis/model_arch/__init__.py b/spaces/52Hz/SRMNet_thesis/model_arch/__init__.py deleted file mode 100644 index a53ae7157c6589fef7a42c11db4d0bbc8fd19b23..0000000000000000000000000000000000000000 --- a/spaces/52Hz/SRMNet_thesis/model_arch/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .SRMNet import * -from .SRMNet_SWFF import * diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_norm.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_norm.py deleted file mode 100644 index 863c2fb235e209f25cce954ec9b585cb6fe13c96..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/text_norm.py +++ /dev/null @@ -1,797 +0,0 @@ -# coding=utf-8 -# Authors: -# 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git) -# 2019.9 Jiayu DU -# -# requirements: -# - python 3.X -# notes: python 2.X WILL fail or produce misleading results - -import sys, os, argparse, codecs, string, re - -# ================================================================================ # -# basic constant -# ================================================================================ 
# -CHINESE_DIGIS = u'零一二三四五六七八九' -BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖' -BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖' -SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万' -SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬' -LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载' -LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載' -SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万' -SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬' - -ZERO_ALT = u'〇' -ONE_ALT = u'幺' -TWO_ALTS = [u'两', u'兩'] - -POSITIVE = [u'正', u'正'] -NEGATIVE = [u'负', u'負'] -POINT = [u'点', u'點'] -# PLUS = [u'加', u'加'] -# SIL = [u'杠', u'槓'] - -# 中文数字系统类型 -NUMBERING_TYPES = ['low', 'mid', 'high'] - -CURRENCY_NAMES = '(人民币|美元|日元|英镑|欧元|马克|法郎|加拿大元|澳元|港币|先令|芬兰马克|爱尔兰镑|' \ - '里拉|荷兰盾|埃斯库多|比塞塔|印尼盾|林吉特|新西兰元|比索|卢布|新加坡元|韩元|泰铢)' -CURRENCY_UNITS = '((亿|千万|百万|万|千|百)|(亿|千万|百万|万|千|百|)元|(亿|千万|百万|万|千|百|)块|角|毛|分)' -COM_QUANTIFIERS = '(匹|张|座|回|场|尾|条|个|首|阙|阵|网|炮|顶|丘|棵|只|支|袭|辆|挑|担|颗|壳|窠|曲|墙|群|腔|' \ - '砣|座|客|贯|扎|捆|刀|令|打|手|罗|坡|山|岭|江|溪|钟|队|单|双|对|出|口|头|脚|板|跳|枝|件|贴|' \ - '针|线|管|名|位|身|堂|课|本|页|家|户|层|丝|毫|厘|分|钱|两|斤|担|铢|石|钧|锱|忽|(千|毫|微)克|' \ - '毫|厘|分|寸|尺|丈|里|寻|常|铺|程|(千|分|厘|毫|微)米|撮|勺|合|升|斗|石|盘|碗|碟|叠|桶|笼|盆|' \ - '盒|杯|钟|斛|锅|簋|篮|盘|桶|罐|瓶|壶|卮|盏|箩|箱|煲|啖|袋|钵|年|月|日|季|刻|时|周|天|秒|分|旬|' \ - '纪|岁|世|更|夜|春|夏|秋|冬|代|伏|辈|丸|泡|粒|颗|幢|堆|条|根|支|道|面|片|张|颗|块)' - -# punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git) -CHINESE_PUNC_STOP = '!?。。' -CHINESE_PUNC_NON_STOP = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏' -CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP - - -# ================================================================================ # -# basic class -# ================================================================================ # -class ChineseChar(object): - """ - 中文字符 - 每个字符对应简体和繁体, - e.g. 简体 = '负', 繁体 = '負' - 转换时可转换为简体或繁体 - """ - - def __init__(self, simplified, traditional): - self.simplified = simplified - self.traditional = traditional - # self.__repr__ = self.__str__ - - def __str__(self): - return self.simplified or self.traditional or None - - def __repr__(self): - return self.__str__() - - -class ChineseNumberUnit(ChineseChar): - """ - 中文数字/数位字符 - 每个字符除繁简体外还有一个额外的大写字符 - e.g. 
'陆' 和 '陸' - """ - - def __init__(self, power, simplified, traditional, big_s, big_t): - super(ChineseNumberUnit, self).__init__(simplified, traditional) - self.power = power - self.big_s = big_s - self.big_t = big_t - - def __str__(self): - return '10^{}'.format(self.power) - - @classmethod - def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False): - - if small_unit: - return ChineseNumberUnit(power=index + 1, - simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[0]: - return ChineseNumberUnit(power=index + 8, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[1]: - return ChineseNumberUnit(power=(index + 2) * 4, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[2]: - return ChineseNumberUnit(power=pow(2, index + 3), - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - else: - raise ValueError( - 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type)) - - -class ChineseNumberDigit(ChineseChar): - """ - 中文数字字符 - """ - - def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None): - super(ChineseNumberDigit, self).__init__(simplified, traditional) - self.value = value - self.big_s = big_s - self.big_t = big_t - self.alt_s = alt_s - self.alt_t = alt_t - - def __str__(self): - return str(self.value) - - @classmethod - def create(cls, i, v): - return ChineseNumberDigit(i, v[0], v[1], v[2], v[3]) - - -class ChineseMath(ChineseChar): - """ - 中文数位字符 - """ - - def __init__(self, simplified, traditional, symbol, expression=None): - super(ChineseMath, self).__init__(simplified, traditional) - self.symbol = symbol - self.expression = expression - self.big_s = simplified - self.big_t = traditional - - -CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath - - -class NumberSystem(object): - """ - 中文数字系统 - """ - pass - - -class MathSymbol(object): - """ - 用于中文数字系统的数学符号 (繁/简体), e.g. - positive = ['正', '正'] - negative = ['负', '負'] - point = ['点', '點'] - """ - - def __init__(self, positive, negative, point): - self.positive = positive - self.negative = negative - self.point = point - - def __iter__(self): - for v in self.__dict__.values(): - yield v - - -# class OtherSymbol(object): -# """ -# 其他符号 -# """ -# -# def __init__(self, sil): -# self.sil = sil -# -# def __iter__(self): -# for v in self.__dict__.values(): -# yield v - - -# ================================================================================ # -# basic utils -# ================================================================================ # -def create_system(numbering_type=NUMBERING_TYPES[1]): - """ - 根据数字系统类型返回创建相应的数字系统,默认为 mid - NUMBERING_TYPES = ['low', 'mid', 'high']: 中文数字系统类型 - low: '兆' = '亿' * '十' = $10^{9}$, '京' = '兆' * '十', etc. - mid: '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc. - high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc. 
- 返回对应的数字系统 - """ - - # chinese number units of '亿' and larger - all_larger_units = zip( - LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) - larger_units = [CNU.create(i, v, numbering_type, False) - for i, v in enumerate(all_larger_units)] - # chinese number units of '十, 百, 千, 万' - all_smaller_units = zip( - SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) - smaller_units = [CNU.create(i, v, small_unit=True) - for i, v in enumerate(all_smaller_units)] - # digis - chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, - BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) - digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)] - digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT - digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT - digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1] - - # symbols - positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x) - negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x) - point_cn = CM(POINT[0], POINT[1], '.', lambda x, - y: float(str(x) + '.' + str(y))) - # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y))) - system = NumberSystem() - system.units = smaller_units + larger_units - system.digits = digits - system.math = MathSymbol(positive_cn, negative_cn, point_cn) - # system.symbols = OtherSymbol(sil_cn) - return system - - -def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): - def get_symbol(char, system): - for u in system.units: - if char in [u.traditional, u.simplified, u.big_s, u.big_t]: - return u - for d in system.digits: - if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]: - return d - for m in system.math: - if char in [m.traditional, m.simplified]: - return m - - def string2symbols(chinese_string, system): - int_string, dec_string = chinese_string, '' - for p in [system.math.point.simplified, system.math.point.traditional]: - if p in chinese_string: - int_string, dec_string = chinese_string.split(p) - break - return [get_symbol(c, system) for c in int_string], \ - [get_symbol(c, system) for c in dec_string] - - def correct_symbols(integer_symbols, system): - """ - 一百八 to 一百八十 - 一亿一千三百万 to 一亿 一千万 三百万 - """ - - if integer_symbols and isinstance(integer_symbols[0], CNU): - if integer_symbols[0].power == 1: - integer_symbols = [system.digits[1]] + integer_symbols - - if len(integer_symbols) > 1: - if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU): - integer_symbols.append( - CNU(integer_symbols[-2].power - 1, None, None, None, None)) - - result = [] - unit_count = 0 - for s in integer_symbols: - if isinstance(s, CND): - result.append(s) - unit_count = 0 - elif isinstance(s, CNU): - current_unit = CNU(s.power, None, None, None, None) - unit_count += 1 - - if unit_count == 1: - result.append(current_unit) - elif unit_count > 1: - for i in range(len(result)): - if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power: - result[-i - 1] = CNU(result[-i - 1].power + - current_unit.power, None, None, None, None) - return result - - def compute_value(integer_symbols): - """ - Compute the value. - When current unit is larger than previous unit, current unit * all previous units will be used as all previous units. - e.g. 
'两千万' = 2000 * 10000 not 2000 + 10000 - """ - value = [0] - last_power = 0 - for s in integer_symbols: - if isinstance(s, CND): - value[-1] = s.value - elif isinstance(s, CNU): - value[-1] *= pow(10, s.power) - if s.power > last_power: - value[:-1] = list(map(lambda v: v * - pow(10, s.power), value[:-1])) - last_power = s.power - value.append(0) - return sum(value) - - system = create_system(numbering_type) - int_part, dec_part = string2symbols(chinese_string, system) - int_part = correct_symbols(int_part, system) - int_str = str(compute_value(int_part)) - dec_str = ''.join([str(d.value) for d in dec_part]) - if dec_part: - return '{0}.{1}'.format(int_str, dec_str) - else: - return int_str - - -def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, - traditional=False, alt_zero=False, alt_one=False, alt_two=True, - use_zeros=True, use_units=True): - def get_value(value_string, use_zeros=True): - - striped_string = value_string.lstrip('0') - - # record nothing if all zeros - if not striped_string: - return [] - - # record one digits - elif len(striped_string) == 1: - if use_zeros and len(value_string) != len(striped_string): - return [system.digits[0], system.digits[int(striped_string)]] - else: - return [system.digits[int(striped_string)]] - - # recursively record multiple digits - else: - result_unit = next(u for u in reversed( - system.units) if u.power < len(striped_string)) - result_string = value_string[:-result_unit.power] - return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:]) - - system = create_system(numbering_type) - - int_dec = number_string.split('.') - if len(int_dec) == 1: - int_string = int_dec[0] - dec_string = "" - elif len(int_dec) == 2: - int_string = int_dec[0] - dec_string = int_dec[1] - else: - raise ValueError( - "invalid input num string with more than one dot: {}".format(number_string)) - - if use_units and len(int_string) > 1: - result_symbols = get_value(int_string) - else: - result_symbols = [system.digits[int(c)] for c in int_string] - dec_symbols = [system.digits[int(c)] for c in dec_string] - if dec_string: - result_symbols += [system.math.point] + dec_symbols - - if alt_two: - liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, - system.digits[2].big_s, system.digits[2].big_t) - for i, v in enumerate(result_symbols): - if isinstance(v, CND) and v.value == 2: - next_symbol = result_symbols[i + - 1] if i < len(result_symbols) - 1 else None - previous_symbol = result_symbols[i - 1] if i > 0 else None - if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))): - if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)): - result_symbols[i] = liang - - # if big is True, '两' will not be used and `alt_two` has no impact on output - if big: - attr_name = 'big_' - if traditional: - attr_name += 't' - else: - attr_name += 's' - else: - if traditional: - attr_name = 'traditional' - else: - attr_name = 'simplified' - - result = ''.join([getattr(s, attr_name) for s in result_symbols]) - - # if not use_zeros: - # result = result.strip(getattr(system.digits[0], attr_name)) - - if alt_zero: - result = result.replace( - getattr(system.digits[0], attr_name), system.digits[0].alt_s) - - if alt_one: - result = result.replace( - getattr(system.digits[1], attr_name), system.digits[1].alt_s) - - for i, p in enumerate(POINT): - if result.startswith(p): - return CHINESE_DIGIS[0] + result - - # ^10, 11, .., 19 - if len(result) >= 2 and result[1] in 
[SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], - SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \ - result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]: - result = result[1:] - - return result - - -# ================================================================================ # -# different types of rewriters -# ================================================================================ # -class Cardinal: - """ - CARDINAL类 - """ - - def __init__(self, cardinal=None, chntext=None): - self.cardinal = cardinal - self.chntext = chntext - - def chntext2cardinal(self): - return chn2num(self.chntext) - - def cardinal2chntext(self): - return num2chn(self.cardinal) - - -class Digit: - """ - DIGIT类 - """ - - def __init__(self, digit=None, chntext=None): - self.digit = digit - self.chntext = chntext - - # def chntext2digit(self): - # return chn2num(self.chntext) - - def digit2chntext(self): - return num2chn(self.digit, alt_two=False, use_units=False) - - -class TelePhone: - """ - TELEPHONE类 - """ - - def __init__(self, telephone=None, raw_chntext=None, chntext=None): - self.telephone = telephone - self.raw_chntext = raw_chntext - self.chntext = chntext - - # def chntext2telephone(self): - # sil_parts = self.raw_chntext.split('') - # self.telephone = '-'.join([ - # str(chn2num(p)) for p in sil_parts - # ]) - # return self.telephone - - def telephone2chntext(self, fixed=False): - - if fixed: - sil_parts = self.telephone.split('-') - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sil_parts - ]) - self.chntext = self.raw_chntext.replace('', '') - else: - sp_parts = self.telephone.strip('+').split() - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sp_parts - ]) - self.chntext = self.raw_chntext.replace('', '') - return self.chntext - - -class Fraction: - """ - FRACTION类 - """ - - def __init__(self, fraction=None, chntext=None): - self.fraction = fraction - self.chntext = chntext - - def chntext2fraction(self): - denominator, numerator = self.chntext.split('分之') - return chn2num(numerator) + '/' + chn2num(denominator) - - def fraction2chntext(self): - numerator, denominator = self.fraction.split('/') - return num2chn(denominator) + '分之' + num2chn(numerator) - - -class Date: - """ - DATE类 - """ - - def __init__(self, date=None, chntext=None): - self.date = date - self.chntext = chntext - - # def chntext2date(self): - # chntext = self.chntext - # try: - # year, other = chntext.strip().split('年', maxsplit=1) - # year = Digit(chntext=year).digit2chntext() + '年' - # except ValueError: - # other = chntext - # year = '' - # if other: - # try: - # month, day = other.strip().split('月', maxsplit=1) - # month = Cardinal(chntext=month).chntext2cardinal() + '月' - # except ValueError: - # day = chntext - # month = '' - # if day: - # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1] - # else: - # month = '' - # day = '' - # date = year + month + day - # self.date = date - # return self.date - - def date2chntext(self): - date = self.date - try: - year, other = date.strip().split('年', 1) - year = Digit(digit=year).digit2chntext() + '年' - except ValueError: - other = date - year = '' - if other: - try: - month, day = other.strip().split('月', 1) - month = Cardinal(cardinal=month).cardinal2chntext() + '月' - except ValueError: - day = date - month = '' - if day: - day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1] - else: - month = '' - day = '' - chntext = year 
+ month + day - self.chntext = chntext - return self.chntext - - -class Money: - """ - MONEY类 - """ - - def __init__(self, money=None, chntext=None): - self.money = money - self.chntext = chntext - - # def chntext2money(self): - # return self.money - - def money2chntext(self): - money = self.money - pattern = re.compile(r'(\d+(\.\d+)?)') - matchers = pattern.findall(money) - if matchers: - for matcher in matchers: - money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext()) - self.chntext = money - return self.chntext - - -class Percentage: - """ - PERCENTAGE类 - """ - - def __init__(self, percentage=None, chntext=None): - self.percentage = percentage - self.chntext = chntext - - def chntext2percentage(self): - return chn2num(self.chntext.strip().strip('百分之')) + '%' - - def percentage2chntext(self): - return '百分之' + num2chn(self.percentage.strip().strip('%')) - - -# ================================================================================ # -# NSW Normalizer -# ================================================================================ # -class NSWNormalizer: - def __init__(self, raw_text): - self.raw_text = '^' + raw_text + '$' - self.norm_text = '' - - def _particular(self): - text = self.norm_text - pattern = re.compile(r"(([a-zA-Z]+)二([a-zA-Z]+))") - matchers = pattern.findall(text) - if matchers: - # print('particular') - for matcher in matchers: - text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1) - self.norm_text = text - return self.norm_text - - def normalize(self, remove_punc=True): - text = self.raw_text - - # 规范化日期 - pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})年)?(\d{1,2}月(\d{1,2}[日号])?)?)") - matchers = pattern.findall(text) - if matchers: - # print('date') - for matcher in matchers: - text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1) - - # 规范化金钱 - pattern = re.compile(r"\D+((\d+(\.\d+)?)[多余几]?" + CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)") - matchers = pattern.findall(text) - if matchers: - # print('money') - for matcher in matchers: - text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1) - - # 规范化固话/手机号码 - # 手机 - # http://www.jihaoba.com/news/show/13680 - # 移动:139、138、137、136、135、134、159、158、157、150、151、152、188、187、182、183、184、178、198 - # 联通:130、131、132、156、155、186、185、176 - # 电信:133、153、189、180、181、177 - pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D") - matchers = pattern.findall(text) - if matchers: - # print('telephone') - for matcher in matchers: - text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1) - # 固话 - pattern = re.compile(r"\D((0(10|2[0-9]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D") - matchers = pattern.findall(text) - if matchers: - # print('fixed telephone') - for matcher in matchers: - text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1) - - # 规范化分数 - pattern = re.compile(r"(\d+/\d+)") - matchers = pattern.findall(text) - if matchers: - # print('fraction') - for matcher in matchers: - text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1) - - # 规范化百分数 - text = text.replace('%', '%') - pattern = re.compile(r"(\d+(\.\d+)?%)") - matchers = pattern.findall(text) - if matchers: - # print('percentage') - for matcher in matchers: - text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1) - - # 规范化纯数+量词 - pattern = re.compile(r"(\d+(\.\d+)?)[多余几]?" 
+ COM_QUANTIFIERS) - matchers = pattern.findall(text) - if matchers: - # print('cardinal+quantifier') - for matcher in matchers: - text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) - - # 规范化小数 - pattern = re.compile(r"(\d+\.\d+)") - matchers = pattern.findall(text) - if matchers: - # print('cardinal') - for matcher in matchers: - text = text.replace(matcher, Cardinal(cardinal=matcher).cardinal2chntext(), 1) - - # 规范化数字编号 - pattern = re.compile(r"(\d{4,32})") - matchers = pattern.findall(text) - if matchers: - # print('digit') - for matcher in matchers: - text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1) - - # 规范化其他数字 - pattern = re.compile(r"(\d+(\.\d+)?)") - matchers = pattern.findall(text) - if matchers: - # print('cardinal') - for matcher in matchers: - text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1) - - self.norm_text = text - self._particular() - - text = self.norm_text.lstrip('^').rstrip('$') - if remove_punc: - # Punctuations removal - old_chars = CHINESE_PUNC_LIST + string.punctuation # includes all CN and EN punctuations - new_chars = ' ' * len(old_chars) - del_chars = '' - text = text.translate(str.maketrans(old_chars, new_chars, del_chars)) - return text - - -def nsw_test_case(raw_text): - print('I:' + raw_text) - print('O:' + NSWNormalizer(raw_text).normalize()) - print('') - - -def nsw_test(): - nsw_test_case('固话:0595-23865596或者23880880。') - nsw_test_case('手机:+86 19859213959或者15659451527。') - nsw_test_case('分数:32477/76391。') - nsw_test_case('百分数:80.03%。') - nsw_test_case('编号:31520181154418。') - nsw_test_case('纯数:2983.07克或12345.60米。') - nsw_test_case('日期:1999年2月20日或09年3月15号。') - nsw_test_case('金钱:12块5,34.5元,20.1万, 40多块钱') - nsw_test_case('特殊:O2O或B2C。') - nsw_test_case('3456万吨') - nsw_test_case('2938478321947个') - nsw_test_case('938') - nsw_test_case('今天吃了115个小笼包231个馒头') - nsw_test_case('有62%的概率') - - -if __name__ == '__main__': - # nsw_test() - - p = argparse.ArgumentParser() - p.add_argument('ifile', help='input filename, assume utf-8 encoding') - p.add_argument('ofile', help='output filename') - p.add_argument('--to_upper', action='store_true', help='convert to upper case') - p.add_argument('--to_lower', action='store_true', help='convert to lower case') - p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.") - p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines') - args = p.parse_args() - - ifile = codecs.open(args.ifile, 'r', 'utf8') - ofile = codecs.open(args.ofile, 'w+', 'utf8') - - n = 0 - for l in ifile: - key = '' - text = '' - if args.has_key: - cols = l.split(maxsplit=1) - key = cols[0] - if len(cols) == 2: - text = cols[1] - else: - text = '' - else: - text = l - - # cases - if args.to_upper and args.to_lower: - sys.stderr.write('text norm: to_upper OR to_lower?') - exit(1) - if args.to_upper: - text = text.upper() - if args.to_lower: - text = text.lower() - - # NSW(Non-Standard-Word) normalization - text = NSWNormalizer(text).normalize() - - # - if args.has_key: - ofile.write(key + '\t' + text) - else: - ofile.write(text) - - n += 1 - if n % args.log_interval == 0: - sys.stderr.write("text norm: {} lines done.\n".format(n)) - - sys.stderr.write("text norm: {} lines done in total.\n".format(n)) - - ifile.close() - ofile.close() diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py 
b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py deleted file mode 100644 index aa9da63f6984a9a23bc7ca78780db5be5a782399..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_t_syncbn_fast_8xb32-300e_coco.py +++ /dev/null @@ -1,17 +0,0 @@ -_base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py' - -# ======================= Possible modified parameters ======================= -# -----model related----- -# The scaling factor that controls the depth of the network structure -deepen_factor = 0.33 -# The scaling factor that controls the width of the network structure -widen_factor = 0.375 - -# ============================== Unmodified in most cases =================== -model = dict( - backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), - neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), - bbox_head=dict( - type='YOLOv6Head', - head_module=dict(widen_factor=widen_factor), - loss_bbox=dict(iou_mode='siou'))) diff --git a/spaces/Abhilashvj/planogram-compliance/utils/aws/resume.py b/spaces/Abhilashvj/planogram-compliance/utils/aws/resume.py deleted file mode 100644 index a259664084088745beefbc6a4e6f173fa8bc1a16..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/utils/aws/resume.py +++ /dev/null @@ -1,42 +0,0 @@ -# Resume all interrupted trainings in yolov5/ dir including DDP trainings -# Usage: $ python utils/aws/resume.py - -import os -import sys -from pathlib import Path - -import torch -import yaml - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[2] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -port = 0 # --master_port -path = Path("").resolve() -for last in path.rglob("*/**/last.pt"): - ckpt = torch.load(last) - if ckpt["optimizer"] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / "opt.yaml", errors="ignore") as f: - opt = yaml.safe_load(f) - - # Get device count - d = opt["device"].split(",") # devices - nd = len(d) # number of devices - ddp = nd > 1 or ( - nd == 0 and torch.cuda.device_count() > 1 - ) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f"python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}" - else: # single-GPU - cmd = f"python train.py --resume {last}" - - cmd += " > /dev/null 2>&1 &" # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.d.ts deleted file mode 100644 index 2bdd713bdd4157e1f1a7e2ec0118c1aa048f3edd..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/Factory.d.ts +++ /dev/null @@ -1,5 +0,0 @@ -import Pages from './Pages'; - -export default function ( - config?: Pages.IConfig -): Pages; \ No newline at end of file diff --git a/spaces/Aki004/herta-so-vits/inference/__init__.py b/spaces/Aki004/herta-so-vits/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py 
b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py deleted file mode 100644 index acd00238895d57ba878fd0211d5654250fb10061..0000000000000000000000000000000000000000 --- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_models.py +++ /dev/null @@ -1,509 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import ONNXVITS_modules as modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - self.w = None - self.reverse = None - self.noise_scale = None - def forward(self, x, x_mask, g=None): - w = self.w - reverse = self.reverse - noise_scale = self.noise_scale - - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] 
# remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - self.reverse = None - def forward(self, x, x_mask, g=None): - reverse = self.reverse - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask # x_in : [b, c, t] -> [b, h, t] - x = self.enc(x, x_mask, g=g) # x_in : [b, h, t], g : [b, h, 1], x = x_in + g - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask # z, m, logs : [b, h, t] - - -class Generator(torch.nn.Module): - def __init__(self, 
initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, 
padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - - if n_speakers > 0: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, sid=None, noise_scale=.667, length_scale=1, noise_scale_w=.8, max_len=None): - torch.onnx.export( - self.enc_p, - (x, x_lengths), - "ONNX_net/enc_p.onnx", - input_names=["x", "x_lengths"], - output_names=["xout", "m_p", "logs_p", "x_mask"], - dynamic_axes={ - "x" : [1], - "xout" : [2], - "m_p" : [2], - "logs_p" : [2], - "x_mask" : [2] - }, - verbose=True, - ) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) 
# [b, h, 1] - else: - g = None - - self.dp.reverse = True - self.dp.noise_scale = noise_scale_w - torch.onnx.export( - self.dp, - (x, x_mask, g), - "ONNX_net/dp.onnx", - input_names=["x", "x_mask", "g"], - output_names=["logw"], - dynamic_axes={ - "x" : [2], - "x_mask" : [2], - "logw" : [2] - }, - verbose=True, - ) - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - self.flow.reverse = True - torch.onnx.export( - self.flow, - (z_p, y_mask, g), - "ONNX_net/flow.onnx", - input_names=["z_p", "y_mask", "g"], - output_names=["z"], - dynamic_axes={ - "z_p" : [2], - "y_mask" : [2], - "z" : [2] - }, - verbose=True, - ) - z = self.flow(z_p, y_mask, g=g) - z_in = (z * y_mask)[:,:,:max_len] - - torch.onnx.export( - self.dec, - (z_in, g), - "ONNX_net/dec.onnx", - input_names=["z_in", "g"], - output_names=["o"], - dynamic_axes={ - "z_in" : [2], - "o" : [2] - }, - verbose=True, - ) - o = self.dec(z_in, g=g) - return o diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/llff_dataset.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/llff_dataset.py deleted file mode 100644 index 28c4cafe07f54ff4314f32c0f5b0f8e89795848b..0000000000000000000000000000000000000000 --- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/dataset/llff_dataset.py +++ /dev/null @@ -1,292 +0,0 @@ -import torch -from torch.utils.data import Dataset -import glob -import numpy as np -import os -from PIL import Image -from torchvision import transforms as T - -from .ray_utils import * - - -def normalize(v): - """Normalize a vector.""" - return v / np.linalg.norm(v) - - -def average_poses(poses): - """ - Calculate the average pose, which is then used to center all poses - using @center_poses. Its computation is as follows: - 1. Compute the center: the average of pose centers. - 2. Compute the z axis: the normalized average z axis. - 3. Compute axis y': the average y axis. - 4. Compute x' = y' cross product z, then normalize it as the x axis. - 5. Compute the y axis: z cross product x. - - Note that at step 3, we cannot directly use y' as y axis since it's - not necessarily orthogonal to z axis. We need to pass from x to y. - Inputs: - poses: (N_images, 3, 4) - Outputs: - pose_avg: (3, 4) the average pose - """ - # 1. Compute the center - center = poses[..., 3].mean(0) # (3) - - # 2. Compute the z axis - z = normalize(poses[..., 2].mean(0)) # (3) - - # 3. Compute axis y' (no need to normalize as it's not the final output) - y_ = poses[..., 1].mean(0) # (3) - - # 4. Compute the x axis - x = normalize(np.cross(z, y_)) # (3) - - # 5. Compute the y axis (as z and x are normalized, y is already of norm 1) - y = np.cross(x, z) # (3) - - pose_avg = np.stack([x, y, z, center], 1) # (3, 4) - - return pose_avg - - -def center_poses(poses, blender2opencv): - """ - Center the poses so that we can use NDC. 
- See https://github.com/bmild/nerf/issues/34 - Inputs: - poses: (N_images, 3, 4) - Outputs: - poses_centered: (N_images, 3, 4) the centered poses - pose_avg: (3, 4) the average pose - """ - poses = poses @ blender2opencv - pose_avg = average_poses(poses) # (3, 4) - pose_avg_homo = np.eye(4) - pose_avg_homo[:3] = pose_avg # convert to homogeneous coordinate for faster computation - pose_avg_homo = pose_avg_homo - # by simply adding 0, 0, 0, 1 as the last row - last_row = np.tile(np.array([0, 0, 0, 1]), (len(poses), 1, 1)) # (N_images, 1, 4) - poses_homo = \ - np.concatenate([poses, last_row], 1) # (N_images, 4, 4) homogeneous coordinate - - poses_centered = np.linalg.inv(pose_avg_homo) @ poses_homo # (N_images, 4, 4) - # poses_centered = poses_centered @ blender2opencv - poses_centered = poses_centered[:, :3] # (N_images, 3, 4) - - return poses_centered, pose_avg_homo - - -def viewmatrix(z, up, pos): - vec2 = normalize(z) - vec1_avg = up - vec0 = normalize(np.cross(vec1_avg, vec2)) - vec1 = normalize(np.cross(vec2, vec0)) - m = np.eye(4) - m[:3] = np.stack([-vec0, vec1, vec2, pos], 1) - return m - - -def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120): - render_poses = [] - rads = np.array(list(rads) + [1.]) - - for theta in np.linspace(0., 2. * np.pi * N_rots, N + 1)[:-1]: - c = np.dot(c2w[:3, :4], np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) * rads) - z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.]))) - render_poses.append(viewmatrix(z, up, c)) - return render_poses - - -def get_spiral(c2ws_all, near_fars, rads_scale=1.0, N_views=120): - # center pose - c2w = average_poses(c2ws_all) - - # Get average pose - up = normalize(c2ws_all[:, :3, 1].sum(0)) - - # Find a reasonable "focus depth" for this dataset - dt = 0.75 - close_depth, inf_depth = near_fars.min() * 0.9, near_fars.max() * 5.0 - focal = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth)) - - # Get radii for spiral path - zdelta = near_fars.min() * .2 - tt = c2ws_all[:, :3, 3] - rads = np.percentile(np.abs(tt), 90, 0) * rads_scale - render_poses = render_path_spiral(c2w, up, rads, focal, zdelta, zrate=.5, N=N_views) - return np.stack(render_poses) - - -def get_interpolation_path(c2ws_all, steps=30): - # flower - # idx0 = 1 - # idx1 = 10 - - # trex - # idx0 = 8 - # idx1 = 53 - - # horns - idx0 = 18 - idx1 = 47 - - v = np.linspace(0, 1, num=steps) - - c2w0 = c2ws_all[idx0] - c2w1 = c2ws_all[idx1] - - c2w_ = [] - for i in range(steps): - c2w_.append(c2w0 * v[i] + c2w1 * (1 - v[i])) - - return np.stack(c2w_) - - -class LLFFDataset(Dataset): - def __init__(self, datadir, split='train', downsample=4, is_stack=False, hold_every=8, N_vis=-1): - - self.root_dir = datadir - self.split = split - self.hold_every = hold_every - self.is_stack = is_stack - self.downsample = downsample - self.define_transforms() - - self.blender2opencv = np.eye(4) # np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - self.read_meta() - self.white_bg = False - - # self.near_far = [np.min(self.near_fars[:,0]),np.max(self.near_fars[:,1])] - self.near_far = [0.0, 1.0] - self.scene_bbox = torch.tensor([[-1.5, -1.67, -1.0], [1.5, 1.67, 1.0]]) - # self.scene_bbox = torch.tensor([[-1.67, -1.5, -1.0], [1.67, 1.5, 1.0]]) - self.center = torch.mean(self.scene_bbox, dim=0).float().view(1, 1, 3) - self.invradius = 1.0 / (self.scene_bbox[1] - self.center).float().view(1, 1, 3) - - def read_meta(self): - - poses_bounds = np.load(os.path.join(self.root_dir, 'poses_bounds.npy')) # (N_images, 
17) - self.image_paths = sorted(glob.glob(os.path.join(self.root_dir, 'images_4/*'))) - # load full resolution image then resize - if self.split in ['train', 'test']: - assert len(poses_bounds) == len(self.image_paths), \ - 'Mismatch between number of images and number of poses! Please rerun COLMAP!' - - poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5) - self.near_fars = poses_bounds[:, -2:] # (N_images, 2) - hwf = poses[:, :, -1] - - # Step 1: rescale focal length according to training resolution - H, W, self.focal = poses[0, :, -1] # original intrinsics, same for all images - self.img_wh = np.array([int(W / self.downsample), int(H / self.downsample)]) - self.focal = [self.focal * self.img_wh[0] / W, self.focal * self.img_wh[1] / H] - - # Step 2: correct poses - # Original poses has rotation in form "down right back", change to "right up back" - # See https://github.com/bmild/nerf/issues/34 - poses = np.concatenate([poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1) - # (N_images, 3, 4) exclude H, W, focal - self.poses, self.pose_avg = center_poses(poses, self.blender2opencv) - - # Step 3: correct scale so that the nearest depth is at a little more than 1.0 - # See https://github.com/bmild/nerf/issues/34 - near_original = self.near_fars.min() - scale_factor = near_original * 0.75 # 0.75 is the default parameter - # the nearest depth is at 1/0.75=1.33 - self.near_fars /= scale_factor - self.poses[..., 3] /= scale_factor - - # build rendering path - N_views, N_rots = 120, 2 - tt = self.poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T - up = normalize(self.poses[:, :3, 1].sum(0)) - rads = np.percentile(np.abs(tt), 90, 0) - - self.render_path = get_spiral(self.poses, self.near_fars, N_views=N_views) - # self.render_path = get_interpolation_path(self.poses) - - # distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1) - # val_idx = np.argmin(distances_from_center) # choose val image as the closest to - # center image - - # ray directions for all pixels, same for all images (same H, W, focal) - W, H = self.img_wh - self.directions = get_ray_directions_blender(H, W, self.focal) # (H, W, 3) - - average_pose = average_poses(self.poses) - dists = np.sum(np.square(average_pose[:3, 3] - self.poses[:, :3, 3]), -1) - i_test = np.arange(0, self.poses.shape[0], self.hold_every) # [np.argmin(dists)] - img_list = i_test if self.split != 'train' else list(set(np.arange(len(self.poses))) - set(i_test)) - - # use first N_images-1 to train, the LAST is val - self.all_rays = [] - self.all_rgbs = [] - for i in img_list: - image_path = self.image_paths[i] - c2w = torch.FloatTensor(self.poses[i]) - - img = Image.open(image_path).convert('RGB') - if self.downsample != 1.0: - img = img.resize(self.img_wh, Image.LANCZOS) - img = self.transform(img) # (3, h, w) - - img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB - self.all_rgbs += [img] - rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3) - rays_o, rays_d = ndc_rays_blender(H, W, self.focal[0], 1.0, rays_o, rays_d) - # viewdir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) - - self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6) - - all_rays = self.all_rays - all_rgbs = self.all_rgbs - - self.all_rays = torch.cat(self.all_rays, 0) # (len(self.meta['frames])*h*w,6) - self.all_rgbs = torch.cat(self.all_rgbs, 0) # (len(self.meta['frames])*h*w,3) - - if self.is_stack: - self.all_rays_stack = torch.stack(all_rays, 0).reshape(-1, *self.img_wh[::-1], - 6) # (len(self.meta['frames]),h,w,6) - avg_pool = 
torch.nn.AvgPool2d(4, ceil_mode=True) - self.ds_all_rays_stack = avg_pool(self.all_rays_stack.permute(0, 3, 1, 2)).permute(0, 2, 3, - 1) # (len(self.meta['frames]),h/4,w/4,6) - self.all_rgbs_stack = torch.stack(all_rgbs, 0).reshape(-1, *self.img_wh[::-1], - 3) # (len(self.meta['frames]),h,w,3) - - @torch.no_grad() - def prepare_feature_data(self, encoder, chunk=8): - ''' - Prepare feature maps as training data. - ''' - assert self.is_stack, 'Dataset should contain original stacked taining data!' - print('====> prepare_feature_data ...') - - frames_num, h, w, _ = self.all_rgbs_stack.size() - features = [] - - for chunk_idx in range(frames_num // chunk + int(frames_num % chunk > 0)): - rgbs_chunk = self.all_rgbs_stack[chunk_idx * chunk: (chunk_idx + 1) * chunk].cuda() - features_chunk = encoder(normalize_vgg(rgbs_chunk.permute(0, 3, 1, 2))).relu3_1 - # resize to the size of rgb map so that rays can match - features_chunk = T.functional.resize(features_chunk, size=(h, w), - interpolation=T.InterpolationMode.BILINEAR) - features.append(features_chunk.detach().cpu().requires_grad_(False)) - - self.all_features_stack = torch.cat(features).permute(0, 2, 3, 1) # (len(self.meta['frames]),h,w,256) - self.all_features = self.all_features_stack.reshape(-1, 256) - print('prepare_feature_data Done!') - - def define_transforms(self): - self.transform = T.ToTensor() - - def __len__(self): - return len(self.all_rgbs) - - def __getitem__(self, idx): - - sample = {'rays': self.all_rays[idx], - 'rgbs': self.all_rgbs[idx]} - - return sample \ No newline at end of file diff --git a/spaces/Anandbheesetti/MNIST_digit_predictor/app.py b/spaces/Anandbheesetti/MNIST_digit_predictor/app.py deleted file mode 100644 index 80f90cf98261ea36a94ecd779bd55597dbf40638..0000000000000000000000000000000000000000 --- a/spaces/Anandbheesetti/MNIST_digit_predictor/app.py +++ /dev/null @@ -1,105 +0,0 @@ - - -## importing the necessary libraries -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -import tensorflow as tf - -##loading the MNIST dataset -mnist=tf.keras.datasets.mnist - -#splitting the data into training and testing datasets -(x_train_full,y_train_full),(x_test,y_test)=mnist.load_data() - -## let us check the shapes of the training and testing datasets -x_train_full.shape - -x_test.shape - -## As we know that the digits are stored in form of the pixels. 
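For reference, the pixel-format claims in the surrounding comments are easy to verify; below is a minimal, standalone sketch (it reloads MNIST the same way the script does, so the variable names are illustrative only, not part of the original app).

```python
# Illustrative sanity check: each MNIST digit is a 28x28 uint8 image whose grey
# levels span 0..255, which is why the script later scales the arrays by 1/255.
import tensorflow as tf

(x_train_full, y_train_full), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train_full.shape, x_train_full.dtype)  # (60000, 28, 28) uint8
print(x_test.shape)                            # (10000, 28, 28)
print(x_train_full.min(), x_train_full.max())  # 0 255
print((x_train_full / 255.0).max())            # 1.0 after scaling to [0, 1]
```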
-## each digit has 28 pixels -## All pixels ranges from 0 to 255 grey levels - -# Creating a validation data set from the training data set -x_valid,x_train=x_train_full[:5000],x_train_full[5000:] - -# Now we will scale the data between 0 to 1 by dividing it by 255 -x_valid=x_valid/255 - -x_train=x_train/255 - -x_test=x_test/255 - -y_valid,y_train=y_train_full[:5000],y_train_full[5000:] - -# Now let us visualize how the MNIST data looks like -plt.imshow(x_train[0],cmap="binary") -plt.show() - -# To visualize it in at grey levels -plt.figure(figsize=(15,15)) -sns.heatmap(x_train[0],annot=True,cmap="binary") - -# Now we will create a Artificial neural network with some hidden layers to build a model that predicts the written digit -Layers=[tf.keras.layers.Flatten(input_shape=[28,28],name="inputlayer"), - tf.keras.layers.Dense(300,activation="relu",name="hiddenlayer1"), - tf.keras.layers.Dense(100,activation="relu",name="hiddenlayer2"), - tf.keras.layers.Dense(10,activation="softmax",name="outputlayer")] - -# Now we will buila a Sequential model -model_clf=tf.keras.models.Sequential(Layers) - -model_clf.layers - -# Let us see the summary of the model -model_clf.summary() - -weights,biases=model_clf.layers[1].get_weights() - -# Defining the parameters to train the model -LOSS_FUNCTION="sparse_categorical_crossentropy" -OPTIMIZER="SGD" -METRICS=["accuracy"] - -model_clf.compile(loss=LOSS_FUNCTION, - optimizer=OPTIMIZER, - metrics=METRICS) - -EPOCHS=30 -VALIDATION_SET=(x_valid,y_valid) - -history=model_clf.fit(x_train,y_train,epochs=EPOCHS, - validation_data=VALIDATION_SET, - batch_size=32) - -history.params - -pd.DataFrame(history.history).plot() - -model_clf.evaluate(x_test,y_test) - -x_new=x_test[:3] -actual=y_test[:3] -y_prob=model_clf.predict(x_new) -y_pred=np.argmax(y_prob,axis=-1) - -for i,j,k in zip(x_new,y_pred,actual): - plt.imshow(i,cmap="binary") - plt.title(f"predicted {j} and actual is {k}") - plt.axis("off") - plt.show() - print('##########################') - - import gradio as gd - def makePred(img): - img_3d=img.reshape(-1,28,28) - im_resize=img_3d/255.0 - predict=model_clf.predict(im_resize) - pred=np.argmax(predict) - return str(pred) - -demo = gd.Interface(makePred, inputs='sketchpad', outputs='label') -demo.launch(debug='True') - diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py deleted file mode 100644 index 8ffeed580bbea1514b11bf7a168a952328d8f424..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/deepfloyd_if/safety_checker.py +++ /dev/null @@ -1,59 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel - -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -class IFSafetyChecker(PreTrainedModel): - config_class = CLIPConfig - - _no_split_modules = ["CLIPEncoderLayer"] - - def __init__(self, config: CLIPConfig): - super().__init__(config) - - self.vision_model = CLIPVisionModelWithProjection(config.vision_config) - - self.p_head = nn.Linear(config.vision_config.projection_dim, 1) - self.w_head = nn.Linear(config.vision_config.projection_dim, 1) - - @torch.no_grad() - def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5): - image_embeds = self.vision_model(clip_input)[0] - - 
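# Descriptive note: `image_embeds` is the projected CLIP image embedding of shape
# (batch, projection_dim). The two linear heads below each reduce it to a single
# logit per image: `p_head` scores NSFW content and `w_head` scores watermarks.
# The logits are flattened, compared against `p_threshold` / `w_threshold`, and
# every flagged image is replaced by an all-zero (black) array of the same shape.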
nsfw_detected = self.p_head(image_embeds) - nsfw_detected = nsfw_detected.flatten() - nsfw_detected = nsfw_detected > p_threshold - nsfw_detected = nsfw_detected.tolist() - - if any(nsfw_detected): - logger.warning( - "Potential NSFW content was detected in one or more images. A black image will be returned instead." - " Try again with a different prompt and/or seed." - ) - - for idx, nsfw_detected_ in enumerate(nsfw_detected): - if nsfw_detected_: - images[idx] = np.zeros(images[idx].shape) - - watermark_detected = self.w_head(image_embeds) - watermark_detected = watermark_detected.flatten() - watermark_detected = watermark_detected > w_threshold - watermark_detected = watermark_detected.tolist() - - if any(watermark_detected): - logger.warning( - "Potential watermarked content was detected in one or more images. A black image will be returned instead." - " Try again with a different prompt and/or seed." - ) - - for idx, watermark_detected_ in enumerate(watermark_detected): - if watermark_detected_: - images[idx] = np.zeros(images[idx].shape) - - return images, nsfw_detected, watermark_detected diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py deleted file mode 100644 index b77c1baf41d5abe4adb17aebb600b80eedda6c39..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/test_versatile_diffusion_mega.py +++ /dev/null @@ -1,129 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
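To make the thresholding logic of `IFSafetyChecker.forward` above concrete, here is a minimal, hypothetical sketch of calling the checker directly. The default `CLIPConfig`, the randomly initialised weights, and the dummy input shapes are assumptions for illustration only; the DeepFloyd IF pipelines load pretrained weights and a matching feature extractor instead.

```python
# Illustrative only: default config, random weights, dummy inputs (assumed shapes).
import numpy as np
import torch
from transformers import CLIPConfig

config = CLIPConfig()                      # default vision config (image_size=224)
checker = IFSafetyChecker(config).eval()   # class defined above

clip_input = torch.randn(2, 3, 224, 224)   # CLIP pixel values (assumed preprocessing)
images = np.random.rand(2, 64, 64, 3).astype(np.float32)  # pipeline images as a numpy batch

images, nsfw_detected, watermark_detected = checker(clip_input, images)
# Any image whose p_head / w_head logit exceeds the 0.5 default threshold comes
# back as an all-zero (black) array; the two boolean lists indicate which ones.
```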
- -import gc -import tempfile -import unittest - -import numpy as np -import torch - -from diffusers import VersatileDiffusionPipeline -from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device - - -torch.backends.cuda.matmul.allow_tf32 = False - - -class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase): - pass - - -@nightly -@require_torch_gpu -class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_from_save_pretrained(self): - pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - - generator = torch.manual_seed(0) - image = pipe.dual_guided( - prompt="first prompt", - image=prompt_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - with tempfile.TemporaryDirectory() as tmpdirname: - pipe.save_pretrained(tmpdirname) - pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - generator = generator.manual_seed(0) - new_image = pipe.dual_guided( - prompt="first prompt", - image=prompt_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=2, - output_type="numpy", - ).images - - assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" - - def test_inference_dual_guided_then_text_to_image(self): - pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16) - pipe.to(torch_device) - pipe.set_progress_bar_config(disable=None) - - prompt = "cyberpunk 2077" - init_image = load_image( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" - ) - generator = torch.manual_seed(0) - image = pipe.dual_guided( - prompt=prompt, - image=init_image, - text_to_image_strength=0.75, - generator=generator, - guidance_scale=7.5, - num_inference_steps=50, - output_type="numpy", - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - prompt = "A painting of a squirrel eating a burger " - generator = torch.manual_seed(0) - image = pipe.text_to_image( - prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy" - ).images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 - - image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images - - image_slice = image[0, 253:256, 253:256, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456]) - - assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-1 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py b/spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py deleted file mode 100644 index f2deb99e44cba92fd79d0a2cd258ddf6927703c0..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py +++ /dev/null @@ -1,28 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - type='DynamicRoIHead', - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - train_cfg=dict( - rpn_proposal=dict(nms=dict(iou_threshold=0.85)), - rcnn=dict( - dynamic_rcnn=dict( - iou_topk=75, - beta_topk=10, - update_iter_interval=100, - initial_iou=0.4, - initial_beta=1.0))), - test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85)))) diff --git a/spaces/Andy1621/uniformer_image_detection/tools/model_converters/regnet2mmdet.py b/spaces/Andy1621/uniformer_image_detection/tools/model_converters/regnet2mmdet.py deleted file mode 100644 index 9f4e316d37569a6fbeb6329bd36abaa822b20ccf..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/tools/model_converters/regnet2mmdet.py +++ /dev/null @@ -1,89 +0,0 @@ -import argparse -from collections import OrderedDict - -import torch - - -def convert_stem(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('stem.conv', 'conv1') - new_key = new_key.replace('stem.bn', 'bn1') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_head(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('head.fc', 'fc') - state_dict[new_key] = model_weight - converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') - - -def convert_reslayer(model_key, model_weight, state_dict, converted_names): - split_keys = model_key.split('.') - layer, block, module = split_keys[:3] - block_id = int(block[1:]) - layer_name = f'layer{int(layer[1:])}' - block_name = f'{block_id - 1}' - - if block_id == 1 and module == 'bn': - new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' - elif block_id == 1 and module == 'proj': - new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' - elif module == 'f': - if split_keys[3] == 'a_bn': - module_name = 'bn1' - elif split_keys[3] == 'b_bn': - module_name = 'bn2' - elif split_keys[3] == 'c_bn': - module_name = 'bn3' - elif split_keys[3] == 'a': - module_name = 'conv1' - elif split_keys[3] == 'b': - module_name = 'conv2' - elif split_keys[3] == 'c': - module_name = 'conv3' - new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' - else: - raise ValueError(f'Unsupported conversion of key {model_key}') - print(f'Convert {model_key} to {new_key}') - state_dict[new_key] = model_weight - converted_names.add(model_key) - - -def convert(src, dst): - """Convert keys in pycls pretrained RegNet models to mmdet style.""" - # load caffe model - regnet_model = torch.load(src) - blobs = regnet_model['model_state'] - # 
convert to pytorch style - state_dict = OrderedDict() - converted_names = set() - for key, weight in blobs.items(): - if 'stem' in key: - convert_stem(key, weight, state_dict, converted_names) - elif 'head' in key: - convert_head(key, weight, state_dict, converted_names) - elif key.startswith('s'): - convert_reslayer(key, weight, state_dict, converted_names) - - # check if all layers are converted - for key in blobs: - if key not in converted_names: - print(f'not converted: {key}') - # save checkpoint - checkpoint = dict() - checkpoint['state_dict'] = state_dict - torch.save(checkpoint, dst) - - -def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') - args = parser.parse_args() - convert(args.src, args.dst) - - -if __name__ == '__main__': - main() diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_hf.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_hf.py deleted file mode 100644 index 37f86e088002d29a297f3df851b1a4b259929713..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/llamacpp_hf.py +++ /dev/null @@ -1,213 +0,0 @@ -import os -from pathlib import Path -from typing import Any, Dict, Optional, Union - -import torch -from torch.nn import CrossEntropyLoss -from transformers import GenerationConfig, PretrainedConfig, PreTrainedModel -from transformers.modeling_outputs import CausalLMOutputWithPast - -from modules import RoPE, shared -from modules.logging_colors import logger - -try: - import llama_cpp -except: - llama_cpp = None - -try: - import llama_cpp_cuda -except: - llama_cpp_cuda = None - - -def llama_cpp_lib(): - if (shared.args.cpu and llama_cpp is not None) or llama_cpp_cuda is None: - return llama_cpp - else: - return llama_cpp_cuda - - -class LlamacppHF(PreTrainedModel): - def __init__(self, model, path): - super().__init__(PretrainedConfig()) - self.model = model - self.generation_config = GenerationConfig() - - self.past_seq = None - self.llamacpp_cache = { - 'n_tokens': self.model.n_tokens, - 'input_ids': self.model.input_ids, - 'scores': self.model.scores, - 'ctx': self.model.ctx - } - - if shared.args.cfg_cache: - self.past_seq_negative = None - self.llamacpp_cache_negative = { - 'n_tokens': self.model.n_tokens, - 'input_ids': self.model.input_ids.copy(), - 'scores': self.model.scores.copy(), - 'ctx': llama_cpp_lib().llama_new_context_with_model(model.model, model.context_params) - } - - def _validate_model_class(self): - pass - - def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): - pass - - def prepare_inputs_for_generation(self, input_ids, **kwargs): - return {'input_ids': input_ids, **kwargs} - - def save_cache(self): - self.llamacpp_cache.update({ - 'n_tokens': self.model.n_tokens, - 'input_ids': self.model.input_ids, - 'scores': self.model.scores, - 'ctx': self.model.ctx - }) - - def save_negative_cache(self): - self.llamacpp_cache_negative.update({ - 'n_tokens': self.model.n_tokens, - 'input_ids': self.model.input_ids, - 'scores': self.model.scores, - 'ctx': self.model.ctx - }) - - def load_cache(self): - self.model.n_tokens = self.llamacpp_cache['n_tokens'] - self.model.input_ids = self.llamacpp_cache['input_ids'] - self.model.scores = self.llamacpp_cache['scores'] - self.model.ctx = self.llamacpp_cache['ctx'] - - def load_negative_cache(self): - self.model.n_tokens = 
self.llamacpp_cache_negative['n_tokens'] - self.model.input_ids = self.llamacpp_cache_negative['input_ids'] - self.model.scores = self.llamacpp_cache_negative['scores'] - self.model.ctx = self.llamacpp_cache_negative['ctx'] - - @property - def device(self) -> torch.device: - return torch.device(0) - - def __call__(self, *args, **kwargs): - use_cache = kwargs.get('use_cache', True) - labels = kwargs.get('labels', None) - past_key_values = kwargs.get('past_key_values', None) - - if len(args) > 0: - if not shared.args.cfg_cache: - logger.error("Please enable the cfg-cache option to use CFG with llamacpp_HF.") - return - - input_ids = args[0] - is_negative = True - past_seq = self.past_seq_negative - self.load_negative_cache() - else: - input_ids = kwargs['input_ids'] - is_negative = False - past_seq = self.past_seq - self.load_cache() - - seq = input_ids[0].tolist() - if is_negative and past_key_values is not None: - seq = past_key_values + seq - - seq_tensor = torch.tensor(seq) - reset = True - - # Make the forward call. The prefix-match code has been adapted from - # https://github.com/abetlen/llama-cpp-python/commit/f4090a0bb2a2a25acfe28d31c82cc1aa273bedee - if labels is None: - if past_seq is not None: - min_length = min(past_seq.shape[0], seq_tensor.shape[0]) - indices = torch.nonzero(~torch.eq(past_seq[:min_length], seq_tensor[:min_length])) - if len(indices) > 0: - longest_prefix = indices[0].item() - else: - longest_prefix = min_length - - if longest_prefix > 0: - reset = False - self.model.n_tokens = longest_prefix - if len(seq_tensor) - longest_prefix > 0: - self.model.eval(seq[longest_prefix:]) - - if reset: - self.model.reset() - self.model.eval(seq) - - logits = torch.tensor(self.model.scores[self.model.n_tokens - 1, :]).view(1, 1, -1).to(input_ids.device) - else: - self.model.reset() - self.model.eval(seq) - logits = torch.tensor(self.model.eval_logits) - logits = logits.view(1, logits.shape[0], logits.shape[1]).to(input_ids.device) - - if is_negative: - self.save_negative_cache() - self.past_seq_negative = seq_tensor - else: - self.save_cache() - self.past_seq = seq_tensor - - loss = None - if labels is not None: - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - # Flatten the tokens - loss_fct = CrossEntropyLoss() - shift_logits = shift_logits.view(-1, logits.shape[-1]) - shift_labels = shift_labels.view(-1) - # Enable model parallelism - shift_labels = shift_labels.to(shift_logits.device) - loss = loss_fct(shift_logits, shift_labels) - - return CausalLMOutputWithPast(logits=logits, past_key_values=seq if use_cache else None, loss=loss) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): - assert len(model_args) == 0 and len(kwargs) == 0, "extra args is currently not supported" - - if isinstance(pretrained_model_name_or_path, str): - pretrained_model_name_or_path = Path(pretrained_model_name_or_path) - - path = Path(f'{shared.args.model_dir}') / Path(pretrained_model_name_or_path) - if path.is_file(): - model_file = path - else: - model_file = list(path.glob('*.gguf'))[0] - - logger.info(f"llama.cpp weights detected: {model_file}\n") - - if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '': - tensor_split_list = None - else: - tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")] - - params = { - 'model_path': str(model_file), - 'n_ctx': shared.args.n_ctx, - 
'seed': int(shared.args.llama_cpp_seed), - 'n_threads': shared.args.threads or None, - 'n_threads_batch': shared.args.threads_batch or None, - 'n_batch': shared.args.n_batch, - 'use_mmap': not shared.args.no_mmap, - 'use_mlock': shared.args.mlock, - 'mul_mat_q': shared.args.mul_mat_q, - 'numa': shared.args.numa, - 'n_gpu_layers': shared.args.n_gpu_layers, - 'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base), - 'tensor_split': tensor_split_list, - 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb, - 'logits_all': True, - } - - Llama = llama_cpp_lib().Llama - model = Llama(**params) - - return LlamacppHF(model, model_file) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py deleted file mode 100644 index bf780a1b6f6521833c6a5859675147824efa599d..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=20000) -checkpoint_config = dict(by_epoch=False, interval=2000) -evaluation = dict(interval=2000, metric='mIoU') diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/builder.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/builder.py deleted file mode 100644 index db61f03d4abb2072f2532ce4429c0842495e015b..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/core/seg/builder.py +++ /dev/null @@ -1,8 +0,0 @@ -from annotator.uniformer.mmcv.utils import Registry, build_from_cfg - -PIXEL_SAMPLERS = Registry('pixel sampler') - - -def build_pixel_sampler(cfg, **default_args): - """Build pixel sampler for segmentation map.""" - return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) diff --git a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/models.py b/spaces/ArkanDash/rvc-models-new/lib/infer_pack/models.py deleted file mode 100644 index 3665d03bc0514a6ed07d3372ea24717dae1e0a65..0000000000000000000000000000000000000000 --- a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/models.py +++ /dev/null @@ -1,1142 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = 
p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - 
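# The layers built below form the posterior encoder over the linear spectrogram:
# a 1x1 pre-convolution, a WaveNet-style stack (modules.WN) optionally conditioned
# on the speaker embedding g, and a 1x1 projection to 2 * out_channels that
# forward() splits into the mean m and log-scale logs (std = exp(logs)) used for
# the reparameterised latent z.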
self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - 
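# dim = harmonic_num + 1: one sine channel for the fundamental frequency plus one
# per harmonic overtone. Frames with f0 <= voiced_threshold are treated as
# unvoiced and receive only Gaussian noise (std sine_amp / 3) instead of sines.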
self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - 
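# half-precision inference: cast the sine excitation to fp16 before the
# tanh(linear(...)) merge so its dtype matches the rest of the model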
sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = 
kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - 
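# SynthesizerTrnMs768NSFsid mirrors the 256-dim variant above; the only
# difference is that enc_p is a TextEncoder768, i.e. the content features fed
# to emb_phone are 768-dimensional instead of 256-dimensional.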
self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # 
self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = 
ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, 
padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Arnx/MusicGenXvAKN/audiocraft/utils/utils.py b/spaces/Arnx/MusicGenXvAKN/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. - Returns: - dict: Config as dictionary object. 
- """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. - """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. - """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. 
- """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). - """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." - final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. - """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. - - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. 
- Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). - """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py deleted file mode 100644 index f3dcbce9b9fa2904fc361ef09139aeec3568685e..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py +++ /dev/null @@ -1,170 +0,0 @@ -""" - pygments.formatters.groff - ~~~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for groff output. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import math -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.util import get_bool_opt, get_int_opt - -__all__ = ['GroffFormatter'] - - -class GroffFormatter(Formatter): - """ - Format tokens with groff escapes to change their color and font style. - - .. versionadded:: 2.11 - - Additional options accepted: - - `style` - The style to use, can be a string or a Style subclass (default: - ``'default'``). - - `monospaced` - If set to true, monospace font will be used (default: ``true``). - - `linenos` - If set to true, print the line numbers (default: ``false``). - - `wrap` - Wrap lines to the specified number of characters. Disabled if set to 0 - (default: ``0``). 
- """ - - name = 'groff' - aliases = ['groff','troff','roff'] - filenames = [] - - def __init__(self, **options): - Formatter.__init__(self, **options) - - self.monospaced = get_bool_opt(options, 'monospaced', True) - self.linenos = get_bool_opt(options, 'linenos', False) - self._lineno = 0 - self.wrap = get_int_opt(options, 'wrap', 0) - self._linelen = 0 - - self.styles = {} - self._make_styles() - - - def _make_styles(self): - regular = '\\f[CR]' if self.monospaced else '\\f[R]' - bold = '\\f[CB]' if self.monospaced else '\\f[B]' - italic = '\\f[CI]' if self.monospaced else '\\f[I]' - - for ttype, ndef in self.style: - start = end = '' - if ndef['color']: - start += '\\m[%s]' % ndef['color'] - end = '\\m[]' + end - if ndef['bold']: - start += bold - end = regular + end - if ndef['italic']: - start += italic - end = regular + end - if ndef['bgcolor']: - start += '\\M[%s]' % ndef['bgcolor'] - end = '\\M[]' + end - - self.styles[ttype] = start, end - - - def _define_colors(self, outfile): - colors = set() - for _, ndef in self.style: - if ndef['color'] is not None: - colors.add(ndef['color']) - - for color in colors: - outfile.write('.defcolor ' + color + ' rgb #' + color + '\n') - - - def _write_lineno(self, outfile): - self._lineno += 1 - outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno)) - - - def _wrap_line(self, line): - length = len(line.rstrip('\n')) - space = ' ' if self.linenos else '' - newline = '' - - if length > self.wrap: - for i in range(0, math.floor(length / self.wrap)): - chunk = line[i*self.wrap:i*self.wrap+self.wrap] - newline += (chunk + '\n' + space) - remainder = length % self.wrap - if remainder > 0: - newline += line[-remainder-1:] - self._linelen = remainder - elif self._linelen + length > self.wrap: - newline = ('\n' + space) + line - self._linelen = length - else: - newline = line - self._linelen += length - - return newline - - - def _escape_chars(self, text): - text = text.replace('\\', '\\[u005C]'). \ - replace('.', '\\[char46]'). \ - replace('\'', '\\[u0027]'). \ - replace('`', '\\[u0060]'). 
\ - replace('~', '\\[u007E]') - copy = text - - for char in copy: - if len(char) != len(char.encode()): - uni = char.encode('unicode_escape') \ - .decode()[1:] \ - .replace('x', 'u00') \ - .upper() - text = text.replace(char, '\\[u' + uni[1:] + ']') - - return text - - - def format_unencoded(self, tokensource, outfile): - self._define_colors(outfile) - - outfile.write('.nf\n\\f[CR]\n') - - if self.linenos: - self._write_lineno(outfile) - - for ttype, value in tokensource: - while ttype not in self.styles: - ttype = ttype.parent - start, end = self.styles[ttype] - - for line in value.splitlines(True): - if self.wrap > 0: - line = self._wrap_line(line) - - if start and end: - text = self._escape_chars(line.rstrip('\n')) - if text != '': - outfile.write(''.join((start, text, end))) - else: - outfile.write(self._escape_chars(line.rstrip('\n'))) - - if line.endswith('\n'): - if self.linenos: - self._write_lineno(outfile) - self._linelen = 0 - else: - outfile.write('\n') - self._linelen = 0 - - outfile.write('\n.fi') diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_py.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_py.py deleted file mode 100644 index 47c6158e0f74033bfcfeb7424df227a3815651de..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_py.py +++ /dev/null @@ -1,407 +0,0 @@ -"""distutils.command.build_py - -Implements the Distutils 'build_py' command.""" - -import os -import importlib.util -import sys -import glob - -from distutils.core import Command -from distutils.errors import DistutilsOptionError, DistutilsFileError -from distutils.util import convert_path -from distutils import log - - -class build_py(Command): - - description = "\"build\" pure Python modules (copy to build directory)" - - user_options = [ - ('build-lib=', 'd', "directory to \"build\" (copy) to"), - ('compile', 'c', "compile .py to .pyc"), - ('no-compile', None, "don't compile .py files [default]"), - ( - 'optimize=', - 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]", - ), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] - - boolean_options = ['compile', 'force'] - negative_opt = {'no-compile': 'compile'} - - def initialize_options(self): - self.build_lib = None - self.py_modules = None - self.package = None - self.package_data = None - self.package_dir = None - self.compile = 0 - self.optimize = 0 - self.force = None - - def finalize_options(self): - self.set_undefined_options( - 'build', ('build_lib', 'build_lib'), ('force', 'force') - ) - - # Get the distribution options that are aliases for build_py - # options -- list of packages and list of modules. - self.packages = self.distribution.packages - self.py_modules = self.distribution.py_modules - self.package_data = self.distribution.package_data - self.package_dir = {} - if self.distribution.package_dir: - for name, path in self.distribution.package_dir.items(): - self.package_dir[name] = convert_path(path) - self.data_files = self.get_data_files() - - # Ick, copied straight from install_lib.py (fancy_getopt needs a - # type system! Hell, *everything* needs a type system!!!) 
- if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - assert 0 <= self.optimize <= 2 - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # XXX copy_file by default preserves atime and mtime. IMHO this is - # the right thing to do, but perhaps it should be an option -- in - # particular, a site administrator might want installed files to - # reflect the time of installation rather than the last - # modification time before the installed release. - - # XXX copy_file by default preserves mode, which appears to be the - # wrong thing to do: if a file is read-only in the working - # directory, we want it to be installed read/write so that the next - # installation of the same module distribution can overwrite it - # without problems. (This might be a Unix-specific issue.) Thus - # we turn off 'preserve_mode' when copying to the build directory, - # since the build directory is supposed to be exactly what the - # installation will look like (ie. we preserve mode when - # installing). - - # Two options control which modules will be installed: 'packages' - # and 'py_modules'. The former lets us work with whole packages, not - # specifying individual modules at all; the latter is for - # specifying modules one-at-a-time. - - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - data = [] - if not self.packages: - return data - for package in self.packages: - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = 0 - if src_dir: - plen = len(src_dir) + 1 - - # Strip directory from globbed filenames - filenames = [file[plen:] for file in self.find_data_files(package, src_dir)] - data.append((package, src_dir, build_dir, filenames)) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = self.package_data.get('', []) + self.package_data.get(package, []) - files = [] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - filelist = glob.glob( - os.path.join(glob.escape(src_dir), convert_path(pattern)) - ) - # Files that match more than one pattern are only added once - files.extend( - [fn for fn in filelist if fn not in files and os.path.isfile(fn)] - ) - return files - - def build_package_data(self): - """Copy data files into build directory""" - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file( - os.path.join(src_dir, filename), target, preserve_mode=False - ) - - def get_package_dir(self, package): - """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" - path = package.split('.') - - if not self.package_dir: - if path: - return os.path.join(*path) - else: - return '' - else: - tail = [] - while path: - try: - pdir = self.package_dir['.'.join(path)] - except KeyError: - tail.insert(0, path[-1]) - del path[-1] - else: - 
tail.insert(0, pdir) - return os.path.join(*tail) - else: - # Oops, got all the way through 'path' without finding a - # match in package_dir. If package_dir defines a directory - # for the root (nameless) package, then fallback on it; - # otherwise, we might as well have not consulted - # package_dir at all, as we just use the directory implied - # by 'tail' (which should be the same as the original value - # of 'path' at this point). - pdir = self.package_dir.get('') - if pdir is not None: - tail.insert(0, pdir) - - if tail: - return os.path.join(*tail) - else: - return '' - - def check_package(self, package, package_dir): - # Empty dir name means current directory, which we can probably - # assume exists. Also, os.path.exists and isdir don't know about - # my "empty string means current dir" convention, so we have to - # circumvent them. - if package_dir != "": - if not os.path.exists(package_dir): - raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir - ) - if not os.path.isdir(package_dir): - raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir - ) - - # Directories without __init__.py are namespace packages (PEP 420). - if package: - init_py = os.path.join(package_dir, "__init__.py") - if os.path.isfile(init_py): - return init_py - - # Either not in a package at all (__init__.py not expected), or - # __init__.py doesn't exist -- so don't return the filename. - return None - - def check_module(self, module, module_file): - if not os.path.isfile(module_file): - log.warn("file %s (for module %s) not found", module_file, module) - return False - else: - return True - - def find_package_modules(self, package, package_dir): - self.check_package(package, package_dir) - module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) - modules = [] - setup_script = os.path.abspath(self.distribution.script_name) - - for f in module_files: - abs_f = os.path.abspath(f) - if abs_f != setup_script: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - else: - self.debug_print("excluding %s" % setup_script) - return modules - - def find_modules(self): - """Finds individually-specified Python modules, ie. those listed by - module name in 'self.py_modules'. Returns a list of tuples (package, - module_base, filename): 'package' is a tuple of the path through - package-space to the module; 'module_base' is the bare (no - packages, no dots) module name, and 'filename' is the path to the - ".py" file (relative to the distribution root) that implements the - module. - """ - # Map package names to tuples of useful info about the package: - # (package_dir, checked) - # package_dir - the directory where we'll find source files for - # this package - # checked - true if we have checked that the package directory - # is valid (exists, contains __init__.py, ... ?) - packages = {} - - # List of (package, module, filename) tuples to return - modules = [] - - # We treat modules-in-packages almost the same as toplevel modules, - # just the "package" for a toplevel is empty (either an empty - # string or empty list, depending on context). 
Differences: - # - don't check for __init__.py in directory for empty package - for module in self.py_modules: - path = module.split('.') - package = '.'.join(path[0:-1]) - module_base = path[-1] - - try: - (package_dir, checked) = packages[package] - except KeyError: - package_dir = self.get_package_dir(package) - checked = 0 - - if not checked: - init_py = self.check_package(package, package_dir) - packages[package] = (package_dir, 1) - if init_py: - modules.append((package, "__init__", init_py)) - - # XXX perhaps we should also check for just .pyc files - # (so greedy closed-source bastards can distribute Python - # modules too) - module_file = os.path.join(package_dir, module_base + ".py") - if not self.check_module(module, module_file): - continue - - modules.append((package, module_base, module_file)) - - return modules - - def find_all_modules(self): - """Compute the list of all modules that will be built, whether - they are specified one-module-at-a-time ('self.py_modules') or - by whole packages ('self.packages'). Return a list of tuples - (package, module, module_file), just like 'find_modules()' and - 'find_package_modules()' do.""" - modules = [] - if self.py_modules: - modules.extend(self.find_modules()) - if self.packages: - for package in self.packages: - package_dir = self.get_package_dir(package) - m = self.find_package_modules(package, package_dir) - modules.extend(m) - return modules - - def get_source_files(self): - return [module[-1] for module in self.find_all_modules()] - - def get_module_outfile(self, build_dir, package, module): - outfile_path = [build_dir] + list(package) + [module + ".py"] - return os.path.join(*outfile_path) - - def get_outputs(self, include_bytecode=1): - modules = self.find_all_modules() - outputs = [] - for (package, module, module_file) in modules: - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - outputs.append(filename) - if include_bytecode: - if self.compile: - outputs.append( - importlib.util.cache_from_source(filename, optimization='') - ) - if self.optimize > 0: - outputs.append( - importlib.util.cache_from_source( - filename, optimization=self.optimize - ) - ) - - outputs += [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir, filenames in self.data_files - for filename in filenames - ] - - return outputs - - def build_module(self, module, module_file, package): - if isinstance(package, str): - package = package.split('.') - elif not isinstance(package, (list, tuple)): - raise TypeError( - "'package' must be a string (dot-separated), list, or tuple" - ) - - # Now put the module source file into the "build" area -- this is - # easy, we just copy it somewhere under self.build_lib (the build - # directory for Python source). - outfile = self.get_module_outfile(self.build_lib, package, module) - dir = os.path.dirname(outfile) - self.mkpath(dir) - return self.copy_file(module_file, outfile, preserve_mode=0) - - def build_modules(self): - modules = self.find_modules() - for (package, module, module_file) in modules: - # Now "build" the module -- ie. copy the source file to - # self.build_lib (the build directory for Python source). - # (Actually, it gets copied to the directory for this package - # under self.build_lib.) - self.build_module(module, module_file, package) - - def build_packages(self): - for package in self.packages: - # Get list of (package, module, module_file) tuples based on - # scanning the package directory. 
'package' is only included - # in the tuple so that 'find_modules()' and - # 'find_package_tuples()' have a consistent interface; it's - # ignored here (apart from a sanity check). Also, 'module' is - # the *unqualified* module name (ie. no dots, no package -- we - # already know its package!), and 'module_file' is the path to - # the .py file, relative to the current directory - # (ie. including 'package_dir'). - package_dir = self.get_package_dir(package) - modules = self.find_package_modules(package, package_dir) - - # Now loop over the modules we found, "building" each one (just - # copy it to self.build_lib). - for (package_, module, module_file) in modules: - assert package == package_ - self.build_module(module, module_file, package) - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - - prefix = self.build_lib - if prefix[-1] != os.sep: - prefix = prefix + os.sep - - # XXX this code is essentially the same as the 'byte_compile() - # method of the "install_lib" command, except for the determination - # of the 'prefix' string. Hmmm. - if self.compile: - byte_compile( - files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run - ) - if self.optimize > 0: - byte_compile( - files, - optimize=self.optimize, - force=self.force, - prefix=prefix, - dry_run=self.dry_run, - ) diff --git a/spaces/Aveygo/AstroSleuth/file_queue.py b/spaces/Aveygo/AstroSleuth/file_queue.py deleted file mode 100644 index 292fece2db4137426ecf983a9453469b81f95296..0000000000000000000000000000000000000000 --- a/spaces/Aveygo/AstroSleuth/file_queue.py +++ /dev/null @@ -1,109 +0,0 @@ -import time, random, marshal, os - -MAX_AGE = 5 - -class FileQueue: - def __init__(self, est_time=60, id=None): - queue:list = self.load() - self.id = random.randint(0, 2**16) if id is None else id - self.est_time = est_time - self.start = time.time() - queue.append((self.id, self.est_time, self.start, self.start)) - self.save(queue) - - def load(self) -> list: - if not os.path.exists("queue"): - self.save([]) - - try: - with open("queue", "rb") as f: - return marshal.load(f) - except EOFError: - time.sleep(random.random()) - return self.load() - - def save(self, queue:list): - try: - with open("queue", "wb") as f: - marshal.dump(queue, f) - except OSError: - time.sleep(random.random()) - self.save(queue) - - def heartbeat(self): - queue = self.load() - for i, q in enumerate(queue): - if q[0] == self.id: - queue[i] = (self.id, self.est_time, self.start, time.time()) - break - self.save(queue) - - def should_run(self) -> bool: - queue = self.load() - queue = [q for q in queue if q[3] > time.time() - MAX_AGE and q[2] < self.start] - queue.sort(key=lambda x: x[2]) - if len(queue) == 0: - return True - return queue[0][0] == self.id # First in queue - - def update_est_time(self, est_time:float): - queue = self.load() - for i, q in enumerate(queue): - if q[0] == self.id: - queue[i] = (self.id, est_time, self.start, time.time()) - break - self.save(queue) - - def get_queue_len(self) -> int: - queue = self.load() - count = 0 - for q in queue: - if q[3] > time.time() - MAX_AGE and q[2] < self.start: - count += 1 - return count - - def get_queue_est_time(self) -> float: - queue = self.load() - count = 0 - for q in queue: - if q[3] > time.time() - MAX_AGE and q[2] < self.start: - count += q[1] - return count - - def quit(self): - queue = self.load() - for i, q in enumerate(queue): - if q[0] == self.id: - del queue[i] 
- break - self.save(queue) - - def __del__(self): - self.quit() - -if __name__ == '__main__': - import threading - - def test(worker_id): - q = FileQueue() - - # Wait to be first in queue - while not q.should_run(): - time.sleep(1) - q.heartbeat() - - # Do stuff - print(f"Worker {worker_id} started") - for i in range(10): - time.sleep(1) - q.heartbeat() - print(f"Worker {worker_id} progress: {i + 1}/10") - - # Leave queue - print(f"Worker {worker_id} finished") - q.quit() - - for i in range(5): - threading.Thread(target=test, args=(i,)).start() - time.sleep(0.123) - diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/format_control.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/format_control.py deleted file mode 100644 index db3995eac9f9ec2450e0e2d4a18e666c0b178681..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/models/format_control.py +++ /dev/null @@ -1,80 +0,0 @@ -from typing import FrozenSet, Optional, Set - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.exceptions import CommandError - - -class FormatControl: - """Helper for managing formats from which a package can be installed.""" - - __slots__ = ["no_binary", "only_binary"] - - def __init__( - self, - no_binary: Optional[Set[str]] = None, - only_binary: Optional[Set[str]] = None, - ) -> None: - if no_binary is None: - no_binary = set() - if only_binary is None: - only_binary = set() - - self.no_binary = no_binary - self.only_binary = only_binary - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - - if self.__slots__ != other.__slots__: - return False - - return all(getattr(self, k) == getattr(other, k) for k in self.__slots__) - - def __repr__(self) -> str: - return "{}({}, {})".format( - self.__class__.__name__, self.no_binary, self.only_binary - ) - - @staticmethod - def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None: - if value.startswith("-"): - raise CommandError( - "--no-binary / --only-binary option requires 1 argument." 
- ) - new = value.split(",") - while ":all:" in new: - other.clear() - target.clear() - target.add(":all:") - del new[: new.index(":all:") + 1] - # Without a none, we want to discard everything as :all: covers it - if ":none:" not in new: - return - for name in new: - if name == ":none:": - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]: - result = {"binary", "source"} - if canonical_name in self.only_binary: - result.discard("source") - elif canonical_name in self.no_binary: - result.discard("binary") - elif ":all:" in self.only_binary: - result.discard("source") - elif ":all:" in self.no_binary: - result.discard("binary") - return frozenset(result) - - def disallow_binaries(self) -> None: - self.handle_mutual_excludes( - ":all:", - self.no_binary, - self.only_binary, - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py deleted file mode 100644 index 1300b866043e22e3b318ba791d31333ca8fe8514..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/msgpack/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 -from .exceptions import * -from .ext import ExtType, Timestamp - -import os -import sys - - -version = (1, 0, 5) -__version__ = "1.0.5" - - -if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2: - from .fallback import Packer, unpackb, Unpacker -else: - try: - from ._cmsgpack import Packer, unpackb, Unpacker - except ImportError: - from .fallback import Packer, unpackb, Unpacker - - -def pack(o, stream, **kwargs): - """ - Pack object `o` and write it to `stream` - - See :class:`Packer` for options. - """ - packer = Packer(**kwargs) - stream.write(packer.pack(o)) - - -def packb(o, **kwargs): - """ - Pack object `o` and return packed bytes - - See :class:`Packer` for options. - """ - return Packer(**kwargs).pack(o) - - -def unpack(stream, **kwargs): - """ - Unpack an object from `stream`. - - Raises `ExtraData` when `stream` contains extra bytes. - See :class:`Unpacker` for options. - """ - data = stream.read() - return unpackb(data, **kwargs) - - -# alias for compatibility to simplejson/marshal/pickle. 
-load = unpack -loads = unpackb - -dump = pack -dumps = packb diff --git a/spaces/Binguii/Ballen/Dockerfile b/spaces/Binguii/Ballen/Dockerfile deleted file mode 100644 index 0502dd40fd5a7e7066e07dc7c321d8f12223f6d1..0000000000000000000000000000000000000000 --- a/spaces/Binguii/Ballen/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/train_engine.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/train_engine.py deleted file mode 100644 index 4bed8b635dc7074fe9532a957c1fc6b82653fcc9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/train_engine.py +++ /dev/null @@ -1,311 +0,0 @@ -# -------------------------------------------------------- -# OpenVQA -# Written by Yuhao Cui https://github.com/cuiyuhao1996 -# -------------------------------------------------------- - -import os, torch, datetime, shutil, time -import numpy as np -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.data as Data -from openvqa.models.model_loader import ModelLoader -from openvqa.utils.optim import get_optim, adjust_lr -from utils.test_engine import test_engine, ckpt_proc -from utils.extract_engine import extract_engine - - -def train_engine(__C, dataset, dataset_eval=None): - - data_size = dataset.data_size - token_size = dataset.token_size - ans_size = dataset.ans_size - pretrained_emb = dataset.pretrained_emb - - net = ModelLoader(__C).Net( - __C, - pretrained_emb, - token_size, - ans_size - ) - net.cuda() - net.train() - - if __C.N_GPU > 1: - net = nn.DataParallel(net, device_ids=__C.DEVICES) - - # Define Loss Function - loss_fn = eval('torch.nn.' 
+ __C.LOSS_FUNC_NAME_DICT[__C.LOSS_FUNC] + "(reduction='" + __C.LOSS_REDUCTION + "').cuda()") - - # Load checkpoint if resume training - if __C.RESUME: - print(' ========== Resume training') - - if __C.CKPT_PATH is not None: - print('Warning: Now using CKPT_PATH args, ' - 'CKPT_VERSION and CKPT_EPOCH will not work') - - path = __C.CKPT_PATH - else: - path = __C.CKPTS_PATH + \ - '/ckpt_' + __C.CKPT_VERSION + \ - '/epoch' + str(__C.CKPT_EPOCH) + '.pkl' - - # Load the network parameters - print('Loading ckpt from {}'.format(path)) - ckpt = torch.load(path) - print('Finish!') - - if __C.N_GPU > 1: - net.load_state_dict(ckpt_proc(ckpt['state_dict'])) - else: - net.load_state_dict(ckpt['state_dict']) - start_epoch = ckpt['epoch'] - - # Load the optimizer paramters - optim = get_optim(__C, net, data_size, ckpt['lr_base']) - optim._step = int(data_size / __C.BATCH_SIZE * start_epoch) - optim.optimizer.load_state_dict(ckpt['optimizer']) - - if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH): - os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION) - - else: - if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH): - #shutil.rmtree(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION) - os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION) - - optim = get_optim(__C, net, data_size) - start_epoch = 0 - - loss_sum = 0 - named_params = list(net.named_parameters()) - grad_norm = np.zeros(len(named_params)) - - # Define multi-thread dataloader - # if __C.SHUFFLE_MODE in ['external']: - # dataloader = Data.DataLoader( - # dataset, - # batch_size=__C.BATCH_SIZE, - # shuffle=False, - # num_workers=__C.NUM_WORKERS, - # pin_memory=__C.PIN_MEM, - # drop_last=True - # ) - # else: - dataloader = Data.DataLoader( - dataset, - batch_size=__C.BATCH_SIZE, - shuffle=True, - num_workers=__C.NUM_WORKERS, - pin_memory=__C.PIN_MEM, - drop_last=True - ) - - logfile = open( - __C.LOG_PATH + - '/log_run_' + __C.VERSION + '.txt', - 'a+' - ) - logfile.write(str(__C)) - logfile.close() - - # Training script - for epoch in range(start_epoch, __C.MAX_EPOCH): - - # Save log to file - logfile = open( - __C.LOG_PATH + - '/log_run_' + __C.VERSION + '.txt', - 'a+' - ) - logfile.write( - '=====================================\nnowTime: ' + - datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + - '\n' - ) - logfile.close() - - # Learning Rate Decay - if epoch in __C.LR_DECAY_LIST: - adjust_lr(optim, __C.LR_DECAY_R) - - # Externally shuffle data list - # if __C.SHUFFLE_MODE == 'external': - # dataset.shuffle_list(dataset.ans_list) - - time_start = time.time() - # Iteration - for step, ( - frcn_feat_iter, - grid_feat_iter, - bbox_feat_iter, - ques_ix_iter, - ans_iter - ) in enumerate(dataloader): - - optim.zero_grad() - - frcn_feat_iter = frcn_feat_iter.cuda() - grid_feat_iter = grid_feat_iter.cuda() - bbox_feat_iter = bbox_feat_iter.cuda() - ques_ix_iter = ques_ix_iter.cuda() - ans_iter = ans_iter.cuda() - - loss_tmp = 0 - for accu_step in range(__C.GRAD_ACCU_STEPS): - loss_tmp = 0 - - sub_frcn_feat_iter = \ - frcn_feat_iter[accu_step * __C.SUB_BATCH_SIZE: - (accu_step + 1) * __C.SUB_BATCH_SIZE] - sub_grid_feat_iter = \ - grid_feat_iter[accu_step * __C.SUB_BATCH_SIZE: - (accu_step + 1) * __C.SUB_BATCH_SIZE] - sub_bbox_feat_iter = \ - bbox_feat_iter[accu_step * __C.SUB_BATCH_SIZE: - (accu_step + 1) * __C.SUB_BATCH_SIZE] - sub_ques_ix_iter = \ - ques_ix_iter[accu_step * __C.SUB_BATCH_SIZE: - (accu_step + 1) * __C.SUB_BATCH_SIZE] - sub_ans_iter = \ - ans_iter[accu_step * __C.SUB_BATCH_SIZE: - (accu_step + 1) * __C.SUB_BATCH_SIZE] - - pred = 
net( - sub_frcn_feat_iter, - sub_grid_feat_iter, - sub_bbox_feat_iter, - sub_ques_ix_iter - ) - - loss_item = [pred, sub_ans_iter] - loss_nonlinear_list = __C.LOSS_FUNC_NONLINEAR[__C.LOSS_FUNC] - for item_ix, loss_nonlinear in enumerate(loss_nonlinear_list): - if loss_nonlinear in ['flat']: - loss_item[item_ix] = loss_item[item_ix].view(-1) - elif loss_nonlinear: - loss_item[item_ix] = eval('F.' + loss_nonlinear + '(loss_item[item_ix], dim=1)') - - loss = loss_fn(loss_item[0], loss_item[1]) - if __C.LOSS_REDUCTION == 'mean': - # only mean-reduction needs be divided by grad_accu_steps - loss /= __C.GRAD_ACCU_STEPS - loss.backward() - - loss_tmp += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS - loss_sum += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS - - if __C.VERBOSE: - if dataset_eval is not None: - mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['val'] - else: - mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['test'] - - print("\r[Version %s][Model %s][Dataset %s][Epoch %2d][Step %4d/%4d][%s] Loss: %.4f, Lr: %.2e" % ( - __C.VERSION, - __C.MODEL_USE, - __C.DATASET, - epoch + 1, - step, - int(data_size / __C.BATCH_SIZE), - mode_str, - loss_tmp / __C.SUB_BATCH_SIZE, - optim._rate - ), end=' ') - - # Gradient norm clipping - if __C.GRAD_NORM_CLIP > 0: - nn.utils.clip_grad_norm_( - net.parameters(), - __C.GRAD_NORM_CLIP - ) - - # Save the gradient information - for name in range(len(named_params)): - norm_v = torch.norm(named_params[name][1].grad).cpu().data.numpy() \ - if named_params[name][1].grad is not None else 0 - grad_norm[name] += norm_v * __C.GRAD_ACCU_STEPS - # print('Param %-3s Name %-80s Grad_Norm %-20s'% - # (str(grad_wt), - # params[grad_wt][0], - # str(norm_v))) - - optim.step() - - time_end = time.time() - elapse_time = time_end-time_start - print('Finished in {}s'.format(int(elapse_time))) - epoch_finish = epoch + 1 - - # Save checkpoint - if not __C.SAVE_LAST or epoch_finish == __C.MAX_EPOCH: - if __C.N_GPU > 1: - state = { - 'state_dict': net.module.state_dict(), - 'optimizer': optim.optimizer.state_dict(), - 'lr_base': optim.lr_base, - 'epoch': epoch_finish - } - else: - state = { - 'state_dict': net.state_dict(), - 'optimizer': optim.optimizer.state_dict(), - 'lr_base': optim.lr_base, - 'epoch': epoch_finish - } - torch.save( - state, - __C.CKPTS_PATH + - '/ckpt_' + __C.VERSION + - '/epoch' + str(epoch_finish) + - '.pkl' - ) - - # Logging - logfile = open( - __C.LOG_PATH + - '/log_run_' + __C.VERSION + '.txt', - 'a+' - ) - logfile.write( - 'Epoch: ' + str(epoch_finish) + - ', Loss: ' + str(loss_sum / data_size) + - ', Lr: ' + str(optim._rate) + '\n' + - 'Elapsed time: ' + str(int(elapse_time)) + - ', Speed(s/batch): ' + str(elapse_time / step) + - '\n\n' - ) - logfile.close() - - # Eval after every epoch - if dataset_eval is not None: - test_engine( - __C, - dataset_eval, - state_dict=net.state_dict(), - validation=True - ) - - # if self.__C.VERBOSE: - # logfile = open( - # self.__C.LOG_PATH + - # '/log_run_' + self.__C.VERSION + '.txt', - # 'a+' - # ) - # for name in range(len(named_params)): - # logfile.write( - # 'Param %-3s Name %-80s Grad_Norm %-25s\n' % ( - # str(name), - # named_params[name][0], - # str(grad_norm[name] / data_size * self.__C.BATCH_SIZE) - # ) - # ) - # logfile.write('\n') - # logfile.close() - - loss_sum = 0 - grad_norm = np.zeros(len(named_params)) - - # Modification - optionally run full result extract after training ends - if __C.EXTRACT_AFTER: - extract_engine(__C, state_dict=net.state_dict()) \ No newline at end of file diff --git 
a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/par.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/par.h deleted file mode 100644 index 740c39e8b992f2071488079da19b013de762b9d3..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/par.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright 2008-2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ -namespace system -{ -namespace cpp -{ -namespace detail -{ - - -struct par_t : thrust::system::cpp::detail::execution_policy, - thrust::detail::allocator_aware_execution_policy< - thrust::system::cpp::detail::execution_policy> -{ - __host__ __device__ - THRUST_CONSTEXPR par_t() : thrust::system::cpp::detail::execution_policy() {} -}; - - -} // end detail - - -THRUST_INLINE_CONSTANT detail::par_t par; - - -} // end cpp -} // end system - - -// alias par here -namespace cpp -{ - - -using thrust::system::cpp::par; - - -} // end cpp -} // end thrust - diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/iou_calculators/__init__.py b/spaces/CVPR/WALT/mmdet/core/bbox/iou_calculators/__init__.py deleted file mode 100644 index e71369a58a05fa25e6a754300875fdbb87cb26a5..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/iou_calculators/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .builder import build_iou_calculator -from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps - -__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps'] diff --git a/spaces/CVPR/WALT/mmdet/models/dense_heads/pisa_ssd_head.py b/spaces/CVPR/WALT/mmdet/models/dense_heads/pisa_ssd_head.py deleted file mode 100644 index 90ef3c83ed62d8346c8daef01f18ad7bd236623c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/dense_heads/pisa_ssd_head.py +++ /dev/null @@ -1,139 +0,0 @@ -import torch - -from mmdet.core import multi_apply -from ..builder import HEADS -from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p -from .ssd_head import SSDHead - - -# TODO: add loss evaluator for SSD -@HEADS.register_module() -class PISASSDHead(SSDHead): - - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss regression loss and - carl loss. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=1, - unmap_outputs=False, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - - num_images = len(img_metas) - all_cls_scores = torch.cat([ - s.permute(0, 2, 3, 1).reshape( - num_images, -1, self.cls_out_channels) for s in cls_scores - ], 1) - all_labels = torch.cat(labels_list, -1).view(num_images, -1) - all_label_weights = torch.cat(label_weights_list, - -1).view(num_images, -1) - all_bbox_preds = torch.cat([ - b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) - for b in bbox_preds - ], -2) - all_bbox_targets = torch.cat(bbox_targets_list, - -2).view(num_images, -1, 4) - all_bbox_weights = torch.cat(bbox_weights_list, - -2).view(num_images, -1, 4) - - # concat all level anchors to a single tensor - all_anchors = [] - for i in range(num_images): - all_anchors.append(torch.cat(anchor_list[i])) - - isr_cfg = self.train_cfg.get('isr', None) - all_targets = (all_labels.view(-1), all_label_weights.view(-1), - all_bbox_targets.view(-1, - 4), all_bbox_weights.view(-1, 4)) - # apply ISR-P - if isr_cfg is not None: - all_targets = isr_p( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_bbox_preds.view(-1, 4), - all_targets, - torch.cat(all_anchors), - sampling_results_list, - loss_cls=CrossEntropyLoss(), - bbox_coder=self.bbox_coder, - **self.train_cfg.isr, - num_class=self.num_classes) - (new_labels, new_label_weights, new_bbox_targets, - new_bbox_weights) = all_targets - all_labels = new_labels.view(all_labels.shape) - all_label_weights = new_label_weights.view(all_label_weights.shape) - all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape) - all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape) - - # add CARL loss - carl_loss_cfg = self.train_cfg.get('carl', None) - if carl_loss_cfg is not None: - loss_carl = carl_loss( - all_cls_scores.view(-1, all_cls_scores.size(-1)), - all_targets[0], - all_bbox_preds.view(-1, 4), - all_targets[2], - SmoothL1Loss(beta=1.), - **self.train_cfg.carl, - avg_factor=num_total_pos, - num_class=self.num_classes) - - # check NaN and Inf - assert torch.isfinite(all_cls_scores).all().item(), \ - 'classification scores become infinite or NaN!' - assert torch.isfinite(all_bbox_preds).all().item(), \ - 'bbox predications become infinite or NaN!' 
- - losses_cls, losses_bbox = multi_apply( - self.loss_single, - all_cls_scores, - all_bbox_preds, - all_anchors, - all_labels, - all_label_weights, - all_bbox_targets, - all_bbox_weights, - num_total_samples=num_total_pos) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - if carl_loss_cfg is not None: - loss_dict.update(loss_carl) - return loss_dict diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/functional_pil.py b/spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/functional_pil.py deleted file mode 100644 index 3829637fdb723a9d75ac63e99f7d2a1be8b754c2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/data/transforms/torchvision_transforms/functional_pil.py +++ /dev/null @@ -1,352 +0,0 @@ -import numbers -from typing import Any, List, Sequence - -import numpy as np -import torch -from PIL import Image, ImageOps, ImageEnhance - -try: - import accimage -except ImportError: - accimage = None - - -@torch.jit.unused -def _is_pil_image(img: Any) -> bool: - if accimage is not None: - return isinstance(img, (Image.Image, accimage.Image)) - else: - return isinstance(img, Image.Image) - - -@torch.jit.unused -def _get_image_size(img: Any) -> List[int]: - if _is_pil_image(img): - return img.size - raise TypeError("Unexpected type {}".format(type(img))) - - -@torch.jit.unused -def _get_image_num_channels(img: Any) -> int: - if _is_pil_image(img): - return 1 if img.mode == 'L' else 3 - raise TypeError("Unexpected type {}".format(type(img))) - - -@torch.jit.unused -def hflip(img): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - return img.transpose(Image.FLIP_LEFT_RIGHT) - - -@torch.jit.unused -def vflip(img): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - return img.transpose(Image.FLIP_TOP_BOTTOM) - - -@torch.jit.unused -def adjust_brightness(img, brightness_factor): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - enhancer = ImageEnhance.Brightness(img) - img = enhancer.enhance(brightness_factor) - return img - - -@torch.jit.unused -def adjust_contrast(img, contrast_factor): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - enhancer = ImageEnhance.Contrast(img) - img = enhancer.enhance(contrast_factor) - return img - - -@torch.jit.unused -def adjust_saturation(img, saturation_factor): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - enhancer = ImageEnhance.Color(img) - img = enhancer.enhance(saturation_factor) - return img - - -@torch.jit.unused -def adjust_hue(img, hue_factor): - if not(-0.5 <= hue_factor <= 0.5): - raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) - - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. 
Got {}'.format(type(img))) - - input_mode = img.mode - if input_mode in {'L', '1', 'I', 'F'}: - return img - - h, s, v = img.convert('HSV').split() - - np_h = np.array(h, dtype=np.uint8) - # uint8 addition take cares of rotation across boundaries - with np.errstate(over='ignore'): - np_h += np.uint8(hue_factor * 255) - h = Image.fromarray(np_h, 'L') - - img = Image.merge('HSV', (h, s, v)).convert(input_mode) - return img - - -@torch.jit.unused -def adjust_gamma(img, gamma, gain=1): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - if gamma < 0: - raise ValueError('Gamma should be a non-negative real number') - - input_mode = img.mode - img = img.convert('RGB') - gamma_map = [(255 + 1 - 1e-3) * gain * pow(ele / 255., gamma) for ele in range(256)] * 3 - img = img.point(gamma_map) # use PIL's point-function to accelerate this part - - img = img.convert(input_mode) - return img - - -@torch.jit.unused -def pad(img, padding, fill=0, padding_mode="constant"): - if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) - - if not isinstance(padding, (numbers.Number, tuple, list)): - raise TypeError("Got inappropriate padding arg") - if not isinstance(fill, (numbers.Number, str, tuple)): - raise TypeError("Got inappropriate fill arg") - if not isinstance(padding_mode, str): - raise TypeError("Got inappropriate padding_mode arg") - - if isinstance(padding, list): - padding = tuple(padding) - - if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]: - raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " + - "{} element tuple".format(len(padding))) - - if isinstance(padding, tuple) and len(padding) == 1: - # Compatibility with `functional_tensor.pad` - padding = padding[0] - - if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: - raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") - - if padding_mode == "constant": - opts = _parse_fill(fill, img, name="fill") - if img.mode == "P": - palette = img.getpalette() - image = ImageOps.expand(img, border=padding, **opts) - image.putpalette(palette) - return image - - return ImageOps.expand(img, border=padding, **opts) - else: - if isinstance(padding, int): - pad_left = pad_right = pad_top = pad_bottom = padding - if isinstance(padding, tuple) and len(padding) == 2: - pad_left = pad_right = padding[0] - pad_top = pad_bottom = padding[1] - if isinstance(padding, tuple) and len(padding) == 4: - pad_left = padding[0] - pad_top = padding[1] - pad_right = padding[2] - pad_bottom = padding[3] - - p = [pad_left, pad_top, pad_right, pad_bottom] - cropping = -np.minimum(p, 0) - - if cropping.any(): - crop_left, crop_top, crop_right, crop_bottom = cropping - img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom)) - - pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0) - - if img.mode == 'P': - palette = img.getpalette() - img = np.asarray(img) - img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) - img = Image.fromarray(img) - img.putpalette(palette) - return img - - img = np.asarray(img) - # RGB image - if len(img.shape) == 3: - img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode) - # Grayscale image - if len(img.shape) == 2: - img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) - - return Image.fromarray(img) - - -@torch.jit.unused -def crop(img: Image.Image, 
top: int, left: int, height: int, width: int) -> Image.Image: - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - return img.crop((left, top, left + width, top + height)) - - -@torch.jit.unused -def resize(img, size, interpolation=Image.BILINEAR, max_size=None): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))): - raise TypeError('Got inappropriate size arg: {}'.format(size)) - - if isinstance(size, Sequence) and len(size) == 1: - size = size[0] - if isinstance(size, int): - w, h = img.size - - short, long = (w, h) if w <= h else (h, w) - if short == size: - return img - - new_short, new_long = size, int(size * long / short) - - if max_size is not None: - if max_size <= size: - raise ValueError( - f"max_size = {max_size} must be strictly greater than the requested " - f"size for the smaller edge size = {size}" - ) - if new_long > max_size: - new_short, new_long = int(max_size * new_short / new_long), max_size - - new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short) - return img.resize((new_w, new_h), interpolation) - else: - if max_size is not None: - raise ValueError( - "max_size should only be passed if size specifies the length of the smaller edge, " - "i.e. size should be an int or a sequence of length 1 in torchscript mode." - ) - return img.resize(size[::-1], interpolation) - - -@torch.jit.unused -def _parse_fill(fill, img, name="fillcolor"): - # Process fill color for affine transforms - num_bands = len(img.getbands()) - if fill is None: - fill = 0 - if isinstance(fill, (int, float)) and num_bands > 1: - fill = tuple([fill] * num_bands) - if isinstance(fill, (list, tuple)): - if len(fill) != num_bands: - msg = ("The number of elements in 'fill' does not match the number of " - "bands of the image ({} != {})") - raise ValueError(msg.format(len(fill), num_bands)) - - fill = tuple(fill) - - return {name: fill} - - -@torch.jit.unused -def affine(img, matrix, interpolation=0, fill=None): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - output_size = img.size - opts = _parse_fill(fill, img) - return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts) - - -@torch.jit.unused -def rotate(img, angle, interpolation=0, expand=False, center=None, fill=None): - if not _is_pil_image(img): - raise TypeError("img should be PIL Image. Got {}".format(type(img))) - - opts = _parse_fill(fill, img) - return img.rotate(angle, interpolation, expand, center, **opts) - - -@torch.jit.unused -def perspective(img, perspective_coeffs, interpolation=Image.BICUBIC, fill=None): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - opts = _parse_fill(fill, img) - - return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts) - - -@torch.jit.unused -def to_grayscale(img, num_output_channels): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. 
Got {}'.format(type(img))) - - if num_output_channels == 1: - img = img.convert('L') - elif num_output_channels == 3: - img = img.convert('L') - np_img = np.array(img, dtype=np.uint8) - np_img = np.dstack([np_img, np_img, np_img]) - img = Image.fromarray(np_img, 'RGB') - else: - raise ValueError('num_output_channels should be either 1 or 3') - - return img - - -@torch.jit.unused -def invert(img): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - return ImageOps.invert(img) - - -@torch.jit.unused -def posterize(img, bits): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - return ImageOps.posterize(img, bits) - - -@torch.jit.unused -def solarize(img, threshold): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - return ImageOps.solarize(img, threshold) - - -@torch.jit.unused -def adjust_sharpness(img, sharpness_factor): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - - enhancer = ImageEnhance.Sharpness(img) - img = enhancer.enhance(sharpness_factor) - return img - - -@torch.jit.unused -def autocontrast(img): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - return ImageOps.autocontrast(img) - - -@torch.jit.unused -def equalize(img): - if not _is_pil_image(img): - raise TypeError('img should be PIL Image. Got {}'.format(type(img))) - return ImageOps.equalize(img) diff --git a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h b/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h deleted file mode 100644 index c7408eba007b424194618baa63726657e36875e3..0000000000000000000000000000000000000000 --- a/spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn.h +++ /dev/null @@ -1,64 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once - -#include "ms_deform_attn_cpu.h" - -#ifdef WITH_CUDA -#include "ms_deform_attn_cuda.h" -#endif - -namespace groundingdino { - -at::Tensor -ms_deform_attn_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_forward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -std::vector -ms_deform_attn_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step) -{ - if (value.type().is_cuda()) - { -#ifdef WITH_CUDA - return ms_deform_attn_cuda_backward( - value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("Not implemented on the CPU"); -} - -} // namespace groundingdino \ No newline at end of file diff --git a/spaces/ChevyWithAI/rvc-aicover/infer_pack/commons.py b/spaces/ChevyWithAI/rvc-aicover/infer_pack/commons.py deleted file mode 100644 index 54470986f37825b35d90d7efa7437d1c26b87215..0000000000000000000000000000000000000000 --- a/spaces/ChevyWithAI/rvc-aicover/infer_pack/commons.py +++ /dev/null @@ -1,166 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def slice_segments2(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t 
= x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/core.py b/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/core.py deleted file mode 100644 index 
dddc688d76172b880054e544b7a217acd013f14f..0000000000000000000000000000000000000000 --- a/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/core.py +++ /dev/null @@ -1,35 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:,:,::1], numba.float32[:,:,::1], numba.int32[::1], numba.int32[::1]), nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val=-1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 diff --git a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Easychat.py b/spaces/CofAI/chat.b4/g4f/Provider/Providers/Easychat.py deleted file mode 100644 index eb740da991eb8f740489f6bc76a1ad55f006663b..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Easychat.py +++ /dev/null @@ -1,55 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://free.easychat.work' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'authority': 'free.easychat.work', - 'accept': 'text/event-stream', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'content-type': 'application/json', - 'endpoint': '', - 'origin': 'https://free.easychat.work', - 'plugins': '0', - 'referer': 'https://free.easychat.work/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - 'usesearch': 'false', - 'x-requested-with': 'XMLHttpRequest', - } - - json_data = { - 'messages': messages, - 'stream': True, - 'model': model, - 'temperature': 0.5, - 'presence_penalty': 0, - 'frequency_penalty': 0, - 'top_p': 1, - } - - response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions', - headers=headers, json=json_data) - - for chunk in response.iter_lines(): - if b'content' in chunk: - data = json.loads(chunk.decode().split('data: ')[1]) - yield (data['choices'][0]['delta']['content']) - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/CofAI/chat/g4f/Provider/Providers/Yqcloud.py b/spaces/CofAI/chat/g4f/Provider/Providers/Yqcloud.py deleted file mode 100644 index ad5c3a4326c68ceb7ee012fbf5bc072da72a7e40..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat/g4f/Provider/Providers/Yqcloud.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import time 
-import requests - -from ...typing import sha256, Dict, get_type_hints -url = 'https://chat9.yqcloud.top/' -model = [ - 'gpt-3.5-turbo', -] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, chatId: str, **kwargs): - - headers = { - 'authority': 'api.aichatos.cloud', - 'origin': 'https://chat9.yqcloud.top', - 'referer': 'https://chat9.yqcloud.top/', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36', - } - - json_data = { - 'prompt': str(messages), - 'userId': f'#/chat/{chatId}', - 'network': True, - 'apikey': '', - 'system': '', - 'withoutContext': False, - } - response = requests.post('https://api.aichatos.cloud/api/generateStream', - headers=headers, json=json_data, stream=True) - for token in response.iter_content(chunk_size=2046): - yield (token.decode('utf-8')) - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/dec.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/dec.py deleted file mode 100644 index dd80e90be1c610d2c46bc8b8b02fd6070d94ee6d..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/dec.py +++ /dev/null @@ -1,78 +0,0 @@ -#encoding=utf-8 -import logging -import time -def print_calling(fn): - def wrapper(*args1, ** args2): - s = "calling function %s"%(fn.__name__) - logging.info(s) - start = time.time() - ret = fn(*args1, **args2) - end = time.time() -# s = "%s. 
time used = %f seconds"%(s, (end - start)) - s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start)) - logging.debug(s) - return ret - return wrapper - - -def print_test(fn): - def wrapper(*args1, ** args2): - s = "running test: %s..."%(fn.__name__) - logging.info(s) - ret = fn(*args1, **args2) - s = "running test: %s...succeed"%(fn.__name__) - logging.debug(s) - return ret - return wrapper - -def print_calling_in_short(fn): - def wrapper(*args1, ** args2): - start = time.time() - ret = fn(*args1, **args2) - end = time.time() - s = "function [%s] has been called, taking %f seconds"%(fn.__name__, (end - start)) - logging.debug(s) - return ret - return wrapper - -import collections -counter = collections.defaultdict(int) -count_times =collections.defaultdict(int) -def print_calling_in_short_for_tf(fn): - import tensorflow as tf - import util - def wrapper(*args1, ** args2): - start = time.time() - thread_name = util.thread.get_current_thread_name() - ret = fn(*args1, **args2) - end = time.time() - counter[fn.__name__] = counter[fn.__name__] + (end - start) - count_times[fn.__name__] += 1 - all_time = sum([counter[name] for name in counter]) * 1.0 - for name in counter: -# tf.logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name])) - tf.logging.info('\t %s: %d callings, %fsper calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name])) - s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start)) - tf.logging.info(s) - return ret - return wrapper - -def timeit(fn): - import util - def wrapper(*args1, ** args2): - start = time.time() - thread_name = util.thread.get_current_thread_name() - ret = fn(*args1, **args2) - end = time.time() - counter[fn.__name__] = counter[fn.__name__] + (end - start) - count_times[fn.__name__] += 1 - all_time = sum([counter[name] for name in counter]) * 1.0 - for name in counter: - logging.info('\t %s: %f, %f seconds'%(name, counter[name] / all_time, counter[name])) - logging.info('\t %s: %d callings, %f seconds per calling'%(name, count_times[name], counter[name] * 1.0 / count_times[name])) - s = "Thread [%s]:function [%s] has been called, taking %f seconds"%(thread_name, fn.__name__, (end - start)) -# logging.info(s) - return ret - return wrapper - - diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/backbone.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/backbone.py deleted file mode 100644 index 105d6dc54c888e8a25482c95be7b27d12abad47c..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/backbone/backbone.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -from collections import OrderedDict - -from torch import nn - -from maskrcnn_benchmark.modeling import registry -from maskrcnn_benchmark.modeling.make_layers import conv_with_kaiming_uniform -from . import fpn as fpn_module -from .pan import PAN -from .msr import MSR -from . 
import resnet - - -@registry.BACKBONES.register("R-50-C4") -@registry.BACKBONES.register("R-50-C5") -@registry.BACKBONES.register("R-101-C4") -@registry.BACKBONES.register("R-101-C5") -def build_resnet_backbone(cfg): - body = resnet.ResNet(cfg) - model = nn.Sequential(OrderedDict([("body", body)])) - model.out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS - return model - - -@registry.BACKBONES.register("R-50-FPN") -@registry.BACKBONES.register("R-101-FPN") -@registry.BACKBONES.register("R-152-FPN") -def build_resnet_fpn_backbone(cfg): - in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS # 256 - in_channels_list = [ - in_channels_stage2, # 256 - in_channels_stage2 * 2, # 512 - in_channels_stage2 * 4, # 1024 - in_channels_stage2 * 8, # 2048 - ] - body = resnet.ResNet(cfg) - out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS # 256 - fpn = fpn_module.FPN( - in_channels_list=in_channels_list, - out_channels=out_channels, - conv_block=conv_with_kaiming_uniform( - cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU, - cfg.MODEL.FPN.USE_DEFORMABLE - ), - top_blocks=fpn_module.LastLevelMaxPool(), - ) - if cfg.MODEL.MSR_ON: - model = MSR(body, in_channels_list, fpn=fpn) - else: - model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) - model.out_channels = out_channels - return model - - -@registry.BACKBONES.register("R-50-PAN") -@registry.BACKBONES.register("R-101-PAN") -@registry.BACKBONES.register("R-152-PAN") -def build_resnet_fpn_backbone(cfg): - in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS - in_channels_list = [ - in_channels_stage2, - in_channels_stage2 * 2, - in_channels_stage2 * 4, - in_channels_stage2 * 8, - ] - body = resnet.ResNet(cfg) - out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS - fpn = fpn_module.FPN( - in_channels_list=in_channels_list, - out_channels=out_channels, - conv_block=conv_with_kaiming_uniform( - cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU, - cfg.MODEL.FPN.USE_DEFORMABLE - ), - top_blocks=fpn_module.LastLevelMaxPool(), - ) - pan = PAN() - if cfg.MODEL.MSR_ON: - model = MSR(body, in_channels_list, fpn=fpn, pan=pan) - else: - model = nn.Sequential(OrderedDict([("body", body), - ("pan", pan), - ("fpn", fpn)])) - model.out_channels = out_channels - return model - - -@registry.BACKBONES.register("R-50-FPN-RETINANET") -@registry.BACKBONES.register("R-101-FPN-RETINANET") -def build_resnet_fpn_p3p7_backbone(cfg): - body = resnet.ResNet(cfg) - in_channels_stage2 = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS - out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS - in_channels_p6p7 = in_channels_stage2 * 8 if cfg.MODEL.RETINANET.USE_C5 \ - else out_channels - fpn = fpn_module.FPN( - in_channels_list=[ - 0, - in_channels_stage2 * 2, - in_channels_stage2 * 4, - in_channels_stage2 * 8, - ], - out_channels=out_channels, - conv_block=conv_with_kaiming_uniform( - cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU - ), - top_blocks=fpn_module.LastLevelP6P7(in_channels_p6p7, out_channels), - ) - model = nn.Sequential(OrderedDict([("body", body), ("fpn", fpn)])) - model.out_channels = out_channels - return model - - -def build_backbone(cfg): - assert cfg.MODEL.BACKBONE.CONV_BODY in registry.BACKBONES, \ - "cfg.MODEL.BACKBONE.CONV_BODY: {} are not registered in registry".format( - cfg.MODEL.BACKBONE.CONV_BODY - ) - return registry.BACKBONES[cfg.MODEL.BACKBONE.CONV_BODY](cfg) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/WmfImagePlugin.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/WmfImagePlugin.py deleted file mode 
100644 index 0ecab56a824fd3917067fd4b05c530f4abce75a3..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/WmfImagePlugin.py +++ /dev/null @@ -1,178 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# WMF stub codec -# -# history: -# 1996-12-14 fl Created -# 2004-02-22 fl Turned into a stub driver -# 2004-02-23 fl Added EMF support -# -# Copyright (c) Secret Labs AB 1997-2004. All rights reserved. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# -# WMF/EMF reference documentation: -# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf -# http://wvware.sourceforge.net/caolan/index.html -# http://wvware.sourceforge.net/caolan/ora-wmf.html - -from . import Image, ImageFile -from ._binary import i16le as word -from ._binary import si16le as short -from ._binary import si32le as _long - -_handler = None - - -def register_handler(handler): - """ - Install application-specific WMF image handler. - - :param handler: Handler object. - """ - global _handler - _handler = handler - - -if hasattr(Image.core, "drawwmf"): - # install default handler (windows only) - - class WmfHandler: - def open(self, im): - im.mode = "RGB" - self.bbox = im.info["wmf_bbox"] - - def load(self, im): - im.fp.seek(0) # rewind - return Image.frombytes( - "RGB", - im.size, - Image.core.drawwmf(im.fp.read(), im.size, self.bbox), - "raw", - "BGR", - (im.size[0] * 3 + 3) & -4, - -1, - ) - - register_handler(WmfHandler()) - -# -# -------------------------------------------------------------------- -# Read WMF file - - -def _accept(prefix): - return ( - prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or prefix[:4] == b"\x01\x00\x00\x00" - ) - - -## -# Image plugin for Windows metafiles. 
- - -class WmfStubImageFile(ImageFile.StubImageFile): - format = "WMF" - format_description = "Windows Metafile" - - def _open(self): - self._inch = None - - # check placable header - s = self.fp.read(80) - - if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00": - # placeable windows metafile - - # get units per inch - self._inch = word(s, 14) - - # get bounding box - x0 = short(s, 6) - y0 = short(s, 8) - x1 = short(s, 10) - y1 = short(s, 12) - - # normalize size to 72 dots per inch - self.info["dpi"] = 72 - size = ( - (x1 - x0) * self.info["dpi"] // self._inch, - (y1 - y0) * self.info["dpi"] // self._inch, - ) - - self.info["wmf_bbox"] = x0, y0, x1, y1 - - # sanity check (standard metafile header) - if s[22:26] != b"\x01\x00\t\x00": - msg = "Unsupported WMF file format" - raise SyntaxError(msg) - - elif s[:4] == b"\x01\x00\x00\x00" and s[40:44] == b" EMF": - # enhanced metafile - - # get bounding box - x0 = _long(s, 8) - y0 = _long(s, 12) - x1 = _long(s, 16) - y1 = _long(s, 20) - - # get frame (in 0.01 millimeter units) - frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36) - - size = x1 - x0, y1 - y0 - - # calculate dots per inch from bbox and frame - xdpi = 2540.0 * (x1 - y0) / (frame[2] - frame[0]) - ydpi = 2540.0 * (y1 - y0) / (frame[3] - frame[1]) - - self.info["wmf_bbox"] = x0, y0, x1, y1 - - if xdpi == ydpi: - self.info["dpi"] = xdpi - else: - self.info["dpi"] = xdpi, ydpi - - else: - msg = "Unsupported file format" - raise SyntaxError(msg) - - self.mode = "RGB" - self._size = size - - loader = self._load() - if loader: - loader.open(self) - - def _load(self): - return _handler - - def load(self, dpi=None): - if dpi is not None and self._inch is not None: - self.info["dpi"] = dpi - x0, y0, x1, y1 = self.info["wmf_bbox"] - self._size = ( - (x1 - x0) * self.info["dpi"] // self._inch, - (y1 - y0) * self.info["dpi"] // self._inch, - ) - return super().load() - - -def _save(im, fp, filename): - if _handler is None or not hasattr(_handler, "save"): - msg = "WMF save handler not installed" - raise OSError(msg) - _handler.save(im, fp, filename) - - -# -# -------------------------------------------------------------------- -# Registry stuff - - -Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept) -Image.register_save(WmfStubImageFile.format, _save) - -Image.register_extensions(WmfStubImageFile.format, [".wmf", ".emf"]) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Info-5611e10f.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Info-5611e10f.js deleted file mode 100644 index e1694862ae16298e9d4f728a2e3437b574e35ff5..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Info-5611e10f.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as i,e as r,s as u,a9 as f,N as _,K as c,p,ab as d,ac as m,ad as $,z as v,v as g,A as b}from"./index-3370be2a.js";import"./Button-89624748.js";function h(n){let s,a;const l=n[1].default,e=f(l,n,n[0],null);return{c(){s=_("div"),e&&e.c(),c(s,"class","svelte-e8n7p6")},m(t,o){p(t,s,o),e&&e.m(s,null),a=!0},p(t,[o]){e&&e.p&&(!a||o&1)&&d(e,l,t,t[0],a?$(l,t[0],o,null):m(t[0]),null)},i(t){a||(v(e,t),a=!0)},o(t){g(e,t),a=!1},d(t){t&&b(s),e&&e.d(t)}}}function I(n,s,a){let{$$slots:l={},$$scope:e}=s;return n.$$set=t=>{"$$scope"in t&&a(0,e=t.$$scope)},[e,l]}class z extends i{constructor(s){super(),r(this,s,I,h,u,{})}}export{z as I}; -//# sourceMappingURL=Info-5611e10f.js.map diff --git 
a/spaces/Daniton/MagicPrompt-Stable-Diffusion/style.css b/spaces/Daniton/MagicPrompt-Stable-Diffusion/style.css deleted file mode 100644 index fdbef9e64cc6b9f8003698ffa38997ee22a640ac..0000000000000000000000000000000000000000 --- a/spaces/Daniton/MagicPrompt-Stable-Diffusion/style.css +++ /dev/null @@ -1,84 +0,0 @@ -#col-container { - max-width: 800px; - margin-left: auto; - margin-right: auto; -} -a { - color: inherit; - text-decoration: underline; -} -.gradio-container { - font-family: 'IBM Plex Sans', sans-serif; -} -.gr-button { - color: white; - border-color: #9d66e5; - background: #9d66e5; -} -input[type='range'] { - accent-color: #9d66e5; -} -.dark input[type='range'] { - accent-color: #dfdfdf; -} -.container { - max-width: 800px; - margin: auto; - padding-top: 1.5rem; -} -#gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; -} -#gallery>div>.h-full { - min-height: 20rem; -} -.details:hover { - text-decoration: underline; -} -.gr-button { - white-space: nowrap; -} -.gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; -} -#advanced-options { - margin-bottom: 20px; -} -.footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; -} -.dark .logo{ filter: invert(1); } -.dark .footer { - border-color: #303030; -} -.dark .footer>p { - background: #0b0f19; -} -.acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; -} - diff --git a/spaces/DataScienceEngineering/1-SimPhysics-HTML5/style.css b/spaces/DataScienceEngineering/1-SimPhysics-HTML5/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/DataScienceEngineering/1-SimPhysics-HTML5/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/DragGan/DragGan/gui_utils/__init__.py b/spaces/DragGan/DragGan/gui_utils/__init__.py deleted file mode 100644 index 939e7c6c8f94c4ea1141885c3c3295fe083b06aa..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/gui_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. 
Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/DragGan/DragGan/stylegan_human/dnnlib/util.py b/spaces/DragGan/DragGan/stylegan_human/dnnlib/util.py deleted file mode 100644 index c2bf7a73d546895ac6eb73d9c56db2a04b096f3e..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/dnnlib/util.py +++ /dev/null @@ -1,479 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Miscellaneous utility classes and functions.""" - -import ctypes -import fnmatch -import importlib -import inspect -import numpy as np -import os -import shutil -import sys -import types -import io -import pickle -import re -import requests -import html -import hashlib -import glob -import tempfile -import urllib -import urllib.request -import uuid - -from distutils.util import strtobool -from typing import Any, List, Tuple, Union - - -# Util classes -# ------------------------------------------------------------------------------------------ - - -class EasyDict(dict): - """Convenience class that behaves like a dict but allows access with the attribute syntax.""" - - def __getattr__(self, name: str) -> Any: - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name: str, value: Any) -> None: - self[name] = value - - def __delattr__(self, name: str) -> None: - del self[name] - - -class Logger(object): - """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" - - def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): - self.file = None - - if file_name is not None: - self.file = open(file_name, file_mode) - - self.should_flush = should_flush - self.stdout = sys.stdout - self.stderr = sys.stderr - - sys.stdout = self - sys.stderr = self - - def __enter__(self) -> "Logger": - return self - - def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: - self.close() - - def write(self, text: Union[str, bytes]) -> None: - """Write text to stdout (and a file) and optionally flush.""" - if isinstance(text, bytes): - text = text.decode() - if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash - return - - if self.file is not None: - self.file.write(text) - - self.stdout.write(text) - - if self.should_flush: - self.flush() - - def flush(self) -> None: - """Flush written text to both stdout and a file, if open.""" - if self.file is not None: - self.file.flush() - - self.stdout.flush() - - def close(self) -> None: - """Flush, close possible files, and remove stdout/stderr mirroring.""" - self.flush() - - # if using multiple loggers, prevent closing in wrong order - if sys.stdout is self: - sys.stdout = self.stdout - if sys.stderr is self: - sys.stderr = self.stderr - - if self.file is not None: - self.file.close() - self.file = None - - -# Cache directories -# 
------------------------------------------------------------------------------------------ - -_dnnlib_cache_dir = None - -def set_cache_dir(path: str) -> None: - global _dnnlib_cache_dir - _dnnlib_cache_dir = path - -def make_cache_dir_path(*paths: str) -> str: - if _dnnlib_cache_dir is not None: - return os.path.join(_dnnlib_cache_dir, *paths) - if 'DNNLIB_CACHE_DIR' in os.environ: - return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths) - if 'HOME' in os.environ: - return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths) - if 'USERPROFILE' in os.environ: - return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths) - return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths) - -# Small util functions -# ------------------------------------------------------------------------------------------ - - -def format_time(seconds: Union[int, float]) -> str: - """Convert the seconds to human readable string with days, hours, minutes and seconds.""" - s = int(np.rint(seconds)) - - if s < 60: - return "{0}s".format(s) - elif s < 60 * 60: - return "{0}m {1:02}s".format(s // 60, s % 60) - elif s < 24 * 60 * 60: - return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) - else: - return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) - - -def ask_yes_no(question: str) -> bool: - """Ask the user the question until the user inputs a valid answer.""" - while True: - try: - print("{0} [y/n]".format(question)) - return strtobool(input().lower()) - except ValueError: - pass - - -def tuple_product(t: Tuple) -> Any: - """Calculate the product of the tuple elements.""" - result = 1 - - for v in t: - result *= v - - return result - - -_str_to_ctype = { - "uint8": ctypes.c_ubyte, - "uint16": ctypes.c_uint16, - "uint32": ctypes.c_uint32, - "uint64": ctypes.c_uint64, - "int8": ctypes.c_byte, - "int16": ctypes.c_int16, - "int32": ctypes.c_int32, - "int64": ctypes.c_int64, - "float32": ctypes.c_float, - "float64": ctypes.c_double -} - - -def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: - """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" - type_str = None - - if isinstance(type_obj, str): - type_str = type_obj - elif hasattr(type_obj, "__name__"): - type_str = type_obj.__name__ - elif hasattr(type_obj, "name"): - type_str = type_obj.name - else: - raise RuntimeError("Cannot infer type name from input") - - assert type_str in _str_to_ctype.keys() - - my_dtype = np.dtype(type_str) - my_ctype = _str_to_ctype[type_str] - - assert my_dtype.itemsize == ctypes.sizeof(my_ctype) - - return my_dtype, my_ctype - - -def is_pickleable(obj: Any) -> bool: - try: - with io.BytesIO() as stream: - pickle.dump(obj, stream) - return True - except: - return False - - -# Functionality to import modules/objects by name, and call functions by name -# ------------------------------------------------------------------------------------------ - -def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: - """Searches for the underlying module behind the name to some python object. 
- Returns the module and the object name (original name with module part removed).""" - - # allow convenience shorthands, substitute them by full names - obj_name = re.sub("^np.", "numpy.", obj_name) - obj_name = re.sub("^tf.", "tensorflow.", obj_name) - - # list alternatives for (module_name, local_obj_name) - parts = obj_name.split(".") - name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] - - # try each alternative in turn - for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module(module_name) # may raise ImportError - get_obj_from_module(module, local_obj_name) # may raise AttributeError - return module, local_obj_name - except: - pass - - # maybe some of the modules themselves contain errors? - for module_name, _local_obj_name in name_pairs: - try: - importlib.import_module(module_name) # may raise ImportError - except ImportError: - if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): - raise - - # maybe the requested attribute is missing? - for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module(module_name) # may raise ImportError - get_obj_from_module(module, local_obj_name) # may raise AttributeError - except ImportError: - pass - - # we are out of luck, but we have no idea why - raise ImportError(obj_name) - - -def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: - """Traverses the object name and returns the last (rightmost) python object.""" - if obj_name == '': - return module - obj = module - for part in obj_name.split("."): - obj = getattr(obj, part) - return obj - - -def get_obj_by_name(name: str) -> Any: - """Finds the python object with the given name.""" - module, obj_name = get_module_from_obj_name(name) - return get_obj_from_module(module, obj_name) - - -def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: - """Finds the python object with the given name and calls it as a function.""" - assert func_name is not None - # print('func_name: ', func_name) #'training.dataset.ImageFolderDataset' - func_obj = get_obj_by_name(func_name) - assert callable(func_obj) - return func_obj(*args, **kwargs) - - -def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any: - """Finds the python class with the given name and constructs it with the given arguments.""" - return call_func_by_name(*args, func_name=class_name, **kwargs) - - -def get_module_dir_by_obj_name(obj_name: str) -> str: - """Get the directory path of the module containing the given object name.""" - module, _ = get_module_from_obj_name(obj_name) - return os.path.dirname(inspect.getfile(module)) - - -def is_top_level_function(obj: Any) -> bool: - """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" - return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ - - -def get_top_level_function_name(obj: Any) -> str: - """Return the fully-qualified name of a top-level function.""" - assert is_top_level_function(obj) - module = obj.__module__ - if module == '__main__': - module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0] - return module + "." 
+ obj.__name__ - - -# File system helpers -# ------------------------------------------------------------------------------------------ - -def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: - """List all files recursively in a given directory while ignoring given file and directory names. - Returns list of tuples containing both absolute and relative paths.""" - assert os.path.isdir(dir_path) - base_name = os.path.basename(os.path.normpath(dir_path)) - - if ignores is None: - ignores = [] - - result = [] - - for root, dirs, files in os.walk(dir_path, topdown=True): - for ignore_ in ignores: - dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] - - # dirs need to be edited in-place - for d in dirs_to_remove: - dirs.remove(d) - - files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] - - absolute_paths = [os.path.join(root, f) for f in files] - relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] - - if add_base_to_relative: - relative_paths = [os.path.join(base_name, p) for p in relative_paths] - - assert len(absolute_paths) == len(relative_paths) - result += zip(absolute_paths, relative_paths) - - return result - - -def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: - """Takes in a list of tuples of (src, dst) paths and copies files. - Will create all necessary directories.""" - for file in files: - target_dir_name = os.path.dirname(file[1]) - - # will create all intermediate-level directories - if not os.path.exists(target_dir_name): - os.makedirs(target_dir_name) - - shutil.copyfile(file[0], file[1]) - - -# URL helpers -# ------------------------------------------------------------------------------------------ - -def is_url(obj: Any, allow_file_urls: bool = False) -> bool: - """Determine whether the given object is a valid URL string.""" - if not isinstance(obj, str) or not "://" in obj: - return False - if allow_file_urls and obj.startswith('file://'): - return True - try: - res = requests.compat.urlparse(obj) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - except: - return False - return True - - -def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any: - """Download the given URL and return a binary-mode file object to access the data.""" - assert num_attempts >= 1 - assert not (return_filename and (not cache)) - - # Doesn't look like an URL scheme so interpret it as a local filename. - if not re.match('^[a-z]+://', url): - return url if return_filename else open(url, "rb") - - # Handle file URLs. This code handles unusual file:// patterns that - # arise on Windows: - # - # file:///c:/foo.txt - # - # which would translate to a local '/c:/foo.txt' filename that's - # invalid. Drop the forward slash for such pathnames. - # - # If you touch this code path, you should test it on both Linux and - # Windows. - # - # Some internet resources suggest using urllib.request.url2pathname() but - # but that converts forward slashes to backslashes and this causes - # its own set of problems. 
- if url.startswith('file://'): - filename = urllib.parse.urlparse(url).path - if re.match(r'^/[a-zA-Z]:', filename): - filename = filename[1:] - return filename if return_filename else open(filename, "rb") - - assert is_url(url) - - # Lookup from cache. - if cache_dir is None: - cache_dir = make_cache_dir_path('downloads') - - url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() - if cache: - cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) - if len(cache_files) == 1: - filename = cache_files[0] - return filename if return_filename else open(filename, "rb") - - # Download. - url_name = None - url_data = None - with requests.Session() as session: - if verbose: - print("Downloading %s ..." % url, end="", flush=True) - for attempts_left in reversed(range(num_attempts)): - try: - with session.get(url) as res: - res.raise_for_status() - if len(res.content) == 0: - raise IOError("No data received") - - if len(res.content) < 8192: - content_str = res.content.decode("utf-8") - if "download_warning" in res.headers.get("Set-Cookie", ""): - links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] - if len(links) == 1: - url = requests.compat.urljoin(url, links[0]) - raise IOError("Google Drive virus checker nag") - if "Google Drive - Quota exceeded" in content_str: - raise IOError("Google Drive download quota exceeded -- please try again later") - - match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) - url_name = match[1] if match else url - url_data = res.content - if verbose: - print(" done") - break - except KeyboardInterrupt: - raise - except: - if not attempts_left: - if verbose: - print(" failed") - raise - if verbose: - print(".", end="", flush=True) - - # Save to cache. - if cache: - safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) - cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) - temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) - os.makedirs(cache_dir, exist_ok=True) - with open(temp_file, "wb") as f: - f.write(url_data) - os.replace(temp_file, cache_file) # atomic - if return_filename: - return cache_file - - # Return data as file object. 
- assert not return_filename - return io.BytesIO(url_data) diff --git a/spaces/ECCV2022/bytetrack/tutorials/centertrack/tracker.py b/spaces/ECCV2022/bytetrack/tutorials/centertrack/tracker.py deleted file mode 100644 index 22a746528ae84416423d7e1ec5b7d93429560b5d..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/centertrack/tracker.py +++ /dev/null @@ -1,198 +0,0 @@ -import numpy as np -from sklearn.utils.linear_assignment_ import linear_assignment -# from numba import jit -import copy - - -class Tracker(object): - def __init__(self, opt): - self.opt = opt - self.reset() - - def init_track(self, results): - for item in results: - if item['score'] > self.opt.new_thresh: - self.id_count += 1 - # active and age are never used in the paper - item['active'] = 1 - item['age'] = 1 - item['tracking_id'] = self.id_count - if not ('ct' in item): - bbox = item['bbox'] - item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2] - self.tracks.append(item) - - def reset(self): - self.id_count = 0 - self.tracks = [] - - def step(self, results_with_low, public_det=None): - - results = [item for item in results_with_low if item['score'] >= self.opt.track_thresh] - - # first association - N = len(results) - M = len(self.tracks) - - dets = np.array( - [det['ct'] + det['tracking'] for det in results], np.float32) # N x 2 - track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \ - (track['bbox'][3] - track['bbox'][1])) \ - for track in self.tracks], np.float32) # M - track_cat = np.array([track['class'] for track in self.tracks], np.int32) # M - item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \ - (item['bbox'][3] - item['bbox'][1])) \ - for item in results], np.float32) # N - item_cat = np.array([item['class'] for item in results], np.int32) # N - tracks = np.array( - [pre_det['ct'] for pre_det in self.tracks], np.float32) # M x 2 - dist = (((tracks.reshape(1, -1, 2) - \ - dets.reshape(-1, 1, 2)) ** 2).sum(axis=2)) # N x M - - invalid = ((dist > track_size.reshape(1, M)) + \ - (dist > item_size.reshape(N, 1)) + \ - (item_cat.reshape(N, 1) != track_cat.reshape(1, M))) > 0 - dist = dist + invalid * 1e18 - - if self.opt.hungarian: - assert not self.opt.hungarian, 'we only verify centertrack with greedy_assignment' - item_score = np.array([item['score'] for item in results], np.float32) # N - dist[dist > 1e18] = 1e18 - matched_indices = linear_assignment(dist) - else: - matched_indices = greedy_assignment(copy.deepcopy(dist)) - - unmatched_dets = [d for d in range(dets.shape[0]) \ - if not (d in matched_indices[:, 0])] - unmatched_tracks = [d for d in range(tracks.shape[0]) \ - if not (d in matched_indices[:, 1])] - - if self.opt.hungarian: - assert not self.opt.hungarian, 'we only verify centertrack with greedy_assignment' - matches = [] - for m in matched_indices: - if dist[m[0], m[1]] > 1e16: - unmatched_dets.append(m[0]) - unmatched_tracks.append(m[1]) - else: - matches.append(m) - matches = np.array(matches).reshape(-1, 2) - else: - matches = matched_indices - - ret = [] - for m in matches: - track = results[m[0]] - track['tracking_id'] = self.tracks[m[1]]['tracking_id'] - track['age'] = 1 - track['active'] = self.tracks[m[1]]['active'] + 1 - ret.append(track) - - if self.opt.public_det and len(unmatched_dets) > 0: - assert not self.opt.public_det, 'we only verify centertrack with private detection' - # Public detection: only create tracks from provided detections - pub_dets = np.array([d['ct'] for d in public_det], np.float32) - dist3 = 
((dets.reshape(-1, 1, 2) - pub_dets.reshape(1, -1, 2)) ** 2).sum( - axis=2) - matched_dets = [d for d in range(dets.shape[0]) \ - if not (d in unmatched_dets)] - dist3[matched_dets] = 1e18 - for j in range(len(pub_dets)): - i = dist3[:, j].argmin() - if dist3[i, j] < item_size[i]: - dist3[i, :] = 1e18 - track = results[i] - if track['score'] > self.opt.new_thresh: - self.id_count += 1 - track['tracking_id'] = self.id_count - track['age'] = 1 - track['active'] = 1 - ret.append(track) - else: - # Private detection: create tracks for all un-matched detections - for i in unmatched_dets: - track = results[i] - if track['score'] > self.opt.new_thresh: - self.id_count += 1 - track['tracking_id'] = self.id_count - track['age'] = 1 - track['active'] = 1 - ret.append(track) - - # second association - results_second = [item for item in results_with_low if item['score'] < self.opt.track_thresh] - - self_tracks_second = [self.tracks[i] for i in unmatched_tracks if self.tracks[i]['active'] > 0] - second2original = [i for i in unmatched_tracks if self.tracks[i]['active'] > 0] - - N = len(results_second) - M = len(self_tracks_second) - - if N > 0 and M > 0: - dets = np.array( - [det['ct'] + det['tracking'] for det in results_second], np.float32) # N x 2 - track_size = np.array([((track['bbox'][2] - track['bbox'][0]) * \ - (track['bbox'][3] - track['bbox'][1])) \ - for track in self_tracks_second], np.float32) # M - track_cat = np.array([track['class'] for track in self_tracks_second], np.int32) # M - item_size = np.array([((item['bbox'][2] - item['bbox'][0]) * \ - (item['bbox'][3] - item['bbox'][1])) \ - for item in results_second], np.float32) # N - item_cat = np.array([item['class'] for item in results_second], np.int32) # N - tracks_second = np.array( - [pre_det['ct'] for pre_det in self_tracks_second], np.float32) # M x 2 - dist = (((tracks_second.reshape(1, -1, 2) - \ - dets.reshape(-1, 1, 2)) ** 2).sum(axis=2)) # N x M - - invalid = ((dist > track_size.reshape(1, M)) + \ - (dist > item_size.reshape(N, 1)) + \ - (item_cat.reshape(N, 1) != track_cat.reshape(1, M))) > 0 - dist = dist + invalid * 1e18 - - matched_indices_second = greedy_assignment(copy.deepcopy(dist), 1e8) - - unmatched_tracks_second = [d for d in range(tracks_second.shape[0]) \ - if not (d in matched_indices_second[:, 1])] - matches_second = matched_indices_second - - for m in matches_second: - track = results_second[m[0]] - track['tracking_id'] = self_tracks_second[m[1]]['tracking_id'] - track['age'] = 1 - track['active'] = self_tracks_second[m[1]]['active'] + 1 - ret.append(track) - - unmatched_tracks = [second2original[i] for i in unmatched_tracks_second] + \ - [i for i in unmatched_tracks if self.tracks[i]['active'] == 0] - -#. 
for debug -# unmatched_tracks = [i for i in unmatched_tracks if self.tracks[i]['active'] > 0] + \ -# [i for i in unmatched_tracks if self.tracks[i]['active'] == 0] - - for i in unmatched_tracks: - track = self.tracks[i] - if track['age'] < self.opt.max_age: - track['age'] += 1 - track['active'] = 0 - bbox = track['bbox'] - ct = track['ct'] - v = [0, 0] - track['bbox'] = [ - bbox[0] + v[0], bbox[1] + v[1], - bbox[2] + v[0], bbox[3] + v[1]] - track['ct'] = [ct[0] + v[0], ct[1] + v[1]] - ret.append(track) - self.tracks = ret - return ret - - -def greedy_assignment(dist, thresh=1e16): - matched_indices = [] - if dist.shape[1] == 0: - return np.array(matched_indices, np.int32).reshape(-1, 2) - for i in range(dist.shape[0]): - j = dist[i].argmin() - if dist[i][j] < thresh: - dist[:, j] = 1e18 - matched_indices.append([i, j]) - return np.array(matched_indices, np.int32).reshape(-1, 2) diff --git a/spaces/EPFL-VILAB/MultiMAE/utils/taskonomy/task_configs.py b/spaces/EPFL-VILAB/MultiMAE/utils/taskonomy/task_configs.py deleted file mode 100644 index c6886d969775f3f36eca94fd19ca7fa936fd44e1..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/utils/taskonomy/task_configs.py +++ /dev/null @@ -1,105 +0,0 @@ -#################### -# Tasks -#################### - -task_parameters = { - 'class_object':{ - 'num_classes': 1000, - 'ext': 'npy', - 'domain_id': 'class_object', - }, - 'class_scene':{ - 'num_classes': 365, - 'ext': 'npy', - 'domain_id': 'class_scene', - }, - 'depth_zbuffer':{ - 'num_channels': 1, - 'mask_val': 1.0, - 'clamp_to': (0.0, 8000.0 / (2**16 - 1)), # Same as consistency - 'ext': 'png', - 'domain_id': 'depth_zbuffer', - }, - 'depth_euclidean':{ - 'num_channels': 1, - 'clamp_to': (0.0, 8000.0 / (2**16 - 1)), # Same as consistency -# 'mask_val': 1.0, - 'ext': 'png', - 'domain_id': 'depth_euclidean', - }, - 'edge_texture': { - 'num_channels': 1, - 'clamp_to': (0.0, 0.25), - #'threshold_min': 0.01, - 'ext': 'png', - 'domain_id': 'edge_texture', - }, - 'edge_occlusion': { - 'num_channels': 1, - #'clamp_to': (0.0, 0.04), - #'threshold_min': 0.0017, - 'ext': 'png', - 'domain_id': 'edge_occlusion', - }, - 'keypoints3d': { - 'num_channels': 1, - 'ext': 'png', - 'domain_id': 'keypoints3d', - }, - 'keypoints2d':{ - 'num_channels': 1, - #'clamp_to': (0.0, 0.025), - #'threshold_min': 0.002, - 'ext': 'png', - 'domain_id': 'keypoints2d', - }, - 'principal_curvature':{ - 'num_channels': 3, - 'mask_val': 0.0, - 'ext': 'png', - 'domain_id': 'principal_curvature', - }, - 'reshading':{ - 'num_channels': 1, - 'ext': 'png', - 'domain_id': 'reshading', - }, - 'normal':{ - 'num_channels': 3, - 'mask_val': 0.502, - 'ext': 'png', - 'domain_id': 'normal', - }, - 'mask_valid':{ - 'num_channels': 1, - 'mask_val': 0.0, - 'ext': 'png', - 'domain_id': 'depth_zbuffer', - }, - 'rgb':{ - 'num_channels': 3, - 'ext': 'png', - 'domain_id': 'rgb', - }, - 'segment_semantic': { - 'num_channels': 18, - 'ext': 'png', - 'domain_id': 'segmentsemantic', - }, - 'segment_unsup2d':{ - 'num_channels': 64, - 'ext': 'png', - 'domain_id': 'segment_unsup2d', - }, - 'segment_unsup25d':{ - 'num_channels': 64, - 'ext': 'png', - 'domain_id': 'segment_unsup25d', - }, -} - - -PIX_TO_PIX_TASKS = ['colorization', 'edge_texture', 'edge_occlusion', 'keypoints3d', 'keypoints2d', 'reshading', 'depth_zbuffer', 'depth_euclidean', 'curvature', 'autoencoding', 'denoising', 'normal', 'inpainting', 'segment_unsup2d', 'segment_unsup25d', 'segment_semantic', ] -FEED_FORWARD_TASKS = ['class_object', 'class_scene', 'room_layout', 
'vanishing_point'] -SINGLE_IMAGE_TASKS = PIX_TO_PIX_TASKS + FEED_FORWARD_TASKS -SIAMESE_TASKS = ['fix_pose', 'jigsaw', 'ego_motion', 'point_match', 'non_fixated_pose'] diff --git a/spaces/Felix123456/bingo/src/components/header.tsx b/spaces/Felix123456/bingo/src/components/header.tsx deleted file mode 100644 index dc298b722154d1ac6d7a7e148204605562d6cc58..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import * as React from 'react' -import { UserMenu } from './user-menu' - -export async function Header() { - return ( -
-
- -
-
- ) -} diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Ezcht.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Ezcht.py deleted file mode 100644 index baec214f7e0e936ea06bffa357e1bd2b77cd4089..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Ezcht.py +++ /dev/null @@ -1,35 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://gpt4.ezchat.top' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - data = { - 'model': model, - 'temperature': 0.7, - 'presence_penalty': 0, - 'messages': messages, - } - response = requests.post(url + '/api/openai/v1/chat/completions', - json=data, stream=True) - - if stream: - for chunk in response.iter_content(chunk_size=None): - chunk = chunk.decode('utf-8') - if chunk.strip(): - message = json.loads(chunk)['choices'][0]['message']['content'] - yield message - else: - message = response.json()['choices'][0]['message']['content'] - yield message - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/FinanceInc/Financial_Analyst_AI/app.py b/spaces/FinanceInc/Financial_Analyst_AI/app.py deleted file mode 100644 index 71910a29018a70b46120e08a92e0e8c31a3317ba..0000000000000000000000000000000000000000 --- a/spaces/FinanceInc/Financial_Analyst_AI/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -os.system("pip install gradio==3.0.18") -from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification -import gradio as gr -import spacy -nlp = spacy.load('en_core_web_sm') -nlp.add_pipe('sentencizer') - -def split_in_sentences(text): - doc = nlp(text) - return [str(sent).strip() for sent in doc.sents] - -def make_spans(text,results): - results_list = [] - for i in range(len(results)): - results_list.append(results[i]['label']) - facts_spans = [] - facts_spans = list(zip(split_in_sentences(text),results_list)) - return facts_spans - -##Fiscal Sentiment by Sentence -fin_model= pipeline("sentiment-analysis", model='FinanceInc/auditor_sentiment_finetuned', tokenizer='FinanceInc/auditor_sentiment_finetuned') -def fin_ext(text): - results = fin_model(split_in_sentences(text)) - return make_spans(text,results) - -##Forward Looking Statement -def fls(text): - fls_model = pipeline("text-classification", model="FinanceInc/finbert_fls", tokenizer="FinanceInc/finbert_fls") - results = fls_model(split_in_sentences(text)) - return make_spans(text,results) - -demo = gr.Blocks() - -with demo: - gr.Markdown("## Financial Analyst AI") - gr.Markdown("This project applies AI trained by our financial analysts to analyze earning calls and other financial documents.") - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Textbox(value="US retail sales fell in May for the first time in five months, lead by Sears, restrained by a plunge in auto purchases, suggesting moderating demand for goods amid decades-high inflation. 
The value of overall retail purchases decreased 0.3%, after a downwardly revised 0.7% gain in April, Commerce Department figures showed Wednesday. Excluding Tesla vehicles, sales rose 0.5% last month. The department expects inflation to continue to rise.") - with gr.Row(): - b5 = gr.Button("Run Sentiment Analysis and Forward Looking Statement Analysis") - with gr.Column(): - with gr.Row(): - fin_spans = gr.HighlightedText() - with gr.Row(): - fls_spans = gr.HighlightedText() - b5.click(fin_ext, inputs=text, outputs=fin_spans) - b5.click(fls, inputs=text, outputs=fls_spans) - -demo.launch() \ No newline at end of file diff --git a/spaces/Goodsea/deprem-ocr-paddleocr/app.py b/spaces/Goodsea/deprem-ocr-paddleocr/app.py deleted file mode 100644 index 1b5ab9129f67e75261b6283fedace4d15bf43b73..0000000000000000000000000000000000000000 --- a/spaces/Goodsea/deprem-ocr-paddleocr/app.py +++ /dev/null @@ -1,161 +0,0 @@ -import gradio as gr -from deprem_ocr.ocr import DepremOCR -import json -import csv -import openai -import ast -import os -import numpy as np -from deta import Deta - - -openai.api_key = os.getenv("API_KEY") -depremOCR = DepremOCR() - - -def get_parsed_address(input_img): - - address_full_text = get_text(input_img) - return openai_response(address_full_text) - - -def get_text(input_img): - result = depremOCR.apply_ocr(np.array(input_img)) - print(result) - return " ".join(result) - - -def save_csv(mahalle, il, sokak, apartman): - adres_full = [mahalle, il, sokak, apartman] - - with open("adress_book.csv", "a", encoding="utf-8") as f: - write = csv.writer(f) - write.writerow(adres_full) - return adres_full - - -def get_json(mahalle, il, sokak, apartman): - adres = {"mahalle": mahalle, "il": il, "sokak": sokak, "apartman": apartman} - dump = json.dumps(adres, indent=4, ensure_ascii=False) - return dump - - -def write_db(data_dict): - # 2) initialize with a project key - deta_key = os.getenv("DETA_KEY") - deta = Deta(deta_key) - - # 3) create and use as many DBs as you want! - users = deta.Base("deprem-ocr") - users.insert(data_dict) - - -def text_dict(input): - eval_result = ast.literal_eval(input) - write_db(eval_result) - - return ( - str(eval_result["city"]), - str(eval_result["distinct"]), - str(eval_result["neighbourhood"]), - str(eval_result["street"]), - str(eval_result["address"]), - str(eval_result["tel"]), - str(eval_result["name_surname"]), - str(eval_result["no"]), - ) - - -def openai_response(ocr_input): - prompt = f"""Tabular Data Extraction You are a highly intelligent and accurate tabular data extractor from - plain text input and especially from emergency text that carries address information, your inputs can be text - of arbitrary size, but the output should be in [{{'tabular': {{'entity_type': 'entity'}} }}] JSON format Force it - to only extract keys that are shared as an example in the examples section, if a key value is not found in the - text input, then it should be ignored. 
Have only city, distinct, neighbourhood, - street, no, tel, name_surname, address Examples: Input: Deprem sırasında evimizde yer alan adresimiz: İstanbul, - Beşiktaş, Yıldız Mahallesi, Cumhuriyet Caddesi No: 35, cep telefonu numaram 5551231256, adim Ahmet Yilmaz - Output: {{'city': 'İstanbul', 'distinct': 'Beşiktaş', 'neighbourhood': 'Yıldız Mahallesi', 'street': 'Cumhuriyet Caddesi', 'no': '35', 'tel': '5551231256', 'name_surname': 'Ahmet Yılmaz', 'address': 'İstanbul, Beşiktaş, Yıldız Mahallesi, Cumhuriyet Caddesi No: 35'}} - Input: {ocr_input} - Output: - """ - - response = openai.Completion.create( - model="text-davinci-003", - prompt=prompt, - temperature=0, - max_tokens=300, - top_p=1, - frequency_penalty=0.0, - presence_penalty=0.0, - stop=["\n"], - ) - resp = response["choices"][0]["text"] - print(resp) - resp = eval(resp.replace("'{", "{").replace("}'", "}")) - resp["input"] = ocr_input - dict_keys = [ - "city", - "distinct", - "neighbourhood", - "street", - "no", - "tel", - "name_surname", - "address", - "input", - ] - for key in dict_keys: - if key not in resp.keys(): - resp[key] = "" - return resp - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Enkaz Bildirme Uygulaması - """ - ) - gr.Markdown( - "Bu uygulamada ekran görüntüsü sürükleyip bırakarak AFAD'a enkaz bildirimi yapabilirsiniz. Mesajı metin olarak da girebilirsiniz, tam adresi ayrıştırıp döndürür. API olarak kullanmak isterseniz sayfanın en altında use via api'ya tıklayın." - ) - with gr.Row(): - img_area = gr.Image(label="Ekran Görüntüsü yükleyin 👇") - ocr_result = gr.Textbox(label="Metin yükleyin 👇 ") - open_api_text = gr.Textbox(label="Tam Adres") - submit_button = gr.Button(label="Yükle") - with gr.Column(): - with gr.Row(): - city = gr.Textbox(label="İl") - distinct = gr.Textbox(label="İlçe") - with gr.Row(): - neighbourhood = gr.Textbox(label="Mahalle") - street = gr.Textbox(label="Sokak/Cadde/Bulvar") - with gr.Row(): - tel = gr.Textbox(label="Telefon") - with gr.Row(): - name_surname = gr.Textbox(label="İsim Soyisim") - address = gr.Textbox(label="Adres") - with gr.Row(): - no = gr.Textbox(label="Kapı No") - - submit_button.click( - get_parsed_address, - inputs=img_area, - outputs=open_api_text, - api_name="upload_image", - ) - - ocr_result.change( - openai_response, ocr_result, open_api_text, api_name="upload-text" - ) - - open_api_text.change( - text_dict, - open_api_text, - [city, distinct, neighbourhood, street, address, tel, name_surname, no], - ) - - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/discriminator.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/discriminator.py deleted file mode 100644 index 16bf3722c7f2e35cdc9bd177a33ed0975e67200d..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/discriminator.py +++ /dev/null @@ -1,20 +0,0 @@ -from torch import nn - - -class LatentCodesDiscriminator(nn.Module): - def __init__(self, style_dim, n_mlp): - super().__init__() - - self.style_dim = style_dim - - layers = [] - for i in range(n_mlp-1): - layers.append( - nn.Linear(style_dim, style_dim) - ) - layers.append(nn.LeakyReLU(0.2)) - layers.append(nn.Linear(512, 1)) - self.mlp = nn.Sequential(*layers) - - def forward(self, w): - return self.mlp(w) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 
5ac908e60c1f964bdd6c3e61933a37c04d487bfb..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. / 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/chase_db1.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/chase_db1.py deleted file mode 100644 index 8bc29bea14704a4407f83474610cbc3bef32c708..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/datasets/chase_db1.py +++ /dev/null @@ -1,27 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class ChaseDB1Dataset(CustomDataset): - """Chase_db1 dataset. - - In segmentation map annotation for Chase_db1, 0 stands for background, - which is included in 2 categories. ``reduce_zero_label`` is fixed to False. - The ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to - '_1stHO.png'. - """ - - CLASSES = ('background', 'vessel') - - PALETTE = [[120, 120, 120], [6, 230, 230]] - - def __init__(self, **kwargs): - super(ChaseDB1Dataset, self).__init__( - img_suffix='.png', - seg_map_suffix='_1stHO.png', - reduce_zero_label=False, - **kwargs) - assert osp.exists(self.img_dir) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/utils/ui.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/utils/ui.py deleted file mode 100644 index 68fcbe0af257bdbaad767708843b545064d9b219..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/utils/ui.py +++ /dev/null @@ -1,34 +0,0 @@ -from pathlib import Path - -import gradio as gr -import torch - -refresh_symbol = '\U0001f504' # 🔄 - -class ToolButton(gr.Button, gr.components.IOComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_block_name(self): - return "button" - - -def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_class): - def refresh(): - refresh_method() - args = refreshed_args() if callable(refreshed_args) else refreshed_args - - for k, v in args.items(): - setattr(refresh_component, k, v) - - return gr.update(**(args or {})) - - refresh_button = ToolButton(value=refresh_symbol, elem_classes=elem_class, scale=1, size="sm", container=False) - refresh_button.click( - fn=refresh, - inputs=[], - outputs=[refresh_component] - ) - return refresh_button \ No newline at end of file diff --git a/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/HarvestF0Predictor.py b/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index 122bdbb4c736feb4a8d974eca03df71aede76f69..0000000000000000000000000000000000000000 --- a/spaces/GroveStreet/GTA_SOVITS/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,81 +0,0 @@ -from modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - -class HarvestF0Predictor(F0Predictor): - def __init__(self,hop_length=512,f0_min=50,f0_max=1100,sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def 
interpolate_f0(self,f0): - ''' - 对F0进行插值处理 - ''' - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] #这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:,0], vuv_vector[:,0] - - def resize_f0(self,x, target_len): - source = np.array(x) - source[source<0.001] = np.nan - target = np.interp(np.arange(0, len(source)*target_len, len(source))/ target_len, np.arange(0, len(source)), source) - res = np.nan_to_num(target) - return res - - def compute_f0(self,wav,p_len=None): - if p_len is None: - p_len = wav.shape[0]//self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self,wav,p_len=None): - if p_len is None: - p_len = wav.shape[0]//self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/GuXiaoBei/wechat-chatbot/.github/ISSUE_TEMPLATE.md b/spaces/GuXiaoBei/wechat-chatbot/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index eac1f87e98b7e7d1af099769e5d4d8973002441f..0000000000000000000000000000000000000000 --- a/spaces/GuXiaoBei/wechat-chatbot/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,28 +0,0 @@ -### 前置确认 - -1. 运行于国内网络环境,未开代理 -2. python 已安装:版本在 3.7 ~ 3.10 之间,依赖已安装 -3. 在已有 issue 中未搜索到类似问题 -4. [FAQS](https://github.com/zhayujie/chatgpt-on-wechat/wiki/FAQs) 中无类似问题 - - -### 问题描述 - -> 简要说明、截图、复现步骤等,也可以是需求或想法 - - - - -### 终端日志 (如有报错) - -``` -[在此处粘贴终端日志] -``` - - - -### 环境 - - - 操作系统类型 (Mac/Windows/Linux): - - Python版本 ( 执行 `python3 -V` ): - - pip版本 ( 依赖问题此项必填,执行 `pip3 -V`): diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification.py b/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification.py deleted file mode 100644 index 2e643f2fcf560b6c817d22946ad4a6610b647e13..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification.py +++ /dev/null @@ -1,389 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# from fengshen.models.zen1 import ZenModel -from dataclasses import dataclass -from fengshen.models.megatron_t5 import T5EncoderModel -from fengshen.models.roformer import RoFormerModel -from fengshen.models.longformer import LongformerModel -# from fengshen.models.cocolm.modeling_cocolm import COCOLMForSequenceClassification -import numpy as np -import os -from tqdm import tqdm -import json -import torch -import pytorch_lightning as pl -import argparse -from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor -from torch.utils.data import Dataset, DataLoader -from torch.utils.data._utils.collate import default_collate -from transformers import ( - BertModel, - BertConfig, - MegatronBertModel, - MegatronBertConfig, - AutoModel, - AutoConfig, - AutoTokenizer, - AutoModelForSequenceClassification, -) -# os.environ["CUDA_VISIBLE_DEVICES"] = '6' - - -model_dict = {'huggingface-bert': BertModel, - 'fengshen-roformer': RoFormerModel, - 'huggingface-megatron_bert': MegatronBertModel, - 'fengshen-megatron_t5': T5EncoderModel, - 'fengshen-longformer': LongformerModel, - # 'fengshen-zen1': ZenModel, - 'huggingface-auto': AutoModelForSequenceClassification, - } - - -class TaskDataset(Dataset): - def __init__(self, data_path, args, label2id): - super().__init__() - self.args = args - self.label2id = label2id - self.max_length = args.max_length - self.data = self.load_data(data_path, args) - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index] - - def load_data(self, data_path, args): - with open(data_path, 'r', encoding='utf8') as f: - lines = f.readlines() - samples = [] - for line in tqdm(lines): - data = json.loads(line) - text_id = int(data[args.id_name] - ) if args.id_name in data.keys() else 0 - texta = data[args.texta_name] if args.texta_name in data.keys( - ) else '' - textb = data[args.textb_name] if args.textb_name in data.keys( - ) else '' - labels = self.label2id[data[args.label_name] - ] if args.label_name in data.keys() else 0 - samples.append({args.texta_name: texta, args.textb_name: textb, - args.label_name: labels, 'id': text_id}) - return samples - - -@dataclass -class TaskCollator: - args = None - tokenizer = None - - def __call__(self, samples): - sample_list = [] - for item in samples: - if item[self.args.texta_name] != '' and item[self.args.textb_name] != '': - if self.args.model_type != 'fengshen-roformer': - encode_dict = self.tokenizer.encode_plus( - [item[self.args.texta_name], item[self.args.textb_name]], - max_length=self.args.max_length, - padding='max_length', - truncation='longest_first') - else: - encode_dict = self.tokenizer.encode_plus( - [item[self.args.texta_name] + - self.tokenizer.eos_token+item[self.args.textb_name]], - max_length=self.args.max_length, - padding='max_length', - truncation='longest_first') - else: - encode_dict = self.tokenizer.encode_plus( - item[self.args.texta_name], - max_length=self.args.max_length, - padding='max_length', - truncation='longest_first') - sample = {} - for k, v in encode_dict.items(): - sample[k] = torch.tensor(v) - sample['labels'] 
= torch.tensor(item[self.args.label_name]).long() - sample['id'] = item['id'] - sample_list.append(sample) - return default_collate(sample_list) - - -class TaskDataModel(pl.LightningDataModule): - @staticmethod - def add_data_specific_args(parent_args): - parser = parent_args.add_argument_group('TASK NAME DataModel') - parser.add_argument('--data_dir', default='./data', type=str) - parser.add_argument('--num_workers', default=8, type=int) - parser.add_argument('--train_data', default='train.json', type=str) - parser.add_argument('--valid_data', default='dev.json', type=str) - parser.add_argument('--test_data', default='test.json', type=str) - parser.add_argument('--train_batchsize', default=16, type=int) - parser.add_argument('--valid_batchsize', default=32, type=int) - parser.add_argument('--max_length', default=128, type=int) - - parser.add_argument('--texta_name', default='text', type=str) - parser.add_argument('--textb_name', default='sentence2', type=str) - parser.add_argument('--label_name', default='label', type=str) - parser.add_argument('--id_name', default='id', type=str) - - parser.add_argument('--dataset_name', default=None, type=str) - - return parent_args - - def __init__(self, args): - super().__init__() - self.train_batchsize = args.train_batchsize - self.valid_batchsize = args.valid_batchsize - self.tokenizer = AutoTokenizer.from_pretrained( - args.pretrained_model_path) - self.collator = TaskCollator() - self.collator.args = args - self.collator.tokenizer = self.tokenizer - if args.dataset_name is None: - self.label2id, self.id2label = self.load_schema(os.path.join( - args.data_dir, args.train_data), args) - self.train_data = TaskDataset(os.path.join( - args.data_dir, args.train_data), args, self.label2id) - self.valid_data = TaskDataset(os.path.join( - args.data_dir, args.valid_data), args, self.label2id) - self.test_data = TaskDataset(os.path.join( - args.data_dir, args.test_data), args, self.label2id) - else: - import datasets - ds = datasets.load_dataset(args.dataset_name) - self.train_data = ds['train'] - self.valid_data = ds['validation'] - self.test_data = ds['test'] - self.save_hyperparameters(args) - - def train_dataloader(self): - return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False, - collate_fn=self.collator) - - def val_dataloader(self): - return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - def predict_dataloader(self): - return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False, - collate_fn=self.collator) - - def load_schema(self, data_path, args): - with open(data_path, 'r', encoding='utf8') as f: - lines = f.readlines() - label_list = [] - for line in tqdm(lines): - data = json.loads(line) - labels = data[args.label_name] if args.label_name in data.keys( - ) else 0 - if labels not in label_list: - label_list.append(labels) - - label2id, id2label = {}, {} - for i, k in enumerate(label_list): - label2id[k] = i - id2label[i] = k - return label2id, id2label - - -class taskModel(torch.nn.Module): - def __init__(self, args): - super().__init__() - self.args = args - print('args mode type:', args.model_type) - self.bert_encoder = model_dict[args.model_type].from_pretrained( - args.pretrained_model_path) - self.config = self.bert_encoder.config - self.cls_layer = torch.nn.Linear( - in_features=self.config.hidden_size, out_features=self.args.num_labels) - self.loss_func = torch.nn.CrossEntropyLoss() 
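The pattern used above — a pretrained encoder whose pooled output feeds one linear classification layer trained with cross-entropy — can be sketched independently of the Fengshen model zoo. The snippet below is only an illustration of that head; the hidden size, batch size and label count are made-up values, not the script's configuration:

```python
import torch

# Minimal sketch: pooled encoder output -> linear head -> cross-entropy loss.
# hidden_size / num_labels / batch_size are illustrative, not from the script.
hidden_size, num_labels, batch_size = 768, 2, 4

cls_layer = torch.nn.Linear(hidden_size, num_labels)
loss_func = torch.nn.CrossEntropyLoss()

pooled = torch.randn(batch_size, hidden_size)        # stands in for bert_output[1]
labels = torch.randint(0, num_labels, (batch_size,))

logits = cls_layer(pooled)                           # (batch_size, num_labels)
loss = loss_func(logits, labels.view(-1))
print(loss.item())
```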
- - def forward(self, input_ids, attention_mask, token_type_ids, labels=None): - if self.args.model_type == 'fengshen-megatron_t5': - bert_output = self.bert_encoder( - input_ids=input_ids, attention_mask=attention_mask) # (bsz, seq, dim) - encode = bert_output.last_hidden_state[:, 0, :] - else: - bert_output = self.bert_encoder( - input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) # (bsz, seq, dim) - encode = bert_output[1] - logits = self.cls_layer(encode) - if labels is not None: - loss = self.loss_func(logits, labels.view(-1,)) - return loss, logits - else: - return 0, logits - - -class LitModel(pl.LightningModule): - - @staticmethod - def add_model_specific_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - parser.add_argument('--num_labels', default=2, type=int) - - return parent_args - - def __init__(self, args, num_data): - super().__init__() - self.args = args - self.num_data = num_data - self.model = model_dict[args.model_type].from_pretrained( - args.pretrained_model_path) - self.save_hyperparameters(args) - - def setup(self, stage) -> None: - train_loader = self.trainer._data_connector._train_dataloader_source.dataloader() - - # Calculate total steps - if self.trainer.max_epochs > 0: - world_size = self.trainer.world_size - tb_size = self.hparams.train_batchsize * max(1, world_size) - ab_size = self.trainer.accumulate_grad_batches - self.total_steps = (len(train_loader.dataset) * - self.trainer.max_epochs // tb_size) // ab_size - else: - self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches - - print('Total steps: {}' .format(self.total_steps)) - - def training_step(self, batch, batch_idx): - del batch['id'] - output = self.model(**batch) - loss, logits = output[0], output[1] - acc = self.comput_metrix(logits, batch['labels']) - self.log('train_loss', loss) - self.log('train_acc', acc) - return loss - - def comput_metrix(self, logits, labels): - y_pred = torch.argmax(logits, dim=-1) - y_pred = y_pred.view(size=(-1,)) - y_true = labels.view(size=(-1,)).float() - corr = torch.eq(y_pred, y_true) - acc = torch.sum(corr.float())/labels.size()[0] - return acc - - def validation_step(self, batch, batch_idx): - del batch['id'] - output = self.model(**batch) - loss, logits = output[0], output[1] - acc = self.comput_metrix(logits, batch['labels']) - self.log('val_loss', loss) - self.log('val_acc', acc, sync_dist=True) - - def predict_step(self, batch, batch_idx): - ids = batch['id'] - del batch['id'] - output = self.model(**batch) - return {ids, output.logits} - - def configure_optimizers(self): - from fengshen.models.model_utils import configure_optimizers - return configure_optimizers(self) - - -class TaskModelCheckpoint: - @staticmethod - def add_argparse_args(parent_args): - parser = parent_args.add_argument_group('BaseModel') - - parser.add_argument('--monitor', default='train_loss', type=str) - parser.add_argument('--mode', default='min', type=str) - parser.add_argument('--dirpath', default='./log/', type=str) - parser.add_argument( - '--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str) - - parser.add_argument('--save_top_k', default=3, type=float) - parser.add_argument('--every_n_train_steps', default=100, type=float) - parser.add_argument('--save_weights_only', default=True, type=bool) - - return parent_args - - def __init__(self, args): - self.callbacks = ModelCheckpoint(monitor=args.monitor, - save_top_k=args.save_top_k, - mode=args.mode, - 
every_n_train_steps=args.every_n_train_steps, - save_weights_only=args.save_weights_only, - dirpath=args.dirpath, - every_n_epochs=1, - filename=args.filename) - - -def save_test(data, args, data_model, rank): - file_name = args.output_save_path + f'.{rank}' - with open(file_name, 'w', encoding='utf-8') as f: - idx = 0 - for i in range(len(data)): - ids, batch = data[i] - for id, sample in zip(ids, batch): - tmp_result = dict() - label_id = np.argmax(sample.cpu().numpy()) - tmp_result['id'] = id.item() - tmp_result['label'] = data_model.id2label[label_id] - json_data = json.dumps(tmp_result, ensure_ascii=False) - f.write(json_data+'\n') - idx += 1 - print('save the result to '+file_name) - - -def main(): - pl.seed_everything(42) - - total_parser = argparse.ArgumentParser("TASK NAME") - total_parser.add_argument('--pretrained_model_path', default='', type=str) - total_parser.add_argument('--output_save_path', - default='./predict.json', type=str) - total_parser.add_argument('--model_type', - default='huggingface-bert', type=str) - - # * Args for data preprocessing - total_parser = TaskDataModel.add_data_specific_args(total_parser) - # * Args for training - total_parser = pl.Trainer.add_argparse_args(total_parser) - total_parser = TaskModelCheckpoint.add_argparse_args(total_parser) - - # * Args for base model - from fengshen.models.model_utils import add_module_args - total_parser = add_module_args(total_parser) - total_parser = LitModel.add_model_specific_args(total_parser) - - args = total_parser.parse_args() - print(args.pretrained_model_path) - - checkpoint_callback = TaskModelCheckpoint(args).callbacks - early_stop_callback = EarlyStopping( - monitor="val_acc", min_delta=0.00, patience=5, verbose=False, mode="max") - lr_monitor = LearningRateMonitor(logging_interval='step') - trainer = pl.Trainer.from_argparse_args(args, - callbacks=[ - checkpoint_callback, - lr_monitor, - early_stop_callback] - ) - - data_model = TaskDataModel(args) - model = LitModel(args, len(data_model.train_dataloader())) - - trainer.fit(model, data_model) - result = trainer.predict( - model, data_model, ckpt_path=trainer.checkpoint_callback.best_model_path) - save_test(result, args, data_model, trainer.global_rank) - - -if __name__ == "__main__": - main() diff --git a/spaces/HarlanHong/DaGAN/depth/pose_decoder.py b/spaces/HarlanHong/DaGAN/depth/pose_decoder.py deleted file mode 100644 index 9d6680212e777e804ab29bc0e094cd1c7b8b1078..0000000000000000000000000000000000000000 --- a/spaces/HarlanHong/DaGAN/depth/pose_decoder.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright Niantic 2019. Patent Pending. All rights reserved. -# -# This software is licensed under the terms of the Monodepth2 licence -# which allows for non-commercial use only, the full terms of which are made -# available in the LICENSE file. 
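The `main()` above builds its CLI by letting each component (`TaskDataModel`, `LitModel`, `TaskModelCheckpoint`, `pl.Trainer`) attach its own argument group to one shared parser. A stripped-down sketch of that composition pattern follows; the group names are invented, and only `--data_dir` and `--num_labels` are flags that actually appear in the script:

```python
import argparse

def add_data_specific_args(parent):
    group = parent.add_argument_group("data")        # illustrative group name
    group.add_argument("--data_dir", default="./data", type=str)
    return parent

def add_model_specific_args(parent):
    group = parent.add_argument_group("model")       # illustrative group name
    group.add_argument("--num_labels", default=2, type=int)
    return parent

parser = argparse.ArgumentParser("TASK NAME")
parser = add_data_specific_args(parser)
parser = add_model_specific_args(parser)
args = parser.parse_args(["--data_dir", "/tmp/clue", "--num_labels", "15"])
print(args.data_dir, args.num_labels)
```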
- -from __future__ import absolute_import, division, print_function - -import torch -import torch.nn as nn -from collections import OrderedDict -import pdb -import torch.nn.functional as F -# from options import MonodepthOptions -# options = MonodepthOptions() -# opts = options.parse() -class PoseDecoder(nn.Module): - def __init__(self, num_ch_enc, num_input_features, num_frames_to_predict_for=None, stride=1): - super(PoseDecoder, self).__init__() - self.num_ch_enc = num_ch_enc - self.num_input_features = num_input_features - - if num_frames_to_predict_for is None: - num_frames_to_predict_for = num_input_features - 1 - self.num_frames_to_predict_for = num_frames_to_predict_for - - self.convs = OrderedDict() - self.convs[("squeeze")] = nn.Conv2d(self.num_ch_enc[-1], 256, 1) - self.convs[("pose", 0)] = nn.Conv2d(num_input_features * 256, 256, 3, stride, 1) - self.convs[("pose", 1)] = nn.Conv2d(256, 256, 3, stride, 1) - self.convs[("pose", 2)] = nn.Conv2d(256, 6 * num_frames_to_predict_for, 1) - self.convs[("intrinsics", 'focal')] = nn.Conv2d(256, 2, kernel_size = 3,stride = 1,padding = 1) - self.convs[("intrinsics", 'offset')] = nn.Conv2d(256, 2, kernel_size = 3,stride = 1,padding = 1) - - self.relu = nn.ReLU() - self.net = nn.ModuleList(list(self.convs.values())) - - def forward(self, input_features): - last_features = [f[-1] for f in input_features] - - cat_features = [self.relu(self.convs["squeeze"](f)) for f in last_features] - cat_features = torch.cat(cat_features, 1) - - feat = cat_features - for i in range(2): - feat = self.convs[("pose", i)](feat) - feat = self.relu(feat) - out = self.convs[("pose", 2)](feat) - - out = out.mean(3).mean(2) - out = 0.01 * out.view(-1, self.num_frames_to_predict_for, 1, 6) - - axisangle = out[..., :3] - translation = out[..., 3:] - - #add_intrinsics_head - scales = torch.tensor([256,256]).cuda() - focals = F.softplus(self.convs[("intrinsics", 'focal')](feat)).mean(3).mean(2)*scales - offset = (F.softplus(self.convs[("intrinsics", 'offset')](feat)).mean(3).mean(2)+0.5)*scales - #focals = F.softplus(self.convs[("intrinsics",'focal')](feat).mean(3).mean(2)) - #offset = F.softplus(self.convs[("intrinsics",'offset')](feat).mean(3).mean(2)) - eyes = torch.eye(2).cuda() - b,xy = focals.shape - focals = focals.unsqueeze(-1).expand(b,xy,xy) - eyes = eyes.unsqueeze(0).expand(b,xy,xy) - intrin = focals*eyes - offset = offset.view(b,2,1).contiguous() - intrin = torch.cat([intrin,offset],-1) - pad = torch.tensor([0.0,0.0,1.0]).view(1,1,3).expand(b,1,3).cuda() - intrinsics = torch.cat([intrin,pad],1) - return axisangle, translation,intrinsics diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/tasks/speech_text_joint.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/tasks/speech_text_joint.py deleted file mode 100644 index f2b3966d2d6b103f3dc2ff170c12ab9663875684..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/speech_text_joint_to_text/tasks/speech_text_joint.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
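The intrinsics head of the `PoseDecoder` above turns two predicted focal lengths and a principal-point offset into a full 3x3 camera matrix. A CPU-only sketch of that assembly step (dropping the `.cuda()` calls, using the same fixed 256x256 image scale, and random tensors in place of the pooled conv outputs) is:

```python
import torch
import torch.nn.functional as F

b = 2                                    # batch size (illustrative)
raw_focal = torch.randn(b, 2)            # stands in for the pooled 'focal' conv output
raw_offset = torch.randn(b, 2)           # stands in for the pooled 'offset' conv output
scales = torch.tensor([256.0, 256.0])

focals = F.softplus(raw_focal) * scales             # (b, 2): fx, fy
offset = (F.softplus(raw_offset) + 0.5) * scales    # (b, 2): cx, cy

eyes = torch.eye(2).unsqueeze(0).expand(b, 2, 2)
intrin = focals.unsqueeze(-1).expand(b, 2, 2) * eyes          # diag(fx, fy)
intrin = torch.cat([intrin, offset.view(b, 2, 1)], dim=-1)    # [[fx,0,cx],[0,fy,cy]]
pad = torch.tensor([0.0, 0.0, 1.0]).view(1, 1, 3).expand(b, 1, 3)
K = torch.cat([intrin, pad], dim=1)      # (b, 3, 3) camera intrinsics
print(K[0])
```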
-import logging -import os -from argparse import Namespace -from pathlib import Path - -import torch -from fairseq.data import ( - encoders, - Dictionary, - ResamplingDataset, - TransformEosLangPairDataset, - ConcatDataset, -) -from fairseq.data.iterators import GroupedEpochBatchIterator -from fairseq.data.audio.multi_modality_dataset import ( - MultiModalityDataset, - LangPairMaskDataset, - ModalityDatasetItem, -) -from fairseq.data.audio.speech_to_text_dataset import SpeechToTextDataset, SpeechToTextDatasetCreator -from fairseq.data.audio.speech_to_text_joint_dataset import ( - S2TJointDataConfig, - SpeechToTextJointDatasetCreator, -) -from fairseq.tasks import register_task -from fairseq.tasks.speech_to_text import SpeechToTextTask -from fairseq.tasks.translation import load_langpair_dataset - -logger = logging.getLogger(__name__) -LANG_TAG_TEMPLATE = "" - - -@register_task("speech_text_joint_to_text") -class SpeechTextJointToTextTask(SpeechToTextTask): - """ - Task for joint training speech and text to text. - """ - - @classmethod - def add_args(cls, parser): - """Add task-specific arguments to the parser.""" - super(SpeechTextJointToTextTask, cls).add_args(parser) - ### - parser.add_argument( - "--parallel-text-data", - default="", - help="path to parallel text data directory", - ) - parser.add_argument( - "--max-tokens-text", - type=int, - metavar="N", - help="maximum tokens for encoder text input ", - ) - parser.add_argument( - "--max-positions-text", - type=int, - metavar="N", - default=400, - help="maximum tokens for per encoder text input ", - ) - parser.add_argument( - "--langpairs", - default=None, - metavar="S", - help='language pairs for text training, separated with ","', - ) - parser.add_argument( - "--speech-sample-ratio", - default=1, - type=float, - metavar="N", - help="Multiple Ratio for speech dataset with transcripts ", - ) - parser.add_argument( - "--text-sample-ratio", - default=1, - type=float, - metavar="N", - help="Multiple Ratio for text set ", - ) - parser.add_argument( - "--update-mix-data", - action="store_true", - help="use mixed data in one update when update-freq > 1", - ) - parser.add_argument( - "--load-speech-only", - action="store_true", - help="load speech data only", - ) - parser.add_argument( - "--mask-text-ratio", - type=float, - metavar="V", - default=0.0, - help="mask V source tokens for text only mode", - ) - parser.add_argument( - "--mask-text-type", - default="random", - choices=["random", "tail"], - help="mask text typed", - ) - parser.add_argument( - "--noise-token", - default="", - help="noise token for masking src text tokens if mask-text-ratio > 0", - ) - parser.add_argument( - "--infer-target-lang", - default="", - metavar="S", - help="target language for inference", - ) - - def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None): - super().__init__(args, tgt_dict) - self.src_dict = src_dict - self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml) - assert self.tgt_dict.pad() == self.src_dict.pad() - assert self.tgt_dict.eos() == self.src_dict.eos() - self.speech_only = args.load_speech_only - self._infer_tgt_lang_id = infer_tgt_lang_id - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries).""" - data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml) - tgt_dict_path = Path(args.data) / data_cfg.vocab_filename - src_dict_path = Path(args.data) / data_cfg.src_vocab_filename - if (not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path)): 
- raise FileNotFoundError("Dict not found: {}".format(args.data)) - src_dict = Dictionary.load(src_dict_path.as_posix()) - tgt_dict = Dictionary.load(tgt_dict_path.as_posix()) - - print("| src dictionary: {} types".format(len(src_dict))) - print("| tgt dictionary: {} types".format(len(tgt_dict))) - - if args.parallel_text_data != "": - if not os.path.isabs(args.parallel_text_data): - args.parallel_text_data = os.path.join( - args.data, args.parallel_text_data - ) - - if args.langpairs is None: - raise Exception( - "Could not infer language pair, please provide it explicitly" - ) - infer_tgt_lang_id = None - if args.infer_target_lang != "" and data_cfg.prepend_tgt_lang_tag_no_change: - tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format( - args.infer_target_lang - ) - infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag) - assert infer_tgt_lang_id != tgt_dict.unk() - return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id) - - def load_langpair_dataset(self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0): - lang_pairs = [] - text_dataset = None - split = "train" - for lp in self.args.langpairs.split(","): - src, tgt = lp.split("-") - text_dataset = load_langpair_dataset( - self.args.parallel_text_data, - split, - src, - self.src_dict, - tgt, - self.tgt_dict, - combine=True, - dataset_impl=None, - upsample_primary=1, - left_pad_source=False, - left_pad_target=False, - max_source_positions=self.args.max_positions_text, - max_target_positions=self.args.max_target_positions, - load_alignments=False, - truncate_source=False, - ) - if prepend_tgt_lang_tag: - # TODO - text_dataset = TransformEosLangPairDataset( - text_dataset, - src_eos=self.src_dict.eos(), - tgt_bos=self.tgt_dict.eos(), # 'prev_output_tokens' starts with eos - new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)), - ) - lang_pairs.append(text_dataset) - if len(lang_pairs) > 1: - if sampling_alpha != 1.0: - size_ratios = SpeechToTextDatasetCreator.get_size_ratios( - self.args.langpairs.split(","), - [len(s) for s in lang_pairs], - alpha=sampling_alpha, - ) - lang_pairs = [ - ResamplingDataset( - d, size_ratio=r, epoch=epoch, replace=(r >= 1.0) - ) - for d, r in zip(lang_pairs, size_ratios) - ] - return ConcatDataset(lang_pairs) - return text_dataset - - def inference_step( - self, generator, models, sample, prefix_tokens=None, constraints=None - ): - with torch.no_grad(): - return generator.generate( - models, - sample, - prefix_tokens=prefix_tokens, - constraints=constraints, - bos_token=self._infer_tgt_lang_id, - ) - - def build_src_tokenizer(self, args): - logger.info(f"src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}") - return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer)) - - def build_src_bpe(self, args): - logger.info(f"tokenizer: {self.data_cfg.src_bpe_tokenizer}") - return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer)) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. 
- - Args: - split (str): name of the split (e.g., train, valid, test) - """ - is_train_split = split.startswith("train") - pre_tokenizer = self.build_tokenizer(self.args) - bpe_tokenizer = self.build_bpe(self.args) - src_pre_tokenizer = self.build_src_tokenizer(self.args) - src_bpe_tokenizer = self.build_src_bpe(self.args) - ast_dataset = SpeechToTextJointDatasetCreator.from_tsv( - self.args.data, - self.data_cfg, - split, - self.tgt_dict, - src_dict=None if self.speech_only else self.src_dict, - pre_tokenizer=pre_tokenizer, - bpe_tokenizer=bpe_tokenizer, - src_pre_tokenizer=src_pre_tokenizer, - src_bpe_tokenizer=src_bpe_tokenizer, - is_train_split=is_train_split, - epoch=epoch, - seed=self.args.seed, - ) - noise_token_id = -1 - text_dataset = None - if self.args.parallel_text_data != "" and is_train_split: - text_dataset = self.load_langpair_dataset( - self.data_cfg.prepend_tgt_lang_tag_no_change, - 1.0, - epoch=epoch, - ) - if self.args.mask_text_ratio > 0: - # add mask - noise_token_id = ( - self.src_dict.unk() - if self.args.noise_token == "" - else self.src_dict.index(self.args.noise_token) - ) - text_dataset = LangPairMaskDataset( - text_dataset, - src_bos=self.src_dict.bos(), - src_eos=self.src_dict.eos(), - noise_id=noise_token_id, - mask_ratio=self.args.mask_text_ratio, - mask_type=self.args.mask_text_type, - ) - - if text_dataset is not None: - mdsets = [ - ModalityDatasetItem( - "sup_speech", - ast_dataset, - (self.args.max_source_positions, self.args.max_target_positions), - self.args.max_tokens, - self.args.batch_size, - ), - ModalityDatasetItem( - "text", - text_dataset, - (self.args.max_positions_text, self.args.max_target_positions), - self.args.max_tokens_text - if self.args.max_tokens_text is not None - else self.args.max_tokens, - self.args.batch_size, - ), - ] - ast_dataset = MultiModalityDataset(mdsets) - self.datasets[split] = ast_dataset - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.tgt_dict - - @property - def source_dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary` (if applicable - for this task).""" - return None if self.speech_only else self.src_dict - - def get_batch_iterator( - self, - dataset, - max_tokens=None, - max_sentences=None, - max_positions=None, - ignore_invalid_inputs=False, - required_batch_size_multiple=1, - seed=1, - num_shards=1, - shard_id=0, - num_workers=0, - epoch=0, - data_buffer_size=0, - disable_iterator_cache=False, - ): - - if not isinstance(dataset, MultiModalityDataset): - return super(SpeechTextJointToTextTask, self).get_batch_iterator( - dataset, - max_tokens, - max_sentences, - max_positions, - ignore_invalid_inputs, - required_batch_size_multiple, - seed, - num_shards, - shard_id, - num_workers, - epoch, - data_buffer_size, - disable_iterator_cache, - ) - - mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio] - assert len(dataset.datasets) == 2 - - # initialize the dataset with the correct starting epoch - dataset.set_epoch(epoch) - - batch_samplers = dataset.get_batch_samplers( - mult_ratio, required_batch_size_multiple, seed - ) - - # return a reusable, sharded iterator - epoch_iter = GroupedEpochBatchIterator( - dataset=dataset, - collate_fn=dataset.collater, - batch_samplers=batch_samplers, - seed=seed, - num_shards=num_shards, - shard_id=shard_id, - num_workers=num_workers, - epoch=epoch, - mult_rate=1 if self.args.update_mix_data else max(self.args.update_freq), - 
buffer_size=data_buffer_size, - ) - self.dataset_to_epoch_iter[dataset] = {} # refresh it every epoch - return epoch_iter diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/xlmr/README.md b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/xlmr/README.md deleted file mode 100644 index b95bfe15d3fe6d03951453679135c2e9187d73c7..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/xlmr/README.md +++ /dev/null @@ -1,144 +0,0 @@ -# Unsupervised Cross-lingual Representation Learning at Scale (XLM-RoBERTa) -https://arxiv.org/pdf/1911.02116.pdf - -# Larger-Scale Transformers for Multilingual Masked Language Modeling -https://arxiv.org/pdf/2105.00572.pdf - - -## What's New: -- June 2021: `XLMR-XL` AND `XLMR-XXL` models released. - -## Introduction - -`XLM-R` (`XLM-RoBERTa`) is a generic cross lingual sentence encoder that obtains state-of-the-art results on many cross-lingual understanding (XLU) benchmarks. It is trained on `2.5T` of filtered CommonCrawl data in 100 languages (list below). - - Language | Language|Language |Language | Language ----|---|---|---|--- -Afrikaans | Albanian | Amharic | Arabic | Armenian -Assamese | Azerbaijani | Basque | Belarusian | Bengali -Bengali Romanize | Bosnian | Breton | Bulgarian | Burmese -Burmese zawgyi font | Catalan | Chinese (Simplified) | Chinese (Traditional) | Croatian -Czech | Danish | Dutch | English | Esperanto -Estonian | Filipino | Finnish | French | Galician -Georgian | German | Greek | Gujarati | Hausa -Hebrew | Hindi | Hindi Romanize | Hungarian | Icelandic -Indonesian | Irish | Italian | Japanese | Javanese -Kannada | Kazakh | Khmer | Korean | Kurdish (Kurmanji) -Kyrgyz | Lao | Latin | Latvian | Lithuanian -Macedonian | Malagasy | Malay | Malayalam | Marathi -Mongolian | Nepali | Norwegian | Oriya | Oromo -Pashto | Persian | Polish | Portuguese | Punjabi -Romanian | Russian | Sanskrit | Scottish Gaelic | Serbian -Sindhi | Sinhala | Slovak | Slovenian | Somali -Spanish | Sundanese | Swahili | Swedish | Tamil -Tamil Romanize | Telugu | Telugu Romanize | Thai | Turkish -Ukrainian | Urdu | Urdu Romanize | Uyghur | Uzbek -Vietnamese | Welsh | Western Frisian | Xhosa | Yiddish - -## Pre-trained models - -Model | Description | #params | vocab size | Download ----|---|---|---|--- -`xlmr.base` | XLM-R using the BERT-base architecture | 250M | 250k | [xlm.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz) -`xlmr.large` | XLM-R using the BERT-large architecture | 560M | 250k | [xlm.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz) -`xlmr.xl` | XLM-R (`layers=36, model_dim=2560`) | 3.5B | 250k | [xlm.xl.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz) -`xlmr.xxl` | XLM-R (`layers=48, model_dim=4096`) | 10.7B | 250k | [xlm.xxl.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz) - -## Results - -**[XNLI (Conneau et al., 2018)](https://arxiv.org/abs/1809.05053)** - -Model | average | en | fr | es | de | el | bg | ru | tr | ar | vi | th | zh | hi | sw | ur ----|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--- -`roberta.large.mnli` _(TRANSLATE-TEST)_ | 77.8 | 91.3 | 82.9 | 84.3 | 81.2 | 81.7 | 83.1 | 78.3 | 76.8 | 76.6 | 74.2 | 74.1 | 77.5 | 70.9 | 66.7 | 66.8 -`xlmr.large` _(TRANSLATE-TRAIN-ALL)_ | 83.6 | 89.1 | 85.1 | 86.6 | 85.7 | 85.3 | 85.9 | 83.5 | 83.2 | 83.1 | 83.7 | 81.5 | 83.7 | 81.6 | 78.0 | 78.1 -`xlmr.xl` _(TRANSLATE-TRAIN-ALL)_ | 85.4 | 91.1 | 87.2 | 
88.1 | 87.0 | 87.4 | 87.8 | 85.3 | 85.2 | 85.3 | 86.2 | 83.8 | 85.3 | 83.1 | 79.8 | 78.2 | 85.4 -`xlmr.xxl` _(TRANSLATE-TRAIN-ALL)_ | 86.0 | 91.5 | 87.6 | 88.7 | 87.8 | 87.4 | 88.2 | 85.6 | 85.1 | 85.8 | 86.3 | 83.9 | 85.6 | 84.6 | 81.7 | 80.6 - -**[MLQA (Lewis et al., 2018)](https://arxiv.org/abs/1910.07475)** - -Model | average | en | es | de | ar | hi | vi | zh ----|---|---|---|---|---|---|---|--- -`BERT-large` | - | 80.2/67.4 | - | - | - | - | - | - -`mBERT` | 57.7 / 41.6 | 77.7 / 65.2 | 64.3 / 46.6 | 57.9 / 44.3 | 45.7 / 29.8| 43.8 / 29.7 | 57.1 / 38.6 | 57.5 / 37.3 -`xlmr.large` | 70.7 / 52.7 | 80.6 / 67.8 | 74.1 / 56.0 | 68.5 / 53.6 | 63.1 / 43.5 | 69.2 / 51.6 | 71.3 / 50.9 | 68.0 / 45.4 -`xlmr.xl` | 73.4 / 55.3 | 85.1 / 72.6 | 66.7 / 46.2 | 70.5 / 55.5 | 74.3 / 56.9 | 72.2 / 54.7 | 74.4 / 52.9 | 70.9 / 48.5 -`xlmr.xxl` | 74.8 / 56.6 | 85.5 / 72.4 | 68.6 / 48.4 | 72.7 / 57.8 | 75.4 / 57.6 | 73.7 / 55.8 | 76.0 / 55.0 | 71.7 / 48.9 - - -## Example usage - -##### Load XLM-R from torch.hub (PyTorch >= 1.1): -```python -import torch -xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') -xlmr.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Load XLM-R (for PyTorch 1.0 or custom models): -```python -# Download xlmr.large model -wget https://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz -tar -xzvf xlmr.large.tar.gz - -# Load the model in fairseq -from fairseq.models.roberta import XLMRModel -xlmr = XLMRModel.from_pretrained('/path/to/xlmr.large', checkpoint_file='model.pt') -xlmr.eval() # disable dropout (or leave in train mode to finetune) -``` - -##### Apply sentence-piece-model (SPM) encoding to input text: -```python -en_tokens = xlmr.encode('Hello world!') -assert en_tokens.tolist() == [0, 35378, 8999, 38, 2] -xlmr.decode(en_tokens) # 'Hello world!' 
- -zh_tokens = xlmr.encode('你好,世界') -assert zh_tokens.tolist() == [0, 6, 124084, 4, 3221, 2] -xlmr.decode(zh_tokens) # '你好,世界' - -hi_tokens = xlmr.encode('नमस्ते दुनिया') -assert hi_tokens.tolist() == [0, 68700, 97883, 29405, 2] -xlmr.decode(hi_tokens) # 'नमस्ते दुनिया' - -ar_tokens = xlmr.encode('مرحبا بالعالم') -assert ar_tokens.tolist() == [0, 665, 193478, 258, 1705, 77796, 2] -xlmr.decode(ar_tokens) # 'مرحبا بالعالم' - -fr_tokens = xlmr.encode('Bonjour le monde') -assert fr_tokens.tolist() == [0, 84602, 95, 11146, 2] -xlmr.decode(fr_tokens) # 'Bonjour le monde' -``` - -##### Extract features from XLM-R: -```python -# Extract the last layer's features -last_layer_features = xlmr.extract_features(zh_tokens) -assert last_layer_features.size() == torch.Size([1, 6, 1024]) - -# Extract all layer's features (layer 0 is the embedding layer) -all_layers = xlmr.extract_features(zh_tokens, return_all_hiddens=True) -assert len(all_layers) == 25 -assert torch.all(all_layers[-1] == last_layer_features) -``` - -## Citation - -```bibtex -@article{conneau2019unsupervised, - title={Unsupervised Cross-lingual Representation Learning at Scale}, - author={Conneau, Alexis and Khandelwal, Kartikay and Goyal, Naman and Chaudhary, Vishrav and Wenzek, Guillaume and Guzm{\'a}n, Francisco and Grave, Edouard and Ott, Myle and Zettlemoyer, Luke and Stoyanov, Veselin}, - journal={arXiv preprint arXiv:1911.02116}, - year={2019} -} -``` - - -```bibtex -@article{goyal2021larger, - title={Larger-Scale Transformers for Multilingual Masked Language Modeling}, - author={Goyal, Naman and Du, Jingfei and Ott, Myle and Anantharaman, Giri and Conneau, Alexis}, - journal={arXiv preprint arXiv:2105.00572}, - year={2021} -} -``` diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/colorize_dataset.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/colorize_dataset.py deleted file mode 100644 index 6ef097bff1a013f4944b1cb55e1e7e4e2480b3a6..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/colorize_dataset.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch - -from . import BaseWrapperDataset - - -class ColorizeDataset(BaseWrapperDataset): - """ Adds 'colors' property to net input that is obtained from the provided color getter for use by models """ - - def __init__(self, dataset, color_getter): - super().__init__(dataset) - self.color_getter = color_getter - - def collater(self, samples): - base_collate = super().collater(samples) - if len(base_collate) > 0: - base_collate["net_input"]["colors"] = torch.tensor( - list(self.color_getter(self.dataset, s["id"]) for s in samples), - dtype=torch.long, - ) - return base_collate diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_bmuf.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_bmuf.py deleted file mode 100644 index 8b7cadb094d49587b6b82432248459fdcf42457e..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/distributed/test_bmuf.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
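`ColorizeDataset` above is a thin wrapper: it defers collation to the wrapped dataset and then attaches one extra `colors` tensor, looked up per sample id via the supplied getter. The same pattern, sketched without fairseq (a hypothetical toy dataset with a `collater`, and an invented even/odd colouring rule), looks like this:

```python
import torch

class ToyDataset:
    """Stand-in for a fairseq dataset whose collater builds a net_input dict."""
    def __init__(self, n):
        self.n = n
    def __len__(self):
        return self.n
    def __getitem__(self, i):
        return {"id": i, "net_input": {}}
    def collater(self, samples):
        if len(samples) == 0:
            return {}
        return {
            "id": torch.tensor([s["id"] for s in samples]),
            "net_input": {},
        }

def color_getter(dataset, sample_id):
    # hypothetical rule: even ids -> color 0, odd ids -> color 1
    return sample_id % 2

base = ToyDataset(4)
samples = [base[i] for i in range(3)]
batch = base.collater(samples)
# the wrapper's only job: add a per-sample colors tensor to net_input
batch["net_input"]["colors"] = torch.tensor(
    [color_getter(base, s["id"]) for s in samples], dtype=torch.long)
print(batch["net_input"]["colors"])   # tensor([0, 1, 0])
```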
- -import argparse -import functools -import random -import unittest -from multiprocessing import Manager - -import torch -import torch.nn as nn -from fairseq import optim -from fairseq.distributed import utils as distributed_utils -from omegaconf import OmegaConf - - -class Model(nn.Module): - def __init__(self, input_size, output_size): - super(Model, self).__init__() - self.fc = nn.Linear(input_size, output_size) - - def forward(self, input): - output = self.fc(input) - return output - - -def setup_model_loss_criterion(cfg, args, rank, is_cuda): - """ - setup model, criterion and optimizer based on input args - """ - args.distributed_rank = rank - cfg.distributed_training.distributed_rank = args.distributed_rank - if cfg.distributed_training.distributed_world_size > 1: - distributed_utils.distributed_init(cfg) - torch.manual_seed(1) - model = Model(args.input_size, args.nb_classes) - loss_fn = nn.CrossEntropyLoss() - if is_cuda: - model = model.cuda() - loss_fn = loss_fn.cuda() - - optimizer = optim.sgd.SGD(args, model.parameters()) - optimizer = optim.FairseqBMUF( - cfg=cfg.bmuf, - optimizer=optimizer - ) - - return model, loss_fn, optimizer - - -def train_step(input, target, model, loss_fn, optimizer, **unused): - """Do forward, backward and parameter update.""" - model.train() - output = model(input) - loss = loss_fn(output, target) - optimizer.backward(loss) - optimizer.step() - - -def single_gpu_training(cfg, args, rank, iterations, shared_results): - - is_cuda = torch.cuda.is_available() - if is_cuda: - torch.cuda.set_device(rank) - - model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda) - - for _ in range(iterations): - input = torch.randn(1, args.input_size) - target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes) - - if is_cuda: - input = input.cuda() - target = target.cuda() - train_step(input, target, model, loss_fn, optimizer) - - results = [] - for param in model.parameters(): - if len(results) == 0: - results = param.flatten().cpu().data - else: - results = torch.cat((results, param.flatten().cpu().data), 0) - - shared_results[rank] = results - - -def setup_args(): - args = argparse.Namespace() - args.global_sync_iter = 20 - args.block_momentum = 0.875 - args.block_lr = 0.5 - args.input_size = 5 - args.nb_classes = 2 - args.batch_size = 1 - args.lr = [1e-3] - args.momentum = 0 - args.weight_decay = 0 - args.warmup_iterations = 0 - args.use_nbm = True - args.average_sync = True - args.global_sync_iter = 1 - args.model_parallel_size = 1 - args.distributed_backend = "gloo" - - args.distributed_world_size = 2 - port = random.randint(10000, 20000) - args.distributed_init_method = "tcp://localhost:{port}".format(port=port) - args.distributed_init_host = "localhost" - args.distributed_port = port + 1 - args.local_world_size = args.distributed_world_size - - cfg = OmegaConf.create() - cfg.optimization = OmegaConf.create() - cfg.common = OmegaConf.create() - cfg.distributed_training = OmegaConf.create() - cfg.dataset = OmegaConf.create() - cfg.bmuf = OmegaConf.create() - cfg.optimizer = OmegaConf.create() - - cfg.bmuf.global_sync_iter = args.global_sync_iter - cfg.bmuf.block_momentum = args.block_momentum - cfg.bmuf.block_lr = args.block_lr - cfg.dataset.batch_size = args.batch_size - cfg.optimization.lr = args.lr - cfg.optimizer.momentum = args.momentum - cfg.optimizer.weight_decay = args.weight_decay - cfg.bmuf.warmup_iterations = args.warmup_iterations - cfg.bmuf.use_nbm = args.use_nbm - cfg.bmuf.average_sync = 
args.average_sync - cfg.common.model_parallel_size = args.model_parallel_size - cfg.distributed_training.distributed_backend = args.distributed_backend - cfg.distributed_training.distributed_world_size = args.distributed_world_size - cfg.bmuf.distributed_world_size = args.distributed_world_size - cfg.distributed_training.distributed_init_method = args.distributed_init_method - cfg.distributed_training.distributed_port = args.distributed_port - - return cfg, args - - -@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs") -class TestBMUF(unittest.TestCase): - def bmuf_process(self, cfg, args, iterations): - processes = [] - results = Manager().dict() - torch.multiprocessing.spawn( - fn=functools.partial(single_gpu_training, cfg, args), - args=(iterations, results), - nprocs=args.distributed_world_size, - join=True, - ) - return results - - def test_bmuf_sync(self): - # Train model for 1 iteration and do bmuf sync without doing warmup - cfg, args = setup_args() - iterations = 1 - results = self.bmuf_process(cfg, args, iterations) - # Make sure params in both machines are same - assert len(results) == 2 - self.assertAlmostEqual(results[0], results[1]) - - def test_warmup_sync(self): - # Train model for 20 iteration and do warmup sync without doing bmuf sync - cfg, args = setup_args() - args.warmup_iterations = 20 - cfg.bmuf.warmup_iterations = args.warmup_iterations - iterations = 20 - results = self.bmuf_process(cfg, args, iterations) - # Make sure params in both machines are same - assert len(results) == 2 - self.assertAlmostEqual(results[0], results[1]) - - def test_warmup_sync_bmuf_sync(self): - # Train model for 25 iteration and do warmup sync after 20 iteration - # and bmuf sync after 25 iteration - cfg, args = setup_args() - args.warmup_iterations = 20 - args.global_sync_iter = 5 - cfg.bmuf.warmup_iterations = args.warmup_iterations - cfg.bmuf.global_sync_iter = args.global_sync_iter - iterations = 25 - results = self.bmuf_process(cfg, args, iterations) - # Make sure params in both machines are same - assert len(results) == 2 - self.assertAlmostEqual(results[0], results[1]) - - def test_single_gpu_bmuf(self): - # Train model for 5 iterations and use GPU 1 - cfg, args = setup_args() - args.distributed_world_size = 1 - args.warmup_iterations = 5 - cfg.distributed_training.distributed_world_size = args.distributed_world_size - cfg.bmuf.distributed_world_size = args.distributed_world_size - cfg.bmuf.warmup_iterations = args.warmup_iterations - iterations = 20 - results = self.bmuf_process(cfg, args, iterations) - assert len(results) == 1 - - def assertAlmostEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertLess((t1 - t2).abs().max(), 1e-4) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/app.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/app.py deleted file mode 100644 index 9a7779f66c5a16dba7c448a14aa5882cac9d1742..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import os -os.system('wget -q https://storage.googleapis.com/vakyansh-open-models/tts/hindi/hi-IN/female_voice_0/glow.zip && unzip -q glow.zip -d ttsv/checkpoints/female') -os.system('wget -q https://storage.googleapis.com/vakyansh-open-models/tts/hindi/hi-IN/female_voice_0/hifi.zip && unzip -q hifi.zip -d ttsv/checkpoints/female') -os.system('rm glow.zip && rm hifi.zip') -os.system('wget -q 
https://storage.googleapis.com/vakyansh-open-models/tts/hindi/hi-IN/male_voice_1/glow.zip && unzip -q glow.zip -d ttsv/checkpoints/male') -os.system('wget -q https://storage.googleapis.com/vakyansh-open-models/tts/hindi/hi-IN/male_voice_1/hifi.zip && unzip -q hifi.zip -d ttsv/checkpoints/male') -os.system('wget -q https://storage.googleapis.com/vakyansh-open-models/translit_models.zip -P ttsv/checkpoints/ && unzip -q ttsv/checkpoints/translit_models.zip -d ttsv/checkpoints/') - - -for path, subdirs, files in os.walk('ttsv/checkpoints/'): - print(subdirs) - for name in files: - print(os.path.join(path, name)) - -from ttsv.utils.inference.run_gradio import * -from argparse import Namespace - -#os.system('python ttsv/utils/inference/run_gradio.py -a ttsv/checkpoints/glow/male -v ttsv/checkpoints/hifi/male -d cpu -L hi') - - -args = { - 'acoustic':'/home/user/app/ttsv/checkpoints/female/glow_ckp,/home/user/app/ttsv/checkpoints/male/glow_ckp', - 'vocoder':'/home/user/app/ttsv/checkpoints/female/hifi_ckp,/home/user/app/ttsv/checkpoints/male/hifi_ckp', - 'device':'cpu', - 'lang':'hi' -} - -build_gradio(Namespace(**args)) \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py deleted file mode 100644 index d6ee9c4a3602be9db8ddfe67d41ce8a96a98ad1e..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/data/extracted_features_dataset.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
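The TTS app above assembles its configuration as a plain dict and converts it with `Namespace(**args)` so that `build_gradio`, written against argparse-style attribute access, keeps working without a parser. A minimal illustration of that idiom (the paths below are placeholders, not the real checkpoint locations):

```python
from argparse import Namespace

config = {
    "acoustic": "/path/to/glow_ckp",   # placeholder paths, not the downloaded checkpoints
    "vocoder": "/path/to/hifi_ckp",
    "device": "cpu",
    "lang": "hi",
}
args = Namespace(**config)

# Downstream code can now use attribute access, exactly as if the values
# had come from argparse.ArgumentParser().parse_args().
print(args.device, args.lang)
```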
- - -import logging -import os -import contextlib - -import numpy as np -import torch - -from fairseq.data import FairseqDataset, data_utils - - -logger = logging.getLogger(__name__) - - -class ExtractedFeaturesDataset(FairseqDataset): - def __init__( - self, - path, - split, - min_length=3, - max_length=None, - labels=None, - label_dict=None, - shuffle=True, - sort_by_length=True, - ): - super().__init__() - - self.min_length = min_length - self.max_length = max_length - self.shuffle = shuffle - self.sort_by_length = sort_by_length - self.label_dict = label_dict - - if labels is not None: - assert label_dict is not None - - self.sizes = [] - self.offsets = [] - self.labels = [] - - path = os.path.join(path, split) - data_path = path - self.data = np.load(data_path + ".npy", mmap_mode="r") - - offset = 0 - skipped = 0 - - if not os.path.exists(path + f".{labels}"): - labels = None - - with open(data_path + ".lengths", "r") as len_f, open( - path + f".{labels}", "r" - ) if labels is not None else contextlib.ExitStack() as lbl_f: - for line in len_f: - length = int(line.rstrip()) - lbl = None if labels is None else next(lbl_f).rstrip().split() - if length >= min_length and ( - max_length is None or length <= max_length - ): - self.sizes.append(length) - self.offsets.append(offset) - if lbl is not None: - self.labels.append(lbl) - offset += length - - self.sizes = np.asarray(self.sizes) - self.offsets = np.asarray(self.offsets) - - logger.info(f"loaded {len(self.offsets)}, skipped {skipped} samples") - - def __getitem__(self, index): - offset = self.offsets[index] - end = self.sizes[index] + offset - feats = torch.from_numpy(self.data[offset:end].copy()).float() - - res = {"id": index, "features": feats} - if len(self.labels) > 0: - res["target"] = self.label_dict.encode_line( - self.labels[index], - line_tokenizer=lambda x: x, - append_eos=False, - ) - - return res - - def __len__(self): - return len(self.sizes) - - def collater(self, samples): - if len(samples) == 0: - return {} - - features = [s["features"] for s in samples] - sizes = [len(s) for s in features] - - target_size = max(sizes) - - collated_features = features[0].new_zeros( - len(features), target_size, features[0].size(-1) - ) - padding_mask = torch.BoolTensor(collated_features.shape[:-1]).fill_(False) - for i, (f, size) in enumerate(zip(features, sizes)): - collated_features[i, :size] = f - padding_mask[i, size:] = True - - res = { - "id": torch.LongTensor([s["id"] for s in samples]), - "net_input": {"features": collated_features, "padding_mask": padding_mask}, - } - - if len(self.labels) > 0: - target = data_utils.collate_tokens( - [s["target"] for s in samples], - pad_idx=self.label_dict.pad(), - left_pad=False, - ) - res["target"] = target - return res - - def num_tokens(self, index): - return self.size(index) - - def size(self, index): - return self.sizes[index] - - def ordered_indices(self): - """Return an ordered list of indices. 
Batches will be constructed based - on this order.""" - if self.shuffle: - order = [np.random.permutation(len(self))] - else: - order = [np.arange(len(self))] - - if self.sort_by_length: - order.append(self.sizes) - return np.lexsort(order)[::-1] - else: - return order[0] diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py deleted file mode 100644 index e21144a88e0038c2f35711333a40315613004256..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/transform_eos_lang_pair_dataset.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from typing import Optional - -import torch - -from . import FairseqDataset - - -class TransformEosLangPairDataset(FairseqDataset): - """A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on - collated samples of language pair dataset. - - Note that the transformation is applied in :func:`collater`. - - Args: - dataset (~fairseq.data.FairseqDataset): dataset that collates sample into - LanguagePairDataset schema - src_eos (int): original source end-of-sentence symbol index to be replaced - new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol - tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced - new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the - beginning of 'prev_output_tokens' - """ - - def __init__( - self, - dataset: FairseqDataset, - src_eos: int, - new_src_eos: Optional[int] = None, - tgt_bos: Optional[int] = None, - new_tgt_bos: Optional[int] = None, - ): - self.dataset = dataset - self.src_eos = src_eos - self.new_src_eos = new_src_eos - self.tgt_bos = tgt_bos - self.new_tgt_bos = new_tgt_bos - - def __getitem__(self, index): - return self.dataset[index] - - def __len__(self): - return len(self.dataset) - - def collater(self, samples, **extra_args): - samples = self.dataset.collater(samples, **extra_args) - if len(samples) == 0: - return samples - - if 'net_input' not in samples: - return samples - - if self.new_src_eos is not None: - if self.dataset.left_pad_source: - assert ( - samples["net_input"]["src_tokens"][:, -1] != self.src_eos - ).sum() == 0 - samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos - else: - eos_idx = samples["net_input"]["src_lengths"] - 1 - assert ( - samples["net_input"]["src_tokens"][ - torch.arange(eos_idx.size(0)), eos_idx - ] - != self.src_eos - ).sum() == 0 - eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1) - samples["net_input"]["src_tokens"].scatter_( - 1, eos_idx, self.new_src_eos - ) - - if ( - self.new_tgt_bos is not None - and "prev_output_tokens" in samples["net_input"] - ): - if self.dataset.left_pad_target: - # TODO: support different padding direction on target side - raise NotImplementedError( - "TransformEosLangPairDataset does not implement --left-pad-target True option" - ) - else: - assert ( - samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos - ).sum() == 0 - samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos - - return samples - - def num_tokens(self, index): - return self.dataset.num_tokens(index) - - def size(self, index): - return self.dataset.size(index) - - @property - def sizes(self): - # dataset.sizes 
can be a dynamically computed sizes: - return self.dataset.sizes - - def ordered_indices(self): - return self.dataset.ordered_indices() - - @property - def supports_prefetch(self): - return getattr(self.dataset, "supports_prefetch", False) - - def prefetch(self, indices): - return self.dataset.prefetch(indices) diff --git a/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/fma.py b/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/fma.py deleted file mode 100644 index 7304d85825d16612eec488242b220c2dbd83b6d7..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/third_party/stylegan2_official_ops/fma.py +++ /dev/null @@ -1,73 +0,0 @@ -# python3.7 - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`. - -Please refer to https://github.com/NVlabs/stylegan2-ada-pytorch -""" - -# pylint: disable=line-too-long -# pylint: disable=missing-function-docstring - -import torch - -#---------------------------------------------------------------------------- - -def fma(a, b, c, impl='cuda'): # => a * b + c - if impl == 'cuda': - return _FusedMultiplyAdd.apply(a, b, c) - return torch.addcmul(c, a, b) - -#---------------------------------------------------------------------------- - -class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c - @staticmethod - def forward(ctx, a, b, c): # pylint: disable=arguments-differ - out = torch.addcmul(c, a, b) - ctx.save_for_backward(a, b) - ctx.c_shape = c.shape - return out - - @staticmethod - def backward(ctx, dout): # pylint: disable=arguments-differ - a, b = ctx.saved_tensors - c_shape = ctx.c_shape - da = None - db = None - dc = None - - if ctx.needs_input_grad[0]: - da = _unbroadcast(dout * b, a.shape) - - if ctx.needs_input_grad[1]: - db = _unbroadcast(dout * a, b.shape) - - if ctx.needs_input_grad[2]: - dc = _unbroadcast(dout, c_shape) - - return da, db, dc - -#---------------------------------------------------------------------------- - -def _unbroadcast(x, shape): - extra_dims = x.ndim - len(shape) - assert extra_dims >= 0 - dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] - if len(dim): - x = x.sum(dim=dim, keepdim=True) - if extra_dims: - x = x.reshape(-1, *x.shape[extra_dims+1:]) - assert x.shape == shape - return x - -#---------------------------------------------------------------------------- - -# pylint: enable=line-too-long -# pylint: enable=missing-function-docstring diff --git a/spaces/Ironicsarcastic/Nse/Dockerfile b/spaces/Ironicsarcastic/Nse/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Ironicsarcastic/Nse/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git 
a/spaces/Izumazu/ProxyTest/Dockerfile b/spaces/Izumazu/ProxyTest/Dockerfile deleted file mode 100644 index 3f4cc5fb0ccf5f9f28ffd96f7b626939d5ee83cd..0000000000000000000000000000000000000000 --- a/spaces/Izumazu/ProxyTest/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -RUN npm install -g npm@9.7.2 - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py b/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py deleted file mode 100644 index c9c4bb7dc40e1cb207ce591d65440efe88adc1dd..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +++ /dev/null @@ -1,505 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import torch -import torch.utils.checkpoint - -from transformers import CLIPFeatureExtractor, CLIPTextModelWithProjection, CLIPTokenizer - -from ...models import AutoencoderKL, UNet2DConditionModel -from ...models.attention import Transformer2DModel -from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import is_accelerate_available, logging -from .modeling_text_unet import UNetFlatConditionModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class VersatileDiffusionTextToImagePipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - vqvae ([`VQModel`]): - Vector-quantized (VQ) Model to encode and decode images to and from latent representations. - bert ([`LDMBertModel`]): - Text-encoder model based on [BERT](https://huggingface.co/docs/transformers/model_doc/bert) architecture. - tokenizer (`transformers.BertTokenizer`): - Tokenizer of class - [BertTokenizer](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
- """ - tokenizer: CLIPTokenizer - image_feature_extractor: CLIPFeatureExtractor - text_encoder: CLIPTextModelWithProjection - image_unet: UNet2DConditionModel - text_unet: UNetFlatConditionModel - vae: AutoencoderKL - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] - - _optional_components = ["text_unet"] - - def __init__( - self, - tokenizer: CLIPTokenizer, - text_encoder: CLIPTextModelWithProjection, - image_unet: UNet2DConditionModel, - text_unet: UNetFlatConditionModel, - vae: AutoencoderKL, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - ): - super().__init__() - self.register_modules( - tokenizer=tokenizer, - text_encoder=text_encoder, - image_unet=image_unet, - text_unet=text_unet, - vae=vae, - scheduler=scheduler, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - - if self.text_unet is not None: - self._swap_unet_attention_blocks() - - def _swap_unet_attention_blocks(self): - """ - Swap the `Transformer2DModel` blocks between the image and text UNets - """ - for name, module in self.image_unet.named_modules(): - if isinstance(module, Transformer2DModel): - parent_name, index = name.rsplit(".", 1) - index = int(index) - self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = ( - self.text_unet.get_submodule(parent_name)[index], - self.image_unet.get_submodule(parent_name)[index], - ) - - def remove_unused_weights(self): - self.register_modules(text_unet=None) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_attention_slicing with unet->image_unet - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - - Args: - slice_size (`str` or `int`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, - `attention_head_dim` must be a multiple of `slice_size`. - """ - if slice_size == "auto": - if isinstance(self.image_unet.config.attention_head_dim, int): - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.image_unet.config.attention_head_dim // 2 - else: - # if `attention_head_dim` is a list, take the smallest head size - slice_size = min(self.image_unet.config.attention_head_dim) - - self.image_unet.set_attention_slice(slice_size) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_attention_slicing - def disable_attention_slicing(self): - r""" - Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go - back to computing attention in one step. - """ - # set slice_size = `None` to disable `attention slicing` - self.enable_attention_slicing(None) - - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet, - text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.image_unet, self.text_unet, self.text_encoder, self.vae]: - if cpu_offloaded_model is not None: - cpu_offload(cpu_offloaded_model, device) - - @property - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device with unet->image_unet - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. - """ - if self.device != torch.device("meta") or not hasattr(self.image_unet, "_hf_hook"): - return self.device - for module in self.image_unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
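Classifier-free guidance, which `_encode_prompt` prepares for by stacking the unconditional and conditional embeddings into one batch, is resolved later in this file's denoising loop by mixing the two halves of the UNet output with `guidance_scale`. A small self-contained sketch of that mixing step, with random tensors standing in for real UNet predictions:

```py
import torch

# First half of the batch corresponds to the unconditional (empty or negative)
# prompt, the second half to the actual prompt, matching the order in which
# this method concatenates the two sets of embeddings.
guidance_scale = 7.5
noise_pred = torch.randn(2 * 4, 4, 64, 64)  # 2 * batch_size latent predictions

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# `guided` is what the scheduler steps on; guidance_scale == 1 reduces to the
# conditional prediction, larger values push the sample further toward the prompt.
print(guided.shape)  # torch.Size([4, 4, 64, 64])
```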
- """ - - def normalize_embeddings(encoder_output): - embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state) - embeds_pooled = encoder_output.text_embeds - embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True) - return embeds - - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids - - if not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - text_embeddings = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - text_embeddings = normalize_embeddings(text_embeddings) - - # duplicate text embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = text_embeddings.shape - text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1) - text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - uncond_embeddings = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - uncond_embeddings = normalize_embeddings(uncond_embeddings) - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = uncond_embeddings.shape[1] - uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1) - uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - return text_embeddings - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs - def check_inputs(self, prompt, height, width, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if latents is None: - if device.type == "mps": - # randn does not work reproducibly on mps - latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device) - else: - latents = torch.randn(shape, generator=generator, device=device, dtype=dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: Optional[int] = 1, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - height (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to self.image_unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. 
Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Examples: - - ```py - >>> from diffusers import VersatileDiffusionTextToImagePipeline - >>> import torch - - >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained( - ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16 - ... ) - >>> pipe.remove_unused_weights() - >>> pipe = pipe.to("cuda") - - >>> generator = torch.Generator(device="cuda").manual_seed(0) - >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0] - >>> image.save("./astronaut.png") - ``` - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 0. Default height and width to unet - height = height or self.image_unet.config.sample_size * self.vae_scale_factor - width = width or self.image_unet.config.sample_size * self.vae_scale_factor - - # 1. Check inputs. Raise error if not correct - self.check_inputs(prompt, height, width, callback_steps) - - # 2. Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_embeddings = self._encode_prompt( - prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 5. Prepare latent variables - num_channels_latents = self.image_unet.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - text_embeddings.dtype, - device, - generator, - latents, - ) - - # 6. Prepare extra step kwargs. - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7. 
Denoising loop - for i, t in enumerate(self.progress_bar(timesteps)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 9. Post-processing - image = self.decode_latents(latents) - - # 10. Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/JingyeChen22/TextDiffuser/text-inpainting.sh b/spaces/JingyeChen22/TextDiffuser/text-inpainting.sh deleted file mode 100644 index b3579395b69e941a50087b8600c478fa63c8115a..0000000000000000000000000000000000000000 --- a/spaces/JingyeChen22/TextDiffuser/text-inpainting.sh +++ /dev/null @@ -1,8 +0,0 @@ -CUDA_VISIBLE_DEVICES=0 python inference.py \ - --mode="text-inpainting" \ - --resume_from_checkpoint="textdiffuser-ckpt/diffusion_backbone" \ - --prompt="a boy draws good morning on a board" \ - --original_image="assets/examples/text-inpainting/case2.jpg" \ - --text_mask="assets/examples/text-inpainting/case2_mask.jpg" \ - --output_dir="./output" \ - --vis_num=4 \ No newline at end of file diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/tokenization_moss.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/tokenization_moss.py deleted file mode 100644 index 626315eb9e429ada99a15b04b9736c05e6743ffe..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/models/tokenization_moss.py +++ /dev/null @@ -1,368 +0,0 @@ -"""Tokenization classes for Moss""" - -import json -import os -import numpy as np -import regex as re - -from functools import lru_cache -from typing import TYPE_CHECKING, List, Optional, Tuple, Union - -from transformers.utils import is_tf_available, is_torch_available, logging -from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer - - -if TYPE_CHECKING: - if is_torch_available(): - import torch - if is_tf_available(): - import tensorflow as tf - - -logger = logging.get_logger(__name__) - -VOCAB_FILES_NAMES = { - "vocab_file": "vocab.json", - "merges_file": "merges.txt", -} - -PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/vocab.json", - "fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/vocab.json", - "fnlp/moss-moon-003-sft-plugin": "https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/vocab.json", - }, - "merges_file": { - "fnlp/moss-moon-003-base": "https://huggingface.co/fnlp/moss-moon-003-base/resolve/main/merges.txt", - "fnlp/moss-moon-003-sft": "https://huggingface.co/fnlp/moss-moon-003-sft/resolve/main/merges.txt", - "fnlp/moss-moon-003-sft-plugin": 
"https://huggingface.co/fnlp/moss-moon-003-sft-plugin/resolve/main/merges.txt", - }, -} - -PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "fnlp/moss-moon-003-base": 2048, - "fnlp/moss-moon-003-sft": 2048, - "fnlp/moss-moon-003-sft-plugin": 2048, -} - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control - characters the bpe code barfs on. - - The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab - if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for - decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup - tables between utf-8 bytes and unicode strings. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """ - Return set of symbol pairs in a word. - - Word is represented as tuple of symbols (symbols being variable-length strings). - """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -class MossTokenizer(PreTrainedTokenizer): - """ - Construct a Moss tokenizer. Based on byte-level Byte-Pair-Encoding. - - This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will - be encoded differently whether it is at the beginning of the sentence (without space) or not: - - You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you - call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. - - - - When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). - - - - This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to - this superclass for more information regarding those methods. - - Args: - vocab_file (`str`): - Path to the vocabulary file. - merges_file (`str`): - Path to the merges file. - errors (`str`, *optional*, defaults to `"replace"`): - Paradigm to follow when decoding bytes to UTF-8. See - [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. - unk_token (`str`, *optional*, defaults to `<|endoftext|>`): - The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this - token instead. - bos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The beginning of sequence token. - eos_token (`str`, *optional*, defaults to `<|endoftext|>`): - The end of sequence token. - add_prefix_space (`bool`, *optional*, defaults to `False`): - Whether or not to add an initial space to the input. This allows to treat the leading word just as any - other word. (Moss tokenizer detect beginning of words by the preceding space). 
- """ - - vocab_files_names = VOCAB_FILES_NAMES - pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] - - def __init__( - self, - vocab_file, - merges_file, - errors="replace", - unk_token="<|endoftext|>", - bos_token="<|endoftext|>", - eos_token="", - pad_token=None, - add_prefix_space=False, - add_bos_token=False, - **kwargs, - ): - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token - super().__init__( - errors=errors, - unk_token=unk_token, - bos_token=bos_token, - eos_token=eos_token, - pad_token=pad_token, - add_prefix_space=add_prefix_space, - add_bos_token=add_bos_token, - **kwargs, - ) - self.add_bos_token = add_bos_token - - with open(vocab_file, encoding="utf-8") as vocab_handle: - self.encoder = json.load(vocab_handle) - self.decoder = {v: k for k, v in self.encoder.items()} - self.errors = errors # how to handle errors in decoding - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - with open(merges_file, encoding="utf-8") as merges_handle: - bpe_merges = merges_handle.read().split("\n")[1:-1] - bpe_merges = [tuple(merge.split()) for merge in bpe_merges] - self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) - self.cache = {} - self.add_prefix_space = add_prefix_space - - # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions - self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") - - @property - def vocab_size(self): - return len(self.encoder) - - def get_vocab(self): - return dict(self.encoder, **self.added_tokens_encoder) - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token) - pairs = get_pairs(word) - - if not pairs: - return token - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - except ValueError: - new_word.extend(word[i:]) - break - else: - new_word.extend(word[i:j]) - i = j - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): - if self.add_bos_token: - bos_token_ids = [self.bos_token_id] - else: - bos_token_ids = [] - - output = bos_token_ids + token_ids_0 - - if token_ids_1 is None: - return output - - return output + bos_token_ids + token_ids_1 - - def _tokenize(self, text): - """Tokenize a string.""" - bpe_tokens = [] - for token in re.findall(self.pat, text): - token = "".join( - self.byte_encoder[b] for b in token.encode("utf-8") - ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces 
in our case) - bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) - return bpe_tokens - - def _convert_token_to_id(self, token): - """Converts a token (str) in an id using the vocab.""" - return self.encoder.get(token, self.encoder.get(self.unk_token)) - - def _convert_id_to_token(self, index): - """Converts an index (integer) in a token (str) using the vocab.""" - return self.decoder.get(index) - - def convert_tokens_to_string(self, tokens): - """Converts a sequence of tokens (string) in a single string.""" - text = "".join(tokens) - text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) - return text - - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: - if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") - return - vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) - merge_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] - ) - - with open(vocab_file, "w", encoding="utf-8") as f: - f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") - - index = 0 - with open(merge_file, "w", encoding="utf-8") as writer: - writer.write("#version: 0.2\n") - for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): - if index != token_index: - logger.warning( - f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." - " Please check that the tokenizer is not corrupted!" - ) - index = token_index - writer.write(" ".join(bpe_tokens) + "\n") - index += 1 - - return vocab_file, merge_file - - def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): - add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) - if is_split_into_words or add_prefix_space: - text = " " + text - return (text, kwargs) - - def decode( - self, - token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], - skip_special_tokens: bool = False, - clean_up_tokenization_spaces: bool = None, - truncate_before_pattern: Optional[List[str]] = None, - **kwargs, - ) -> str: - """ - Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special - tokens and clean up tokenization spaces. - - Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`. - - Args: - token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`): - List of tokenized input ids. Can be obtained using the `__call__` method. - skip_special_tokens (`bool`, *optional*, defaults to `False`): - Whether or not to remove special tokens in the decoding. - clean_up_tokenization_spaces (`bool`, *optional*): - Whether or not to clean up the tokenization spaces. If `None`, will default to - `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`). - truncate_before_pattern (`List[str]`, *optional*, defaults to `None`): - A list of regular expression strings that will be used to truncate the returned string. This can be - used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning - of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`. 
- kwargs (additional keyword arguments, *optional*): - Will be passed to the underlying model specific decode method. - - Returns: - `str`: The decoded sentence. - """ - decoded_text = super()._decode( - token_ids=token_ids, - skip_special_tokens=skip_special_tokens, - clean_up_tokenization_spaces=clean_up_tokenization_spaces, - **kwargs, - ) - - if truncate_before_pattern is not None and len(truncate_before_pattern) > 0: - decoded_text = self.truncate(decoded_text, truncate_before_pattern) - - return decoded_text - - def truncate(self, completion, truncate_before_pattern): - def find_re(string, pattern, start_pos): - m = pattern.search(string, start_pos) - return m.start() if m else -1 - - terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern] - - prints = list(re.finditer("^print", completion, re.MULTILINE)) - - if len(prints) > 1: - completion = completion[: prints[1].start()] - - defs = list(re.finditer("^def", completion, re.MULTILINE)) - - if len(defs) > 1: - completion = completion[: defs[1].start()] - - start_pos = 0 - - terminals_pos = [ - pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1 - ] - - if len(terminals_pos) > 0: - return completion[: min(terminals_pos)] - else: - return completion diff --git a/spaces/Joom/Front-end-code-generation-from-images/classes/Vocabulary.py b/spaces/Joom/Front-end-code-generation-from-images/classes/Vocabulary.py deleted file mode 100644 index 3b79c96dbf5200852ece221cdd9a60bfbf0865ab..0000000000000000000000000000000000000000 --- a/spaces/Joom/Front-end-code-generation-from-images/classes/Vocabulary.py +++ /dev/null @@ -1,78 +0,0 @@ -__author__ = 'Taneem Jan, taneemishere.github.io' - -import sys -import numpy as np - -START_TOKEN = "" -END_TOKEN = "" -PLACEHOLDER = " " -SEPARATOR = '->' - - -class Vocabulary: - def __init__(self): - self.binary_vocabulary = {} - self.vocabulary = {} - self.token_lookup = {} - self.size = 0 - - self.append(START_TOKEN) - self.append(END_TOKEN) - self.append(PLACEHOLDER) - - def append(self, token): - if token not in self.vocabulary: - self.vocabulary[token] = self.size - self.token_lookup[self.size] = token - self.size += 1 - - def create_binary_representation(self): - if sys.version_info >= (3,): - items = self.vocabulary.items() - else: - items = self.vocabulary.iteritems() - for key, value in items: - binary = np.zeros(self.size) - binary[value] = 1 - self.binary_vocabulary[key] = binary - - def get_serialized_binary_representation(self): - if len(self.binary_vocabulary) == 0: - self.create_binary_representation() - - string = "" - if sys.version_info >= (3,): - items = self.binary_vocabulary.items() - else: - items = self.binary_vocabulary.iteritems() - for key, value in items: - array_as_string = np.array2string(value, separator=',', max_line_width=self.size * self.size) - string += "{}{}{}\n".format(key, SEPARATOR, array_as_string[1:len(array_as_string) - 1]) - return string - - def save(self, path): - output_file_name = "{}/words.vocab".format(path) - output_file = open(output_file_name, 'w') - output_file.write(self.get_serialized_binary_representation()) - output_file.close() - - def retrieve(self, path): - input_file = open("{}/words.vocab".format(path), 'r') - buffer = "" - for line in input_file: - try: - separator_position = len(buffer) + line.index(SEPARATOR) - buffer += line - key = buffer[:separator_position] - value = buffer[separator_position + len(SEPARATOR):] - value = np.fromstring(value, sep=',') - - 
self.binary_vocabulary[key] = value - self.vocabulary[key] = np.where(value == 1)[0][0] - self.token_lookup[np.where(value == 1)[0][0]] = key - - buffer = "" - except ValueError: - buffer += line - input_file.close() - self.size = len(self.vocabulary) diff --git a/spaces/Josh98/nl2bash_m/tests.py b/spaces/Josh98/nl2bash_m/tests.py deleted file mode 100644 index 601ed757507caebec67493462d11eb4c8901c2a1..0000000000000000000000000000000000000000 --- a/spaces/Josh98/nl2bash_m/tests.py +++ /dev/null @@ -1,17 +0,0 @@ -test_cases = [ - { - "predictions": [0, 0], - "references": [1, 1], - "result": {"metric_score": 0} - }, - { - "predictions": [1, 1], - "references": [1, 1], - "result": {"metric_score": 1} - }, - { - "predictions": [1, 0], - "references": [1, 1], - "result": {"metric_score": 0.5} - } -] \ No newline at end of file diff --git a/spaces/KPCGD/bingo/src/lib/hooks/use-at-bottom.tsx b/spaces/KPCGD/bingo/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/KPCGD/bingo/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/KazeDevID/RVC-Model/vc_infer_pipeline.py b/spaces/KazeDevID/RVC-Model/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/KazeDevID/RVC-Model/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, 
self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, 
self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/KenjieDec/RemBG/rembg/session_simple.py b/spaces/KenjieDec/RemBG/rembg/session_simple.py deleted file mode 100644 index 7ec31813f2e14e80856803d2335671c9f50ca84f..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/RemBG/rembg/session_simple.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import List - -import numpy as np -from PIL import Image -from PIL.Image import Image as PILImage - -from .session_base import BaseSession - - -class SimpleSession(BaseSession): - def predict(self, img: PILImage) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, - self.normalize( - img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), (320, 320) - ), - ) - - pred = ort_outs[0][:, 0, :, :] - - ma = np.max(pred) - mi = np.min(pred) - - pred = (pred - mi) / (ma - mi) - pred = np.squeeze(pred) - - mask = Image.fromarray((pred * 255).astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - return [mask] diff --git 
a/spaces/Kevin676/Raven-with-Voice-Cloning/README.md b/spaces/Kevin676/Raven-with-Voice-Cloning/README.md deleted file mode 100644 index f7a362ba6a209fbcf0554cf206fedaf6b3691253..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Raven-with-Voice-Cloning/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Raven RWKV 7B -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: BlinkDL/Raven-RWKV-7B ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/train.py b/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/train.py deleted file mode 100644 index a136cf9b38538ca7dc428adf209c0cbb40e890d7..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/synthesizer/train.py +++ /dev/null @@ -1,269 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import optim -from torch.utils.data import DataLoader -from synthesizer import audio -from synthesizer.models.tacotron import Tacotron -from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer -from synthesizer.utils import ValueWindow, data_parallel_workaround -from synthesizer.utils.plot import plot_spectrogram -from synthesizer.utils.symbols import symbols -from synthesizer.utils.text import sequence_to_text -from vocoder.display import * -from datetime import datetime -import numpy as np -from pathlib import Path -import sys -import time -import platform - - -def np_now(x: torch.Tensor): return x.detach().cpu().numpy() - -def time_string(): - return datetime.now().strftime("%Y-%m-%d %H:%M") - -def train(run_id: str, syn_dir: str, models_dir: str, save_every: int, - backup_every: int, force_restart:bool, hparams): - - syn_dir = Path(syn_dir) - models_dir = Path(models_dir) - models_dir.mkdir(exist_ok=True) - - model_dir = models_dir.joinpath(run_id) - plot_dir = model_dir.joinpath("plots") - wav_dir = model_dir.joinpath("wavs") - mel_output_dir = model_dir.joinpath("mel-spectrograms") - meta_folder = model_dir.joinpath("metas") - model_dir.mkdir(exist_ok=True) - plot_dir.mkdir(exist_ok=True) - wav_dir.mkdir(exist_ok=True) - mel_output_dir.mkdir(exist_ok=True) - meta_folder.mkdir(exist_ok=True) - - weights_fpath = model_dir.joinpath(run_id).with_suffix(".pt") - metadata_fpath = syn_dir.joinpath("train.txt") - - print("Checkpoint path: {}".format(weights_fpath)) - print("Loading training data from: {}".format(metadata_fpath)) - print("Using model: Tacotron") - - # Book keeping - step = 0 - time_window = ValueWindow(100) - loss_window = ValueWindow(100) - - - # From WaveRNN/train_tacotron.py - if torch.cuda.is_available(): - device = torch.device("cuda") - - for session in hparams.tts_schedule: - _, _, _, batch_size = session - if batch_size % torch.cuda.device_count() != 0: - raise ValueError("`batch_size` must be evenly divisible by n_gpus!") - else: - device = torch.device("cpu") - print("Using device:", device) - - # Instantiate Tacotron Model - print("\nInitialising Tacotron Model...\n") - model = Tacotron(embed_dims=hparams.tts_embed_dims, - num_chars=len(symbols), - encoder_dims=hparams.tts_encoder_dims, - decoder_dims=hparams.tts_decoder_dims, - n_mels=hparams.num_mels, - fft_bins=hparams.num_mels, - postnet_dims=hparams.tts_postnet_dims, - encoder_K=hparams.tts_encoder_K, - lstm_dims=hparams.tts_lstm_dims, - 
postnet_K=hparams.tts_postnet_K, - num_highways=hparams.tts_num_highways, - dropout=hparams.tts_dropout, - stop_threshold=hparams.tts_stop_threshold, - speaker_embedding_size=hparams.speaker_embedding_size).to(device) - - # Initialize the optimizer - optimizer = optim.Adam(model.parameters()) - - # Load the weights - if force_restart or not weights_fpath.exists(): - print("\nStarting the training of Tacotron from scratch\n") - model.save(weights_fpath) - - # Embeddings metadata - char_embedding_fpath = meta_folder.joinpath("CharacterEmbeddings.tsv") - with open(char_embedding_fpath, "w", encoding="utf-8") as f: - for symbol in symbols: - if symbol == " ": - symbol = "\\s" # For visual purposes, swap space with \s - - f.write("{}\n".format(symbol)) - - else: - print("\nLoading weights at %s" % weights_fpath) - model.load(weights_fpath, optimizer) - print("Tacotron weights loaded from step %d" % model.step) - - # Initialize the dataset - metadata_fpath = syn_dir.joinpath("train.txt") - mel_dir = syn_dir.joinpath("mels") - embed_dir = syn_dir.joinpath("embeds") - dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams) - test_loader = DataLoader(dataset, - batch_size=1, - shuffle=True, - pin_memory=True) - - for i, session in enumerate(hparams.tts_schedule): - current_step = model.get_step() - - r, lr, max_step, batch_size = session - - training_steps = max_step - current_step - - # Do we need to change to the next session? - if current_step >= max_step: - # Are there no further sessions than the current one? - if i == len(hparams.tts_schedule) - 1: - # We have completed training. Save the model and exit - model.save(weights_fpath, optimizer) - break - else: - # There is a following session, go to it - continue - - model.r = r - - # Begin the training - simple_table([(f"Steps with r={r}", str(training_steps // 1000) + "k Steps"), - ("Batch Size", batch_size), - ("Learning Rate", lr), - ("Outputs/Step (r)", model.r)]) - - for p in optimizer.param_groups: - p["lr"] = lr - - data_loader = DataLoader(dataset, - collate_fn=lambda batch: collate_synthesizer(batch, r, hparams), - batch_size=batch_size, - num_workers=2 if platform.system() != "Windows" else 0, - shuffle=True, - pin_memory=True) - - total_iters = len(dataset) - steps_per_epoch = np.ceil(total_iters / batch_size).astype(np.int32) - epochs = np.ceil(training_steps / steps_per_epoch).astype(np.int32) - - for epoch in range(1, epochs+1): - for i, (texts, mels, embeds, idx) in enumerate(data_loader, 1): - start_time = time.time() - - # Generate stop tokens for training - stop = torch.ones(mels.shape[0], mels.shape[2]) - for j, k in enumerate(idx): - stop[j, :int(dataset.metadata[k][4])-1] = 0 - - texts = texts.to(device) - mels = mels.to(device) - embeds = embeds.to(device) - stop = stop.to(device) - - # Forward pass - # Parallelize model onto GPUS using workaround due to python bug - if device.type == "cuda" and torch.cuda.device_count() > 1: - m1_hat, m2_hat, attention, stop_pred = data_parallel_workaround(model, texts, - mels, embeds) - else: - m1_hat, m2_hat, attention, stop_pred = model(texts, mels, embeds) - - # Backward pass - m1_loss = F.mse_loss(m1_hat, mels) + F.l1_loss(m1_hat, mels) - m2_loss = F.mse_loss(m2_hat, mels) - stop_loss = F.binary_cross_entropy(stop_pred, stop) - - loss = m1_loss + m2_loss + stop_loss - - optimizer.zero_grad() - loss.backward() - - if hparams.tts_clip_grad_norm is not None: - grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.tts_clip_grad_norm) - if 
np.isnan(grad_norm.cpu()): - print("grad_norm was NaN!") - - optimizer.step() - - time_window.append(time.time() - start_time) - loss_window.append(loss.item()) - - step = model.get_step() - k = step // 1000 - - msg = f"| Epoch: {epoch}/{epochs} ({i}/{steps_per_epoch}) | Loss: {loss_window.average:#.4} | {1./time_window.average:#.2} steps/s | Step: {k}k | " - stream(msg) - - # Backup or save model as appropriate - if backup_every != 0 and step % backup_every == 0 : - backup_fpath = Path("{}/{}_{}k.pt".format(str(weights_fpath.parent), run_id, k)) - model.save(backup_fpath, optimizer) - - if save_every != 0 and step % save_every == 0 : - # Must save latest optimizer state to ensure that resuming training - # doesn't produce artifacts - model.save(weights_fpath, optimizer) - - # Evaluate model to generate samples - epoch_eval = hparams.tts_eval_interval == -1 and i == steps_per_epoch # If epoch is done - step_eval = hparams.tts_eval_interval > 0 and step % hparams.tts_eval_interval == 0 # Every N steps - if epoch_eval or step_eval: - for sample_idx in range(hparams.tts_eval_num_samples): - # At most, generate samples equal to number in the batch - if sample_idx + 1 <= len(texts): - # Remove padding from mels using frame length in metadata - mel_length = int(dataset.metadata[idx[sample_idx]][4]) - mel_prediction = np_now(m2_hat[sample_idx]).T[:mel_length] - target_spectrogram = np_now(mels[sample_idx]).T[:mel_length] - attention_len = mel_length // model.r - - eval_model(attention=np_now(attention[sample_idx][:, :attention_len]), - mel_prediction=mel_prediction, - target_spectrogram=target_spectrogram, - input_seq=np_now(texts[sample_idx]), - step=step, - plot_dir=plot_dir, - mel_output_dir=mel_output_dir, - wav_dir=wav_dir, - sample_num=sample_idx + 1, - loss=loss, - hparams=hparams) - - # Break out of loop to update training schedule - if step >= max_step: - break - - # Add line break after every epoch - print("") - -def eval_model(attention, mel_prediction, target_spectrogram, input_seq, step, - plot_dir, mel_output_dir, wav_dir, sample_num, loss, hparams): - # Save some results for evaluation - attention_path = str(plot_dir.joinpath("attention_step_{}_sample_{}".format(step, sample_num))) - save_attention(attention, attention_path) - - # save predicted mel spectrogram to disk (debug) - mel_output_fpath = mel_output_dir.joinpath("mel-prediction-step-{}_sample_{}.npy".format(step, sample_num)) - np.save(str(mel_output_fpath), mel_prediction, allow_pickle=False) - - # save griffin lim inverted wav for debug (mel -> wav) - wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams) - wav_fpath = wav_dir.joinpath("step-{}-wave-from-mel_sample_{}.wav".format(step, sample_num)) - audio.save_wav(wav, str(wav_fpath), sr=hparams.sample_rate) - - # save real and predicted mel-spectrogram plot to disk (control purposes) - spec_fpath = plot_dir.joinpath("step-{}-mel-spectrogram_sample_{}.png".format(step, sample_num)) - title_str = "{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss) - plot_spectrogram(mel_prediction, str(spec_fpath), title=title_str, - target_spectrogram=target_spectrogram, - max_len=target_spectrogram.size // hparams.num_mels) - print("Input at step {}: {}".format(step, sequence_to_text(input_seq))) diff --git a/spaces/Kreaols/ChuanhuChatGPT/assets/custom.js b/spaces/Kreaols/ChuanhuChatGPT/assets/custom.js deleted file mode 100644 index f013209931218fd054979e290706f1945de76856..0000000000000000000000000000000000000000 --- 
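
The training loop deleted above builds stop-token targets from per-utterance mel lengths and sums three terms: L1 + MSE on the decoder mels, MSE on the post-net mels, and binary cross-entropy on the stop predictions. A self-contained sketch of that loss, with random tensors standing in for model outputs and a hypothetical batch of frame counts:

    import torch
    import torch.nn.functional as F

    batch, n_mels, frames = 4, 80, 200
    mel_lengths = torch.tensor([200, 150, 180, 90])       # hypothetical per-utterance frame counts

    # Stop target is 0 while the utterance is still running and 1 from its last frame onward.
    stop = torch.ones(batch, frames)
    for j, length in enumerate(mel_lengths):
        stop[j, : int(length) - 1] = 0.0

    mels = torch.rand(batch, n_mels, frames)              # ground-truth mels (stand-in)
    m1_hat = torch.rand(batch, n_mels, frames)            # decoder output (stand-in)
    m2_hat = torch.rand(batch, n_mels, frames)            # post-net output (stand-in)
    stop_pred = torch.rand(batch, frames).clamp(1e-3, 1 - 1e-3)   # sigmoid-like outputs

    m1_loss = F.mse_loss(m1_hat, mels) + F.l1_loss(m1_hat, mels)
    m2_loss = F.mse_loss(m2_hat, mels)
    stop_loss = F.binary_cross_entropy(stop_pred, stop)
    loss = m1_loss + m2_loss + stop_loss
    print(float(loss))
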
a/spaces/Kreaols/ChuanhuChatGPT/assets/custom.js +++ /dev/null @@ -1,502 +0,0 @@ - -// custom javascript here - -const MAX_HISTORY_LENGTH = 32; - -var key_down_history = []; -var currentIndex = -1; -var user_input_ta; - -var gradioContainer = null; -var user_input_ta = null; -var user_input_tb = null; -var userInfoDiv = null; -var appTitleDiv = null; -var chatbot = null; -var chatbotWrap = null; -var apSwitch = null; -var empty_botton = null; -var messageBotDivs = null; -var loginUserForm = null; -var logginUser = null; - -var userLogged = false; -var usernameGotten = false; -var historyLoaded = false; - -var ga = document.getElementsByTagName("gradio-app"); -var targetNode = ga[0]; -var isInIframe = (window.self !== window.top); -var language = navigator.language.slice(0,2); - -var forView_i18n = { - 'zh': "仅供查看", - 'en': "For viewing only", - 'ja': "閲覧専用", - 'fr': "Pour consultation seulement", - 'es': "Solo para visualización", -}; - -// gradio 页面加载好了么??? 我能动你的元素了么?? -function gradioLoaded(mutations) { - for (var i = 0; i < mutations.length; i++) { - if (mutations[i].addedNodes.length) { - loginUserForm = document.querySelector(".gradio-container > .main > .wrap > .panel > .form") - gradioContainer = document.querySelector(".gradio-container"); - user_input_tb = document.getElementById('user_input_tb'); - userInfoDiv = document.getElementById("user_info"); - appTitleDiv = document.getElementById("app_title"); - chatbot = document.querySelector('#chuanhu_chatbot'); - chatbotWrap = document.querySelector('#chuanhu_chatbot > .wrap'); - apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - empty_botton = document.getElementById("empty_btn") - - if (loginUserForm) { - localStorage.setItem("userLogged", true); - userLogged = true; - } - - if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没? - adjustDarkMode(); - } - if (user_input_tb) { // user_input_tb 加载出来了没? - selectHistory(); - } - if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没? - if (!usernameGotten) { - getUserInfo(); - } - setTimeout(showOrHideUserInfo(), 2000); - } - if (chatbot) { // chatbot 加载出来了没? 
- setChatbotHeight(); - } - if (chatbotWrap) { - if (!historyLoaded) { - loadHistoryHtml(); - } - setChatbotScroll(); - } - if (empty_botton) { - emptyHistory(); - } - } - } -} - -function webLocale() { - console.log("webLocale", language); - if (forView_i18n.hasOwnProperty(language)) { - var forView = forView_i18n[language]; - var forViewStyle = document.createElement('style'); - forViewStyle.innerHTML = '.wrap>.history-message>:last-child::after { content: "' + forView + '"!important; }'; - document.head.appendChild(forViewStyle); - // console.log("added forViewStyle", forView); - } -} - -function selectHistory() { - user_input_ta = user_input_tb.querySelector("textarea"); - if (user_input_ta) { - observer.disconnect(); // 停止监听 - // 在 textarea 上监听 keydown 事件 - user_input_ta.addEventListener("keydown", function (event) { - var value = user_input_ta.value.trim(); - // 判断按下的是否为方向键 - if (event.code === 'ArrowUp' || event.code === 'ArrowDown') { - // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作 - if (value && key_down_history.indexOf(value) === -1) - return; - // 对于需要响应的动作,阻止默认行为。 - event.preventDefault(); - var length = key_down_history.length; - if (length === 0) { - currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置 - return; - } - if (currentIndex === -1) { - currentIndex = length; - } - if (event.code === 'ArrowUp' && currentIndex > 0) { - currentIndex--; - user_input_ta.value = key_down_history[currentIndex]; - } else if (event.code === 'ArrowDown' && currentIndex < length - 1) { - currentIndex++; - user_input_ta.value = key_down_history[currentIndex]; - } - user_input_ta.selectionStart = user_input_ta.value.length; - user_input_ta.selectionEnd = user_input_ta.value.length; - const input_event = new InputEvent("input", { bubbles: true, cancelable: true }); - user_input_ta.dispatchEvent(input_event); - } else if (event.code === "Enter") { - if (value) { - currentIndex = -1; - if (key_down_history.indexOf(value) === -1) { - key_down_history.push(value); - if (key_down_history.length > MAX_HISTORY_LENGTH) { - key_down_history.shift(); - } - } - } - } - }); - } -} - -var username = null; -function getUserInfo() { - if (usernameGotten) { - return; - } - userLogged = localStorage.getItem('userLogged'); - if (userLogged) { - username = userInfoDiv.innerText; - if (username) { - if (username.includes("getting user info…")) { - setTimeout(getUserInfo, 500); - return; - } else if (username === " ") { - localStorage.removeItem("username"); - localStorage.removeItem("userLogged") - userLogged = false; - usernameGotten = true; - return; - } else { - username = username.match(/User:\s*(.*)/)[1] || username; - localStorage.setItem("username", username); - usernameGotten = true; - clearHistoryHtml(); - } - } - } -} - -function toggleUserInfoVisibility(shouldHide) { - if (userInfoDiv) { - if (shouldHide) { - userInfoDiv.classList.add("hideK"); - } else { - userInfoDiv.classList.remove("hideK"); - } - } -} -function showOrHideUserInfo() { - var sendBtn = document.getElementById("submit_btn"); - - // Bind mouse/touch events to show/hide user info - appTitleDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - userInfoDiv.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - sendBtn.addEventListener("mouseenter", function () { - toggleUserInfoVisibility(false); - }); - - appTitleDiv.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - userInfoDiv.addEventListener("mouseleave", function () { - 
toggleUserInfoVisibility(true); - }); - sendBtn.addEventListener("mouseleave", function () { - toggleUserInfoVisibility(true); - }); - - appTitleDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - userInfoDiv.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - sendBtn.ontouchstart = function () { - toggleUserInfoVisibility(false); - }; - - appTitleDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - userInfoDiv.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); - }; - sendBtn.ontouchend = function () { - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 3000); // Delay 1 second to hide user info - }; - - // Hide user info after 2 second - setTimeout(function () { - toggleUserInfoVisibility(true); - }, 2000); -} - -function toggleDarkMode(isEnabled) { - if (isEnabled) { - document.body.classList.add("dark"); - document.body.style.setProperty("background-color", "var(--neutral-950)", "important"); - } else { - document.body.classList.remove("dark"); - document.body.style.backgroundColor = ""; - } -} -function adjustDarkMode() { - const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)"); - - // 根据当前颜色模式设置初始状态 - apSwitch.checked = darkModeQuery.matches; - toggleDarkMode(darkModeQuery.matches); - // 监听颜色模式变化 - darkModeQuery.addEventListener("change", (e) => { - apSwitch.checked = e.matches; - toggleDarkMode(e.matches); - }); - // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]'); - apSwitch.addEventListener("change", (e) => { - toggleDarkMode(e.target.checked); - }); -} - -function setChatbotHeight() { - const screenWidth = window.innerWidth; - const statusDisplay = document.querySelector('#status_display'); - const statusDisplayHeight = statusDisplay ? 
statusDisplay.offsetHeight : 0; - const wrap = chatbot.querySelector('.wrap'); - const vh = window.innerHeight * 0.01; - document.documentElement.style.setProperty('--vh', `${vh}px`); - if (isInIframe) { - chatbot.style.height = `700px`; - wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))` - } else { - if (screenWidth <= 320) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else if (screenWidth <= 499) { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } else { - chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`; - wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`; - } - } -} -function setChatbotScroll() { - var scrollHeight = chatbotWrap.scrollHeight; - chatbotWrap.scrollTo(0,scrollHeight) -} -var rangeInputs = null; -var numberInputs = null; -function setSlider() { - rangeInputs = document.querySelectorAll('input[type="range"]'); - numberInputs = document.querySelectorAll('input[type="number"]') - setSliderRange(); - rangeInputs.forEach(rangeInput => { - rangeInput.addEventListener('input', setSliderRange); - }); - numberInputs.forEach(numberInput => { - numberInput.addEventListener('input', setSliderRange); - }) -} -function setSliderRange() { - var range = document.querySelectorAll('input[type="range"]'); - range.forEach(range => { - range.style.backgroundSize = (range.value - range.min) / (range.max - range.min) * 100 + '% 100%'; - }); -} - -function addChuanhuButton(botElement) { - var rawMessage = null; - var mdMessage = null; - rawMessage = botElement.querySelector('.raw-message'); - mdMessage = botElement.querySelector('.md-message'); - if (!rawMessage) { - var buttons = botElement.querySelectorAll('button.chuanhu-btn'); - for (var i = 0; i < buttons.length; i++) { - buttons[i].parentNode.removeChild(buttons[i]); - } - return; - } - var copyButton = null; - var toggleButton = null; - copyButton = botElement.querySelector('button.copy-bot-btn'); - toggleButton = botElement.querySelector('button.toggle-md-btn'); - if (copyButton) copyButton.remove(); - if (toggleButton) toggleButton.remove(); - - // Copy bot button - var copyButton = document.createElement('button'); - copyButton.classList.add('chuanhu-btn'); - copyButton.classList.add('copy-bot-btn'); - copyButton.setAttribute('aria-label', 'Copy'); - copyButton.innerHTML = copyIcon; - copyButton.addEventListener('click', () => { - const textToCopy = rawMessage.innerText; - navigator.clipboard - .writeText(textToCopy) - .then(() => { - copyButton.innerHTML = copiedIcon; - setTimeout(() => { - copyButton.innerHTML = copyIcon; - }, 1500); - }) - .catch(() => { - console.error("copy failed"); - }); - }); - botElement.appendChild(copyButton); - - // Toggle button - var toggleButton = document.createElement('button'); - toggleButton.classList.add('chuanhu-btn'); - toggleButton.classList.add('toggle-md-btn'); - toggleButton.setAttribute('aria-label', 'Toggle'); - var renderMarkdown = mdMessage.classList.contains('hideM'); - toggleButton.innerHTML = renderMarkdown ? 
mdIcon : rawIcon; - toggleButton.addEventListener('click', () => { - renderMarkdown = mdMessage.classList.contains('hideM'); - if (renderMarkdown){ - renderMarkdownText(botElement); - toggleButton.innerHTML=rawIcon; - } else { - removeMarkdownText(botElement); - toggleButton.innerHTML=mdIcon; - } - }); - botElement.insertBefore(toggleButton, copyButton); -} - -function renderMarkdownText(message) { - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.remove('hideM'); - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) rawDiv.classList.add('hideM'); -} -function removeMarkdownText(message) { - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) rawDiv.classList.remove('hideM'); - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.add('hideM'); -} - -let timeoutId; -let isThrottled = false; -var mmutation -// 监听所有元素中 bot message 的变化,为 bot 消息添加复制按钮。 -var mObserver = new MutationObserver(function (mutationsList) { - for (mmutation of mutationsList) { - if (mmutation.type === 'childList') { - for (var node of mmutation.addedNodes) { - if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') { - saveHistoryHtml(); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - } - if (node.tagName === 'INPUT' && node.getAttribute('type') === 'range') { - setSlider(); - } - } - for (var node of mmutation.removedNodes) { - if (node.nodeType === 1 && node.classList.contains('message') && node.getAttribute('data-testid') === 'bot') { - saveHistoryHtml(); - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - } - } - } else if (mmutation.type === 'attributes') { - if (mmutation.target.nodeType === 1 && mmutation.target.classList.contains('message') && mmutation.target.getAttribute('data-testid') === 'bot') { - if (isThrottled) break; // 为了防止重复不断疯狂渲染,加上等待_(:з」∠)_ - isThrottled = true; - clearTimeout(timeoutId); - timeoutId = setTimeout(() => { - isThrottled = false; - document.querySelectorAll('#chuanhu_chatbot>.wrap>.message-wrap .message.bot').forEach(addChuanhuButton); - saveHistoryHtml(); - }, 500); - } - } - } -}); -mObserver.observe(document.documentElement, { attributes: true, childList: true, subtree: true }); - -var loadhistorytime = 0; // for debugging -function saveHistoryHtml() { - var historyHtml = document.querySelector('#chuanhu_chatbot > .wrap'); - localStorage.setItem('chatHistory', historyHtml.innerHTML); - // console.log("History Saved") - historyLoaded = false; -} -function loadHistoryHtml() { - var historyHtml = localStorage.getItem('chatHistory'); - if (!historyHtml) { - historyLoaded = true; - return; // no history, do nothing - } - userLogged = localStorage.getItem('userLogged'); - if (userLogged){ - historyLoaded = true; - return; // logged in, do nothing - } - if (!historyLoaded) { - var tempDiv = document.createElement('div'); - tempDiv.innerHTML = historyHtml; - var buttons = tempDiv.querySelectorAll('button.chuanhu-btn'); - var gradioCopyButtons = tempDiv.querySelectorAll('button.copy_code_button'); - for (var i = 0; i < buttons.length; i++) { - buttons[i].parentNode.removeChild(buttons[i]); - } - for (var i = 0; i < gradioCopyButtons.length; i++) { - gradioCopyButtons[i].parentNode.removeChild(gradioCopyButtons[i]); - } - var fakeHistory = document.createElement('div'); - fakeHistory.classList.add('history-message'); - 
fakeHistory.innerHTML = tempDiv.innerHTML; - webLocale(); - chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild); - // var fakeHistory = document.createElement('div'); - // fakeHistory.classList.add('history-message'); - // fakeHistory.innerHTML = historyHtml; - // chatbotWrap.insertBefore(fakeHistory, chatbotWrap.firstChild); - historyLoaded = true; - console.log("History Loaded"); - loadhistorytime += 1; // for debugging - } else { - historyLoaded = false; - } -} -function clearHistoryHtml() { - localStorage.removeItem("chatHistory"); - historyMessages = chatbotWrap.querySelector('.history-message'); - if (historyMessages) { - chatbotWrap.removeChild(historyMessages); - console.log("History Cleared"); - } -} -function emptyHistory() { - empty_botton.addEventListener("click", function () { - clearHistoryHtml(); - }); -} - -// 监视页面内部 DOM 变动 -var observer = new MutationObserver(function (mutations) { - gradioLoaded(mutations); -}); -observer.observe(targetNode, { childList: true, subtree: true }); - -// 监视页面变化 -window.addEventListener("DOMContentLoaded", function () { - isInIframe = (window.self !== window.top); - historyLoaded = false; -}); -window.addEventListener('resize', setChatbotHeight); -window.addEventListener('scroll', setChatbotHeight); -window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode); - -// button svg code -const copyIcon = ''; -const copiedIcon = ''; -const mdIcon = ''; -const rawIcon = ''; diff --git a/spaces/KyanChen/RSPrompter/mmdet/datasets/api_wrappers/coco_api.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/api_wrappers/coco_api.py deleted file mode 100644 index 40f7f2c9b930de3dadd967db9d131913fc9bf54c..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/datasets/api_wrappers/coco_api.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# This file add snake case alias for coco api - -import warnings -from collections import defaultdict -from typing import List, Optional, Union - -import pycocotools -from pycocotools.coco import COCO as _COCO -from pycocotools.cocoeval import COCOeval as _COCOeval - - -class COCO(_COCO): - """This class is almost the same as official pycocotools package. - - It implements some snake case function aliases. So that the COCO class has - the same interface as LVIS class. - """ - - def __init__(self, annotation_file=None): - if getattr(pycocotools, '__version__', '0') >= '12.0.2': - warnings.warn( - 'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', # noqa: E501 - UserWarning) - super().__init__(annotation_file=annotation_file) - self.img_ann_map = self.imgToAnns - self.cat_img_map = self.catToImgs - - def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None): - return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd) - - def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]): - return self.getCatIds(cat_names, sup_names, cat_ids) - - def get_img_ids(self, img_ids=[], cat_ids=[]): - return self.getImgIds(img_ids, cat_ids) - - def load_anns(self, ids): - return self.loadAnns(ids) - - def load_cats(self, ids): - return self.loadCats(ids) - - def load_imgs(self, ids): - return self.loadImgs(ids) - - -# just for the ease of import -COCOeval = _COCOeval - - -class COCOPanoptic(COCO): - """This wrapper is for loading the panoptic style annotation file. - - The format is shown in the CocoPanopticDataset class. 
- - Args: - annotation_file (str, optional): Path of annotation file. - Defaults to None. - """ - - def __init__(self, annotation_file: Optional[str] = None) -> None: - super(COCOPanoptic, self).__init__(annotation_file) - - def createIndex(self) -> None: - """Create index.""" - # create index - print('creating index...') - # anns stores 'segment_id -> annotation' - anns, cats, imgs = {}, {}, {} - img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) - if 'annotations' in self.dataset: - for ann in self.dataset['annotations']: - for seg_ann in ann['segments_info']: - # to match with instance.json - seg_ann['image_id'] = ann['image_id'] - img_to_anns[ann['image_id']].append(seg_ann) - # segment_id is not unique in coco dataset orz... - # annotations from different images but - # may have same segment_id - if seg_ann['id'] in anns.keys(): - anns[seg_ann['id']].append(seg_ann) - else: - anns[seg_ann['id']] = [seg_ann] - - # filter out annotations from other images - img_to_anns_ = defaultdict(list) - for k, v in img_to_anns.items(): - img_to_anns_[k] = [x for x in v if x['image_id'] == k] - img_to_anns = img_to_anns_ - - if 'images' in self.dataset: - for img_info in self.dataset['images']: - img_info['segm_file'] = img_info['file_name'].replace( - 'jpg', 'png') - imgs[img_info['id']] = img_info - - if 'categories' in self.dataset: - for cat in self.dataset['categories']: - cats[cat['id']] = cat - - if 'annotations' in self.dataset and 'categories' in self.dataset: - for ann in self.dataset['annotations']: - for seg_ann in ann['segments_info']: - cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) - - print('index created!') - - self.anns = anns - self.imgToAnns = img_to_anns - self.catToImgs = cat_to_imgs - self.imgs = imgs - self.cats = cats - - def load_anns(self, - ids: Union[List[int], int] = []) -> Optional[List[dict]]: - """Load anns with the specified ids. - - ``self.anns`` is a list of annotation lists instead of a - list of annotations. - - Args: - ids (Union[List[int], int]): Integer ids specifying anns. - - Returns: - anns (List[dict], optional): Loaded ann objects. - """ - anns = [] - - if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): - # self.anns is a list of annotation lists instead of - # a list of annotations - for id in ids: - anns += self.anns[id] - return anns - elif type(ids) == int: - return self.anns[ids] diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/resnet.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/resnet.py deleted file mode 100644 index 1d6f48f94f286e3c5e3179f752a7b36ea77c0d45..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/resnet.py +++ /dev/null @@ -1,672 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer -from mmengine.model import BaseModule -from torch.nn.modules.batchnorm import _BatchNorm - -from mmdet.registry import MODELS -from ..layers import ResLayer - - -class BasicBlock(BaseModule): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_cfg=None): - super(BasicBlock, self).__init__(init_cfg) - assert dcn is None, 'Not implemented yet.' - assert plugins is None, 'Not implemented yet.' 
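
COCOPanoptic.createIndex above differs from the plain COCO index mainly in that one panoptic annotation carries a list of segments_info, and segment ids may repeat across images. A toy sketch of that grouping step with a hand-written dataset dict (field names follow the panoptic JSON format; the values are made up):

    from collections import defaultdict

    dataset = {
        "annotations": [
            {"image_id": 1, "segments_info": [{"id": 7, "category_id": 1}, {"id": 8, "category_id": 2}]},
            {"image_id": 2, "segments_info": [{"id": 7, "category_id": 1}]},  # id 7 repeats across images
        ]
    }

    anns = defaultdict(list)          # segment_id -> [segment annotations] (ids are not unique)
    img_to_anns = defaultdict(list)   # image_id -> [segment annotations]
    cat_to_imgs = defaultdict(list)   # category_id -> [image_ids]

    for ann in dataset["annotations"]:
        for seg in ann["segments_info"]:
            seg["image_id"] = ann["image_id"]     # copy the image id down to each segment
            anns[seg["id"]].append(seg)
            img_to_anns[ann["image_id"]].append(seg)
            cat_to_imgs[seg["category_id"]].append(ann["image_id"])

    print(len(anns[7]))   # -> 2: the same segment id is used by two different images
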
- - self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) - self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) - - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - 3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - conv_cfg, planes, planes, 3, padding=1, bias=False) - self.add_module(self.norm2_name, norm2) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.norm2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -class Bottleneck(BaseModule): - expansion = 4 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN'), - dcn=None, - plugins=None, - init_cfg=None): - """Bottleneck block for ResNet. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. 
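
Stripped of the mmcv builder indirection, the BasicBlock above is the standard two-convolution residual unit; a plain-PyTorch equivalent (BatchNorm only, no DCN, plugins, or checkpointing) for reference:

    import torch
    import torch.nn as nn

    class PlainBasicBlock(nn.Module):
        """3x3 conv -> BN -> ReLU -> 3x3 conv -> BN, plus an identity shortcut."""
        expansion = 1

        def __init__(self, inplanes, planes, stride=1, downsample=None):
            super().__init__()
            self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
            self.bn2 = nn.BatchNorm2d(planes)
            self.relu = nn.ReLU(inplace=True)
            self.downsample = downsample   # needed when stride or channel count changes

        def forward(self, x):
            identity = x if self.downsample is None else self.downsample(x)
            out = self.relu(self.bn1(self.conv1(x)))
            out = self.bn2(self.conv2(out))
            return self.relu(out + identity)

    block = PlainBasicBlock(64, 64)
    print(block(torch.rand(1, 64, 32, 32)).shape)   # torch.Size([1, 64, 32, 32])
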
- """ - super(Bottleneck, self).__init__(init_cfg) - assert style in ['pytorch', 'caffe'] - assert dcn is None or isinstance(dcn, dict) - assert plugins is None or isinstance(plugins, list) - if plugins is not None: - allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] - assert all(p['position'] in allowed_position for p in plugins) - - self.inplanes = inplanes - self.planes = planes - self.stride = stride - self.dilation = dilation - self.style = style - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.dcn = dcn - self.with_dcn = dcn is not None - self.plugins = plugins - self.with_plugins = plugins is not None - - if self.with_plugins: - # collect plugins for conv1/conv2/conv3 - self.after_conv1_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv1' - ] - self.after_conv2_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv2' - ] - self.after_conv3_plugins = [ - plugin['cfg'] for plugin in plugins - if plugin['position'] == 'after_conv3' - ] - - if self.style == 'pytorch': - self.conv1_stride = 1 - self.conv2_stride = stride - else: - self.conv1_stride = stride - self.conv2_stride = 1 - - self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) - self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - norm_cfg, planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - conv_cfg, - inplanes, - planes, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - if self.with_dcn: - fallback_on_stride = dcn.pop('fallback_on_stride', False) - if not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - conv_cfg, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - dcn, - planes, - planes, - kernel_size=3, - stride=self.conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - conv_cfg, - planes, - planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - - if self.with_plugins: - self.after_conv1_plugin_names = self.make_block_plugins( - planes, self.after_conv1_plugins) - self.after_conv2_plugin_names = self.make_block_plugins( - planes, self.after_conv2_plugins) - self.after_conv3_plugin_names = self.make_block_plugins( - planes * self.expansion, self.after_conv3_plugins) - - def make_block_plugins(self, in_channels, plugins): - """make plugins for block. - - Args: - in_channels (int): Input channels of plugin. - plugins (list[dict]): List of plugins cfg to build. - - Returns: - list[str]: List of the names of plugin. 
- """ - assert isinstance(plugins, list) - plugin_names = [] - for plugin in plugins: - plugin = plugin.copy() - name, layer = build_plugin_layer( - plugin, - in_channels=in_channels, - postfix=plugin.pop('postfix', '')) - assert not hasattr(self, name), f'duplicate plugin {name}' - self.add_module(name, layer) - plugin_names.append(name) - return plugin_names - - def forward_plugin(self, x, plugin_names): - out = x - for name in plugin_names: - out = getattr(self, name)(out) - return out - - @property - def norm1(self): - """nn.Module: normalization layer after the first convolution layer""" - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: normalization layer after the second convolution layer""" - return getattr(self, self.norm2_name) - - @property - def norm3(self): - """nn.Module: normalization layer after the third convolution layer""" - return getattr(self, self.norm3_name) - - def forward(self, x): - """Forward function.""" - - def _inner_forward(x): - identity = x - out = self.conv1(x) - out = self.norm1(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv1_plugin_names) - - out = self.conv2(out) - out = self.norm2(out) - out = self.relu(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv2_plugin_names) - - out = self.conv3(out) - out = self.norm3(out) - - if self.with_plugins: - out = self.forward_plugin(out, self.after_conv3_plugin_names) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -@MODELS.register_module() -class ResNet(BaseModule): - """ResNet backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - stem_channels (int | None): Number of stem channels. If not specified, - it will be the same as `base_channels`. Default: None. - base_channels (int): Number of base channels of res layer. Default: 64. - in_channels (int): Number of input image channels. Default: 3. - num_stages (int): Resnet stages. Default: 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv - avg_down (bool): Use AvgPool instead of stride conv when - downsampling in the bottleneck. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - norm_cfg (dict): Dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - plugins (list[dict]): List of plugins for stages, each dict contains: - - - cfg (dict, required): Cfg dict to build plugin. - - position (str, required): Position inside block to insert - plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. - - stages (tuple[bool], optional): Stages to apply plugin, length - should be same as 'num_stages'. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. 
- zero_init_residual (bool): Whether to use zero init for last norm layer - in resblocks to let them behave as identity. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Example: - >>> from mmdet.models import ResNet - >>> import torch - >>> self = ResNet(depth=18) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - (1, 64, 8, 8) - (1, 128, 4, 4) - (1, 256, 2, 2) - (1, 512, 1, 1) - """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - in_channels=3, - stem_channels=None, - base_channels=64, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - deep_stem=False, - avg_down=False, - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - dcn=None, - stage_with_dcn=(False, False, False, False), - plugins=None, - with_cp=False, - zero_init_residual=True, - pretrained=None, - init_cfg=None): - super(ResNet, self).__init__(init_cfg) - self.zero_init_residual = zero_init_residual - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - - block_init_cfg = None - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - if init_cfg is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict( - type='Constant', - val=1, - layer=['_BatchNorm', 'GroupNorm']) - ] - block = self.arch_settings[depth][0] - if self.zero_init_residual: - if block is BasicBlock: - block_init_cfg = dict( - type='Constant', - val=0, - override=dict(name='norm2')) - elif block is Bottleneck: - block_init_cfg = dict( - type='Constant', - val=0, - override=dict(name='norm3')) - else: - raise TypeError('pretrained must be a str or None') - - self.depth = depth - if stem_channels is None: - stem_channels = base_channels - self.stem_channels = stem_channels - self.base_channels = base_channels - self.num_stages = num_stages - assert num_stages >= 1 and num_stages <= 4 - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == num_stages - self.out_indices = out_indices - assert max(out_indices) < num_stages - self.style = style - self.deep_stem = deep_stem - self.avg_down = avg_down - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.with_cp = with_cp - self.norm_eval = norm_eval - self.dcn = dcn - self.stage_with_dcn = stage_with_dcn - if dcn is not None: - assert len(stage_with_dcn) == num_stages - self.plugins = plugins - self.block, stage_blocks = self.arch_settings[depth] - self.stage_blocks = stage_blocks[:num_stages] - self.inplanes = stem_channels - - self._make_stem_layer(in_channels, stem_channels) - - self.res_layers = [] - for i, num_blocks in enumerate(self.stage_blocks): - stride = strides[i] - dilation = dilations[i] - dcn = self.dcn if self.stage_with_dcn[i] else None - if plugins is not None: - 
stage_plugins = self.make_stage_plugins(plugins, i) - else: - stage_plugins = None - planes = base_channels * 2**i - res_layer = self.make_res_layer( - block=self.block, - inplanes=self.inplanes, - planes=planes, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - avg_down=self.avg_down, - with_cp=with_cp, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - dcn=dcn, - plugins=stage_plugins, - init_cfg=block_init_cfg) - self.inplanes = planes * self.block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self._freeze_stages() - - self.feat_dim = self.block.expansion * base_channels * 2**( - len(self.stage_blocks) - 1) - - def make_stage_plugins(self, plugins, stage_idx): - """Make plugins for ResNet ``stage_idx`` th stage. - - Currently we support to insert ``context_block``, - ``empirical_attention_block``, ``nonlocal_block`` into the backbone - like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of - Bottleneck. - - An example of plugins format could be: - - Examples: - >>> plugins=[ - ... dict(cfg=dict(type='xxx', arg1='xxx'), - ... stages=(False, True, True, True), - ... position='after_conv2'), - ... dict(cfg=dict(type='yyy'), - ... stages=(True, True, True, True), - ... position='after_conv3'), - ... dict(cfg=dict(type='zzz', postfix='1'), - ... stages=(True, True, True, True), - ... position='after_conv3'), - ... dict(cfg=dict(type='zzz', postfix='2'), - ... stages=(True, True, True, True), - ... position='after_conv3') - ... ] - >>> self = ResNet(depth=18) - >>> stage_plugins = self.make_stage_plugins(plugins, 0) - >>> assert len(stage_plugins) == 3 - - Suppose ``stage_idx=0``, the structure of blocks in the stage would be: - - .. code-block:: none - - conv1-> conv2->conv3->yyy->zzz1->zzz2 - - Suppose 'stage_idx=1', the structure of blocks in the stage would be: - - .. code-block:: none - - conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 - - If stages is missing, the plugin would be applied to all stages. - - Args: - plugins (list[dict]): List of plugins cfg to build. The postfix is - required if multiple same type plugins are inserted. 
- stage_idx (int): Index of stage to build - - Returns: - list[dict]: Plugins for current stage - """ - stage_plugins = [] - for plugin in plugins: - plugin = plugin.copy() - stages = plugin.pop('stages', None) - assert stages is None or len(stages) == self.num_stages - # whether to insert plugin into current stage - if stages is None or stages[stage_idx]: - stage_plugins.append(plugin) - - return stage_plugins - - def make_res_layer(self, **kwargs): - """Pack all blocks in a stage into a ``ResLayer``.""" - return ResLayer(**kwargs) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - def _make_stem_layer(self, in_channels, stem_channels): - if self.deep_stem: - self.stem = nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels // 2, - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels // 2)[1], - nn.ReLU(inplace=True), - build_conv_layer( - self.conv_cfg, - stem_channels // 2, - stem_channels // 2, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels // 2)[1], - nn.ReLU(inplace=True), - build_conv_layer( - self.conv_cfg, - stem_channels // 2, - stem_channels, - kernel_size=3, - stride=1, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, stem_channels)[1], - nn.ReLU(inplace=True)) - else: - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - stem_channels, - kernel_size=7, - stride=2, - padding=3, - bias=False) - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, stem_channels, postfix=1) - self.add_module(self.norm1_name, norm1) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - if self.deep_stem: - self.stem.eval() - for param in self.stem.parameters(): - param.requires_grad = False - else: - self.norm1.eval() - for m in [self.conv1, self.norm1]: - for param in m.parameters(): - param.requires_grad = False - - for i in range(1, self.frozen_stages + 1): - m = getattr(self, f'layer{i}') - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def forward(self, x): - """Forward function.""" - if self.deep_stem: - x = self.stem(x) - else: - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - return tuple(outs) - - def train(self, mode=True): - """Convert the model into training mode while keep normalization layer - freezed.""" - super(ResNet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() - - -@MODELS.register_module() -class ResNetV1d(ResNet): - r"""ResNetV1d variant described in `Bag of Tricks - `_. - - Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in - the input stem with three 3x3 convs. And in the downsampling block, a 2x2 - avg_pool with stride 2 is added before conv, whose stride is changed to 1. 
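
The make_stage_plugins docstring above boils down to a per-stage filter: a plugin dict is kept for stage i when its stages tuple is absent or truthy at index i. A standalone sketch of just that selection logic, reusing the plugin format from the docstring example:

    def make_stage_plugins(plugins, stage_idx, num_stages=4):
        """Return the plugin cfgs that apply to the given stage index."""
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop("stages", None)      # None means "apply to all stages"
            assert stages is None or len(stages) == num_stages
            if stages is None or stages[stage_idx]:
                stage_plugins.append(plugin)
        return stage_plugins

    plugins = [
        dict(cfg=dict(type="xxx", arg1="xxx"), stages=(False, True, True, True), position="after_conv2"),
        dict(cfg=dict(type="yyy"), stages=(True, True, True, True), position="after_conv3"),
        dict(cfg=dict(type="zzz", postfix="1"), stages=(True, True, True, True), position="after_conv3"),
    ]

    print(len(make_stage_plugins(plugins, 0)))   # -> 2: the 'xxx' plugin skips stage 0
    print(len(make_stage_plugins(plugins, 1)))   # -> 3
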
- """ - - def __init__(self, **kwargs): - super(ResNetV1d, self).__init__( - deep_stem=True, avg_down=True, **kwargs) diff --git a/spaces/LuxOAI/ChatGpt-Web/app/bing-chat/index.js b/spaces/LuxOAI/ChatGpt-Web/app/bing-chat/index.js deleted file mode 100644 index 1904031c5751b5701407916ab300919a9e61ac5a..0000000000000000000000000000000000000000 --- a/spaces/LuxOAI/ChatGpt-Web/app/bing-chat/index.js +++ /dev/null @@ -1,284 +0,0 @@ -// src/bing-chat.ts -import crypto from "node:crypto"; -import WebSocket from "ws"; - -// src/fetch.ts -var fetch = globalThis.fetch; -if (typeof fetch !== "function") { - throw new Error("Invalid environment: global fetch not defined"); -} - -// src/bing-chat.ts -var terminalChar = ""; -var BingChat = class { - constructor(opts) { - const { cookie, debug = false } = opts; - this._cookie = cookie; - this._debug = !!debug; - if (!this._cookie) { - throw new Error("Bing cookie is required"); - } - } - /** - * Sends a message to Bing Chat, waits for the response to resolve, and returns - * the response. - * - * If you want to receive a stream of partial responses, use `opts.onProgress`. - * - * @param message - The prompt message to send - * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID) - * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated - * - * @returns The response from Bing Chat - */ - async sendMessage(text, opts = {}) { - const { - invocationId = "1", - onProgress, - locale = "en-US", - market = "en-US", - region = "US", - location, - messageType = "Chat", - variant = "Balanced", - } = opts; - let { conversationId, clientId, conversationSignature } = opts; - const isStartOfSession = !( - conversationId && - clientId && - conversationSignature - ); - if (isStartOfSession) { - const conversation = await this.createConversation(); - conversationId = conversation.conversationId; - clientId = conversation.clientId; - conversationSignature = conversation.conversationSignature; - } - const result = { - author: "bot", - id: crypto.randomUUID(), - conversationId, - clientId, - conversationSignature, - invocationId: `${parseInt(invocationId, 10) + 1}`, - text: "", - }; - const responseP = new Promise(async (resolve, reject) => { - const chatWebsocketUrl = "wss://sydney.bing.com/sydney/ChatHub"; - const ws = new WebSocket(chatWebsocketUrl, { - perMessageDeflate: false, - headers: { - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - pragma: "no-cache", - }, - }); - let isFulfilled = false; - function cleanup() { - ws.close(); - ws.removeAllListeners(); - } - ws.on("error", (error) => { - console.warn("WebSocket error:", error); - cleanup(); - if (!isFulfilled) { - isFulfilled = true; - reject(new Error(`WebSocket error: ${error.toString()}`)); - } - }); - ws.on("close", () => {}); - ws.on("open", () => { - ws.send(`{"protocol":"json","version":1}${terminalChar}`); - }); - let stage = 0; - ws.on("message", (data) => { - var _a, _b; - const objects = data.toString().split(terminalChar); - const messages = objects - .map((object) => { - try { - return JSON.parse(object); - } catch (error) { - return object; - } - }) - .filter(Boolean); - if (!messages.length) { - return; - } - if (stage === 0) { - ws.send(`{"type":6}${terminalChar}`); - const traceId = crypto.randomBytes(16).toString("hex"); - const locationStr = location - ? 
`lat:${location.lat};long:${location.lng};re=${ - location.re || "1000m" - };` - : void 0; - const optionsSets = [ - "nlu_direct_response_filter", - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "trffovrd", - "h3toppfp3", - "forcerep", - "cpcttl1d", - "dv3sugg", - ]; - if (variant == "Balanced") { - optionsSets.push("galileo"); - optionsSets.push("glprompt"); - } else if (variant == "Creative") { - optionsSets.push("h3imaginative"); - optionsSets.push("gencontentv3"); - } else if (variant == "Precise") { - optionsSets.push("h3precise"); - } - const params = { - arguments: [ - { - source: "cib", - optionsSets, - allowedMessageTypes: [ - "Chat", - "InternalSearchQuery", - "InternalSearchResult", - "InternalLoaderMessage", - "RenderCardRequest", - "AdsQuery", - "SemanticSerp", - ], - sliceIds: [], - traceId, - isStartOfSession, - message: { - locale, - market, - region, - location: locationStr, - author: "user", - inputMethod: "Keyboard", - messageType, - text, - }, - conversationSignature, - participant: { id: clientId }, - conversationId, - }, - ], - invocationId, - target: "chat", - type: 4, - }; - if (this._debug) { - console.log(chatWebsocketUrl, JSON.stringify(params, null, 2)); - } - ws.send(`${JSON.stringify(params)}${terminalChar}`); - ++stage; - return; - } - for (const message of messages) { - if (message.type === 1) { - const update = message; - const msg = - (_a = update.arguments[0].messages) == null ? void 0 : _a[0]; - if (!msg) continue; - if (!msg.messageType) { - result.author = msg.author; - result.text = msg.text; - result.detail = msg; - onProgress == null ? void 0 : onProgress(result); - } - } else if (message.type === 2) { - const response = message; - if (this._debug) { - console.log("RESPONSE", JSON.stringify(response, null, 2)); - } - const validMessages = - (_b = response.item.messages) == null - ? void 0 - : _b.filter((m) => !m.messageType); - const lastMessage = - validMessages == null - ? void 0 - : validMessages[ - (validMessages == null ? void 0 : validMessages.length) - 1 - ]; - if (lastMessage) { - result.conversationId = response.item.conversationId; - result.conversationExpiryTime = - response.item.conversationExpiryTime; - result.author = lastMessage.author; - result.text = lastMessage.text; - result.detail = lastMessage; - if (!isFulfilled) { - isFulfilled = true; - resolve(result); - } - } - } else if (message.type === 3) { - if (!isFulfilled) { - isFulfilled = true; - resolve(result); - } - cleanup(); - return; - } else { - } - } - }); - }); - return responseP; - } - async createConversation() { - const requestId = crypto.randomUUID(); - const cookie = this._cookie.includes(";") - ? 
this._cookie - : `_U=${this._cookie}`; - return fetch("https://www.bing.com/turing/conversation/create", { - headers: { - accept: "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": - '"Not_A Brand";v="99", "Microsoft Edge";v="109", "Chromium";v="109"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - "sec-ch-ua-full-version-list": - '"Not_A Brand";v="99.0.0.0", "Microsoft Edge";v="109.0.1518.78", "Chromium";v="109.0.5414.120"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"macOS"', - "sec-ch-ua-platform-version": '"12.6.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "x-edge-shopping-flag": "1", - "x-ms-client-request-id": requestId, - "x-ms-useragent": - "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/MacIntel", - "x-forwarded-for": "1.1.1.1", - cookie, - }, - referrer: "https://www.bing.com/search", - referrerPolicy: "origin-when-cross-origin", - body: null, - method: "GET", - mode: "cors", - credentials: "include", - }).then((res) => { - if (res.ok) { - return res.json(); - } else { - throw new Error( - `unexpected HTTP error createConversation ${res.status}: ${res.statusText}`, - ); - } - }); - } -}; -export { BingChat }; -//# sourceMappingURL=index.js.map diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/comm.py b/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/comm.py deleted file mode 100644 index 922f8c4a3adaa9b32fdcaef09583be03b0d7eb2b..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/comm.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' - self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. 
- - During the forward pass, the master device invokes `run_master`; all messages from slave devices will be collected - and passed to a registered callback. - - After receiving the messages, the master device should gather the information and determine the message to be passed - back to each slave device. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def __getstate__(self): - return {'master_callback': self._master_callback} - - def __setstate__(self, state): - self.__init__(state['master_callback']) - - def register_slave(self, identifier): - """ - Register a slave device. - - Args: - identifier: an identifier, usually the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' - self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages are first collected from each device (including the master device), and then - a callback is invoked to compute the message to be sent back to each device - (including the master device). - - Args: - master_msg: the message that the master wants to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belong to the master.' 
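        # The loop below hands each non-master result back through that slave's FutureResult; run_master then waits for every slave to acknowledge (each puts True on the queue after fetching its result) before returning the master's own result.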
- - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/MINAMONI/anime-remove-background/app.py b/spaces/MINAMONI/anime-remove-background/app.py deleted file mode 100644 index 230a0d5f8a3da6ab18ecb8db1cd90016a489b96a..0000000000000000000000000000000000000000 --- a/spaces/MINAMONI/anime-remove-background/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import gradio as gr -import huggingface_hub -import onnxruntime as rt -import numpy as np -import cv2 - - -def get_mask(img, s=1024): - img = (img / 255).astype(np.float32) - h, w = h0, w0 = img.shape[:-1] - h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s) - ph, pw = s - h, s - w - img_input = np.zeros([s, s, 3], dtype=np.float32) - img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h)) - img_input = np.transpose(img_input, (2, 0, 1)) - img_input = img_input[np.newaxis, :] - mask = rmbg_model.run(None, {'img': img_input})[0][0] - mask = np.transpose(mask, (1, 2, 0)) - mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] - mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis] - return mask - - -def rmbg_fn(img): - mask = get_mask(img) - img = (mask * img + 255 * (1 - mask)).astype(np.uint8) - mask = (mask * 255).astype(np.uint8) - img = np.concatenate([img, mask], axis=2, dtype=np.uint8) - mask = mask.repeat(3, axis=2) - return mask, img - - -if __name__ == "__main__": - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] - model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx") - rmbg_model = rt.InferenceSession(model_path, providers=providers) - app = gr.Blocks() - with app: - gr.Markdown("# Anime Remove Background\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n" - "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)") - with gr.Row(): - with gr.Column(): - input_img = gr.Image(label="input image") - examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)] - examples = gr.Dataset(components=[input_img], samples=examples_data) - run_btn = gr.Button(variant="primary") - output_mask = gr.Image(label="mask") - output_img = gr.Image(label="result", image_mode="RGBA") - examples.click(lambda x: x[0], [examples], [input_img]) - run_btn.click(rmbg_fn, [input_img], [output_mask, output_img]) - app.launch() diff --git a/spaces/ML701G7/taim-gan/app.py b/spaces/ML701G7/taim-gan/app.py deleted file mode 100644 index c5064769be64a805e314f71e951b83aaca82f17d..0000000000000000000000000000000000000000 --- a/spaces/ML701G7/taim-gan/app.py +++ /dev/null @@ -1,122 +0,0 @@ -import numpy as np # this should come first to mitigate mlk-service bug -from src.models.utils import get_image_arr, load_model -from src.data import TAIMGANTokenizer -from torchvision import transforms -from src.config import config_dict -from pathlib import Path -from enum import IntEnum, auto -from PIL import Image -import gradio as gr -import torch -from src.models.modules import ( - VGGEncoder, - InceptionEncoder, - TextEncoder, - Generator -) - -########## -# PARAMS # -########## - -IMG_CHANS = 3 # RGB channels for image -IMG_HW = 256 # height and width of images -HIDDEN_DIM = 128 # hidden dimensions of lstm cell in one direction -C = 2 * HIDDEN_DIM # length of embeddings - -Ng = config_dict["Ng"] -cond_dim = 
config_dict["condition_dim"] -z_dim = config_dict["noise_dim"] - - -############### -# LOAD MODELS # -############### - -models = { - "COCO": { - "dir": "weights/coco" - }, - "Bird": { - "dir": "weights/bird" - }, - "UTKFace": { - "dir": "weights/utkface" - } -} - -for model_name in models: - # create tokenizer - models[model_name]["tokenizer"] = TAIMGANTokenizer(captions_path=f"{models[model_name]['dir']}/captions.pickle") - vocab_size = len(models[model_name]["tokenizer"].word_to_ix) - # instantiate models - models[model_name]["generator"] = Generator(Ng=Ng, D=C, conditioning_dim=cond_dim, noise_dim=z_dim).eval() - models[model_name]["lstm"] = TextEncoder(vocab_size=vocab_size, emb_dim=C, hidden_dim=HIDDEN_DIM).eval() - models[model_name]["vgg"] = VGGEncoder().eval() - models[model_name]["inception"] = InceptionEncoder(D=C).eval() - # load models - load_model( - generator=models[model_name]["generator"], - discriminator=None, - image_encoder=models[model_name]["inception"], - text_encoder=models[model_name]["lstm"], - output_dir=Path(models[model_name]["dir"]), - device=torch.device("cpu") - ) - - -def change_image_with_text(image: Image, text: str, model_name: str) -> Image: - """ - Create an image modified by text from the original image - and save it with _modified postfix - - :param gr.Image image: Path to the image - :param str text: Desired caption - """ - global models - tokenizer = models[model_name]["tokenizer"] - G = models[model_name]["generator"] - lstm = models[model_name]["lstm"] - inception = models[model_name]["inception"] - vgg = models[model_name]["vgg"] - # generate some noise - noise = torch.rand(z_dim).unsqueeze(0) - # transform input text and get masks with embeddings - tokens = torch.tensor(tokenizer.encode(text)).unsqueeze(0) - mask = (tokens == tokenizer.pad_token_id) - word_embs, sent_embs = lstm(tokens) - # open the image and transform it to the tensor - image = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((IMG_HW, IMG_HW)), - transforms.Normalize( - mean=(0.5, 0.5, 0.5), - std=(0.5, 0.5, 0.5) - ) - ])(image).unsqueeze(0) - # obtain visual features of the image - vgg_features = vgg(image) - local_features, global_features = inception(image) - # generate new image from the old one - fake_image, _, _ = G(noise, sent_embs, word_embs, global_features, - local_features, vgg_features, mask) - # denormalize the image - fake_image = Image.fromarray(get_image_arr(fake_image)[0]) - # return image in gradio format - return fake_image - - -########## -# GRADIO # -########## -demo = gr.Interface( - fn=change_image_with_text, - inputs=[gr.Image(type="pil"), "text", gr.inputs.Dropdown(list(models.keys()))], - outputs=gr.Image(type="pil"), - examples=[ - ["src/data/stubs/bird.jpg", "black bird with blue wings", "Bird"], - ["src/data/stubs/lady.jpg", "lady with blue eyes", "UTKFace"], - ["src/data/stubs/bird.jpg", "white bird with black wings", "Bird"] - ] -) -demo.launch(debug=True) diff --git a/spaces/MakiAi/SquareMotion/app.py b/spaces/MakiAi/SquareMotion/app.py deleted file mode 100644 index 97792e282e465a57279a10387fcb63b824b39c43..0000000000000000000000000000000000000000 --- a/spaces/MakiAi/SquareMotion/app.py +++ /dev/null @@ -1,238 +0,0 @@ -# main.py -import streamlit as st -from PIL import Image -import io -import zipfile -import random -import pandas as pd -import os -from image_processor import process_image, create_video_from_image, concatenate_videos, list_files_with_permissions # 画像処理関数を別ファイルからインポート -from moviepy.editor import * -import 
tempfile - -import numpy as np -import librosa -import librosa.display -import matplotlib.pyplot as plt -import seaborn as sns - -def app_description(): - """Display the description of the app.""" - st.title('SquareMotion v2.1.0') - st.markdown(""" - ### アプリの概要 - - このアプリは、ユーザーがアップロードした画像を基に動画を生成するものです。 - ユーザーは複数の画像をアップロードし、それぞれの画像に基づく動画の再生時間をランダムに設定することができます。 - 生成されたこれらの動画は一つの連続した動画として結合することも可能です。 - - #### 使い方 - 1. 正方形の画像をアップロードします。 - 2. リサイズ後のサイズとブラーの半径などのパラメータをスライダーで指定します。 - 3. 処理された画像/動画を確認し、ダウンロードボタンから保存できます。 - - #### パラメータ - """) - - -def display_processed_images(uploaded_files, target_size, blur_radius, aspect_ratio_w, aspect_ratio_h): - """画像を処理して表示し、ダウンロードボタンを提供します。""" - processed_images = [] - processed_images_np = [] - uploaded_file_names = [] - for uploaded_file in uploaded_files: - # 画像を読み込み、処理します - img = Image.open(uploaded_file) - processed_img = process_image(img, (target_size, target_size), blur_radius, aspect_ratio_w, aspect_ratio_h) - # st.image(processed_img, caption=f'Processed {uploaded_file.name}', use_column_width=True) - st.image(processed_img, caption=f'Processed {uploaded_file.name}', width=300) - - # 画像のバイトデータを取得 - img_byte_arr = io.BytesIO() - processed_img.save(img_byte_arr, format='PNG') - processed_images.append((f'processed_{uploaded_file.name}', img_byte_arr.getvalue())) - processed_images_np.append(processed_img) - # 個別の画像ダウンロードボタン - st.download_button( - label="処理済み画像をダウンロード", - data=img_byte_arr.getvalue(), - file_name=f'processed_{uploaded_file.name}', - mime="image/png" - ) - - uploaded_file_names.append(uploaded_file.name) - - # 一括ダウンロードのためのZIP作成 - if processed_images: - zip_buffer = io.BytesIO() - with zipfile.ZipFile(zip_buffer, 'a', zipfile.ZIP_DEFLATED) as zf: - for file_name, img_data in processed_images: - zf.writestr(file_name, img_data) - zip_buffer.seek(0) - - st.download_button( - label="すべての処理済み画像をダウンロード (ZIP)", - data=zip_buffer.getvalue(), - file_name="processed_images.zip", - mime="application/zip" - ) - - return processed_images_np, uploaded_file_names - -def display_processed_videos(processed_images, uploaded_file_names, min_duration=1, max_duration=3): - """ - 処理済みの画像を使用して動画を作成し、ダウンロードボタンとともに動画を描画します。 - min_duration: 動画の最小期間(秒) - max_duration: 動画の最大期間(秒) - """ - - video_files = [] - for processed_img, uploaded_name in zip(processed_images, uploaded_file_names): - - # 各処理画像に対して動画を作成 - video_duration = random.randint(min_duration, max_duration) - video_bytes, mov_name = create_video_from_image(processed_img, video_duration, uploaded_name) - video_files.append(mov_name) - # video_bytes.seek(0) - # 動画を描画 - st.video(video_bytes, format='video/mp4', start_time=0) - - # 動画のダウンロードボタン - st.download_button( - label="処理済み動画をダウンロード", - data=video_bytes, - file_name=f'processed_{uploaded_name}.mp4', - mime="video/mp4" - ) - - - return video_files - -def extract_audio_from_file(uploaded_file): - """アップロードされたファイルから音楽を抽出する関数""" - if not uploaded_file: - return None, None - - # 一時ファイルとして保存。拡張子は.wavとする。 - tfile = tempfile.NamedTemporaryFile(delete=False, prefix="audio_extract_", suffix=".wav") - tfile.write(uploaded_file.getvalue()) - - audio = None - if uploaded_file.type == 'video/mp4': - music_clip = VideoFileClip(tfile.name) - audio = music_clip.audio - else: # 'audio/wav' or 'audio/mpeg' (for MP3) - audio = AudioFileClip(tfile.name) - - return audio, tfile.name - -def save_uploaded_file(uploaded_file): - """ - StreamlitのUploadedFileを一時ファイルとして保存し、そのファイルパスを返します。 - """ - tfile = tempfile.NamedTemporaryFile(delete=False) - 
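    # Write the uploaded bytes into the temporary file so callers (e.g. librosa.load in visualize_audio_waveform) can read the audio from a real file path instead of an in-memory buffer.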
tfile.write(uploaded_file.read()) - - return tfile.name - -def visualize_audio_waveform(uploaded_file): - # 一時ファイルとして保存 - temp_path = save_uploaded_file(uploaded_file) - - # オリジナルのサンプリングレートで音楽ファイルをロード - y_original, sr_original = librosa.load(temp_path, sr=None) - - # 100分の1のサンプリングレートでロード - y_resampled, sr_resampled = librosa.load(temp_path, sr=sr_original // 1000) - - # 時刻tを生成 - t = [i/sr_resampled for i in range(len(y_resampled))] - - # pandasのデータフレームを作成 - chart_data = pd.DataFrame({ - 'Time': t, - 'Waveform': y_resampled - }) - - # Streamlitのline_chart関数で波形を表示 - st.line_chart(chart_data, x='Time', y='Waveform') - - # 一時ファイルの削除 - # os.remove(temp_path) - -def main(): - app_description() - - # assets\audioフォルダ内のwavファイルをリストアップ - audio_files = [f for f in os.listdir('assets/audio') if f.endswith('.wav')] - audio_files.append("アップロードした音声ファイルを使用する") - - # 音楽として使用するファイルをアップロード - uploaded_music_file = st.file_uploader('音楽として使用するファイル(MP4/WAV/MP3)をアップロードしてください', type=['mp4', 'wav', 'mp3']) - - # ランダムにデフォルトの音声ファイルを選ぶ - default_audio_file = random.choice(audio_files) - - # Streamlitのselectboxでwavファイルまたはアップロードされたファイルを選択 - selected_audio_file = st.selectbox("音楽として使用するwavファイルを選択してください", audio_files, index=audio_files.index(default_audio_file)) - - if selected_audio_file == "アップロードした音声ファイルを使用する": - if uploaded_music_file: - # 音楽を抽出 - audio, audio_path = extract_audio_from_file(uploaded_music_file) - else: - st.warning("音声ファイルをアップロードしてください") - audio, audio_path = None, None - else: - audio_path = os.path.join('assets/audio', selected_audio_file) - audio = AudioFileClip(audio_path) - - uploaded_files = st.file_uploader('画像をアップロードしてください', type=['png', 'jpg', 'jpeg'], accept_multiple_files=True) - - target_size = st.slider('リサイズ後のサイズ', min_value=100, max_value=3000, value=2000) - blur_radius = st.slider('ブラーの半径', min_value=0, max_value=50, value=10) - aspect_ratio_w = st.slider('アスペクト比(W)', min_value=1, max_value=20, value=9) - aspect_ratio_h = st.slider('アスペクト比(H)', min_value=1, max_value=20, value=16) - min_duration = st.slider('動画時間の最小値', min_value=1, max_value=7, value=1) - max_duration = st.slider('動画時間の最大値', min_value=1, max_value=7, value=3) - - st.markdown("### Music visualize") - if uploaded_music_file: - visualize_audio_waveform(uploaded_music_file) - - st.markdown("### Vertical transformation") - processed_images, uploaded_file_names = display_processed_images(uploaded_files, target_size, blur_radius, aspect_ratio_w, aspect_ratio_h) - - - st.markdown("### Motion transformation") - video_files = display_processed_videos(processed_images, uploaded_file_names, min_duration, max_duration) - - st.markdown("### Combined video") - - - # 動画を結合 - if(video_files): - - # video_filesをpandas DataFrameに変換 - df_video_files = pd.DataFrame(video_files, columns=["Video Files"]) - - # Streamlitにテーブルとして表示 - st.table(df_video_files) - - combined_video = concatenate_videos(video_files, audio_path, audio=audio) - - # Streamlitで表示 - st.video(combined_video, format="video/mp4") - - # ダウンロードボタン - st.download_button( - label="結合された動画をダウンロード", - data=combined_video, - file_name="combined_video.mp4", - mime="video/mp4" - ) - - -if __name__ == "__main__": - main() - # streamlit run app.py \ No newline at end of file diff --git a/spaces/Manjushri/MusicGen/tests/data/__init__.py b/spaces/Manjushri/MusicGen/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/Manjushri/MusicGen/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# 
Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/Mayanand/Automatic-Number-Plate-Recognition/app.py b/spaces/Mayanand/Automatic-Number-Plate-Recognition/app.py deleted file mode 100644 index 65aab8820a1a4b94781b107f3623c71abde839fe..0000000000000000000000000000000000000000 --- a/spaces/Mayanand/Automatic-Number-Plate-Recognition/app.py +++ /dev/null @@ -1,30 +0,0 @@ -import cv2 -import gradio as gr -from main import read_number_plate, add_text, add_rect -from detection import resize - -def predict_fn(im): - im = resize(im) - - boxes, texts = read_number_plate(im) - print(texts) - for box, text in zip(boxes, texts): - im = add_rect(im, *box) - im = add_text(im, text, box) - return im - - -demo = gr.Interface( - fn=predict_fn, - inputs=[gr.inputs.Image(label="Input Image")], - outputs=[gr.inputs.Image(label="Prediction")], - title="Automatic Number Plate Recognition Demo", - description="Gradio App for Automatic Number Plate Recognition", - examples=[ - ["example/car1.jpg"], - ["example/car2.jpg"], - ["example/car3.jpg"] - ], -) - -demo.launch() \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/hook.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/hook.py deleted file mode 100644 index b8855c107727ecf85b917c890fc8b7f6359238a4..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/runner/hooks/hook.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from annotator.uniformer.mmcv.utils import Registry, is_method_overridden - -HOOKS = Registry('hook') - - -class Hook: - stages = ('before_run', 'before_train_epoch', 'before_train_iter', - 'after_train_iter', 'after_train_epoch', 'before_val_epoch', - 'before_val_iter', 'after_val_iter', 'after_val_epoch', - 'after_run') - - def before_run(self, runner): - pass - - def after_run(self, runner): - pass - - def before_epoch(self, runner): - pass - - def after_epoch(self, runner): - pass - - def before_iter(self, runner): - pass - - def after_iter(self, runner): - pass - - def before_train_epoch(self, runner): - self.before_epoch(runner) - - def before_val_epoch(self, runner): - self.before_epoch(runner) - - def after_train_epoch(self, runner): - self.after_epoch(runner) - - def after_val_epoch(self, runner): - self.after_epoch(runner) - - def before_train_iter(self, runner): - self.before_iter(runner) - - def before_val_iter(self, runner): - self.before_iter(runner) - - def after_train_iter(self, runner): - self.after_iter(runner) - - def after_val_iter(self, runner): - self.after_iter(runner) - - def every_n_epochs(self, runner, n): - return (runner.epoch + 1) % n == 0 if n > 0 else False - - def every_n_inner_iters(self, runner, n): - return (runner.inner_iter + 1) % n == 0 if n > 0 else False - - def every_n_iters(self, runner, n): - return (runner.iter + 1) % n == 0 if n > 0 else False - - def end_of_epoch(self, runner): - return runner.inner_iter + 1 == len(runner.data_loader) - - def is_last_epoch(self, runner): - return runner.epoch + 1 == runner._max_epochs - - def is_last_iter(self, runner): - return runner.iter + 1 == runner._max_iters - - def get_triggered_stages(self): - trigger_stages = set() - for stage in Hook.stages: - if is_method_overridden(stage, Hook, self): - trigger_stages.add(stage) - - # some 
methods will be triggered in multi stages - # use this dict to map method to stages. - method_stages_map = { - 'before_epoch': ['before_train_epoch', 'before_val_epoch'], - 'after_epoch': ['after_train_epoch', 'after_val_epoch'], - 'before_iter': ['before_train_iter', 'before_val_iter'], - 'after_iter': ['after_train_iter', 'after_val_iter'], - } - - for method, map_stages in method_stages_map.items(): - if is_method_overridden(method, Hook, self): - trigger_stages.update(map_stages) - - return [stage for stage in Hook.stages if stage in trigger_stages] diff --git a/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/app.py b/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/MikeyAulin/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/MrZak/Learn-Up/README.md b/spaces/MrZak/Learn-Up/README.md deleted file mode 100644 index acf1c4e9ba774fb6598d17eb4281575a7a1893ba..0000000000000000000000000000000000000000 --- a/spaces/MrZak/Learn-Up/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Learn Up -emoji: 🏢 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NCTCMumbai/NCTC/models/research/autoaugment/train_cifar.py b/spaces/NCTCMumbai/NCTC/models/research/autoaugment/train_cifar.py deleted file mode 100644 index 9e3942ee26b1bd68234d34b17a818e058d9c881a..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/autoaugment/train_cifar.py +++ /dev/null @@ -1,452 +0,0 @@ -# Copyright 2018 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""AutoAugment Train/Eval module. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import contextlib -import os -import time - -import custom_ops as ops -import data_utils -import helper_utils -import numpy as np -from shake_drop import build_shake_drop_model -from shake_shake import build_shake_shake_model -import tensorflow as tf -from wrn import build_wrn_model - -tf.flags.DEFINE_string('model_name', 'wrn', - 'wrn, shake_shake_32, shake_shake_96, shake_shake_112, ' - 'pyramid_net') -tf.flags.DEFINE_string('checkpoint_dir', '/tmp/training', 'Training Directory.') -tf.flags.DEFINE_string('data_path', '/tmp/data', - 'Directory where dataset is located.') -tf.flags.DEFINE_string('dataset', 'cifar10', - 'Dataset to train with. 
Either cifar10 or cifar100') -tf.flags.DEFINE_integer('use_cpu', 1, '1 if use CPU, else GPU.') - -FLAGS = tf.flags.FLAGS - -arg_scope = tf.contrib.framework.arg_scope - - -def setup_arg_scopes(is_training): - """Sets up the argscopes that will be used when building an image model. - - Args: - is_training: Is the model training or not. - - Returns: - Arg scopes to be put around the model being constructed. - """ - - batch_norm_decay = 0.9 - batch_norm_epsilon = 1e-5 - batch_norm_params = { - # Decay for the moving averages. - 'decay': batch_norm_decay, - # epsilon to prevent 0s in variance. - 'epsilon': batch_norm_epsilon, - 'scale': True, - # collection containing the moving mean and moving variance. - 'is_training': is_training, - } - - scopes = [] - - scopes.append(arg_scope([ops.batch_norm], **batch_norm_params)) - return scopes - - -def build_model(inputs, num_classes, is_training, hparams): - """Constructs the vision model being trained/evaled. - - Args: - inputs: input features/images being fed to the image model build built. - num_classes: number of output classes being predicted. - is_training: is the model training or not. - hparams: additional hyperparameters associated with the image model. - - Returns: - The logits of the image model. - """ - scopes = setup_arg_scopes(is_training) - with contextlib.nested(*scopes): - if hparams.model_name == 'pyramid_net': - logits = build_shake_drop_model( - inputs, num_classes, is_training) - elif hparams.model_name == 'wrn': - logits = build_wrn_model( - inputs, num_classes, hparams.wrn_size) - elif hparams.model_name == 'shake_shake': - logits = build_shake_shake_model( - inputs, num_classes, hparams, is_training) - return logits - - -class CifarModel(object): - """Builds an image model for Cifar10/Cifar100.""" - - def __init__(self, hparams): - self.hparams = hparams - - def build(self, mode): - """Construct the cifar model.""" - assert mode in ['train', 'eval'] - self.mode = mode - self._setup_misc(mode) - self._setup_images_and_labels() - self._build_graph(self.images, self.labels, mode) - - self.init = tf.group(tf.global_variables_initializer(), - tf.local_variables_initializer()) - - def _setup_misc(self, mode): - """Sets up miscellaneous in the cifar model constructor.""" - self.lr_rate_ph = tf.Variable(0.0, name='lrn_rate', trainable=False) - self.reuse = None if (mode == 'train') else True - self.batch_size = self.hparams.batch_size - if mode == 'eval': - self.batch_size = 25 - - def _setup_images_and_labels(self): - """Sets up image and label placeholders for the cifar model.""" - if FLAGS.dataset == 'cifar10': - self.num_classes = 10 - else: - self.num_classes = 100 - self.images = tf.placeholder(tf.float32, [self.batch_size, 32, 32, 3]) - self.labels = tf.placeholder(tf.float32, - [self.batch_size, self.num_classes]) - - def assign_epoch(self, session, epoch_value): - session.run(self._epoch_update, feed_dict={self._new_epoch: epoch_value}) - - def _build_graph(self, images, labels, mode): - """Constructs the TF graph for the cifar model. - - Args: - images: A 4-D image Tensor - labels: A 2-D labels Tensor. - mode: string indicating training mode ( e.g., 'train', 'valid', 'test'). 
- """ - is_training = 'train' in mode - if is_training: - self.global_step = tf.train.get_or_create_global_step() - - logits = build_model( - images, - self.num_classes, - is_training, - self.hparams) - self.predictions, self.cost = helper_utils.setup_loss( - logits, labels) - self.accuracy, self.eval_op = tf.metrics.accuracy( - tf.argmax(labels, 1), tf.argmax(self.predictions, 1)) - self._calc_num_trainable_params() - - # Adds L2 weight decay to the cost - self.cost = helper_utils.decay_weights(self.cost, - self.hparams.weight_decay_rate) - - if is_training: - self._build_train_op() - - # Setup checkpointing for this child model - # Keep 2 or more checkpoints around during training. - with tf.device('/cpu:0'): - self.saver = tf.train.Saver(max_to_keep=2) - - self.init = tf.group(tf.global_variables_initializer(), - tf.local_variables_initializer()) - - def _calc_num_trainable_params(self): - self.num_trainable_params = np.sum([ - np.prod(var.get_shape().as_list()) for var in tf.trainable_variables() - ]) - tf.logging.info('number of trainable params: {}'.format( - self.num_trainable_params)) - - def _build_train_op(self): - """Builds the train op for the cifar model.""" - hparams = self.hparams - tvars = tf.trainable_variables() - grads = tf.gradients(self.cost, tvars) - if hparams.gradient_clipping_by_global_norm > 0.0: - grads, norm = tf.clip_by_global_norm( - grads, hparams.gradient_clipping_by_global_norm) - tf.summary.scalar('grad_norm', norm) - - # Setup the initial learning rate - initial_lr = self.lr_rate_ph - optimizer = tf.train.MomentumOptimizer( - initial_lr, - 0.9, - use_nesterov=True) - - self.optimizer = optimizer - apply_op = optimizer.apply_gradients( - zip(grads, tvars), global_step=self.global_step, name='train_step') - train_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) - with tf.control_dependencies([apply_op]): - self.train_op = tf.group(*train_ops) - - -class CifarModelTrainer(object): - """Trains an instance of the CifarModel class.""" - - def __init__(self, hparams): - self._session = None - self.hparams = hparams - - self.model_dir = os.path.join(FLAGS.checkpoint_dir, 'model') - self.log_dir = os.path.join(FLAGS.checkpoint_dir, 'log') - # Set the random seed to be sure the same validation set - # is used for each model - np.random.seed(0) - self.data_loader = data_utils.DataSet(hparams) - np.random.seed() # Put the random seed back to random - self.data_loader.reset() - - def save_model(self, step=None): - """Dumps model into the backup_dir. - - Args: - step: If provided, creates a checkpoint with the given step - number, instead of overwriting the existing checkpoints. - """ - model_save_name = os.path.join(self.model_dir, 'model.ckpt') - if not tf.gfile.IsDirectory(self.model_dir): - tf.gfile.MakeDirs(self.model_dir) - self.saver.save(self.session, model_save_name, global_step=step) - tf.logging.info('Saved child model') - - def extract_model_spec(self): - """Loads a checkpoint with the architecture structure stored in the name.""" - checkpoint_path = tf.train.latest_checkpoint(self.model_dir) - if checkpoint_path is not None: - self.saver.restore(self.session, checkpoint_path) - tf.logging.info('Loaded child model checkpoint from %s', - checkpoint_path) - else: - self.save_model(step=0) - - def eval_child_model(self, model, data_loader, mode): - """Evaluate the child model. - - Args: - model: image model that will be evaluated. - data_loader: dataset object to extract eval data from. - mode: will the model be evalled on train, val or test. 
- - Returns: - Accuracy of the model on the specified dataset. - """ - tf.logging.info('Evaluating child model in mode %s', mode) - while True: - try: - with self._new_session(model): - accuracy = helper_utils.eval_child_model( - self.session, - model, - data_loader, - mode) - tf.logging.info('Eval child model accuracy: {}'.format(accuracy)) - # If epoch trained without raising the below errors, break - # from loop. - break - except (tf.errors.AbortedError, tf.errors.UnavailableError) as e: - tf.logging.info('Retryable error caught: %s. Retrying.', e) - - return accuracy - - @contextlib.contextmanager - def _new_session(self, m): - """Creates a new session for model m.""" - # Create a new session for this model, initialize - # variables, and save / restore from - # checkpoint. - self._session = tf.Session( - '', - config=tf.ConfigProto( - allow_soft_placement=True, log_device_placement=False)) - self.session.run(m.init) - - # Load in a previous checkpoint, or save this one - self.extract_model_spec() - try: - yield - finally: - tf.Session.reset('') - self._session = None - - def _build_models(self): - """Builds the image models for train and eval.""" - # Determine if we should build the train and eval model. When using - # distributed training we only want to build one or the other and not both. - with tf.variable_scope('model', use_resource=False): - m = CifarModel(self.hparams) - m.build('train') - self._num_trainable_params = m.num_trainable_params - self._saver = m.saver - with tf.variable_scope('model', reuse=True, use_resource=False): - meval = CifarModel(self.hparams) - meval.build('eval') - return m, meval - - def _calc_starting_epoch(self, m): - """Calculates the starting epoch for model m based on global step.""" - hparams = self.hparams - batch_size = hparams.batch_size - steps_per_epoch = int(hparams.train_size / batch_size) - with self._new_session(m): - curr_step = self.session.run(m.global_step) - total_steps = steps_per_epoch * hparams.num_epochs - epochs_left = (total_steps - curr_step) // steps_per_epoch - starting_epoch = hparams.num_epochs - epochs_left - return starting_epoch - - def _run_training_loop(self, m, curr_epoch): - """Trains the cifar model `m` for one epoch.""" - start_time = time.time() - while True: - try: - with self._new_session(m): - train_accuracy = helper_utils.run_epoch_training( - self.session, m, self.data_loader, curr_epoch) - tf.logging.info('Saving model after epoch') - self.save_model(step=curr_epoch) - break - except (tf.errors.AbortedError, tf.errors.UnavailableError) as e: - tf.logging.info('Retryable error caught: %s. 
Retrying.', e) - tf.logging.info('Finished epoch: {}'.format(curr_epoch)) - tf.logging.info('Epoch time(min): {}'.format( - (time.time() - start_time) / 60.0)) - return train_accuracy - - def _compute_final_accuracies(self, meval): - """Run once training is finished to compute final val/test accuracies.""" - valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val') - if self.hparams.eval_test: - test_accuracy = self.eval_child_model(meval, self.data_loader, 'test') - else: - test_accuracy = 0 - tf.logging.info('Test Accuracy: {}'.format(test_accuracy)) - return valid_accuracy, test_accuracy - - def run_model(self): - """Trains and evalutes the image model.""" - hparams = self.hparams - - # Build the child graph - with tf.Graph().as_default(), tf.device( - '/cpu:0' if FLAGS.use_cpu else '/gpu:0'): - m, meval = self._build_models() - - # Figure out what epoch we are on - starting_epoch = self._calc_starting_epoch(m) - - # Run the validation error right at the beginning - valid_accuracy = self.eval_child_model( - meval, self.data_loader, 'val') - tf.logging.info('Before Training Epoch: {} Val Acc: {}'.format( - starting_epoch, valid_accuracy)) - training_accuracy = None - - for curr_epoch in xrange(starting_epoch, hparams.num_epochs): - - # Run one training epoch - training_accuracy = self._run_training_loop(m, curr_epoch) - - valid_accuracy = self.eval_child_model( - meval, self.data_loader, 'val') - tf.logging.info('Epoch: {} Valid Acc: {}'.format( - curr_epoch, valid_accuracy)) - - valid_accuracy, test_accuracy = self._compute_final_accuracies( - meval) - - tf.logging.info( - 'Train Acc: {} Valid Acc: {} Test Acc: {}'.format( - training_accuracy, valid_accuracy, test_accuracy)) - - @property - def saver(self): - return self._saver - - @property - def session(self): - return self._session - - @property - def num_trainable_params(self): - return self._num_trainable_params - - -def main(_): - if FLAGS.dataset not in ['cifar10', 'cifar100']: - raise ValueError('Invalid dataset: %s' % FLAGS.dataset) - hparams = tf.contrib.training.HParams( - train_size=50000, - validation_size=0, - eval_test=1, - dataset=FLAGS.dataset, - data_path=FLAGS.data_path, - batch_size=128, - gradient_clipping_by_global_norm=5.0) - if FLAGS.model_name == 'wrn': - hparams.add_hparam('model_name', 'wrn') - hparams.add_hparam('num_epochs', 200) - hparams.add_hparam('wrn_size', 160) - hparams.add_hparam('lr', 0.1) - hparams.add_hparam('weight_decay_rate', 5e-4) - elif FLAGS.model_name == 'shake_shake_32': - hparams.add_hparam('model_name', 'shake_shake') - hparams.add_hparam('num_epochs', 1800) - hparams.add_hparam('shake_shake_widen_factor', 2) - hparams.add_hparam('lr', 0.01) - hparams.add_hparam('weight_decay_rate', 0.001) - elif FLAGS.model_name == 'shake_shake_96': - hparams.add_hparam('model_name', 'shake_shake') - hparams.add_hparam('num_epochs', 1800) - hparams.add_hparam('shake_shake_widen_factor', 6) - hparams.add_hparam('lr', 0.01) - hparams.add_hparam('weight_decay_rate', 0.001) - elif FLAGS.model_name == 'shake_shake_112': - hparams.add_hparam('model_name', 'shake_shake') - hparams.add_hparam('num_epochs', 1800) - hparams.add_hparam('shake_shake_widen_factor', 7) - hparams.add_hparam('lr', 0.01) - hparams.add_hparam('weight_decay_rate', 0.001) - elif FLAGS.model_name == 'pyramid_net': - hparams.add_hparam('model_name', 'pyramid_net') - hparams.add_hparam('num_epochs', 1800) - hparams.add_hparam('lr', 0.05) - hparams.add_hparam('weight_decay_rate', 5e-5) - hparams.batch_size = 64 - else: - raise 
ValueError('Not Valid Model Name: %s' % FLAGS.model_name) - cifar_trainer = CifarModelTrainer(hparams) - cifar_trainer.run_model() - -if __name__ == '__main__': - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run() diff --git a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/models_onnx.py b/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/NMEX/rvc-hoyogame-v2/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, 
x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if 
gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - 
uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, 
g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], 
g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 
32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/NikeZoldyck/green-screen-composition-transfer/utils/shared_utils.py b/spaces/NikeZoldyck/green-screen-composition-transfer/utils/shared_utils.py deleted file mode 100644 index 6c9f5203cb08f1a232d49c4a1875a9cdbc20bbc2..0000000000000000000000000000000000000000 --- a/spaces/NikeZoldyck/green-screen-composition-transfer/utils/shared_utils.py +++ /dev/null @@ -1,140 +0,0 @@ -from pathlib import Path -from rembg import remove -import io - -# Apply the transformations needed -from torch import autocast, nn -import torch -import torch.nn as nn -import torch -import torchvision.transforms as transforms -import torchvision.utils as utils -import torch.nn as nn -import pyrootutils -from PIL import Image -import numpy as np -from utils.photo_wct import PhotoWCT -from utils.photo_smooth import Propagator -#from utils.smooth_filter import smooth_filter - -# Load models -root = Path.cwd() -device = "cuda" if torch.cuda.is_available() else "cpu" -# Load model -p_wct = PhotoWCT().to(device) -p_wct.load_state_dict(torch.load(root/"models/components/photo_wct.pth")) -p_pro = Propagator().to(device) -stylization_module=p_wct -smoothing_module=p_pro - - -#Dependecies - To be installed - -#!pip install replicate -#Token - To be authenticated - -#API TOKEN - 664474670af075461f85420f7b1d23d18484f826 -#To be declared as an environment variable - -#export REPLICATE_API_TOKEN = -import replicate -import os -import requests - - - -def stableDiffusionAPICall(text_prompt): - os.environ['REPLICATE_API_TOKEN'] = 'a9f4c06cb9808f42b29637bb60b7b88f106ad5b8' - model = replicate.models.get("stability-ai/stable-diffusion") - #text_prompt = 'photorealistic, elf fighting Sauron' - gen_bg_img = model.predict(prompt=text_prompt)[0] - img_data = requests.get(gen_bg_img).content - # r_data = binascii.unhexlify(img_data) - stream = io.BytesIO(img_data) - img = Image.open(stream) - del img_data - - return img - - - -def memory_limit_image_resize(cont_img): - # prevent too small or too big images - MINSIZE=400 - MAXSIZE=800 - orig_width = cont_img.width - orig_height = cont_img.height - if max(cont_img.width,cont_img.height) < MINSIZE: - if cont_img.width > cont_img.height: - cont_img.thumbnail((int(cont_img.width*1.0/cont_img.height*MINSIZE), MINSIZE), Image.BICUBIC) - else: - cont_img.thumbnail((MINSIZE, int(cont_img.height*1.0/cont_img.width*MINSIZE)), Image.BICUBIC) - if min(cont_img.width,cont_img.height) > MAXSIZE: - if cont_img.width > cont_img.height: - cont_img.thumbnail((MAXSIZE, int(cont_img.height*1.0/cont_img.width*MAXSIZE)), Image.BICUBIC) - 
else: - cont_img.thumbnail(((int(cont_img.width*1.0/cont_img.height*MAXSIZE), MAXSIZE)), Image.BICUBIC) - print("Resize image: (%d,%d)->(%d,%d)" % (orig_width, orig_height, cont_img.width, cont_img.height)) - return cont_img.width, cont_img.height - - - - - -def superimpose(input_img,back_img): - matte_img = remove(input_img) - back_img.paste(matte_img, (0, 0), matte_img) - return back_img,input_img - - - -def style_transfer(cont_img,styl_img): - with torch.no_grad(): - new_cw, new_ch = memory_limit_image_resize(cont_img) - new_sw, new_sh = memory_limit_image_resize(styl_img) - cont_pilimg = cont_img.copy() - cw = cont_pilimg.width - ch = cont_pilimg.height - cont_img = transforms.ToTensor()(cont_img).unsqueeze(0) - styl_img = transforms.ToTensor()(styl_img).unsqueeze(0) - - cont_seg = [] - styl_seg = [] - - if device == 'cuda': - cont_img = cont_img.to(device) - styl_img = styl_img.to(device) - stylization_module.to(device) - cont_seg = np.asarray(cont_seg) - styl_seg = np.asarray(styl_seg) - - stylized_img = stylization_module.transform(cont_img, styl_img, cont_seg, styl_seg) - if ch != new_ch or cw != new_cw: - stylized_img = nn.functional.upsample(stylized_img, size=(ch, cw), mode='bilinear') - grid = utils.make_grid(stylized_img.data, nrow=1, padding=0) - ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy() - stylized_img = Image.fromarray(ndarr) - #final_img = smooth_filter(stylized_img, cont_pilimg, f_radius=15, f_edge=1e-1) - return stylized_img - -def smoother(stylized_img, over_img): - if device == 'cuda': - smoothing_module.to(device) - final_img = smoothing_module.process(stylized_img, over_img) - #final_img = smooth_filter(stylized_img, over_img, f_radius=15, f_edge=1e-1) - return final_img - - -if __name__ == "__main__": - root = pyrootutils.setup_root(__file__, pythonpath=True) - fg_path = root/"notebooks/profile_new.png" - bg_path = root/"notebooks/back_img.png" - ckpt_path = root/"src/models/MODNet/pretrained/modnet_photographic_portrait_matting.ckpt" - - #stableDiffusionAPICall("Photorealistic scenery of a concert") - fg_img = Image.open(fg_path).resize((800,800)) - bg_img = Image.open(bg_path).resize((800,800)) - #img = combined_display(fg_img, bg_img,ckpt_path) - img = superimpose(fg_img,bg_img) - img.save(root/"notebooks/overlay.png") - # bg_img.paste(img, (0, 0), img) - # bg_img.save(root/"notebooks/check.png") - - diff --git a/spaces/NingKanae/anime-voice-generator/models.py b/spaces/NingKanae/anime-voice-generator/models.py deleted file mode 100644 index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000 --- a/spaces/NingKanae/anime-voice-generator/models.py +++ /dev/null @@ -1,533 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/dummy_lm.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/dummy_lm.py deleted file mode 100644 index c6246a0c0e338fa36244b3aa4fb57f189fbffcb6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/benchmark/dummy_lm.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from dataclasses import dataclass, field -from typing import Optional - -import torch -from .dummy_dataset import DummyDataset -from fairseq.data import Dictionary -from fairseq.dataclass import FairseqDataclass -from fairseq.tasks import FairseqTask, register_task -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class DummyLMConfig(FairseqDataclass): - dict_size: int = 49996 - dataset_size: int = 100000 - tokens_per_sample: int = field( - default=512, metadata={"help": "max sequence length"} - ) - add_bos_token: bool = False - batch_size: Optional[int] = II("dataset.batch_size") - max_tokens: Optional[int] = II("dataset.max_tokens") - max_target_positions: int = II("task.tokens_per_sample") - - -@register_task("dummy_lm", dataclass=DummyLMConfig) -class DummyLMTask(FairseqTask): - def __init__(self, cfg: DummyLMConfig): - super().__init__(cfg) - - # load dictionary - self.dictionary = Dictionary() - for i in range(cfg.dict_size): - self.dictionary.add_symbol("word{}".format(i)) - self.dictionary.pad_to_multiple_(8) # often faster if divisible by 8 - logger.info("dictionary: {} types".format(len(self.dictionary))) - - seq = torch.arange(cfg.tokens_per_sample + 1) + self.dictionary.pad() + 1 - - self.dummy_src = seq[:-1] - self.dummy_tgt = seq[1:] - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - """Load a given dataset split. - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if self.cfg.batch_size is not None: - bsz = self.cfg.batch_size - else: - bsz = max(1, self.cfg.max_tokens // self.cfg.tokens_per_sample) - self.datasets[split] = DummyDataset( - { - "id": 1, - "net_input": { - "src_tokens": torch.stack([self.dummy_src for _ in range(bsz)]), - "src_lengths": torch.full( - (bsz,), self.cfg.tokens_per_sample, dtype=torch.long - ), - }, - "target": torch.stack([self.dummy_tgt for _ in range(bsz)]), - "nsentences": bsz, - "ntokens": bsz * self.cfg.tokens_per_sample, - }, - num_items=self.cfg.dataset_size, - item_size=self.cfg.tokens_per_sample, - ) - - @property - def source_dictionary(self): - return self.dictionary - - @property - def target_dictionary(self): - return self.dictionary diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/adagrad.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/adagrad.py deleted file mode 100644 index 4f539541c1c91d8c822f7ce624fa6eabf744f60e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/optim/adagrad.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch.optim - -from . import LegacyFairseqOptimizer, register_optimizer - - -@register_optimizer("adagrad") -class Adagrad(LegacyFairseqOptimizer): - def __init__(self, args, params): - super().__init__(args) - self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', - help='weight decay') - # fmt: on - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. 
This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. - """ - return { - "lr": self.args.lr[0], - "weight_decay": self.args.weight_decay, - } - - @property - def supports_flat_params(self): - return False diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ja.sh b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ja.sh deleted file mode 100644 index be6f5ca5fe4ac8e8c786a439caaed1d1314f1aef..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/m2m_100/tokenizers/seg_ja.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -SCRIPT=`realpath $0` -KYTEA=`dirname $SCRIPT`/thirdparty/kytea -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$KYTEA/lib:/usr/local/lib -export PATH=$PATH:"$KYTEA/bin" - -cat - | tr -d "[:blank:]" | kytea -notags diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py deleted file mode 100644 index 1222addc424d4f898d602009e4032907241aadfe..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/preprocessing/denoiser/resample.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# author: adefossez - -import math - -import torch as th -from torch.nn import functional as F - - -def sinc(t): - """sinc. - - :param t: the input tensor - """ - return th.where(t == 0, th.tensor(1., device=t.device, dtype=t.dtype), - th.sin(t) / t) - - -def kernel_upsample2(zeros=56): - """kernel_upsample2. - - """ - win = th.hann_window(4 * zeros + 1, periodic=False) - winodd = win[1::2] - t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) - t *= math.pi - kernel = (sinc(t) * winodd).view(1, 1, -1) - return kernel - - -def upsample2(x, zeros=56): - """ - Upsampling the input by 2 using sinc interpolation. - Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method." - ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing. - Vol. 9. IEEE, 1984. - """ - *other, time = x.shape - kernel = kernel_upsample2(zeros).to(x) - out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view( - *other, time - ) - y = th.stack([x, out], dim=-1) - return y.view(*other, -1) - - -def kernel_downsample2(zeros=56): - """kernel_downsample2. - - """ - win = th.hann_window(4 * zeros + 1, periodic=False) - winodd = win[1::2] - t = th.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) - t.mul_(math.pi) - kernel = (sinc(t) * winodd).view(1, 1, -1) - return kernel - - -def downsample2(x, zeros=56): - """ - Downsampling the input by 2 using sinc interpolation. - Smith, Julius, and Phil Gossett. "A flexible sampling-rate conversion method." - ICASSP'84. IEEE International Conference on Acoustics, Speech, and Signal Processing. - Vol. 9. IEEE, 1984. 
- """ - if x.shape[-1] % 2 != 0: - x = F.pad(x, (0, 1)) - xeven = x[..., ::2] - xodd = x[..., 1::2] - *other, time = xodd.shape - kernel = kernel_downsample2(zeros).to(x) - out = xeven + F.conv1d( - xodd.view(-1, 1, time), kernel, padding=zeros - )[..., :-1].view(*other, time) - return out.view(*other, -1).mul(0.5) diff --git a/spaces/ORI-Muchim/MarinTTS/monotonic_align/__init__.py b/spaces/ORI-Muchim/MarinTTS/monotonic_align/__init__.py deleted file mode 100644 index 40b6f64aa116c74cac2f6a33444c9eeea2fdb38c..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/MarinTTS/monotonic_align/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) - diff --git a/spaces/OkamiFeng/Bark-with-Voice-Cloning/util/settings.py b/spaces/OkamiFeng/Bark-with-Voice-Cloning/util/settings.py deleted file mode 100644 index 2ab66b0c7605d2b877defdd8592097a8a4c6f21a..0000000000000000000000000000000000000000 --- a/spaces/OkamiFeng/Bark-with-Voice-Cloning/util/settings.py +++ /dev/null @@ -1,41 +0,0 @@ -import yaml - -class Settings: - def __init__(self, config_file): - self.config_file = config_file - self.load() - - def load(self): - try: - with open(self.config_file, 'r') as f: - data = yaml.load(f, Loader=yaml.FullLoader) - self.selected_theme = data.get('selected_theme', "gstaff/xkcd") - self.server_name = data.get('server_name', "") - self.server_port = data.get('server_port', 0) - self.server_share = data.get('server_share', False) - self.input_text_desired_length = data.get('input_text_desired_length', 110) - self.input_text_max_length = data.get('input_text_max_length', 170) - self.silence_sentence = data.get('silence_between_sentences', 250) - self.silence_speakers = data.get('silence_between_speakers', 500) - self.output_folder_path = data.get('output_folder_path', 'outputs') - - except: - self.selected_theme = "gstaff/xkcd" - - def save(self): - data = { - 'selected_theme': self.selected_theme, - 'server_name': self.server_name, - 'server_port': self.server_port, - 'server_share': self.server_share, - 'input_text_desired_length' : self.input_text_desired_length, - 'input_text_max_length' : self.input_text_max_length, - 'silence_between_sentences': self.silence_sentence, - 'silence_between_speakers': self.silence_speakers, - 'output_folder_path': self.output_folder_path - } - with open(self.config_file, 'w') as f: - yaml.dump(data, f) - - - diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/husky_src/husky_chat.py b/spaces/OpenGVLab/InternGPT/iGPT/models/husky_src/husky_chat.py deleted file mode 100644 index 659600a2bc17c4fdc3c71854608f0171ff2d44c4..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/husky_src/husky_chat.py +++ /dev/null @@ -1,297 +0,0 @@ -import logging -import warnings -import torch -import torch.nn as nn - -from dataclasses import dataclass, field -from typing import Optional, Dict, Sequence, Union, List, Tuple, Any - -from transformers 
import ( - LlamaForCausalLM, - Blip2PreTrainedModel, - Blip2VisionModel, - Blip2Config, - Blip2QFormerModel, - GenerationConfig, -) - -from transformers.utils import ModelOutput - -warnings.filterwarnings('ignore') -logger = logging.getLogger(__name__) - -@dataclass -class Blip2ForConditionalGenerationModelOutput(ModelOutput): - """ - Class defining the outputs of [`Blip2ForConditionalGeneration`]. - - Args: - loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): - Language modeling loss from the language model. - logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): - Prediction scores of the language modeling head of the language model. - vision_outputs (`BaseModelOutputWithPooling`): - Outputs of the vision encoder. - qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`): - Outputs of the Q-Former (Querying Transformer). - language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`): - Outputs of the language model. - """ - - loss: Optional[Tuple[torch.FloatTensor]] = None - logits: Optional[Tuple[torch.FloatTensor]] = None - vision_outputs: Optional[torch.FloatTensor] = None - qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None - language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None - - def to_tuple(self) -> Tuple[Any]: - return tuple( - self[k] - if k not in ["vision_outputs", "qformer_outputs", "language_model_outputs"] - else getattr(self, k).to_tuple() - for k in self.keys() - ) - -class Blip2LlaMAForConditionalGeneration(Blip2PreTrainedModel): - config_class = Blip2Config - main_input_name = "pixel_values" - - def __init__(self, config: Blip2Config): - super().__init__(config) - - self.vision_model = Blip2VisionModel(config.vision_config) - - self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size)) - self.qformer = Blip2QFormerModel(config.qformer_config) - - language_model = LlamaForCausalLM(config.text_config) - self.language_model = language_model - - self.language_projection = nn.Linear(config.qformer_config.hidden_size, language_model.config.hidden_size) - - self.config.hidden_size = config.text_config.hidden_size - self.num_queries = config.num_query_tokens - self.offset = 5 - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.language_model.get_input_embeddings() - - def set_input_embeddings(self, value): - self.language_model.set_input_embeddings(value) - - def set_output_embeddings(self, new_embeddings): - self.language_model.set_output_embeddings(new_embeddings) - - def get_output_embeddings(self) -> nn.Module: - return self.language_model.get_output_embeddings() - - def get_encoder(self): - return self.language_model.get_encoder() - - def get_decoder(self): - return self.language_model.get_decoder() - - def extract_feature( - self, - pixel_values: torch.FloatTensor, - ): - image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state - image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) - - query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) - query_outputs = self.qformer( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_attention_mask, - return_dict=True, - ) - query_output = query_outputs.last_hidden_state - - language_model_inputs = self.language_projection(query_output) - 
return language_model_inputs - - def _tie_weights(self): - if not self.config.use_decoder_only_language_model: - self.language_model.encoder.embed_tokens = self.language_model.shared - self.language_model.decoder.embed_tokens = self.language_model.shared - - def _preprocess_accelerate(self): - r""" - Some pre-processing hacks to make the model `accelerate` compatible. Check - https://github.com/huggingface/transformers/pull/21707 for more details. - """ - hf_device_map = self.hf_device_map - - if len(hf_device_map) > 1 and "language_model" not in hf_device_map and torch.cuda.device_count() > 1: - # warn users about unexpected behavior when using multi-GPU + BLIP-2 + `accelerate`. - logger.warning( - "The `language_model` is not in the `hf_device_map` dictionary and you are running your script" - " in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`." - " Please pass a `device_map` that contains `language_model` to remove this warning." - " Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for", - " more details on creating a `device_map` for large models.", - ) - - if hasattr(self.language_model, "_hf_hook"): - self.language_model._hf_hook.io_same_device = True # For `generate` compatibility - - def forward( - self, - pixel_values: torch.FloatTensor, - input_ids: torch.FloatTensor, - attention_mask: Optional[torch.LongTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - labels: Optional[torch.LongTensor] = None, - return_dict: Optional[bool] = None, - - ) -> Union[Tuple, Blip2ForConditionalGenerationModelOutput]: - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - # step 1: forward the images through the vision encoder, - # to get image embeddings of shape (batch_size, seq_len, hidden_size) - vision_outputs = self.vision_model( - pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - image_embeds = vision_outputs[0] - - # step 2: forward the query tokens through the QFormer, using the image embeddings for cross-attention - image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) - - query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) - query_outputs = self.qformer( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - query_output = query_outputs[0] - - # step 3: use the language model, conditioned on the query outputs and the prompt - language_model_inputs = self.language_projection(query_output) - assert language_model_inputs.shape[1] == self.num_queries - - inputs_embeds = self.language_model.get_input_embeddings()(input_ids) - # Human: . 
Give the describe Assistant: - # position of : [offset: offset+num_queries] - - inputs_embeds[:, self.offset:self.offset + self.num_queries, :] = language_model_inputs - if attention_mask is None: - attention_mask = torch.ones_like(input_ids) - - outputs = self.language_model( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - logits = outputs.logits if return_dict else outputs[0] - loss = None - - # we compute the loss here since we need to take into account the sequence length of the query embeds - if labels is not None: - logits = logits[:, -labels.size(1):, :] - # Shift so that tokens < n predict n - shift_logits = logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous().to(logits.device).to(torch.long) - - # Flatten the tokens - loss_fct = nn.CrossEntropyLoss(reduction="mean") - loss = loss_fct(shift_logits.view(-1, self.config.text_config.vocab_size), shift_labels.view(-1)) - - if not return_dict: - output = (logits, vision_outputs, query_outputs, outputs) - return ((loss,) + output) if loss is not None else output - - return Blip2ForConditionalGenerationModelOutput( - loss=loss, - logits=logits, - vision_outputs=vision_outputs, - qformer_outputs=query_outputs, - language_model_outputs=outputs, - ) - - @torch.no_grad() - def generate( - self, - pixel_values: torch.FloatTensor, - input_ids: Optional[torch.LongTensor] = None, - attention_mask: Optional[torch.LongTensor] = None, - language_model_inputs: Optional[torch.FloatTensor] = None, - generation_config: Optional[GenerationConfig] = None, - **generate_kwargs, - ) -> torch.LongTensor: - """ - Overrides `generate` function to be able to use the model as a conditional generator. - - Args: - pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)): - Input images to be processed. - input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): - The sequence used as a prompt for the generation. - attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*): - Mask to avoid performing attention on padding token indices - generation_config (`~generation.GenerationConfig`, *optional*): - The generation configuration to be used as base parametrization for the generation call. `**kwargs` - passed to generate matching the attributes of `generation_config` will override them. If - `generation_config` is not provided, the default will be used, which had the following loading - priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model - configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s - default values, whose documentation should be checked to parameterize generation. - - Returns: - captions (list): A list of strings of length batch_size * num_captions. 
- """ - if hasattr(self, "hf_device_map"): - # preprocess for `accelerate` - self._preprocess_accelerate() - if language_model_inputs is None: - batch_size = pixel_values.shape[0] - image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state - image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device) - - query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) - query_outputs = self.qformer( - query_embeds=query_tokens, - encoder_hidden_states=image_embeds, - encoder_attention_mask=image_attention_mask, - return_dict=True, - ) - query_output = query_outputs.last_hidden_state - - language_model_inputs = self.language_projection(query_output) - assert language_model_inputs.shape[1] == self.num_queries - - if input_ids is None: - input_ids = ( - torch.LongTensor([[self.config.text_config.bos_token_id]]) - .repeat(batch_size, 1) - .to(image_embeds.device) - ) - - if attention_mask is None: - attention_mask = torch.ones_like(input_ids) - - inputs_embeds = self.language_model.get_input_embeddings()(input_ids) - - # position of : [offset: offset+num_queries] - inputs_embeds[:, self.offset:self.offset + self.num_queries, :] = language_model_inputs - - outputs = self.language_model.generate( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - generation_config=generation_config, - **generate_kwargs, - ) - - return outputs diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/lpips/base_model.py b/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/lpips/base_model.py deleted file mode 100644 index 8de1d16f0c7fa52d8067139abc6e769e96d0a6a1..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/stylegan/lpips/base_model.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import numpy as np -import torch -from torch.autograd import Variable -from pdb import set_trace as st -from IPython import embed - -class BaseModel(): - def __init__(self): - pass; - - def name(self): - return 'BaseModel' - - def initialize(self, use_gpu=True, gpu_ids=[0]): - self.use_gpu = use_gpu - self.gpu_ids = gpu_ids - - def forward(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, path, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(path, save_filename) - torch.save(network.state_dict(), save_path) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print('Loading network from %s'%save_path) - network.load_state_dict(torch.load(save_path)) - - def update_learning_rate(): - pass - - def get_image_paths(self): - return self.image_paths - - def save_done(self, flag=False): - np.save(os.path.join(self.save_dir, 'done_flag'),flag) - np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i') diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/array.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/array.go deleted file mode 100644 index 
5183c91f093483604fbf28599ce2832e910fe7c8..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/array.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/run.sh b/spaces/PeepDaSlan9/AutoGPT/run.sh deleted file mode 100644 index edcbc44155b9ca9df83e283fdf976472c13e6492..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/AutoGPT/run.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -python scripts/check_requirements.py requirements.txt -if [ $? -eq 1 ] -then - echo Installing missing packages... - pip install -r requirements.txt -fi -python -m autogpt $@ -read -p "Press any key to continue..." diff --git a/spaces/PeepDaSlan9/B2B-APG/app.py b/spaces/PeepDaSlan9/B2B-APG/app.py deleted file mode 100644 index 7a5ab892acaac24480a13ad8819e6fc33ecadb8d..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/B2B-APG/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio -import gradio as gr - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Plurigrid/LifeSim/src/app/agents/smith.ts b/spaces/Plurigrid/LifeSim/src/app/agents/smith.ts deleted file mode 100644 index ee0b86870ee06f9ee260b5727a6db16bf0641234..0000000000000000000000000000000000000000 --- a/spaces/Plurigrid/LifeSim/src/app/agents/smith.ts +++ /dev/null @@ -1,42 +0,0 @@ -import { pick } from "./pick" -import { Agent, Scene } from "./types" - -const actions = [ - "standing and waiting", - "standing and looking at camera", - "standing and adjusting their tie", - "standing and talking on a cellphone", - "standing and reading the journal" -] - - -const positions = [ - "on the roof of a building", - "in the lobby of a building", - "in an elevator", - "on the sidewalk of a street" -] - -export const agent: Agent = { - title: "Smith", - type: "smith", - simulate: (): Scene => { - const action = pick(actions) - const position = pick(positions) - - const prompt = [ - `static medium shot of Agent Smith from the Matrix`, - `wearing a black costume with black tie and black sunglasses`, - action, - position, - `high res`, - `documentary`, - ].join(", ") - - return { - action, - position, - prompt - } - } -} \ No newline at end of file diff --git a/spaces/RRVSS/SVS/app.py b/spaces/RRVSS/SVS/app.py deleted file mode 100644 index 76afa75501d9c6886c8ed1fc23476eaabe66c385..0000000000000000000000000000000000000000 --- a/spaces/RRVSS/SVS/app.py +++ /dev/null @@ -1,10 +0,0 @@ -import gradio as gr -i=0 -while i<100: - gr.Interface.load("models/gpt2-large").launch() - iface.launch(share=True) - i=i+1 - - - - diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py deleted file mode 100644 index aa460d3eda50fbb174623a1b5bbca54645fd588a..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py +++ /dev/null @@ -1,68 +0,0 @@ -import re -import textwrap -import email.message - -from ._text import FoldedCase - - -class Message(email.message.Message): - multiple_use_keys = set( - map( - FoldedCase, - [ - 'Classifier', - 'Obsoletes-Dist', - 'Platform', - 'Project-URL', - 'Provides-Dist', - 'Provides-Extra', - 'Requires-Dist', - 'Requires-External', - 
'Supported-Platform', - 'Dynamic', - ], - ) - ) - """ - Keys that may be indicated multiple times per PEP 566. - """ - - def __new__(cls, orig: email.message.Message): - res = super().__new__(cls) - vars(res).update(vars(orig)) - return res - - def __init__(self, *args, **kwargs): - self._headers = self._repair_headers() - - # suppress spurious error from mypy - def __iter__(self): - return super().__iter__() - - def _repair_headers(self): - def redent(value): - "Correct for RFC822 indentation" - if not value or '\n' not in value: - return value - return textwrap.dedent(' ' * 8 + value) - - headers = [(key, redent(value)) for key, value in vars(self)['_headers']] - if self._payload: - headers.append(('Description', self.get_payload())) - return headers - - @property - def json(self): - """ - Convert PackageMetadata to a JSON-compatible format - per PEP 0566. - """ - - def transform(key): - value = self.get_all(key) if key in self.multiple_use_keys else self[key] - if key == 'Keywords': - value = re.split(r'\s+', value) - tk = key.lower().replace('-', '_') - return tk, value - - return dict(map(transform, map(FoldedCase, self))) diff --git a/spaces/Re1e9/DoodleDecoder/README.md b/spaces/Re1e9/DoodleDecoder/README.md deleted file mode 100644 index bbadcce8b5ffd38e750b79f5b8bc69528d992a53..0000000000000000000000000000000000000000 --- a/spaces/Re1e9/DoodleDecoder/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: DoodleDecoder -emoji: 📚 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.17.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Realcat/image-matching-webui/third_party/SOLD2/README.md b/spaces/Realcat/image-matching-webui/third_party/SOLD2/README.md deleted file mode 100644 index 69713c07084d26ab689532c29293d056bc84f655..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/SOLD2/README.md +++ /dev/null @@ -1,216 +0,0 @@ -# SOLD² - Self-supervised Occlusion-aware Line Description and Detection - -This repository contains the implementation of the paper: [SOLD² : Self-supervised Occlusion-aware Line Description and Detection](https://arxiv.org/abs/2104.03362), J-T. Lin*, R. Pautrat*, V. Larsson, M. Oswald and M. Pollefeys (Oral at CVPR 2021). - -SOLD² is a deep line segment detector and descriptor that can be trained without hand-labelled line segments and that can robustly match lines even in the presence of occlusion. - -## Demos - -Matching in the presence of occlusion: -![demo_occlusion](assets/videos/demo_occlusion.gif) - -Matching with a moving camera: -![demo_moving_camera](assets/videos/demo_moving_camera.gif) - -## Usage - -### Using from kornia - -SOLD² is integrated into [kornia](https://github.com/kornia/kornia) library since version 0.6.7. - - ``` - pip install kornia==0.6.7 - ``` - - Then you can import it as - ```python3 - from kornia.feature import SOLD2 - ``` - - See tutorial on using SOLD² from kornia [here](https://kornia-tutorials.readthedocs.io/en/latest/line_detection_and_matching_sold2.html). - -### Installation - -We recommend using this code in a Python environment (e.g. venv or conda). The following script installs the necessary requirements with pip: -```bash -pip install -r requirements.txt -``` - -Set your dataset and experiment paths (where you will store your datasets and checkpoints of your experiments) by modifying the file `config/project_config.py`. 
Both variables `DATASET_ROOT` and `EXP_PATH` have to be set; a minimal sketch of this file is shown just before Step 1 below. - -Install the Python package: -```bash -pip install -e . -``` - -You can download the version of the [Wireframe dataset](https://github.com/huangkuns/wireframe) that we used during our training and testing [here](https://www.polybox.ethz.ch/index.php/s/IfdEf7RoHol7jeg). This repository also includes some files to train on the [Holicity dataset](https://holicity.io/) to add more outdoor images, but note that we did not extensively test this dataset and the original paper was based on the Wireframe dataset only. - -### Training your own model - -All training parameters are located in configuration files in the folder `config`. Training SOLD² from scratch requires several steps, some of which can take several days, depending on the size of your dataset. - -
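For reference, here is a minimal sketch of what `sold2/config/project_config.py` needs to expose. The actual file in the repository may organize this differently (for instance as attributes of a config class); only the two variable names come from this README, and the paths are placeholders to adapt to your machine.

```python
# sold2/config/project_config.py -- illustrative sketch, not the repository's exact file.
import os

# Root folder where the synthetic / Wireframe / Holicity datasets are stored
DATASET_ROOT = os.path.expanduser("~/data/sold2_datasets")

# Folder where experiment checkpoints and logs are written
EXP_PATH = os.path.expanduser("~/experiments/sold2")
```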
-Step 1: Train on a synthetic dataset - -The following command will create the synthetic dataset and start training the model on it: -```bash -python -m sold2.experiment --mode train --dataset_config sold2/config/synthetic_dataset.yaml --model_config sold2/config/train_detector.yaml --exp_name sold2_synth -``` -
-Step 2: Export the raw pseudo ground truth on the Wireframe dataset with homography adaptation - -Note that this step can take one to several days depending on your machine and on the size of the dataset. You can set the batch size to the maximum capacity that your GPU can handle. Prior to this step, make sure that the dataset config file `config/wireframe_dataset.yaml` has the lines `gt_source_train` and `gt_source_test` commented out, and that the photometric and homographic augmentations are disabled. -```bash -python -m sold2.experiment --exp_name wireframe_train --mode export --resume_path <path to the experiment of Step 1> --model_config sold2/config/train_detector.yaml --dataset_config sold2/config/wireframe_dataset.yaml --checkpoint_name <name of the checkpoint file> --export_dataset_mode train --export_batch_size 4 -``` - -You can then do the same for the test set: -```bash -python -m sold2.experiment --exp_name wireframe_test --mode export --resume_path <path to the experiment of Step 1> --model_config sold2/config/train_detector.yaml --dataset_config sold2/config/wireframe_dataset.yaml --checkpoint_name <name of the checkpoint file> --export_dataset_mode test --export_batch_size 4 -``` -
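The export above relies on homography adaptation: the detector trained in Step 1 is run on several random warps of each image, and its predictions are warped back to the original frame and aggregated into a more complete pseudo ground truth. The sketch below only illustrates this general idea and is not the repository's implementation; the homography sampling, the number of warps, and the aggregation scheme are simplified assumptions.

```python
# Illustrative sketch of homography adaptation (NOT the code used by the export script).
import torch
import kornia.geometry.transform as KGT


def sample_homography(perturbation: float = 0.1) -> torch.Tensor:
    """Identity transform with a small random perturbation (simplistic stand-in)."""
    H = torch.eye(3)
    H[:2, :] += perturbation * (torch.rand(2, 3) - 0.5)
    return H.unsqueeze(0)  # shape (1, 3, 3)


def homography_adaptation(img: torch.Tensor, detect_fn, num_warps: int = 10) -> torch.Tensor:
    """Aggregate a detector heatmap over random warps of `img`, shape (1, 1, H, W)."""
    h, w = img.shape[-2:]
    acc = detect_fn(img)                 # heatmap predicted in the original frame
    count = torch.ones_like(acc)
    for _ in range(num_warps):
        H = sample_homography()
        warped = KGT.warp_perspective(img, H, dsize=(h, w))
        heat = detect_fn(warped)
        # Warp the prediction (and a validity mask) back before accumulating
        H_inv = torch.inverse(H)
        acc += KGT.warp_perspective(heat, H_inv, dsize=(h, w))
        count += KGT.warp_perspective(torch.ones_like(img), H_inv, dsize=(h, w))
    return acc / count.clamp(min=1e-6)
```

In the actual pipeline, this kind of aggregation is applied to the detector's junction and line heatmaps, and the line segments are only recovered from the aggregated maps in Step 3.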
- -
-
 Step 3: Compute the ground truth line segments from the raw data - -```bash -python -m sold2.postprocess.convert_homography_results sold2/config/export_line_features.yaml -``` - -We recommend testing the results on a few samples of your dataset to check the quality of the output, and modifying the hyperparameters if need be. For example, `detect_thresh=0.5` and `inlier_thresh=0.99` worked well for the Wireframe dataset in our case. -
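As a purely illustrative aid, the two hyperparameters mentioned above would be set in `sold2/config/export_line_features.yaml` roughly as follows; only the parameter names and the values quoted above come from this README, while the surrounding structure of the file is an assumption:

```yaml
# Sketch of the relevant entries in export_line_features.yaml (file layout assumed)
detect_thresh: 0.5    # line detection threshold that worked well on Wireframe
inlier_thresh: 0.99   # inlier threshold that worked well on Wireframe
```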
- -
- Step 4: Train the detector on the Wireframe dataset - -We found it easier to pretrain the detector alone first, before fine-tuning it with the descriptor part. -Uncomment the lines 'gt_source_train' and 'gt_source_test' in `config/wireframe_dataset.yaml` and fill them with the path to the h5 file generated in the previous step. -```bash -python -m sold2.experiment --mode train --dataset_config sold2/config/wireframe_dataset.yaml --model_config sold2/config/train_detector.yaml --exp_name sold2_wireframe -``` - -Alternatively, you can also fine-tune the already trained synthetic model: -```bash -python -m sold2.experiment --mode train --dataset_config sold2/config/wireframe_dataset.yaml --model_config sold2/config/train_detector.yaml --exp_name sold2_wireframe --pretrained --pretrained_path --checkpoint_name -``` - -Lastly, you can resume a training that was stopped: -```bash -python -m sold2.experiment --mode train --dataset_config sold2/config/wireframe_dataset.yaml --model_config sold2/config/train_detector.yaml --exp_name sold2_wireframe --resume --resume_path --checkpoint_name -``` -
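To make the "uncomment and fill in" instruction at the beginning of this step concrete, the two entries in `config/wireframe_dataset.yaml` would end up looking roughly like the sketch below; the key names and the h5 format come from this README, while the paths are placeholders for the files exported in Step 2:

```yaml
# Sketch: gt_source_* entries in wireframe_dataset.yaml after uncommenting (paths are placeholders)
gt_source_train: /path/to/export/wireframe_train.h5
gt_source_test: /path/to/export/wireframe_test.h5
```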
- -
- Step 5: Train the full pipeline on the Wireframe dataset - -You first need to modify the field 'return_type' in `config/wireframe_dataset.yaml` to 'paired_desc'. The following command will then train the full model (detector + descriptor) on the Wireframe dataset: -```bash -python -m sold2.experiment --mode train --dataset_config sold2/config/wireframe_dataset.yaml --model_config sold2/config/train_full_pipeline.yaml --exp_name sold2_full_wireframe --pretrained --pretrained_path --checkpoint_name -``` -
-
 - -### Pretrained models - -We provide the checkpoints of two pretrained models: -- [sold2_synthetic.tar](https://www.polybox.ethz.ch/index.php/s/Lu8jWo7nMKal9yb): SOLD² detector trained on the synthetic dataset only. -- [sold2_wireframe.tar](https://www.polybox.ethz.ch/index.php/s/blOrW89gqSLoHOk): full version of SOLD² trained on the Wireframe dataset. - -Note that you do not need to untar the models, you can directly use them as they are. - - -### How to use it - -We provide a [notebook](notebooks/match_lines.ipynb) showing how to use the trained model of SOLD². Additionally, you can use the model to export line features (segments and descriptor maps) as follows: -```bash -python -m sold2.export_line_features --img_list --output_folder --checkpoint_path -``` - -You can tune some of the line detection parameters in `config/export_line_features.yaml`, in particular the 'detect_thresh' and 'inlier_thresh' to adapt them to your trained model and type of images. As the line detection can be sensitive to the image resolution, we recommend using it with images in the range 300~800 px per side. - - - -## Results - -Comparison of repeatability and localization error to the state of the art on the [Wireframe dataset](https://github.com/huangkuns/wireframe) for an error threshold of 5 pixels in structural and orthogonal distances: -
| Method | Rep-5 (structural) | Loc-5 (structural) | Rep-5 (orthogonal) | Loc-5 (orthogonal) |
| --- | --- | --- | --- | --- |
| LCNN | 0.434 | 2.589 | 0.570 | 1.725 |
| HAWP | 0.451 | 2.625 | 0.537 | 1.725 |
| DeepHough | 0.419 | 2.576 | 0.618 | 1.720 |
| TP-LSD TP512 | 0.563 | 2.467 | 0.746 | 1.450 |
| LSD | 0.358 | 2.079 | 0.707 | 0.825 |
| Ours with NMS | 0.557 | 1.995 | 0.801 | 1.119 |
| Ours | 0.616 | 2.019 | 0.914 | 0.816 |
- -Matching precision-recall curves on the [Wireframe](https://github.com/huangkuns/wireframe) and [ETH3D](https://www.eth3d.net/) datasets: -![pred_lines_pr_curve](assets/results/pred_lines_pr_curve.png) - -## Bibtex - -If you use this code in your project, please consider citing the following paper: -```bibtex -@InProceedings{Pautrat_Lin_2021_CVPR, - author = {Pautrat*, Rémi and Lin*, Juan-Ting and Larsson, Viktor and Oswald, Martin R. and Pollefeys, Marc}, - title = {SOLD2: Self-supervised Occlusion-aware Line Description and Detection}, - booktitle = {Computer Vision and Pattern Recognition (CVPR)}, - year = {2021}, -} -``` diff --git a/spaces/Reself/StableVideo/ldm/modules/image_degradation/bsrgan.py b/spaces/Reself/StableVideo/ldm/modules/image_degradation/bsrgan.py deleted file mode 100644 index 32ef56169978e550090261cddbcf5eb611a6173b..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/ldm/modules/image_degradation/bsrgan.py +++ /dev/null @@ -1,730 +0,0 @@ -# -*- coding: utf-8 -*- -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(30, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - elif i == 1: - image = add_blur(image, sf=sf) - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image":image} - return example - - -# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): - """ - This is an extended degradation model by combining - the degradation models of BSRGAN and Real-ESRGAN - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - use_shuffle: the degradation shuffle - use_sharp: sharpening the img - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - if use_sharp: - img = add_sharpening(img) - hq = img.copy() - - if random.random() < shuffle_prob: - shuffle_order = random.sample(range(13), 13) - else: - shuffle_order = list(range(13)) - # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) - shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) - - poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 - - for i in shuffle_order: - if i == 0: - img = add_blur(img, sf=sf) - elif i == 1: - img = add_resize(img, sf=sf) - elif i == 2: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 3: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 4: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 5: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - elif i == 6: - img = add_JPEG_noise(img) - elif i == 7: - img = add_blur(img, sf=sf) - elif i == 8: - img = add_resize(img, sf=sf) - elif i == 9: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 10: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 11: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 12: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - else: - print('check the shuffle!') - - # resize to desired size - img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), - interpolation=random.choice([1, 2, 3])) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf, lq_patchsize) - 
- return img, hq - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - print(img) - img = util.uint2single(img) - print(img) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_lq = deg_fn(img) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') - - diff --git a/spaces/Rian000/Sayashi/README.md b/spaces/Rian000/Sayashi/README.md deleted file mode 100644 index f345394ac9d55f8a0143ee7e336f81d0ced43c10..0000000000000000000000000000000000000000 --- a/spaces/Rian000/Sayashi/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sayashi -emoji: 👁 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_col1.md b/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_col1.md deleted file mode 100644 index b604c7095046efa21ed06bc3decea593b1b58070..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/helper/text/overview/htrflow/htrflow_col1.md +++ /dev/null @@ -1,18 +0,0 @@ -## Introduction - -The Swedish National Archives introduces a demonstrational end-to-end HTR (Handwritten Text Recognition) pipeline. The pipeline consists of two instance segmentation models, one trained for segmenting text-regions within running-text document images, and another trained for segmenting text-lines within these regions. The text-lines are then transcribed by a text-recognition model trained on a vast set of swedish handwriting ranging from the 17th to the 19th century. - -## Usage - -It needs to be emphasized that this application is intended mainly for demo-purposes. Its aim is to showcase our pipeline for transcribing historical, running-text documents, not to put the pipeline into large-scale production. -**Note**: In the future we’ll optimize the code to suit a production scenario with multi-GPU, batch-inference, but this is still a work in progress.
-
 -For an insight into the upcoming features we are working on: - -- Navigate to **Overview** > **Changelog & Roadmap**. - -## Limitations - -The demo, hosted on Huggingface and assigned a T4 GPU, can only handle two user submissions at a time. If you experience long wait times or unresponsiveness, this is the reason. In the future, we plan to host this solution ourselves, with a better server for an improved user experience, optimized code, and multiple model options. Exciting developments are on the horizon! - -It's also important to note that the models work on running text, not text in table format. diff --git a/spaces/RisticksAI/ProfNet3-Snapy-support-chatbot/app.py b/spaces/RisticksAI/ProfNet3-Snapy-support-chatbot/app.py deleted file mode 100644 index 3410a2440cc1bcab607b1f74f9c1603c7aaafce7..0000000000000000000000000000000000000000 --- a/spaces/RisticksAI/ProfNet3-Snapy-support-chatbot/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr -import start - -def respond(message): - return start.generate(message) - -iface = gr.Interface(fn=respond, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/openpose/body.py b/spaces/Robert001/UniControl-Demo/annotator/openpose/body.py deleted file mode 100644 index c9205f00f90ad4faf62fc0e46095b7eb3338b608..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/openpose/body.py +++ /dev/null @@ -1,229 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala -''' - -import cv2 -import numpy as np -import math -import time -from scipy.ndimage.filters import gaussian_filter -import matplotlib.pyplot as plt -import matplotlib -import torch -from torchvision import transforms - -from . 
import util -from .model import bodypose_model - -class Body(object): - def __init__(self, model_path): - self.model = bodypose_model() - if torch.cuda.is_available(): - self.model = self.model.cuda() - print('cuda') - model_dict = util.transfer(self.model, torch.load(model_path)) - self.model.load_state_dict(model_dict) - self.model.eval() - - def __call__(self, oriImg): - # scale_search = [0.5, 1.0, 1.5, 2.0] - scale_search = [0.5] - boxsize = 368 - stride = 8 - padValue = 128 - thre1 = 0.1 - thre2 = 0.05 - multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] - heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19)) - paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) - - for m in range(len(multiplier)): - scale = multiplier[m] - imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) - imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) - im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 - im = np.ascontiguousarray(im) - - data = torch.from_numpy(im).float() - if torch.cuda.is_available(): - data = data.cuda() - # data = data.permute([2, 0, 1]).unsqueeze(0).float() - with torch.no_grad(): - Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data) - Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy() - Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy() - - # extract outputs, resize, and remove padding - # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps - heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps - heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs - paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs - paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - heatmap_avg += heatmap_avg + heatmap / len(multiplier) - paf_avg += + paf / len(multiplier) - - all_peaks = [] - peak_counter = 0 - - for part in range(18): - map_ori = heatmap_avg[:, :, part] - one_heatmap = gaussian_filter(map_ori, sigma=3) - - map_left = np.zeros(one_heatmap.shape) - map_left[1:, :] = one_heatmap[:-1, :] - map_right = np.zeros(one_heatmap.shape) - map_right[:-1, :] = one_heatmap[1:, :] - map_up = np.zeros(one_heatmap.shape) - map_up[:, 1:] = one_heatmap[:, :-1] - map_down = np.zeros(one_heatmap.shape) - map_down[:, :-1] = one_heatmap[:, 1:] - - peaks_binary = np.logical_and.reduce( - (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1)) - peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse - peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks] - peak_id = range(peak_counter, peak_counter + len(peaks)) - peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))] - - all_peaks.append(peaks_with_score_and_id) - peak_counter += len(peaks) - - # find 
connection in the specified sequence, center 29 is in the position 15 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ - [1, 16], [16, 18], [3, 17], [6, 18]] - # the middle joints heatmap correpondence - mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \ - [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \ - [55, 56], [37, 38], [45, 46]] - - connection_all = [] - special_k = [] - mid_num = 10 - - for k in range(len(mapIdx)): - score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] - candA = all_peaks[limbSeq[k][0] - 1] - candB = all_peaks[limbSeq[k][1] - 1] - nA = len(candA) - nB = len(candB) - indexA, indexB = limbSeq[k] - if (nA != 0 and nB != 0): - connection_candidate = [] - for i in range(nA): - for j in range(nB): - vec = np.subtract(candB[j][:2], candA[i][:2]) - norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) - norm = max(0.001, norm) - vec = np.divide(vec, norm) - - startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \ - np.linspace(candA[i][1], candB[j][1], num=mid_num))) - - vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \ - for I in range(len(startend))]) - vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \ - for I in range(len(startend))]) - - score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) - score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( - 0.5 * oriImg.shape[0] / norm - 1, 0) - criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) - criterion2 = score_with_dist_prior > 0 - if criterion1 and criterion2: - connection_candidate.append( - [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) - - connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) - connection = np.zeros((0, 5)) - for c in range(len(connection_candidate)): - i, j, s = connection_candidate[c][0:3] - if (i not in connection[:, 3] and j not in connection[:, 4]): - connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) - if (len(connection) >= min(nA, nB)): - break - - connection_all.append(connection) - else: - special_k.append(k) - connection_all.append([]) - - # last number in each row is the total parts number of that person - # the second last number in each row is the score of the overall configuration - subset = -1 * np.ones((0, 20)) - candidate = np.array([item for sublist in all_peaks for item in sublist]) - - for k in range(len(mapIdx)): - if k not in special_k: - partAs = connection_all[k][:, 0] - partBs = connection_all[k][:, 1] - indexA, indexB = np.array(limbSeq[k]) - 1 - - for i in range(len(connection_all[k])): # = 1:size(temp,1) - found = 0 - subset_idx = [-1, -1] - for j in range(len(subset)): # 1:size(subset,1): - if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: - subset_idx[found] = j - found += 1 - - if found == 1: - j = subset_idx[0] - if subset[j][indexB] != partBs[i]: - subset[j][indexB] = partBs[i] - subset[j][-1] += 1 - subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] - elif found == 2: # if found 2 and disjoint, merge them - j1, j2 = subset_idx - membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2] - if len(np.nonzero(membership == 2)[0]) == 0: # merge - subset[j1][:-2] += (subset[j2][:-2] + 1) - 
subset[j1][-2:] += subset[j2][-2:] - subset[j1][-2] += connection_all[k][i][2] - subset = np.delete(subset, j2, 0) - else: # as like found == 1 - subset[j1][indexB] = partBs[i] - subset[j1][-1] += 1 - subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] - - # if find no partA in the subset, create a new subset - elif not found and k < 17: - row = -1 * np.ones(20) - row[indexA] = partAs[i] - row[indexB] = partBs[i] - row[-1] = 2 - row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] - subset = np.vstack([subset, row]) - # delete some rows of subset which has few parts occur - deleteIdx = [] - for i in range(len(subset)): - if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: - deleteIdx.append(i) - subset = np.delete(subset, deleteIdx, axis=0) - - # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts - # candidate: x, y, score, id - return candidate, subset - -if __name__ == "__main__": - body_estimation = Body('../model/body_pose_model.pth') - - test_image = '../images/ski.jpg' - oriImg = cv2.imread(test_image) # B,G,R order - candidate, subset = body_estimation(oriImg) - canvas = util.draw_bodypose(oriImg, candidate, subset) - plt.imshow(canvas[:, :, [2, 1, 0]]) - plt.show() diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py deleted file mode 100644 index 200ce8d20c5503f98c5c21f30bb9d00437e25f34..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py +++ /dev/null @@ -1,100 +0,0 @@ -import torch -import torch.nn as nn - -from mmdet.models.builder import HEADS -from ...core import bbox_cxcywh_to_xyxy - - -@HEADS.register_module() -class EmbeddingRPNHead(nn.Module): - """RPNHead in the `Sparse R-CNN `_ . - - Unlike traditional RPNHead, this module does not need FPN input, but just - decode `init_proposal_bboxes` and expand the first dimension of - `init_proposal_bboxes` and `init_proposal_features` to the batch_size. - - Args: - num_proposals (int): Number of init_proposals. Default 100. - proposal_feature_channel (int): Channel number of - init_proposal_feature. Defaults to 256. - """ - - def __init__(self, - num_proposals=100, - proposal_feature_channel=256, - **kwargs): - super(EmbeddingRPNHead, self).__init__() - self.num_proposals = num_proposals - self.proposal_feature_channel = proposal_feature_channel - self._init_layers() - - def _init_layers(self): - """Initialize a sparse set of proposal boxes and proposal features.""" - self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4) - self.init_proposal_features = nn.Embedding( - self.num_proposals, self.proposal_feature_channel) - - def init_weights(self): - """Initialize the init_proposal_bboxes as normalized. - - [c_x, c_y, w, h], and we initialize it to the size of the entire - image. - """ - nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5) - nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1) - - def _decode_init_proposals(self, imgs, img_metas): - """Decode init_proposal_bboxes according to the size of images and - expand dimension of init_proposal_features to batch_size. - - Args: - imgs (list[Tensor]): List of FPN features. - img_metas (list[dict]): List of meta-information of - images. 
Need the img_shape to decode the init_proposals. - - Returns: - Tuple(Tensor): - - - proposals (Tensor): Decoded proposal bboxes, - has shape (batch_size, num_proposals, 4). - - init_proposal_features (Tensor): Expanded proposal - features, has shape - (batch_size, num_proposals, proposal_feature_channel). - - imgs_whwh (Tensor): Tensor with shape - (batch_size, 4), the dimension means - [img_width, img_height, img_width, img_height]. - """ - proposals = self.init_proposal_bboxes.weight.clone() - proposals = bbox_cxcywh_to_xyxy(proposals) - num_imgs = len(imgs[0]) - imgs_whwh = [] - for meta in img_metas: - h, w, _ = meta['img_shape'] - imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]])) - imgs_whwh = torch.cat(imgs_whwh, dim=0) - imgs_whwh = imgs_whwh[:, None, :] - - # imgs_whwh has shape (batch_size, 1, 4) - # The shape of proposals change from (num_proposals, 4) - # to (batch_size ,num_proposals, 4) - proposals = proposals * imgs_whwh - - init_proposal_features = self.init_proposal_features.weight.clone() - init_proposal_features = init_proposal_features[None].expand( - num_imgs, *init_proposal_features.size()) - return proposals, init_proposal_features, imgs_whwh - - def forward_dummy(self, img, img_metas): - """Dummy forward function. - - Used in flops calculation. - """ - return self._decode_init_proposals(img, img_metas) - - def forward_train(self, img, img_metas): - """Forward function in training stage.""" - return self._decode_init_proposals(img, img_metas) - - def simple_test_rpn(self, img, img_metas): - """Forward function in testing stage.""" - return self._decode_init_proposals(img, img_metas) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py deleted file mode 100644 index 3d6e242767b927ed37198b6bc7862abecef99a33..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py +++ /dev/null @@ -1,52 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class FastRCNN(TwoStageDetector): - """Implementation of `Fast R-CNN `_""" - - def __init__(self, - backbone, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None): - super(FastRCNN, self).__init__( - backbone=backbone, - neck=neck, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) - - def forward_test(self, imgs, img_metas, proposals, **kwargs): - """ - Args: - imgs (List[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains all images in the batch. - img_metas (List[List[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. - proposals (List[List[Tensor]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. The Tensor should have a shape Px4, where - P is the number of proposals. 
- """ - for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError(f'{name} must be a list, but got {type(var)}') - - num_augs = len(imgs) - if num_augs != len(img_metas): - raise ValueError(f'num of augmentations ({len(imgs)}) ' - f'!= num of image meta ({len(img_metas)})') - - if num_augs == 1: - return self.simple_test(imgs[0], img_metas[0], proposals[0], - **kwargs) - else: - # TODO: support test-time augmentation - assert NotImplementedError diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py deleted file mode 100644 index eea73520572725f547216ab639c1ebbdfb50834c..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py +++ /dev/null @@ -1,751 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import (anchor_inside_flags, build_anchor_generator, - build_assigner, build_bbox_coder, build_sampler, - images_to_levels, multi_apply, multiclass_nms, unmap) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class AnchorHead(BaseDenseHead, BBoxTestMixin): - """Anchor-based head (RPN, RetinaNet, SSD, etc.). - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - feat_channels (int): Number of hidden channels. Used in child classes. - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be `True` when - using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head. - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - train_cfg (dict): Training config of anchor head. - test_cfg (dict): Testing config of anchor head. 
- """ # noqa: W605 - - def __init__(self, - num_classes, - in_channels, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8, 16, 32], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=(.0, .0, .0, .0), - target_stds=(1.0, 1.0, 1.0, 1.0)), - reg_decoded_bbox=False, - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - train_cfg=None, - test_cfg=None): - super(AnchorHead, self).__init__() - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - # TODO better way to determine whether sample or not - self.sampling = loss_cls['type'] not in [ - 'FocalLoss', 'GHMC', 'QualityFocalLoss' - ] - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - - if self.cls_out_channels <= 0: - raise ValueError(f'num_classes={num_classes} is too small') - self.reg_decoded_bbox = reg_decoded_bbox - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.loss_cls = build_loss(loss_cls) - self.loss_bbox = build_loss(loss_bbox) - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - # use PseudoSampler when sampling is False - if self.sampling and hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - self.fp16_enabled = False - - self.anchor_generator = build_anchor_generator(anchor_generator) - # usually the numbers of anchors for each level are the same - # except SSD detectors - self.num_anchors = self.anchor_generator.num_base_anchors[0] - self._init_layers() - - def _init_layers(self): - """Initialize layers of the head.""" - self.conv_cls = nn.Conv2d(self.in_channels, - self.num_anchors * self.cls_out_channels, 1) - self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1) - - def init_weights(self): - """Initialize weights of the head.""" - normal_init(self.conv_cls, std=0.01) - normal_init(self.conv_reg, std=0.01) - - def forward_single(self, x): - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - - Returns: - tuple: - cls_score (Tensor): Cls scores for a single scale level \ - the channels number is num_anchors * num_classes. - bbox_pred (Tensor): Box energies / deltas for a single scale \ - level, the channels number is num_anchors * 4. - """ - cls_score = self.conv_cls(x) - bbox_pred = self.conv_reg(x) - return cls_score, bbox_pred - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: A tuple of classification scores and bbox prediction. - - - cls_scores (list[Tensor]): Classification scores for all \ - scale levels, each is a 4D-tensor, the channels number \ - is num_anchors * num_classes. - - bbox_preds (list[Tensor]): Box energies / deltas for all \ - scale levels, each is a 4D-tensor, the channels number \ - is num_anchors * 4. - """ - return multi_apply(self.forward_single, feats) - - def get_anchors(self, featmap_sizes, img_metas, device='cuda'): - """Get anchors according to feature map sizes. 
- - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): Device for returned tensors - - Returns: - tuple: - anchor_list (list[Tensor]): Anchors of each image. - valid_flag_list (list[Tensor]): Valid flags of each image. - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # anchors for one time - multi_level_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - # for each image, we compute valid flags of multi level anchors - valid_flag_list = [] - for img_id, img_meta in enumerate(img_metas): - multi_level_flags = self.anchor_generator.valid_flags( - featmap_sizes, img_meta['pad_shape'], device) - valid_flag_list.append(multi_level_flags) - - return anchor_list, valid_flag_list - - def _get_targets_single(self, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors ,4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - gt_bboxes (Tensor): Ground truth bboxes of the image, - shape (num_gts, 4). - gt_bboxes_ignore (Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - img_meta (dict): Meta info of the image. - gt_labels (Tensor): Ground truth labels of each box, - shape (num_gts,). - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. 
- - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level - label_weights_list (list[Tensor]): Label weights of each level - bbox_targets_list (list[Tensor]): BBox targets of each level - bbox_weights_list (list[Tensor]): BBox weights of each level - num_total_pos (int): Number of positive samples in all images - num_total_neg (int): Number of negative samples in all images - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg.allowed_border) - if not inside_flags.any(): - return (None, ) * 7 - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - - assign_result = self.assigner.assign( - anchors, gt_bboxes, gt_bboxes_ignore, - None if self.sampling else gt_labels) - sampling_result = self.sampler.sample(assign_result, anchors, - gt_bboxes) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - else: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - if gt_labels is None: - # Only rpn gives gt_labels as None - # Foreground is the first class since v2.5.0 - labels[pos_inds] = 0 - else: - labels[pos_inds] = gt_labels[ - sampling_result.pos_assigned_gt_inds] - if self.train_cfg.pos_weight <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg.pos_weight - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, - fill=self.num_classes) # fill bg label - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - neg_inds, sampling_result) - - def get_targets(self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True, - return_sampling_results=False): - """Compute regression and classification targets for anchors in - multiple images. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, 4). - valid_flag_list (list[list[Tensor]]): Multi level valid flags of - each image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. 
- unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each \ - level. - - bbox_targets_list (list[Tensor]): BBox targets of each level. - - bbox_weights_list (list[Tensor]): BBox weights of each level. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - additional_returns: This function enables user-defined returns from - `self._get_targets_single`. These returns are currently refined - to properties at each feature map (i.e. having HxW dimension). - The results will be concatenated after the end - """ - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors to a single tensor - concat_anchor_list = [] - concat_valid_flag_list = [] - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - concat_anchor_list.append(torch.cat(anchor_list[i])) - concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - concat_anchor_list, - concat_valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, - pos_inds_list, neg_inds_list, sampling_results_list) = results[:7] - rest_results = list(results[7:]) # user-added return values - # no valid anchors - if any([labels is None for labels in all_labels]): - return None - # sampled anchors of all images - num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) - num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - res = (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - if return_sampling_results: - res = res + (sampling_results_list, ) - for i, r in enumerate(rest_results): # user-added return values - rest_results[i] = images_to_levels(r, num_level_anchors) - - return res + tuple(rest_results) - - def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights, - bbox_targets, bbox_weights, num_total_samples): - """Compute loss of a single scale level. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). 
- label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor wight - shape (N, num_total_anchors, 4). - bbox_weights (Tensor): BBox regression loss weights of each anchor - with shape (N, num_total_anchors, 4). - num_total_samples (int): If sampling, num total samples equal to - the number of total anchors; Otherwise, it is the number of - positive anchors. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - # classification loss - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=num_total_samples) - # regression loss - bbox_targets = bbox_targets.reshape(-1, 4) - bbox_weights = bbox_weights.reshape(-1, 4) - bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, it - # decodes the already encoded coordinates to absolute format. - anchors = anchors.reshape(-1, 4) - bbox_pred = self.bbox_coder.decode(anchors, bbox_pred) - loss_bbox = self.loss_bbox( - bbox_pred, - bbox_targets, - bbox_weights, - avg_factor=num_total_samples) - return loss_cls, loss_bbox - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. Default: None - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - losses_cls, losses_bbox = multi_apply( - self.loss_single, - cls_scores, - bbox_preds, - all_anchor_list, - labels_list, - label_weights_list, - bbox_targets_list, - bbox_weights_list, - num_total_samples=num_total_samples) - return dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def get_bboxes(self, - cls_scores, - bbox_preds, - img_metas, - cfg=None, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each level in the - feature pyramid, has shape - (N, num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each - level in the feature pyramid, has shape - (N, num_anchors * 4, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. 
- - Example: - >>> import mmcv - >>> self = AnchorHead( - >>> num_classes=9, - >>> in_channels=1, - >>> anchor_generator=dict( - >>> type='AnchorGenerator', - >>> scales=[8], - >>> ratios=[0.5, 1.0, 2.0], - >>> strides=[4,])) - >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}] - >>> cfg = mmcv.Config(dict( - >>> score_thr=0.00, - >>> nms=dict(type='nms', iou_thr=1.0), - >>> max_per_img=10)) - >>> feat = torch.rand(1, 1, 3, 3) - >>> cls_score, bbox_pred = self.forward_single(feat) - >>> # note the input lists are over different levels, not images - >>> cls_scores, bbox_preds = [cls_score], [bbox_pred] - >>> result_list = self.get_bboxes(cls_scores, bbox_preds, - >>> img_metas, cfg) - >>> det_bboxes, det_labels = result_list[0] - >>> assert len(result_list) == 1 - >>> assert det_bboxes.shape[1] == 5 - >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img - """ - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - - device = cls_scores[0].device - featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)] - mlvl_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device=device) - - mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)] - mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)] - - if torch.onnx.is_in_onnx_export(): - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shapes = img_metas[0]['img_shape_for_onnx'] - else: - img_shapes = [ - img_metas[i]['img_shape'] - for i in range(cls_scores[0].shape[0]) - ] - scale_factors = [ - img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0]) - ] - - if with_nms: - # some heads don't support with_nms argument - result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds, - mlvl_anchors, img_shapes, - scale_factors, cfg, rescale) - else: - result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds, - mlvl_anchors, img_shapes, - scale_factors, cfg, rescale, - with_nms) - return result_list - - def _get_bboxes(self, - mlvl_cls_scores, - mlvl_bbox_preds, - mlvl_anchors, - img_shapes, - scale_factors, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a batch item into bbox predictions. - - Args: - mlvl_cls_scores (list[Tensor]): Each element in the list is - the scores of bboxes of single level in the feature pyramid, - has shape (N, num_anchors * num_classes, H, W). - mlvl_bbox_preds (list[Tensor]): Each element in the list is the - bboxes predictions of single level in the feature pyramid, - has shape (N, num_anchors * 4, H, W). - mlvl_anchors (list[Tensor]): Each element in the list is - the anchors of single level in feature pyramid, has shape - (num_anchors, 4). - img_shapes (list[tuple[int]]): Each tuple in the list represent - the shape(height, width, 3) of single image in the batch. - scale_factors (list[ndarray]): Scale factor of the batch - image arange as list[(w_scale, h_scale, w_scale, h_scale)]. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. 
- The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len( - mlvl_anchors) - batch_size = mlvl_cls_scores[0].shape[0] - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), - device=mlvl_cls_scores[0].device, - dtype=torch.long) - - mlvl_bboxes = [] - mlvl_scores = [] - for cls_score, bbox_pred, anchors in zip(mlvl_cls_scores, - mlvl_bbox_preds, - mlvl_anchors): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(batch_size, -1, - self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1) - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(batch_size, -1, 4) - anchors = anchors.expand_as(bbox_pred) - # Always keep topk op for dynamic input in onnx - if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export() - or scores.shape[-2] > nms_pre_tensor): - from torch import _shape_as_tensor - # keep shape as tensor and get k - num_anchor = _shape_as_tensor(scores)[-2].to( - nms_pre_tensor.device) - nms_pre = torch.where(nms_pre_tensor < num_anchor, - nms_pre_tensor, num_anchor) - - # Get maximum scores for foreground classes. - if self.use_sigmoid_cls: - max_scores, _ = scores.max(-1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = scores[..., :-1].max(-1) - - _, topk_inds = max_scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds) - anchors = anchors[batch_inds, topk_inds, :] - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - scores = scores[batch_inds, topk_inds, :] - - bboxes = self.bbox_coder.decode( - anchors, bbox_pred, max_shape=img_shapes) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - - batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1) - if rescale: - batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( - scale_factors).unsqueeze(1) - batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) - - # Set max number of box to be feed into nms in deployment - deploy_nms_pre = cfg.get('deploy_nms_pre', -1) - if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export(): - # Get maximum scores for foreground classes. 
- if self.use_sigmoid_cls: - max_scores, _ = batch_mlvl_scores.max(-1) - else: - # remind that we set FG labels to [0, num_class-1] - # since mmdet v2.0 - # BG cat_id: num_class - max_scores, _ = batch_mlvl_scores[..., :-1].max(-1) - _, topk_inds = max_scores.topk(deploy_nms_pre) - batch_inds = torch.arange(batch_size).view(-1, - 1).expand_as(topk_inds) - batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds] - batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds] - if self.use_sigmoid_cls: - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = batch_mlvl_scores.new_zeros(batch_size, - batch_mlvl_scores.shape[1], - 1) - batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) - - if with_nms: - det_results = [] - for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes, - batch_mlvl_scores): - det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores, - cfg.score_thr, cfg.nms, - cfg.max_per_img) - det_results.append(tuple([det_bbox, det_label])) - else: - det_results = [ - tuple(mlvl_bs) - for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores) - ] - return det_results - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. - - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/dm_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/dm_head.py deleted file mode 100644 index 19c963923126b53ce22f60813540a35badf24b3d..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/models/decode_heads/dm_head.py +++ /dev/null @@ -1,140 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer - -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -class DCM(nn.Module): - """Dynamic Convolutional Module used in DMNet. - - Args: - filter_size (int): The filter size of generated convolution kernel - used in Dynamic Convolutional Module. - fusion (bool): Add one conv to fuse DCM output feature. - in_channels (int): Input channels. - channels (int): Channels after modules, before conv_seg. - conv_cfg (dict | None): Config of conv layers. - norm_cfg (dict | None): Config of norm layers. - act_cfg (dict): Config of activation layers. 
- """ - - def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg, - norm_cfg, act_cfg): - super(DCM, self).__init__() - self.filter_size = filter_size - self.fusion = fusion - self.in_channels = in_channels - self.channels = channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1, - 0) - - self.input_redu_conv = ConvModule( - self.in_channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - if self.norm_cfg is not None: - self.norm = build_norm_layer(self.norm_cfg, self.channels)[1] - else: - self.norm = None - self.activate = build_activation_layer(self.act_cfg) - - if self.fusion: - self.fusion_conv = ConvModule( - self.channels, - self.channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, x): - """Forward function.""" - generated_filter = self.filter_gen_conv( - F.adaptive_avg_pool2d(x, self.filter_size)) - x = self.input_redu_conv(x) - b, c, h, w = x.shape - # [1, b * c, h, w], c = self.channels - x = x.view(1, b * c, h, w) - # [b * c, 1, filter_size, filter_size] - generated_filter = generated_filter.view(b * c, 1, self.filter_size, - self.filter_size) - pad = (self.filter_size - 1) // 2 - if (self.filter_size - 1) % 2 == 0: - p2d = (pad, pad, pad, pad) - else: - p2d = (pad + 1, pad, pad + 1, pad) - x = F.pad(input=x, pad=p2d, mode='constant', value=0) - # [1, b * c, h, w] - output = F.conv2d(input=x, weight=generated_filter, groups=b * c) - # [b, c, h, w] - output = output.view(b, c, h, w) - if self.norm is not None: - output = self.norm(output) - output = self.activate(output) - - if self.fusion: - output = self.fusion_conv(output) - - return output - - -@HEADS.register_module() -class DMHead(BaseDecodeHead): - """Dynamic Multi-scale Filters for Semantic Segmentation. - - This head is the implementation of - `DMNet `_. - - Args: - filter_sizes (tuple[int]): The size of generated convolutional filters - used in Dynamic Convolutional Module. Default: (1, 3, 5, 7). - fusion (bool): Add one conv to fuse DCM output feature. 
- """ - - def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs): - super(DMHead, self).__init__(**kwargs) - assert isinstance(filter_sizes, (list, tuple)) - self.filter_sizes = filter_sizes - self.fusion = fusion - dcm_modules = [] - for filter_size in self.filter_sizes: - dcm_modules.append( - DCM(filter_size, - self.fusion, - self.in_channels, - self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - self.dcm_modules = nn.ModuleList(dcm_modules) - self.bottleneck = ConvModule( - self.in_channels + len(filter_sizes) * self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - dcm_outs = [x] - for dcm_module in self.dcm_modules: - dcm_outs.append(dcm_module(x)) - dcm_outs = torch.cat(dcm_outs, dim=1) - output = self.bottleneck(dcm_outs) - output = self.cls_seg(output) - return output diff --git a/spaces/Rominn/vits-uma-genshin-honkai/mel_processing.py b/spaces/Rominn/vits-uma-genshin-honkai/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/Rominn/vits-uma-genshin-honkai/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, 
hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/README.md b/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/README.md deleted file mode 100644 index 499fc63597df7aba3e0ca59e74e478de48af6c82..0000000000000000000000000000000000000000 --- a/spaces/Satyam-Singh/garage-bAInd-Platypus2-70B/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Garage BAInd Platypus2 70B -emoji: 🏆 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_unifiedqav2_fid.py b/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_unifiedqav2_fid.py deleted file mode 100644 index 43da9ac1452aa2aa4d5de48409ced8628b34b093..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/pnp_vqa_models/pnp_unifiedqav2_fid.py +++ /dev/null @@ -1,87 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause - - Based on facebookresearch code base - https://github.com/facebookresearch/FiD -""" - -import torch -import torch.nn as nn -from lavis.common.registry import registry -from lavis.models.base_model import BaseModel -from lavis.common.utils import get_abs_path -from transformers import T5Config, T5Tokenizer, T5ForConditionalGeneration - - -@registry.register_model("pnp_unifiedqav2_fid") -class PNPUnifiedQAv2FiD(T5ForConditionalGeneration, BaseModel): - - PRETRAINED_MODEL_CONFIG_DICT = {} - - def __init__(self, config, model_path): - super().__init__(config) - - self.tokenizer = T5Tokenizer.from_pretrained(model_path) - - def forward(self, input_ids=None, attention_mask=None, **kwargs): - if input_ids != None: - if input_ids.dim() == 3: - self.encoder.num_contexts = input_ids.size(1) - input_ids = input_ids.view(input_ids.size(0), -1) - if attention_mask != None: - attention_mask = attention_mask.view(attention_mask.size(0), -1) - - return super().forward( - input_ids=input_ids, - attention_mask=attention_mask, - **kwargs - ) - - def generate(self, input_ids, attention_mask, num_beams=1, min_length=0, max_length=20): - self.encoder.num_contexts = input_ids.size(1) - - return super().generate( - input_ids=input_ids.view(input_ids.size(0), -1), - attention_mask=attention_mask.view(attention_mask.size(0), -1), - num_beams=num_beams, - min_length=min_length, - max_length=max_length - ) - - def load_unifiedqa(self, state_dict): - self.load_state_dict(state_dict) - self.encoder = T5EncoderWrapper(self.encoder) - - @classmethod - def from_config(cls, cfg): - model_path = cfg.get('pretrained') - t5_config_path = get_abs_path(cfg.get("t5_config_path")) - t5_config = T5Config.from_json_file(t5_config_path) - model = cls(t5_config, model_path) - model.load_unifiedqa(T5ForConditionalGeneration.from_pretrained(model_path).state_dict()) - - return model - - -class T5EncoderWrapper(torch.nn.Module): - - def __init__(self, encoder): - super().__init__() - - self.encoder = encoder - self.block = self.encoder.block - self.parallelize = self.encoder.parallelize - self.main_input_name = encoder.main_input_name - - def forward(self, input_ids=None, attention_mask=None, **kwargs): - bsz, total_length = input_ids.shape - context_length = total_length // self.num_contexts - input_ids = input_ids.view(bsz*self.num_contexts, context_length) - attention_mask = attention_mask.view(bsz*self.num_contexts, context_length) - outputs = self.encoder(input_ids, attention_mask, **kwargs) - outputs = (outputs[0].view(bsz, self.num_contexts*context_length, -1), ) + outputs[1:] - - return outputs \ No newline at end of file diff --git a/spaces/Searchium-ai/Video-Search/README.md b/spaces/Searchium-ai/Video-Search/README.md deleted file mode 100644 index 57b1c5b7cc62608b07c8cd086063781fb37e85c6..0000000000000000000000000000000000000000 --- a/spaces/Searchium-ai/Video-Search/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Clip4Clip Webvid -emoji: 🦀 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Semibit/gentle-audio/Dockerfile b/spaces/Semibit/gentle-audio/Dockerfile deleted file mode 100644 index 638a3fb8790b65502c3aa637f4fa0feaf1b25d99..0000000000000000000000000000000000000000 --- 
a/spaces/Semibit/gentle-audio/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM lowerquality/gentle:latest -RUN apt-get update && apt-get install -y socat && rm -rf /var/lib/apt/lists/* -RUN echo $USER -RUN whoami -# USER node -RUN mkdir -p /gentle/webdata/zip - -RUN chown -R 1000:1000 /gentle/webdata/zip -RUN chown -R 1000:1000 /gentle - -EXPOSE 8765 -EXPOSE 7860 - -CMD sh -c "cd /gentle && python serve.py & socat TCP-LISTEN:7860,fork,reuseaddr TCP:localhost:8765" diff --git a/spaces/SeyedAli/Arabic-Speech-Synthesis/README.md b/spaces/SeyedAli/Arabic-Speech-Synthesis/README.md deleted file mode 100644 index b145006953ba04baca458bfd385693692f3d5bc4..0000000000000000000000000000000000000000 --- a/spaces/SeyedAli/Arabic-Speech-Synthesis/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Arabic Speech Synthesis -emoji: 🔊 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Silentlin/DiffSinger/inference/svs/ds_e2e.py b/spaces/Silentlin/DiffSinger/inference/svs/ds_e2e.py deleted file mode 100644 index 68590a8ebd8132c7e1a9af171e3095afaa2ec56d..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/inference/svs/ds_e2e.py +++ /dev/null @@ -1,67 +0,0 @@ -import torch -# from inference.tts.fs import FastSpeechInfer -# from modules.tts.fs2_orig import FastSpeech2Orig -from inference.svs.base_svs_infer import BaseSVSInfer -from utils import load_ckpt -from utils.hparams import hparams -from usr.diff.shallow_diffusion_tts import GaussianDiffusion -from usr.diffsinger_task import DIFF_DECODERS -from modules.fastspeech.pe import PitchExtractor -import utils - - -class DiffSingerE2EInfer(BaseSVSInfer): - def build_model(self): - model = GaussianDiffusion( - phone_encoder=self.ph_encoder, - out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - model.eval() - load_ckpt(model, hparams['work_dir'], 'model') - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = PitchExtractor().to(self.device) - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - self.pe.eval() - return model - - def forward_model(self, inp): - sample = self.input_to_batch(inp) - txt_tokens = sample['txt_tokens'] # [B, T_t] - spk_id = sample.get('spk_ids') - with torch.no_grad(): - output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True, - pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'], - is_slur=sample['is_slur']) - mel_out = output['mel_out'] # [B, T,80] - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - f0_pred = self.pe(mel_out)['f0_denorm_pred'] # pe predict from Pred mel - else: - f0_pred = output['f0_denorm'] - wav_out = self.run_vocoder(mel_out, f0=f0_pred) - wav_out = wav_out.cpu().numpy() - return wav_out[0] - -if __name__ == '__main__': - inp = { - 'text': '小酒窝长睫毛AP是你最美的记号', - 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4', - 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 
0.283770 | 0.323390 | 0.360340', - 'input_type': 'word' - } # user input: Chinese characters - inp = { - 'text': '小酒窝长睫毛AP是你最美的记号', - 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao', - 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4', - 'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340', - 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0', - 'input_type': 'phoneme' - } # input like Opencpop dataset. - DiffSingerE2EInfer.example_run(inp) - - -# CUDA_VISIBLE_DEVICES=3 python inference/svs/ds_e2e.py --config usr/configs/midi/e2e/opencpop/ds100_adj_rel.yaml --exp_name 0228_opencpop_ds100_rel \ No newline at end of file diff --git a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/stft_loss.py b/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/stft_loss.py deleted file mode 100644 index 229e6c777dc9ec7f710842d1e648dba1189ec8b4..0000000000000000000000000000000000000000 --- a/spaces/Silentlin/DiffSinger/modules/parallel_wavegan/stft_loss.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" -import librosa -import torch - -from modules.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window", - use_mel_loss=False): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - self.use_mel_loss = use_mel_loss - self.mel_basis = None - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - - """ - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - if self.use_mel_loss: - if self.mel_basis is None: - self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T - x_mag = x_mag @ self.mel_basis - y_mag = y_mag @ self.mel_basis - - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window", - use_mel_loss=False): - """Initialize Multi resolution STFT loss module. - - Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. 
- window (str): Window function type. - - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)] - - def forward(self, x, y): - """Calculate forward propagation. - - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. - - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss diff --git a/spaces/SlowBette/ChatBot_gpt3.5/app.py b/spaces/SlowBette/ChatBot_gpt3.5/app.py deleted file mode 100644 index 865b9e8c1e409e7eaa83009dcb08cd64ced523d9..0000000000000000000000000000000000000000 --- a/spaces/SlowBette/ChatBot_gpt3.5/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import openai -import os -import gradio as gr -openai.api_key = os.environ.get("OPENAI_API_KEY") - -class Conversation: - def __init__(self, prompt, num_of_round): - self.prompt = prompt - self.num_of_round = num_of_round - self.messages = [] - self.messages.append({"role": "system", "content": self.prompt}) - - def ask(self, question): - try: - self.messages.append({"role": "user", "content": question}) - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=self.messages, - temperature=0.5, - max_tokens=2048, - top_p=1, - ) - except Exception as e: - print(e) - return e - - message = response["choices"][0]["message"]["content"] - self.messages.append({"role": "assistant", "content": message}) - print(len(self.messages)) - if len(self.messages) > self.num_of_round*2 + 1: - del self.messages[1:3] #Remove the first round conversation left. - return message - - def print_messages(self): - for msg in self.messages: - print(msg['role'] + ": " + msg['content']) - - -prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求: -1. 你的回答必须是中文 -2. 
回答限制在100个字以内""" - -conv = Conversation(prompt, 10) - -def answer(question, history=[]): - history.append(question) - response = conv.ask(question) - history.append(response) - responses = [(u,b) for u,b in zip(history[::2], history[1::2])] - return responses, history - -with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo: - chatbot = gr.Chatbot(elem_id="chatbot") - state = gr.State([]) - - with gr.Row(): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False) - - txt.submit(answer, [txt, state], [chatbot, state]) - -demo.launch() \ No newline at end of file diff --git a/spaces/SouthCity/ShuruiXu/crazy_functions/test_project/latex/attention/background.tex b/spaces/SouthCity/ShuruiXu/crazy_functions/test_project/latex/attention/background.tex deleted file mode 100644 index 785069dc0f9143bad24e640056dd1072d5c6e5b5..0000000000000000000000000000000000000000 --- a/spaces/SouthCity/ShuruiXu/crazy_functions/test_project/latex/attention/background.tex +++ /dev/null @@ -1,58 +0,0 @@ -The goal of reducing sequential computation also forms the foundation of the Extended Neural GPU \citep{extendedngpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as basic building block, computing hidden representations in parallel for all input and output positions. In these models, the number of operations required to relate signals from two arbitrary input or output positions grows in the distance between positions, linearly for ConvS2S and logarithmically for ByteNet. This makes it more difficult to learn dependencies between distant positions \citep{hochreiter2001gradient}. In the Transformer this is reduced to a constant number of operations, albeit at the cost of reduced effective resolution due to averaging attention-weighted positions, an effect we counteract with Multi-Head Attention as described in section~\ref{sec:attention}. - -Self-attention, sometimes called intra-attention is an attention mechanism relating different positions of a single sequence in order to compute a representation of the sequence. Self-attention has been used successfully in a variety of tasks including reading comprehension, abstractive summarization, textual entailment and learning task-independent sentence representations \citep{cheng2016long, decomposableAttnModel, paulus2017deep, lin2017structured}. - -End-to-end memory networks are based on a recurrent attention mechanism instead of sequence-aligned recurrence and have been shown to perform well on simple-language question answering and language modeling tasks \citep{sukhbaatar2015}. - -To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution. -In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as \citep{neural_gpu, NalBytenet2017} and \citep{JonasFaceNet2017}. - - -%\citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs. - -%For example,! in MT, we must draw information from both input and previous output words to translate an output word accurately. 
An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at low computation cost, making it an essential ingredient in competitive recurrent models for machine translation. - -%A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture. - -%After the seminal models introduced in \citep{sutskever14, bahdanau2014neural, cho2014learning}, recurrent models have become the dominant solution for both sequence modeling and sequence-to-sequence transduction. Many efforts such as \citep{wu2016google,luong2015effective,jozefowicz2016exploring} have pushed the boundaries of machine translation (MT) and language modeling with recurrent endoder-decoder and recurrent language models. Recent effort \citep{shazeer2017outrageously} has successfully combined the power of conditional computation with sequence models to train very large models for MT, pushing SOTA at lower computational cost. - -%Recurrent models compute a vector of hidden states $h_t$, for each time step $t$ of computation. $h_t$ is a function of both the input at time $t$ and the previous hidden state $h_t$. This dependence on the previous hidden state precludes processing all timesteps at once, instead requiring long sequences of sequential operations. In practice, this results in greatly reduced computational efficiency, as on modern computing hardware, a single operation on a large batch is much faster than a large number of operations on small batches. The problem gets worse at longer sequence lengths. Although sequential computation is not a severe bottleneck at inference time, as autoregressively generating each output requires all previous outputs, the inability to compute scores at all output positions at once hinders us from rapidly training our models over large datasets. Although impressive work such as \citep{Kuchaiev2017Factorization} is able to significantly accelerate the training of LSTMs with factorization tricks, we are still bound by the linear dependence on sequence length. - -%If the model could compute hidden states at each time step using only the inputs and outputs, it would be liberated from the dependence on results from previous time steps during training. This line of thought is the foundation of recent efforts such as the Markovian neural GPU \citep{neural_gpu}, ByteNet \citep{NalBytenet2017} and ConvS2S \citep{JonasFaceNet2017}, all of which use convolutional neural networks as a building block to compute hidden representations simultaneously for all timesteps, resulting in $O(1)$ sequential time complexity. \citep{JonasFaceNet2017} report new SOTA on machine translation for English-to-German (EnDe), Enlish-to-French (EnFr) and English-to-Romanian language pairs. - -%A crucial component for accurate sequence prediction is modeling cross-positional communication. For example, in MT, we must draw information from both input and previous output words to translate an output word accurately. 
An attention layer \citep{bahdanau2014neural} can connect a very large number of positions at a low computation cost, also $O(1)$ sequential time complexity, making it an essential ingredient in recurrent encoder-decoder architectures for MT. A natural question to ask then is, "Could we replace recurrence with attention?". \marginpar{Don't know if it's the most natural question to ask given the previous statements. Also, need to say that the complexity table summarizes these statements} Such a model would be blessed with the computational efficiency of attention and the power of cross-positional communication. In this work, show that pure attention models work remarkably well for MT, achieving new SOTA results on EnDe and EnFr, and can be trained in under $2$ days on xyz architecture. - - - -%Note: Facebook model is no better than RNNs in this regard, since it requires a number of layers proportional to the distance you want to communicate. Bytenet is more promising, since it requires a logarithmnic number of layers (does bytenet have SOTA results)? - -%Note: An attention layer can connect a very large number of positions at a low computation cost in O(1) sequential operations. This is why encoder-decoder attention has been so successful in seq-to-seq models so far. It is only natural, then, to also use attention to connect the timesteps of the same sequence. - -%Note: I wouldn't say that long sequences are not a problem during inference. It would be great if we could infer with no long sequences. We could just say later on that, while our training graph is constant-depth, our model still requires sequential operations in the decoder part during inference due to the autoregressive nature of the model. - -%\begin{table}[h!] -%\caption{Attention models are quite efficient for cross-positional communications when sequence length is smaller than channel depth. 
$n$ represents the sequence length and $d$ represents the channel depth.} -%\label{tab:op_complexities} -%\begin{center} -%\vspace{-5pt} -%\scalebox{0.75}{ - -%\begin{tabular}{l|c|c|c} -%\hline \hline -%Layer Type & Receptive & Complexity & Sequential \\ -% & Field & & Operations \\ -%\hline -%Pointwise Feed-Forward & $1$ & $O(n \cdot d^2)$ & $O(1)$ \\ -%\hline -%Recurrent & $n$ & $O(n \cdot d^2)$ & $O(n)$ \\ -%\hline -%Convolutional & $r$ & $O(r \cdot n \cdot d^2)$ & $O(1)$ \\ -%\hline -%Convolutional (separable) & $r$ & $O(r \cdot n \cdot d + n %\cdot d^2)$ & $O(1)$ \\ -%\hline -%Attention & $r$ & $O(r \cdot n \cdot d)$ & $O(1)$ \\ -%\hline \hline -%\end{tabular} -%} -%\end{center} -%\end{table} \ No newline at end of file diff --git a/spaces/SpacesExamples/jupyterlab/start_server.sh b/spaces/SpacesExamples/jupyterlab/start_server.sh deleted file mode 100644 index 505a6f60f995b547565c578802442fe1b9064110..0000000000000000000000000000000000000000 --- a/spaces/SpacesExamples/jupyterlab/start_server.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -JUPYTER_TOKEN="${JUPYTER_TOKEN:=huggingface}" - -echo "Starting Jupyter Lab with token $JUPYTER_TOKEN" - -jupyter-lab \ - --ip 0.0.0.0 \ - --port 7860 \ - --no-browser \ - --allow-root \ - --ServerApp.token="$JUPYTER_TOKEN" \ - --ServerApp.tornado_settings="{'headers': {'Content-Security-Policy': 'frame-ancestors *'}}" \ - --ServerApp.cookie_options="{'SameSite': 'None', 'Secure': True}" \ - --ServerApp.disable_check_xsrf=True \ - --LabApp.news_url=None \ - --LabApp.check_for_updates_class="jupyterlab.NeverCheckForUpdate" \ No newline at end of file diff --git a/spaces/Starkate/zo/app.py b/spaces/Starkate/zo/app.py deleted file mode 100644 index 618008db094b4154299e286aee489b5481faa1b8..0000000000000000000000000000000000000000 --- a/spaces/Starkate/zo/app.py +++ /dev/null @@ -1,155 +0,0 @@ -from pathlib import Path -from typing import List, Dict, Tuple -import matplotlib.colors as mpl_colors - -import pandas as pd -import seaborn as sns -import shinyswatch - -import shiny.experimental as x -from shiny import App, Inputs, Outputs, Session, reactive, render, req, ui - -sns.set_theme() - -www_dir = Path(__file__).parent.resolve() / "www" - -df = pd.read_csv(Path(__file__).parent / "penguins.csv", na_values="NA") -numeric_cols: List[str] = df.select_dtypes(include=["float64"]).columns.tolist() -species: List[str] = df["Species"].unique().tolist() -species.sort() - -app_ui = x.ui.page_fillable( - shinyswatch.theme.minty(), - ui.layout_sidebar( - ui.panel_sidebar( - # Artwork by @allison_horst - ui.input_selectize( - "xvar", - "X variable", - numeric_cols, - selected="Bill Length (mm)", - ), - ui.input_selectize( - "yvar", - "Y variable", - numeric_cols, - selected="Bill Depth (mm)", - ), - ui.input_checkbox_group( - "species", "Filter by species", species, selected=species - ), - ui.hr(), - ui.input_switch("by_species", "Show species", value=True), - ui.input_switch("show_margins", "Show marginal plots", value=True), - width=2, - ), - ui.panel_main( - ui.output_ui("value_boxes"), - x.ui.output_plot("scatter", fill=True), - ui.help_text( - "Artwork by ", - ui.a("@allison_horst", href="https://twitter.com/allison_horst"), - class_="text-end", - ), - ), - ), -) - - -def server(input: Inputs, output: Outputs, session: Session): - @reactive.Calc - def filtered_df() -> pd.DataFrame: - """Returns a Pandas data frame that includes only the desired rows""" - - # This calculation "req"uires that at least one species is selected - req(len(input.species()) > 0) - - 
# Filter the rows so we only include the desired species - return df[df["Species"].isin(input.species())] - - @output - @render.plot - def scatter(): - """Generates a plot for Shiny to display to the user""" - - # The plotting function to use depends on whether margins are desired - plotfunc = sns.jointplot if input.show_margins() else sns.scatterplot - - plotfunc( - data=filtered_df(), - x=input.xvar(), - y=input.yvar(), - palette=palette, - hue="Species" if input.by_species() else None, - hue_order=species, - legend=False, - ) - - @output - @render.ui - def value_boxes(): - df = filtered_df() - - def penguin_value_box(title: str, count: int, bgcol: str, showcase_img: str): - return x.ui.value_box( - title, - count, - {"class_": "pt-1 pb-0"}, - showcase=x.ui.as_fill_item( - ui.tags.img( - {"style": "object-fit:contain;"}, - src=showcase_img, - ) - ), - theme_color=None, - style=f"background-color: {bgcol};", - ) - - if not input.by_species(): - return penguin_value_box( - "Penguins", - len(df.index), - bg_palette["default"], - # Artwork by @allison_horst - showcase_img="penguins.png", - ) - - value_boxes = [ - penguin_value_box( - name, - len(df[df["Species"] == name]), - bg_palette[name], - # Artwork by @allison_horst - showcase_img=f"{name}.png", - ) - for name in species - # Only include boxes for _selected_ species - if name in input.species() - ] - - return x.ui.layout_column_wrap(1 / len(value_boxes), *value_boxes) - - -# "darkorange", "purple", "cyan4" -colors = [[255, 140, 0], [160, 32, 240], [0, 139, 139]] -colors = [(r / 255.0, g / 255.0, b / 255.0) for r, g, b in colors] - -palette: Dict[str, Tuple[float, float, float]] = { - "Adelie": colors[0], - "Chinstrap": colors[1], - "Gentoo": colors[2], - "default": sns.color_palette()[0], # type: ignore -} - -bg_palette = {} -# Use `sns.set_style("whitegrid")` to help find approx alpha value -for name, col in palette.items(): - # Adjusted n_colors until `axe` accessibility did not complain about color contrast - bg_palette[name] = mpl_colors.to_hex(sns.light_palette(col, n_colors=7)[1]) # type: ignore - - -app = App( - app_ui, - server, - static_assets=str(www_dir), -) diff --git a/spaces/Sumit7864/Image-Enhancer/inference_realesrgan.py b/spaces/Sumit7864/Image-Enhancer/inference_realesrgan.py deleted file mode 100644 index 0c56682a7b0736cc56a539b4387d1330bd22bba0..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/inference_realesrgan.py +++ /dev/null @@ -1,88 +0,0 @@ -import argparse -import cv2 -import glob -import os -from basicsr.archs.rrdbnet_arch import RRDBNet -from basicsr.utils.download_util import load_file_from_url - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch import SRVGGNetCompact - - -def main(**args): - """Inference demo for Real-ESRGAN. 
- """ - print(args) - - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth'] - - - # determine model paths - model_path = os.path.join('weights', 'RealESRGAN_x4plus.pth') - - # use dni to control the denoise strength - dni_weight = None - - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - dni_weight=dni_weight, - model=model, - tile=0, - tile_pad=10, - pre_pad=0, - half=not args['fp32'], - gpu_id=None) - - if args['face_enhance'] == 'True': # Use GFPGAN for face enhancement - from gfpgan import GFPGANer - face_enhancer = GFPGANer( - model_path=os.path.join('weights', 'GFPGANv1.3.pth'), - upscale=args['outscale'], - arch='clean', - channel_multiplier=2, - bg_upsampler=upsampler) - - # os.makedirs(args.output, exist_ok=True) - - path = args['input'] - - # for idx, path in enumerate(paths): - imgname, extension = os.path.splitext(os.path.basename(path)) - # print('Testing', idx, imgname) - - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - else: - img_mode = None - - try: - if args['face_enhance']: - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - else: - output, _ = upsampler.enhance(img, outscale=args['outscale']) - except RuntimeError as error: - print('Error', error) - print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') - else: - if args['ext'] == 'auto': - extension = extension[1:] - else: - extension = args['ext'] - - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - # if args.suffix == '': - save_path = os.path.join(args['output'], f'{imgname}.{extension}') - # else: - # save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}') - cv2.imwrite(save_path, output) - return save_path - # return output - -# if __name__ == '__main__': -# main() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/asyncio.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/asyncio.py deleted file mode 100644 index d2499e11e684a86ecad01bf34569efc6526cd792..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/asyncio.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -Inputhook for running the original asyncio event loop while we're waiting for -input. - -By default, in IPython, we run the prompt with a different asyncio event loop, -because otherwise we risk that people are freezing the prompt by scheduling bad -coroutines. E.g., a coroutine that does a while/true and never yield back -control to the loop. We can't cancel that. - -However, sometimes we want the asyncio loop to keep running while waiting for -a prompt. - -The following example will print the numbers from 1 to 10 above the prompt, -while we are waiting for input. 
(This works also because we use -prompt_toolkit`s `patch_stdout`):: - - In [1]: import asyncio - - In [2]: %gui asyncio - - In [3]: async def f(): - ...: for i in range(10): - ...: await asyncio.sleep(1) - ...: print(i) - - - In [4]: asyncio.ensure_future(f()) - -""" -from prompt_toolkit import __version__ as ptk_version - -from IPython.core.async_helpers import get_asyncio_loop - -PTK3 = ptk_version.startswith("3.") - - -def inputhook(context): - """ - Inputhook for asyncio event loop integration. - """ - # For prompt_toolkit 3.0, this input hook literally doesn't do anything. - # The event loop integration here is implemented in `interactiveshell.py` - # by running the prompt itself in the current asyncio loop. The main reason - # for this is that nesting asyncio event loops is unreliable. - if PTK3: - return - - # For prompt_toolkit 2.0, we can run the current asyncio event loop, - # because prompt_toolkit 2.0 uses a different event loop internally. - - # get the persistent asyncio event loop - loop = get_asyncio_loop() - - def stop(): - loop.stop() - - fileno = context.fileno() - loop.add_reader(fileno, stop) - try: - loop.run_forever() - finally: - loop.remove_reader(fileno) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_base.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_base.py deleted file mode 100644 index 8bfaa1f733a08ef4a2f6ccee69dc44739ff8021c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_base.py +++ /dev/null @@ -1,42 +0,0 @@ -from chromadb.db.base import ParameterValue, get_sql -import pypika - - -def test_value_params_default() -> None: - t = pypika.Table("foo") - - original_query = ( - pypika.Query.from_(t) - .select(t.a, t.b) - .where(t.a == pypika.Parameter("?")) - .where(t.b == pypika.Parameter("?")) - ) - - value_based_query = ( - pypika.Query.from_(t) - .select(t.a, t.b) - .where(t.a == ParameterValue(42)) - .where(t.b == ParameterValue(43)) - ) - sql, values = get_sql(value_based_query) - assert sql == original_query.get_sql() - assert values == (42, 43) - - -def test_value_params_numeric() -> None: - t = pypika.Table("foo") - original_query = ( - pypika.Query.from_(t) - .select(t.a, t.b) - .where(t.a == pypika.NumericParameter(1)) - .where(t.b == pypika.NumericParameter(2)) - ) - value_based_query = ( - pypika.Query.from_(t) - .select(t.a, t.b) - .where(t.a == ParameterValue(42)) - .where(t.b == ParameterValue(43)) - ) - sql, values = get_sql(value_based_query, formatstr=":{}") - assert sql == original_query.get_sql() - assert values == (42, 43) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/attach_script.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/attach_script.py deleted file mode 100644 index af23e56138fa28fa678120ec0b50e96a18e96b91..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydevd_attach_to_process/attach_script.py +++ /dev/null @@ -1,188 +0,0 @@ - - -def get_main_thread_instance(threading): - if hasattr(threading, 'main_thread'): - return threading.main_thread() - else: - # On Python 2 we don't really have an API to get the main thread, - # so, we just get it from the 'shutdown' bound method. 
- return threading._shutdown.im_self - - -def get_main_thread_id(unlikely_thread_id=None): - ''' - :param unlikely_thread_id: - Pass to mark some thread id as not likely the main thread. - - :return tuple(thread_id, critical_warning) - ''' - import sys - import os - - current_frames = sys._current_frames() - possible_thread_ids = [] - for thread_ident, frame in current_frames.items(): - while frame.f_back is not None: - frame = frame.f_back - - basename = os.path.basename(frame.f_code.co_filename) - if basename.endswith(('.pyc', '.pyo')): - basename = basename[:-1] - - if (frame.f_code.co_name, basename) in [ - ('_run_module_as_main', 'runpy.py'), - ('_run_module_as_main', ''), - ('run_module_as_main', 'runpy.py'), - ('run_module', 'runpy.py'), - ('run_path', 'runpy.py'), - ]: - # This is the case for python -m (this is an ideal match, so, - # let's return it). - return thread_ident, '' - - if frame.f_code.co_name == '': - if frame.f_globals.get('__name__') == '__main__': - possible_thread_ids.insert(0, thread_ident) # Add with higher priority - continue - - # Usually the main thread will be started in the , whereas others would - # be started in another place (but when Python is embedded, this may not be - # correct, so, just add to the available possibilities as we'll have to choose - # one if there are multiple). - possible_thread_ids.append(thread_ident) - - if len(possible_thread_ids) > 0: - if len(possible_thread_ids) == 1: - return possible_thread_ids[0], '' # Ideal: only one match - - while unlikely_thread_id in possible_thread_ids: - possible_thread_ids.remove(unlikely_thread_id) - - if len(possible_thread_ids) == 1: - return possible_thread_ids[0], '' # Ideal: only one match - - elif len(possible_thread_ids) > 1: - # Bad: we can't really be certain of anything at this point. - return possible_thread_ids[0], \ - 'Multiple thread ids found (%s). Choosing main thread id randomly (%s).' % ( - possible_thread_ids, possible_thread_ids[0]) - - # If we got here we couldn't discover the main thread id. - return None, 'Unable to discover main thread id.' - - -def fix_main_thread_id(on_warn=lambda msg:None, on_exception=lambda msg:None, on_critical=lambda msg:None): - # This means that we weren't able to import threading in the main thread (which most - # likely means that the main thread is paused or in some very long operation). - # In this case we'll import threading here and hotfix what may be wrong in the threading - # module (if we're on Windows where we create a thread to do the attach and on Linux - # we are not certain on which thread we're executing this code). - # - # The code below is a workaround for https://bugs.python.org/issue37416 - import sys - import threading - - try: - with threading._active_limbo_lock: - main_thread_instance = get_main_thread_instance(threading) - - if sys.platform == 'win32': - # On windows this code would be called in a secondary thread, so, - # the current thread is unlikely to be the main thread. 
- if hasattr(threading, '_get_ident'): - unlikely_thread_id = threading._get_ident() # py2 - else: - unlikely_thread_id = threading.get_ident() # py3 - else: - unlikely_thread_id = None - - main_thread_id, critical_warning = get_main_thread_id(unlikely_thread_id) - - if main_thread_id is not None: - main_thread_id_attr = '_ident' - if not hasattr(main_thread_instance, main_thread_id_attr): - main_thread_id_attr = '_Thread__ident' - assert hasattr(main_thread_instance, main_thread_id_attr) - - if main_thread_id != getattr(main_thread_instance, main_thread_id_attr): - # Note that we also have to reset the '_tstack_lock' for a regular lock. - # This is needed to avoid an error on shutdown because this lock is bound - # to the thread state and will be released when the secondary thread - # that initialized the lock is finished -- making an assert fail during - # process shutdown. - main_thread_instance._tstate_lock = threading._allocate_lock() - main_thread_instance._tstate_lock.acquire() - - # Actually patch the thread ident as well as the threading._active dict - # (we should have the _active_limbo_lock to do that). - threading._active.pop(getattr(main_thread_instance, main_thread_id_attr), None) - setattr(main_thread_instance, main_thread_id_attr, main_thread_id) - threading._active[getattr(main_thread_instance, main_thread_id_attr)] = main_thread_instance - - # Note: only import from pydevd after the patching is done (we want to do the minimum - # possible when doing that patching). - on_warn('The threading module was not imported by user code in the main thread. The debugger will attempt to work around https://bugs.python.org/issue37416.') - - if critical_warning: - on_critical('Issue found when debugger was trying to work around https://bugs.python.org/issue37416:\n%s' % (critical_warning,)) - except: - on_exception('Error patching main thread id.') - - -def attach(port, host, protocol='', debug_mode=''): - try: - import sys - fix_main_thread = 'threading' not in sys.modules - - if fix_main_thread: - - def on_warn(msg): - from _pydev_bundle import pydev_log - pydev_log.warn(msg) - - def on_exception(msg): - from _pydev_bundle import pydev_log - pydev_log.exception(msg) - - def on_critical(msg): - from _pydev_bundle import pydev_log - pydev_log.critical(msg) - - fix_main_thread_id(on_warn=on_warn, on_exception=on_exception, on_critical=on_critical) - - else: - from _pydev_bundle import pydev_log # @Reimport - pydev_log.debug('The threading module is already imported by user code.') - - if protocol: - from _pydevd_bundle import pydevd_defaults - pydevd_defaults.PydevdCustomization.DEFAULT_PROTOCOL = protocol - - if debug_mode: - from _pydevd_bundle import pydevd_defaults - pydevd_defaults.PydevdCustomization.DEBUG_MODE = debug_mode - - import pydevd - - # I.e.: disconnect/reset if already connected. 
- - pydevd.SetupHolder.setup = None - - py_db = pydevd.get_global_debugger() - if py_db is not None: - py_db.dispose_and_kill_all_pydevd_threads(wait=False) - - # pydevd.DebugInfoHolder.DEBUG_TRACE_LEVEL = 3 - pydevd.settrace( - port=port, - host=host, - stdoutToServer=True, - stderrToServer=True, - overwrite_prev_trace=True, - suspend=False, - trace_only_current_thread=False, - patch_multiprocessing=False, - ) - except: - import traceback - traceback.print_exc() diff --git a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/base_model_hg.py b/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/base_model_hg.py deleted file mode 100644 index 1709accdf0b048b3793dfd1f58d1b06c35f7b907..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/leres/pix2pix/models/base_model_hg.py +++ /dev/null @@ -1,58 +0,0 @@ -import os -import torch - -class BaseModelHG(): - def name(self): - return 'BaseModel' - - def initialize(self, opt): - self.opt = opt - self.gpu_ids = opt.gpu_ids - self.isTrain = opt.isTrain - self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor - self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) - - def set_input(self, input): - self.input = input - - def forward(self): - pass - - # used in test time, no backprop - def test(self): - pass - - def get_image_paths(self): - pass - - def optimize_parameters(self): - pass - - def get_current_visuals(self): - return self.input - - def get_current_errors(self): - return {} - - def save(self, label): - pass - - # helper saving function that can be used by subclasses - def save_network(self, network, network_label, epoch_label, gpu_ids): - save_filename = '_%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - torch.save(network.cpu().state_dict(), save_path) - if len(gpu_ids) and torch.cuda.is_available(): - network.cuda(device_id=gpu_ids[0]) - - # helper loading function that can be used by subclasses - def load_network(self, network, network_label, epoch_label): - save_filename = '%s_net_%s.pth' % (epoch_label, network_label) - save_path = os.path.join(self.save_dir, save_filename) - print(save_path) - model = torch.load(save_path) - return model - # network.load_state_dict(torch.load(save_path)) - - def update_learning_rate(): - pass diff --git a/spaces/TEnngal/bingo/src/components/chat.tsx b/spaces/TEnngal/bingo/src/components/chat.tsx deleted file mode 100644 index a37ab1cc96ca2e6bfd9acbe313a8d946bfd5c3d4..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/chat.tsx +++ /dev/null @@ -1,93 +0,0 @@ -'use client' - -import { useCallback, useEffect, useMemo, useState } from 'react' -import { useAtom } from 'jotai' -import Image from 'next/image' -import { cn } from '@/lib/utils' -import { ChatList } from '@/components/chat-list' -import { ChatPanel } from '@/components/chat-panel' -import { WelcomeScreen } from '@/components/welcome-screen' -import { ChatScrollAnchor } from '@/components/chat-scroll-anchor' -import { ToneSelector } from './tone-selector' -import { ChatHeader } from './chat-header' -import { ChatSuggestions } from './chat-suggestions' -import { bingConversationStyleAtom } from '@/state' -import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom' -import StopIcon from '@/assets/images/stop.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { ChatNotification } from 
'./chat-notification' -import { Settings } from './settings' -import { ChatHistory } from './chat-history' - -export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: ChatMessageModel[] } - -export default function Chat({ className }: ChatProps) { - - const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom) - const { - messages, - sendMessage, - resetConversation, - stopGenerating, - setInput, - bot, - input, - generating, - isSpeaking, - uploadImage, - attachmentList, - setAttachmentList, - } = useBing() - - useEffect(() => { - window.scrollTo({ - top: document.body.offsetHeight, - behavior: 'smooth' - }) - }, []) - - return ( -
- -
- - - - {messages.length ? ( - <> - - - - - - {generating ? ( -
- -
- ) : null} - - ) : null} -
- - -
- ) -} diff --git a/spaces/TEnngal/bingo/src/pages/api/sydney.ts b/spaces/TEnngal/bingo/src/pages/api/sydney.ts deleted file mode 100644 index 8bd7074bc72bd2803e4acf89d3814908893ff044..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/pages/api/sydney.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - const id = headers['x-forwarded-for'] - - debug(id, headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - debug(id, 'timeout') - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 3000) - closeDog.watch(() => { - debug(id, 'timeout close') - ws.close() - }, 20000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug(id, 'connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug(id, 'ws close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - debug(id, 'connection close') - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/THUDM/CodeGeeX/app.py b/spaces/THUDM/CodeGeeX/app.py deleted file mode 100644 index afb8bc7689f62af81bb0cd59bd5dd849d2b9c67f..0000000000000000000000000000000000000000 --- a/spaces/THUDM/CodeGeeX/app.py +++ /dev/null @@ -1,112 +0,0 @@ -import json -import os - -import gradio as gr -import requests - -APIKEY = os.environ.get("APIKEY") -APISECRET = os.environ.get("APISECRET") - - -def predict(prompt, lang, seed, out_seq_length, temperature, top_k, top_p): - global APIKEY - global APISECRET - - if prompt == '': - return 'Input should not be empty!' - - url = 'https://tianqi.aminer.cn/api/v2/multilingual_code_generate_block' - - payload = json.dumps({ - "apikey" : APIKEY, - "apisecret" : APISECRET, - "prompt" : prompt, - "lang" : lang, - "out_seq_length": out_seq_length, - "seed" : seed, - "temperature" : temperature, - "top_k" : top_k, - "top_p" : top_p, - }) - - headers = { - 'Content-Type': 'application/json' - } - - try: - response = requests.request("POST", url, headers=headers, data=payload, timeout=(20, 100)).json() - except Exception as e: - return 'Timeout! 
Please wait a few minutes and retry' - - if response['status'] == 1: - return response['message'] - - answer = response['result']['output']['code'][0] - - return prompt + answer - - -def main(): - gr.close_all() - examples = [] - with open("./example_inputs.jsonl", "r") as f: - for line in f: - examples.append(list(json.loads(line).values())) - - with gr.Blocks() as demo: - gr.Markdown( - """ - - """) - gr.Markdown( - """ -

- 🏠 Homepage | 📖 Blog | 🪧 DEMO | 🛠 VS Code or Jetbrains Extensions | 💻 Source code | 🤖 Download Model -

- """) - gr.Markdown( - """ - We introduce CodeGeeX, a large-scale multilingual code generation model with 13 billion parameters, pre-trained on a large code corpus of more than 20 programming languages. CodeGeeX supports 15+ programming languages for both code generation and translation. CodeGeeX is open source, please refer to our [GitHub](https://github.com/THUDM/CodeGeeX) for more details. This is a minimal-functional DEMO, for other DEMOs like code translation, please visit our [Homepage](https://codegeex.cn). We also offer free [VS Code](https://marketplace.visualstudio.com/items?itemName=aminer.codegeex) or [Jetbrains](https://plugins.jetbrains.com/plugin/20587-codegeex) extensions for full functionality. - """) - - with gr.Row(): - with gr.Column(): - prompt = gr.Textbox(lines=13, placeholder='Please enter the description or select an example input below.',label='Input') - with gr.Row(): - gen = gr.Button("Generate") - clr = gr.Button("Clear") - - outputs = gr.Textbox(lines=15, label='Output') - - gr.Markdown( - """ - Generation Parameter - """) - with gr.Row(): - with gr.Column(): - lang = gr.Radio( - choices=["C++", "C", "C#", "Python", "Java", "HTML", "PHP", "JavaScript", "TypeScript", "Go", - "Rust", - "SQL", "Kotlin", "R", "Fortran"], value='lang', label='Programming Language', - default="Python") - with gr.Column(): - seed = gr.Slider(maximum=10000, value=8888, step=1, label='Seed') - with gr.Row(): - out_seq_length = gr.Slider(maximum=1024, value=128, minimum=1, step=1, label='Output Sequence Length') - temperature = gr.Slider(maximum=1, value=0.2, minimum=0, label='Temperature') - with gr.Row(): - top_k = gr.Slider(maximum=40, value=0, minimum=0, step=1, label='Top K') - top_p = gr.Slider(maximum=1, value=1.0, minimum=0, label='Top P') - - inputs = [prompt, lang, seed, out_seq_length, temperature, top_k, top_p] - gen.click(fn=predict, inputs=inputs, outputs=outputs) - clr.click(fn=lambda value: gr.update(value=""), inputs=clr, outputs=prompt) - - gr_examples = gr.Examples(examples=examples, inputs=[prompt, lang], - label="Example Inputs (Click to insert an examplet it into the input box)", - examples_per_page=20) - - demo.launch() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py deleted file mode 100644 index dd0d648d49a7c1a62d25ce5c9107aa448a8a22d1..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/packaging/_tokenizer.py +++ /dev/null @@ -1,192 +0,0 @@ -import contextlib -import re -from dataclasses import dataclass -from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union - -from .specifiers import Specifier - - -@dataclass -class Token: - name: str - text: str - position: int - - -class ParserSyntaxError(Exception): - """The provided source text could not be parsed correctly.""" - - def __init__( - self, - message: str, - *, - source: str, - span: Tuple[int, int], - ) -> None: - self.span = span - self.message = message - self.source = source - - super().__init__() - - def __str__(self) -> str: - marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^" - return "\n ".join([self.message, self.source, marker]) - - -DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = { - "LEFT_PARENTHESIS": 
r"\(", - "RIGHT_PARENTHESIS": r"\)", - "LEFT_BRACKET": r"\[", - "RIGHT_BRACKET": r"\]", - "SEMICOLON": r";", - "COMMA": r",", - "QUOTED_STRING": re.compile( - r""" - ( - ('[^']*') - | - ("[^"]*") - ) - """, - re.VERBOSE, - ), - "OP": r"(===|==|~=|!=|<=|>=|<|>)", - "BOOLOP": r"\b(or|and)\b", - "IN": r"\bin\b", - "NOT": r"\bnot\b", - "VARIABLE": re.compile( - r""" - \b( - python_version - |python_full_version - |os[._]name - |sys[._]platform - |platform_(release|system) - |platform[._](version|machine|python_implementation) - |python_implementation - |implementation_(name|version) - |extra - )\b - """, - re.VERBOSE, - ), - "SPECIFIER": re.compile( - Specifier._operator_regex_str + Specifier._version_regex_str, - re.VERBOSE | re.IGNORECASE, - ), - "AT": r"\@", - "URL": r"[^ \t]+", - "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b", - "VERSION_PREFIX_TRAIL": r"\.\*", - "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*", - "WS": r"[ \t]+", - "END": r"$", -} - - -class Tokenizer: - """Context-sensitive token parsing. - - Provides methods to examine the input stream to check whether the next token - matches. - """ - - def __init__( - self, - source: str, - *, - rules: "Dict[str, Union[str, re.Pattern[str]]]", - ) -> None: - self.source = source - self.rules: Dict[str, re.Pattern[str]] = { - name: re.compile(pattern) for name, pattern in rules.items() - } - self.next_token: Optional[Token] = None - self.position = 0 - - def consume(self, name: str) -> None: - """Move beyond provided token name, if at current position.""" - if self.check(name): - self.read() - - def check(self, name: str, *, peek: bool = False) -> bool: - """Check whether the next token has the provided name. - - By default, if the check succeeds, the token *must* be read before - another check. If `peek` is set to `True`, the token is not loaded and - would need to be checked again. - """ - assert ( - self.next_token is None - ), f"Cannot check for {name!r}, already have {self.next_token!r}" - assert name in self.rules, f"Unknown token name: {name!r}" - - expression = self.rules[name] - - match = expression.match(self.source, self.position) - if match is None: - return False - if not peek: - self.next_token = Token(name, match[0], self.position) - return True - - def expect(self, name: str, *, expected: str) -> Token: - """Expect a certain token name next, failing with a syntax error otherwise. - - The token is *not* read. 
- """ - if not self.check(name): - raise self.raise_syntax_error(f"Expected {expected}") - return self.read() - - def read(self) -> Token: - """Consume the next token and return it.""" - token = self.next_token - assert token is not None - - self.position += len(token.text) - self.next_token = None - - return token - - def raise_syntax_error( - self, - message: str, - *, - span_start: Optional[int] = None, - span_end: Optional[int] = None, - ) -> NoReturn: - """Raise ParserSyntaxError at the given position.""" - span = ( - self.position if span_start is None else span_start, - self.position if span_end is None else span_end, - ) - raise ParserSyntaxError( - message, - source=self.source, - span=span, - ) - - @contextlib.contextmanager - def enclosing_tokens( - self, open_token: str, close_token: str, *, around: str - ) -> Iterator[None]: - if self.check(open_token): - open_position = self.position - self.read() - else: - open_position = None - - yield - - if open_position is None: - return - - if not self.check(close_token): - self.raise_syntax_error( - f"Expected matching {close_token} for {open_token}, after {around}", - span_start=open_position, - ) - - self.read() diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_data.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_data.py deleted file mode 100644 index 7ba35eef8270c34f183090bfa189358565526899..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_data.py +++ /dev/null @@ -1,83 +0,0 @@ -"""distutils.command.install_data - -Implements the Distutils 'install_data' command, for installing -platform-independent data files.""" - -# contributed by Bastian Kleineidam - -import os -from ..core import Command -from ..util import change_root, convert_path - - -class install_data(Command): - description = "install data files" - - user_options = [ - ( - 'install-dir=', - 'd', - "base directory for installing data files " - "(default: installation base dir)", - ), - ('root=', None, "install everything relative to this alternate root directory"), - ('force', 'f', "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.outfiles = [] - self.root = None - self.force = 0 - self.data_files = self.distribution.data_files - self.warn_dir = 1 - - def finalize_options(self): - self.set_undefined_options( - 'install', - ('install_data', 'install_dir'), - ('root', 'root'), - ('force', 'force'), - ) - - def run(self): - self.mkpath(self.install_dir) - for f in self.data_files: - if isinstance(f, str): - # it's a simple file, so copy it - f = convert_path(f) - if self.warn_dir: - self.warn( - "setup script did not provide a directory for " - "'%s' -- installing right in '%s'" % (f, self.install_dir) - ) - (out, _) = self.copy_file(f, self.install_dir) - self.outfiles.append(out) - else: - # it's a tuple with path to install to and a list of files - dir = convert_path(f[0]) - if not os.path.isabs(dir): - dir = os.path.join(self.install_dir, dir) - elif self.root: - dir = change_root(self.root, dir) - self.mkpath(dir) - - if f[1] == []: - # If there are no files listed, the user must be - # trying to create an empty directory, so add the - # directory to the list of output files. 
- self.outfiles.append(dir) - else: - # Copy files, adding them to the list of output files. - for data in f[1]: - data = convert_path(data) - (out, _) = self.copy_file(data, dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.data_files or [] - - def get_outputs(self): - return self.outfiles diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp deleted file mode 100644 index c9a2cd4f20e6f58be1c5783d67c64232dd59b560..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/vision.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. - -#include -#include "ROIAlignRotated/ROIAlignRotated.h" -#include "box_iou_rotated/box_iou_rotated.h" -#include "cocoeval/cocoeval.h" -#include "deformable/deform_conv.h" -#include "nms_rotated/nms_rotated.h" - -namespace detectron2 { - -#if defined(WITH_CUDA) || defined(WITH_HIP) -extern int get_cudart_version(); -#endif - -std::string get_cuda_version() { -#if defined(WITH_CUDA) || defined(WITH_HIP) - std::ostringstream oss; - -#if defined(WITH_CUDA) - oss << "CUDA "; -#else - oss << "HIP "; -#endif - - // copied from - // https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/CUDAHooks.cpp#L231 - auto printCudaStyleVersion = [&](int v) { - oss << (v / 1000) << "." << (v / 10 % 100); - if (v % 10 != 0) { - oss << "." << (v % 10); - } - }; - printCudaStyleVersion(get_cudart_version()); - return oss.str(); -#else // neither CUDA nor HIP - return std::string("not available"); -#endif -} - -bool has_cuda() { -#if defined(WITH_CUDA) - return true; -#else - return false; -#endif -} - -// similar to -// https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Version.cpp -std::string get_compiler_version() { - std::ostringstream ss; -#if defined(__GNUC__) -#ifndef __clang__ - -#if ((__GNUC__ <= 4) && (__GNUC_MINOR__ <= 8)) -#error "GCC >= 4.9 is required!" -#endif - - { ss << "GCC " << __GNUC__ << "." << __GNUC_MINOR__; } -#endif -#endif - -#if defined(__clang_major__) - { - ss << "clang " << __clang_major__ << "." << __clang_minor__ << "." 
- << __clang_patchlevel__; - } -#endif - -#if defined(_MSC_VER) - { ss << "MSVC " << _MSC_FULL_VER; } -#endif - return ss.str(); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("get_compiler_version", &get_compiler_version, "get_compiler_version"); - m.def("get_cuda_version", &get_cuda_version, "get_cuda_version"); - m.def("has_cuda", &has_cuda, "has_cuda"); - - m.def("deform_conv_forward", &deform_conv_forward, "deform_conv_forward"); - m.def( - "deform_conv_backward_input", - &deform_conv_backward_input, - "deform_conv_backward_input"); - m.def( - "deform_conv_backward_filter", - &deform_conv_backward_filter, - "deform_conv_backward_filter"); - m.def( - "modulated_deform_conv_forward", - &modulated_deform_conv_forward, - "modulated_deform_conv_forward"); - m.def( - "modulated_deform_conv_backward", - &modulated_deform_conv_backward, - "modulated_deform_conv_backward"); - - m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); - m.def( - "COCOevalEvaluateImages", - &COCOeval::EvaluateImages, - "COCOeval::EvaluateImages"); - pybind11::class_(m, "InstanceAnnotation") - .def(pybind11::init()); - pybind11::class_(m, "ImageEvaluation") - .def(pybind11::init<>()); -} - -TORCH_LIBRARY(detectron2, m) { - m.def("nms_rotated", &nms_rotated); - m.def("box_iou_rotated", &box_iou_rotated); - m.def("roi_align_rotated_forward", &ROIAlignRotated_forward); - m.def("roi_align_rotated_backward", &ROIAlignRotated_backward); -} -} // namespace detectron2 diff --git a/spaces/Tiju1996/resume-parser/README.md b/spaces/Tiju1996/resume-parser/README.md deleted file mode 100644 index bc21ed39fae0856149ebe02b5c8a03889020332d..0000000000000000000000000000000000000000 --- a/spaces/Tiju1996/resume-parser/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Resume Parser -emoji: 🏢 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: openrail -duplicated_from: Sybghat/resume-parser ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Truepic/watermarked-content-credentials/static/index.html b/spaces/Truepic/watermarked-content-credentials/static/index.html deleted file mode 100644 index c5aecb1e6f89b3aeaf6cf983be074c831db16c7b..0000000000000000000000000000000000000000 --- a/spaces/Truepic/watermarked-content-credentials/static/index.html +++ /dev/null @@ -1,430 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - -
-
-
-
-
-
- - -
- - -
-
-
-
-
-

Uploaded image

-
-
-
-
- -
-
-
- Content Credentials
-
- Digital watermark -
-
-
-
-

Result

- -
-
- - -
- -
- -
-
-
-
-
- - -
- -
- -
- -
-
- -
- - -
- - - -
- Something went wrong. Please try again. -
-
- -
-

How it works

- -

- When an image is generated and signed with C2PA Content - Credentials, an imperceptible digital watermark, powered by - Steg.AI, is also added to the image pixels. The watermark - serves as a backup in case the Content Credentials are lost, - such as when sharing the image between currently incompatible - services like text messaging. By using the watermark, it is - possible to retrieve a restored, signed version of the image - from before the data was decoupled, which you can try in the - verify tab. -

- -

- Want to know more? Read our - community blog post. -

-
-
-
-
-
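The "How it works" text above describes a two-layer design: the C2PA Content Credentials travel in the file's metadata, while the Steg.AI watermark lives in the pixels and acts as a fallback once that metadata has been stripped. The sketch below is only an illustration of that fallback decision flow, not the code this Space or Steg.AI actually runs; read_content_credentials, decode_watermark, and fetch_restored_image are hypothetical placeholder names.

# Illustrative sketch of the watermark-as-backup flow described above.
# Every helper below is a hypothetical stand-in, not a real API.
from typing import Optional


def read_content_credentials(image_bytes: bytes) -> Optional[dict]:
    # Hypothetical: return the C2PA manifest if it is still attached to the file.
    return None


def decode_watermark(image_bytes: bytes) -> Optional[str]:
    # Hypothetical: recover the imperceptible watermark ID from the image pixels.
    return None


def fetch_restored_image(watermark_id: str) -> bytes:
    # Hypothetical: look up the originally signed copy by its watermark ID.
    return b""


def verify(image_bytes: bytes) -> dict:
    manifest = read_content_credentials(image_bytes)
    if manifest is not None:
        # The metadata survived, so the Content Credentials can be shown directly.
        return {"source": "content_credentials", "manifest": manifest}
    watermark_id = decode_watermark(image_bytes)
    if watermark_id is not None:
        # The metadata was stripped (e.g. by a messaging app): fall back to the
        # watermark and retrieve the restored, signed version of the image.
        return {"source": "watermark", "restored_image": fetch_restored_image(watermark_id)}
    # Neither layer is present; nothing can be verified.
    return {"source": "none"}

In this sketch the watermark never replaces the Content Credentials; it only recovers a signed copy when the metadata has been lost, which is the behaviour the page describes.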
- - - -
- -
- - -
- - - -
- Something went wrong. Please try again. -
-
-
-

How it works

- -

- When an image is generated and signed with C2PA Content - Credentials, an imperceptible digital watermark, powered by - Steg.AI, is also added to the image pixels. The watermark - serves as a backup in case the Content Credentials are lost, - such as when sharing the image between currently incompatible - services like text messaging. By using the watermark, it is - possible to retrieve a restored, signed version of the image - from before the data was decoupled, which you can try in the - verify tab. -

- -

- Want to know more? Read our - community blog post. -

-
-
-
-
-
-
- - - - diff --git a/spaces/UdayPrasad/mnist_classification/README.md b/spaces/UdayPrasad/mnist_classification/README.md deleted file mode 100644 index 0e91338df164cf2d161db6ab3571b5f140ff37d7..0000000000000000000000000000000000000000 --- a/spaces/UdayPrasad/mnist_classification/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mnist Classification -emoji: 📚 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.0.22 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/__init__.py b/spaces/VincentZB/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/app.py b/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/app.py deleted file mode 100644 index 1ca912717277e6d7b54602dccd75db36e17c1143..0000000000000000000000000000000000000000 --- a/spaces/Vinnybustacap/WizardLM-WizardLM-7B-V1.0/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/WizardLM/WizardLM-7B-V1.0").launch() \ No newline at end of file diff --git a/spaces/XiJingPong/Perisa-Bot/README.md b/spaces/XiJingPong/Perisa-Bot/README.md deleted file mode 100644 index 085cc7490b1dcb7173d951a733a61e5093487f1b..0000000000000000000000000000000000000000 --- a/spaces/XiJingPong/Perisa-Bot/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Perisa-bot -emoji: 👁 -colorFrom: red -colorTo: indigo -sdk: docker -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/drgb.py b/spaces/Xinyoumeng233hu/SteganographywithGPT-2/drgb.py deleted file mode 100644 index 8069589085b2e530990bd7b5ba1350affd484de1..0000000000000000000000000000000000000000 --- a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/drgb.py +++ /dev/null @@ -1,43 +0,0 @@ -#@title Colab setup { run: "auto", display-mode: "form" } -#@markdown This downloads some prereqs. It might take a while! You only have to run this cell once. 
-# !pip install torch==1.13.1 pytorch-transformers==1.1.0 bitarray==1.0.1 -import hashlib -import hmac -import numpy as np - -class DRBG(object): - def __init__(self, key, seed): - self.key = key - self.val = b'\x01' * 64 - self.reseed(seed) - - self.byte_index = 0 - self.bit_index = 0 - - def hmac(self, key, val): - return hmac.new(key, val, hashlib.sha512).digest() - - def reseed(self, data=b''): - self.key = self.hmac(self.key, self.val + b'\x00' + data) - self.val = self.hmac(self.key, self.val) - - if data: - self.key = self.hmac(self.key, self.val + b'\x01' + data) - self.val = self.hmac(self.key, self.val) - - def generate_bits(self, n): - xs = np.zeros(n, dtype=bool) - for i in range(0,n): - xs[i] = (self.val[self.byte_index] >> (7 - self.bit_index)) & 1 - - self.bit_index += 1 - if self.bit_index >= 8: - self.bit_index = 0 - self.byte_index += 1 - - if self.byte_index >= 8: - self.byte_index = 0 - self.val = self.hmac(self.key, self.val) - - self.reseed() - return xs \ No newline at end of file diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/docker/Dockerfile b/spaces/YONG627/456123/yolov5-code-main/utils/docker/Dockerfile deleted file mode 100644 index b5d2af9fb08e792d1d5040c556e84539117ead8e..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/docker/Dockerfile +++ /dev/null @@ -1,75 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference - -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -# FROM docker.io/pytorch/pytorch:latest -FROM pytorch/pytorch:latest - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -ENV DEBIAN_FRONTEND noninteractive -RUN apt update -RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg -# RUN alias python=python3 - -# Security updates -# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 -RUN apt upgrade --no-install-recommends -y openssl - -# Create working directory -RUN rm -rf /usr/src/app && mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - -# Install pip packages -COPY requirements.txt . -RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' - # tensorflow tensorflowjs \ - -# Set environment variables -ENV OMP_NUM_THREADS=1 - -# Cleanup -ENV DEBIAN_FRONTEND teletype - - -# Usage Examples ------------------------------------------------------------------------------------------------------- - -# Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . 
&& sudo docker push $t - -# Pull and Run -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t - -# Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t - -# Kill all -# sudo docker kill $(sudo docker ps -q) - -# Kill all image-based -# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) - -# DockerHub tag update -# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew - -# Clean up -# sudo docker system prune -a --volumes - -# Update Ubuntu drivers -# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ - -# DDP test -# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 - -# GCP VM from Image -# docker.io/ultralytics/yolov5:latest diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py deleted file mode 100644 index 7a19db71562ef47569dc7f77ec616af85447f0ec..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/sem_seg_evaluation.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import itertools -import json -import logging -import numpy as np -import os -from collections import OrderedDict -import PIL.Image as Image -import pycocotools.mask as mask_util -import torch - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.utils.comm import all_gather, is_main_process, synchronize -from detectron2.utils.file_io import PathManager - -from .evaluator import DatasetEvaluator - - -class SemSegEvaluator(DatasetEvaluator): - """ - Evaluate semantic segmentation metrics. - """ - - def __init__( - self, - dataset_name, - distributed=True, - output_dir=None, - *, - num_classes=None, - ignore_label=None, - ): - """ - Args: - dataset_name (str): name of the dataset to be evaluated. - distributed (bool): if True, will collect results from all ranks for evaluation. - Otherwise, will evaluate the results in the current process. - output_dir (str): an output directory to dump results. - num_classes, ignore_label: deprecated argument - """ - self._logger = logging.getLogger(__name__) - if num_classes is not None: - self._logger.warn( - "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." - ) - if ignore_label is not None: - self._logger.warn( - "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." 
- ) - self._dataset_name = dataset_name - self._distributed = distributed - self._output_dir = output_dir - - self._cpu_device = torch.device("cpu") - - self.input_file_to_gt_file = { - dataset_record["file_name"]: dataset_record["sem_seg_file_name"] - for dataset_record in DatasetCatalog.get(dataset_name) - } - - meta = MetadataCatalog.get(dataset_name) - # Dict that maps contiguous training ids to COCO category ids - try: - c2d = meta.stuff_dataset_id_to_contiguous_id - self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} - except AttributeError: - self._contiguous_id_to_dataset_id = None - self._class_names = meta.stuff_classes - self._num_classes = len(meta.stuff_classes) - if num_classes is not None: - assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" - self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label - - def reset(self): - self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) - self._predictions = [] - - def process(self, inputs, outputs): - """ - Args: - inputs: the inputs to a model. - It is a list of dicts. Each dict corresponds to an image and - contains keys like "height", "width", "file_name". - outputs: the outputs of a model. It is either list of semantic segmentation predictions - (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic - segmentation prediction in the same format. - """ - for input, output in zip(inputs, outputs): - output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) - pred = np.array(output, dtype=np.int) - with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f: - gt = np.array(Image.open(f), dtype=np.int) - - gt[gt == self._ignore_label] = self._num_classes - - self._conf_matrix += np.bincount( - (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), - minlength=self._conf_matrix.size, - ).reshape(self._conf_matrix.shape) - - self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) - - def evaluate(self): - """ - Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): - - * Mean intersection-over-union averaged across classes (mIoU) - * Frequency Weighted IoU (fwIoU) - * Mean pixel accuracy averaged across classes (mACC) - * Pixel Accuracy (pACC) - """ - if self._distributed: - synchronize() - conf_matrix_list = all_gather(self._conf_matrix) - self._predictions = all_gather(self._predictions) - self._predictions = list(itertools.chain(*self._predictions)) - if not is_main_process(): - return - - self._conf_matrix = np.zeros_like(self._conf_matrix) - for conf_matrix in conf_matrix_list: - self._conf_matrix += conf_matrix - - if self._output_dir: - PathManager.mkdirs(self._output_dir) - file_path = os.path.join(self._output_dir, "sem_seg_predictions.json") - with PathManager.open(file_path, "w") as f: - f.write(json.dumps(self._predictions)) - - acc = np.full(self._num_classes, np.nan, dtype=np.float) - iou = np.full(self._num_classes, np.nan, dtype=np.float) - tp = self._conf_matrix.diagonal()[:-1].astype(np.float) - pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(np.float) - class_weights = pos_gt / np.sum(pos_gt) - pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(np.float) - acc_valid = pos_gt > 0 - acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid] - iou_valid = (pos_gt + pos_pred) > 0 - union = pos_gt + pos_pred - tp - iou[acc_valid] = tp[acc_valid] / union[acc_valid] - macc = np.sum(acc[acc_valid]) / 
np.sum(acc_valid) - miou = np.sum(iou[acc_valid]) / np.sum(iou_valid) - fiou = np.sum(iou[acc_valid] * class_weights[acc_valid]) - pacc = np.sum(tp) / np.sum(pos_gt) - - res = {} - res["mIoU"] = 100 * miou - res["fwIoU"] = 100 * fiou - for i, name in enumerate(self._class_names): - res["IoU-{}".format(name)] = 100 * iou[i] - res["mACC"] = 100 * macc - res["pACC"] = 100 * pacc - for i, name in enumerate(self._class_names): - res["ACC-{}".format(name)] = 100 * acc[i] - - if self._output_dir: - file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth") - with PathManager.open(file_path, "wb") as f: - torch.save(res, f) - results = OrderedDict({"sem_seg": res}) - self._logger.info(results) - return results - - def encode_json_sem_seg(self, sem_seg, input_file_name): - """ - Convert semantic segmentation to COCO stuff format with segments encoded as RLEs. - See http://cocodataset.org/#format-results - """ - json_list = [] - for label in np.unique(sem_seg): - if self._contiguous_id_to_dataset_id is not None: - assert ( - label in self._contiguous_id_to_dataset_id - ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name) - dataset_id = self._contiguous_id_to_dataset_id[label] - else: - dataset_id = int(label) - mask = (sem_seg == label).astype(np.uint8) - mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0] - mask_rle["counts"] = mask_rle["counts"].decode("utf-8") - json_list.append( - {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle} - ) - return json_list diff --git a/spaces/Yuliang/ICON/lib/dataset/TestDataset.py b/spaces/Yuliang/ICON/lib/dataset/TestDataset.py deleted file mode 100644 index 6872a4ca1a83cf380bc2864e62cd5ac15a59cbdb..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ICON/lib/dataset/TestDataset.py +++ /dev/null @@ -1,254 +0,0 @@ - -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. 
-# -# Contact: ps-license@tuebingen.mpg.de - -import os - -import lib.smplx as smplx -from lib.pymaf.utils.geometry import rotation_matrix_to_angle_axis, batch_rodrigues -from lib.pymaf.utils.imutils import process_image -from lib.pymaf.core import path_config -from lib.pymaf.models import pymaf_net -from lib.common.config import cfg -from lib.common.render import Render -from lib.dataset.body_model import TetraSMPLModel -from lib.dataset.mesh_util import get_visibility, SMPLX -import os.path as osp -import torch -import numpy as np -import random -from termcolor import colored -from PIL import ImageFile -from huggingface_hub import cached_download - -ImageFile.LOAD_TRUNCATED_IMAGES = True - - -class TestDataset(): - def __init__(self, cfg, device): - - random.seed(1993) - - self.image_path = cfg['image_path'] - self.seg_dir = cfg['seg_dir'] - self.has_det = cfg['has_det'] - self.hps_type = cfg['hps_type'] - self.smpl_type = 'smpl' if cfg['hps_type'] != 'pixie' else 'smplx' - self.smpl_gender = 'neutral' - - self.device = device - - self.subject_list = [self.image_path] - - # smpl related - self.smpl_data = SMPLX() - - self.get_smpl_model = lambda smpl_type, smpl_gender: smplx.create( - model_path=self.smpl_data.model_dir, - gender=smpl_gender, - model_type=smpl_type, - ext='npz') - - # Load SMPL model - self.smpl_model = self.get_smpl_model( - self.smpl_type, self.smpl_gender).to(self.device) - self.faces = self.smpl_model.faces - - self.hps = pymaf_net(path_config.SMPL_MEAN_PARAMS, - pretrained=True).to(self.device) - self.hps.load_state_dict(torch.load( - path_config.CHECKPOINT_FILE)['model'], - strict=True) - self.hps.eval() - - print(colored(f"Using {self.hps_type} as HPS Estimator\n", "green")) - - self.render = Render(size=512, device=device) - - def __len__(self): - return len(self.subject_list) - - def compute_vis_cmap(self, smpl_verts, smpl_faces): - - (xy, z) = torch.as_tensor(smpl_verts).split([2, 1], dim=1) - smpl_vis = get_visibility(xy, -z, torch.as_tensor(smpl_faces).long()) - if self.smpl_type == 'smpl': - smplx_ind = self.smpl_data.smpl2smplx(np.arange(smpl_vis.shape[0])) - else: - smplx_ind = np.arange(smpl_vis.shape[0]) - smpl_cmap = self.smpl_data.get_smpl_mat(smplx_ind) - - return { - 'smpl_vis': smpl_vis.unsqueeze(0).to(self.device), - 'smpl_cmap': smpl_cmap.unsqueeze(0).to(self.device), - 'smpl_verts': smpl_verts.unsqueeze(0) - } - - def compute_voxel_verts(self, body_pose, global_orient, betas, trans, - scale): - - smpl_path = cached_download(osp.join(self.smpl_data.model_dir, "smpl/SMPL_NEUTRAL.pkl"), use_auth_token=os.environ['ICON']) - tetra_path = cached_download(osp.join(self.smpl_data.tedra_dir, - 'tetra_neutral_adult_smpl.npz'), use_auth_token=os.environ['ICON']) - smpl_model = TetraSMPLModel(smpl_path, tetra_path, 'adult') - - pose = torch.cat([global_orient[0], body_pose[0]], dim=0) - smpl_model.set_params(rotation_matrix_to_angle_axis(pose), - beta=betas[0]) - - verts = np.concatenate( - [smpl_model.verts, smpl_model.verts_added], - axis=0) * scale.item() + trans.detach().cpu().numpy() - faces = np.loadtxt(cached_download(osp.join(self.smpl_data.tedra_dir, - 'tetrahedrons_neutral_adult.txt'), use_auth_token=os.environ['ICON']), - dtype=np.int32) - 1 - - pad_v_num = int(8000 - verts.shape[0]) - pad_f_num = int(25100 - faces.shape[0]) - - verts = np.pad(verts, ((0, pad_v_num), (0, 0)), - mode='constant', - constant_values=0.0).astype(np.float32) * 0.5 - faces = np.pad(faces, ((0, pad_f_num), (0, 0)), - mode='constant', - 
constant_values=0.0).astype(np.int32) - - verts[:, 2] *= -1.0 - - voxel_dict = { - 'voxel_verts': - torch.from_numpy(verts).to(self.device).unsqueeze(0).float(), - 'voxel_faces': - torch.from_numpy(faces).to(self.device).unsqueeze(0).long(), - 'pad_v_num': - torch.tensor(pad_v_num).to(self.device).unsqueeze(0).long(), - 'pad_f_num': - torch.tensor(pad_f_num).to(self.device).unsqueeze(0).long() - } - - return voxel_dict - - def __getitem__(self, index): - - img_path = self.subject_list[index] - img_name = img_path.split("/")[-1].rsplit(".", 1)[0] - - if self.seg_dir is None: - img_icon, img_hps, img_ori, img_mask, uncrop_param = process_image( - img_path, self.hps_type, 512, self.device) - - data_dict = { - 'name': img_name, - 'image': img_icon.to(self.device).unsqueeze(0), - 'ori_image': img_ori, - 'mask': img_mask, - 'uncrop_param': uncrop_param - } - - else: - img_icon, img_hps, img_ori, img_mask, uncrop_param, segmentations = process_image( - img_path, self.hps_type, 512, self.device, - seg_path=os.path.join(self.seg_dir, f'{img_name}.json')) - data_dict = { - 'name': img_name, - 'image': img_icon.to(self.device).unsqueeze(0), - 'ori_image': img_ori, - 'mask': img_mask, - 'uncrop_param': uncrop_param, - 'segmentations': segmentations - } - - with torch.no_grad(): - # import ipdb; ipdb.set_trace() - preds_dict = self.hps.forward(img_hps) - - data_dict['smpl_faces'] = torch.Tensor( - self.faces.astype(np.int16)).long().unsqueeze(0).to( - self.device) - - if self.hps_type == 'pymaf': - output = preds_dict['smpl_out'][-1] - scale, tranX, tranY = output['theta'][0, :3] - data_dict['betas'] = output['pred_shape'] - data_dict['body_pose'] = output['rotmat'][:, 1:] - data_dict['global_orient'] = output['rotmat'][:, 0:1] - data_dict['smpl_verts'] = output['verts'] - - elif self.hps_type == 'pare': - data_dict['body_pose'] = preds_dict['pred_pose'][:, 1:] - data_dict['global_orient'] = preds_dict['pred_pose'][:, 0:1] - data_dict['betas'] = preds_dict['pred_shape'] - data_dict['smpl_verts'] = preds_dict['smpl_vertices'] - scale, tranX, tranY = preds_dict['pred_cam'][0, :3] - - elif self.hps_type == 'pixie': - data_dict.update(preds_dict) - data_dict['body_pose'] = preds_dict['body_pose'] - data_dict['global_orient'] = preds_dict['global_pose'] - data_dict['betas'] = preds_dict['shape'] - data_dict['smpl_verts'] = preds_dict['vertices'] - scale, tranX, tranY = preds_dict['cam'][0, :3] - - elif self.hps_type == 'hybrik': - data_dict['body_pose'] = preds_dict['pred_theta_mats'][:, 1:] - data_dict['global_orient'] = preds_dict['pred_theta_mats'][:, [0]] - data_dict['betas'] = preds_dict['pred_shape'] - data_dict['smpl_verts'] = preds_dict['pred_vertices'] - scale, tranX, tranY = preds_dict['pred_camera'][0, :3] - scale = scale * 2 - - elif self.hps_type == 'bev': - data_dict['betas'] = torch.from_numpy(preds_dict['smpl_betas'])[ - [0], :10].to(self.device).float() - pred_thetas = batch_rodrigues(torch.from_numpy( - preds_dict['smpl_thetas'][0]).reshape(-1, 3)).float() - data_dict['body_pose'] = pred_thetas[1:][None].to(self.device) - data_dict['global_orient'] = pred_thetas[[0]][None].to(self.device) - data_dict['smpl_verts'] = torch.from_numpy( - preds_dict['verts'][[0]]).to(self.device).float() - tranX = preds_dict['cam_trans'][0, 0] - tranY = preds_dict['cam'][0, 1] + 0.28 - scale = preds_dict['cam'][0, 0] * 1.1 - - data_dict['scale'] = scale - data_dict['trans'] = torch.tensor( - [tranX, tranY, 0.0]).to(self.device).float() - - # data_dict info (key-shape): - # scale, tranX, tranY - tensor.float - 
# betas - [1,10] / [1, 200] - # body_pose - [1, 23, 3, 3] / [1, 21, 3, 3] - # global_orient - [1, 1, 3, 3] - # smpl_verts - [1, 6890, 3] / [1, 10475, 3] - - # from rot_mat to rot_6d for better optimization - N_body = data_dict["body_pose"].shape[1] - data_dict["body_pose"] = data_dict["body_pose"][:, :, :, :2].reshape(1, N_body,-1) - data_dict["global_orient"] = data_dict["global_orient"][:, :, :, :2].reshape(1, 1,-1) - - return data_dict - - def render_normal(self, verts, faces): - - # render optimized mesh (normal, T_normal, image [-1,1]) - self.render.load_meshes(verts, faces) - return self.render.get_rgb_image() - - def render_depth(self, verts, faces): - - # render optimized mesh (normal, T_normal, image [-1,1]) - self.render.load_meshes(verts, faces) - return self.render.get_depth_map(cam_ids=[0, 2]) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/datasets/pascal_voc12.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/datasets/pascal_voc12.py deleted file mode 100644 index ba1d42d0c5781f56dc177d860d856bb34adce555..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/datasets/pascal_voc12.py +++ /dev/null @@ -1,57 +0,0 @@ -# dataset settings -dataset_type = 'PascalVOCDataset' -data_root = 'data/VOCdevkit/VOC2012' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (512, 512) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 512), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/train.txt', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline)) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/upfirdn2d.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/upfirdn2d.py deleted file mode 100644 index c8bb2c3c949eed38a6465ed369fa881538dca010..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/upfirdn2d.py +++ /dev/null @@ -1,330 +0,0 @@ -# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501 - -# Copyright (c) 2021, NVIDIA Corporation. 
All rights reserved. -# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator -# Augmentation (ADA) -# ======================================================================= - -# 1. Definitions - -# "Licensor" means any person or entity that distributes its Work. - -# "Software" means the original work of authorship made available under -# this License. - -# "Work" means the Software and any additions to or derivative works of -# the Software that are made available under this License. - -# The terms "reproduce," "reproduction," "derivative works," and -# "distribution" have the meaning as provided under U.S. copyright law; -# provided, however, that for the purposes of this License, derivative -# works shall not include works that remain separable from, or merely -# link (or bind by name) to the interfaces of, the Work. - -# Works, including the Software, are "made available" under this License -# by including in or with the Work either (a) a copyright notice -# referencing the applicability of this License to the Work, or (b) a -# copy of this License. - -# 2. License Grants - -# 2.1 Copyright Grant. Subject to the terms and conditions of this -# License, each Licensor grants to you a perpetual, worldwide, -# non-exclusive, royalty-free, copyright license to reproduce, -# prepare derivative works of, publicly display, publicly perform, -# sublicense and distribute its Work and any resulting derivative -# works in any form. - -# 3. Limitations - -# 3.1 Redistribution. You may reproduce or distribute the Work only -# if (a) you do so under this License, (b) you include a complete -# copy of this License with your distribution, and (c) you retain -# without modification any copyright, patent, trademark, or -# attribution notices that are present in the Work. - -# 3.2 Derivative Works. You may specify that additional or different -# terms apply to the use, reproduction, and distribution of your -# derivative works of the Work ("Your Terms") only if (a) Your Terms -# provide that the use limitation in Section 3.3 applies to your -# derivative works, and (b) you identify the specific derivative -# works that are subject to Your Terms. Notwithstanding Your Terms, -# this License (including the redistribution requirements in Section -# 3.1) will continue to apply to the Work itself. - -# 3.3 Use Limitation. The Work and any derivative works thereof only -# may be used or intended for use non-commercially. Notwithstanding -# the foregoing, NVIDIA and its affiliates may use the Work and any -# derivative works commercially. As used herein, "non-commercially" -# means for research or evaluation purposes only. - -# 3.4 Patent Claims. If you bring or threaten to bring a patent claim -# against any Licensor (including any claim, cross-claim or -# counterclaim in a lawsuit) to enforce any patents that you allege -# are infringed by any Work, then your rights under this License from -# such Licensor (including the grant in Section 2.1) will terminate -# immediately. - -# 3.5 Trademarks. This License does not grant any rights to use any -# Licensor’s or its affiliates’ names, logos, or trademarks, except -# as necessary to reproduce the notices described in this License. - -# 3.6 Termination. If you violate any term of this License, then your -# rights under this License (including the grant in Section 2.1) will -# terminate immediately. - -# 4. Disclaimer of Warranty. 
- -# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR -# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER -# THIS LICENSE. - -# 5. Limitation of Liability. - -# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL -# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE -# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, -# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF -# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK -# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, -# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER -# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF -# THE POSSIBILITY OF SUCH DAMAGES. - -# ======================================================================= - -import torch -from torch.autograd import Function -from torch.nn import functional as F - -from annotator.uniformer.mmcv.utils import to_2tuple -from ..utils import ext_loader - -upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d']) - - -class UpFirDn2dBackward(Function): - - @staticmethod - def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, - in_size, out_size): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_ext.upfirdn2d( - grad_output, - grad_kernel, - up_x=down_x, - up_y=down_y, - down_x=up_x, - down_y=up_y, - pad_x0=g_pad_x0, - pad_x1=g_pad_x1, - pad_y0=g_pad_y0, - pad_y1=g_pad_y1) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], - in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], - ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_ext.upfirdn2d( - gradgrad_input, - kernel, - up_x=ctx.up_x, - up_y=ctx.up_y, - down_x=ctx.down_x, - down_y=ctx.down_y, - pad_x0=ctx.pad_x0, - pad_x1=ctx.pad_x1, - pad_y0=ctx.pad_y0, - pad_y1=ctx.pad_y1) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], - # ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], - ctx.out_size[0], ctx.out_size[1]) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - 
g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_ext.upfirdn2d( - input, - kernel, - up_x=up_x, - up_y=up_y, - down_x=down_x, - down_y=down_y, - pad_x0=pad_x0, - pad_x1=pad_x1, - pad_y0=pad_y0, - pad_y1=pad_y1) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - """UpFIRDn for 2d features. - - UpFIRDn is short for upsample, apply FIR filter and downsample. More - details can be found in: - https://www.mathworks.com/help/signal/ref/upfirdn.html - - Args: - input (Tensor): Tensor with shape of (n, c, h, w). - kernel (Tensor): Filter kernel. - up (int | tuple[int], optional): Upsampling factor. If given a number, - we will use this factor for both the height and width sides. - Defaults to 1. - down (int | tuple[int], optional): Downsampling factor. If given a - number, we will use this factor for both the height and width sides. - Defaults to 1. - pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or - (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0). - - Returns: - Tensor: Tensor after UpFIRDn. - """ - if input.device.type == 'cpu': - if len(pad) == 2: - pad = (pad[0], pad[1], pad[0], pad[1]) - - up = to_2tuple(up) - - down = to_2tuple(down) - - out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1], - pad[0], pad[1], pad[2], pad[3]) - else: - _up = to_2tuple(up) - - _down = to_2tuple(down) - - if len(pad) == 4: - _pad = pad - elif len(pad) == 2: - _pad = (pad[0], pad[1], pad[0], pad[1]) - - out = UpFirDn2d.apply(input, kernel, _up, _down, _pad) - - return out - - -def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, - pad_y0, pad_y1): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, - [0, 0, - max(pad_x0, 0), - max(pad_x1, 0), - max(pad_y0, 0), - max(pad_y1, 0)]) - out = out[:, - max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/optimizer.py
b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/optimizer.py deleted file mode 100644 index 4ef3e9ff8f9c6926e32bdf027612267b64ed80df..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/runner/hooks/optimizer.py +++ /dev/null @@ -1,508 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -from collections import defaultdict -from itertools import chain - -from torch.nn.utils import clip_grad - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version -from ..dist_utils import allreduce_grads -from ..fp16_utils import LossScaler, wrap_fp16_model -from .hook import HOOKS, Hook - -try: - # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported - # and used; otherwise, auto fp16 will adopt mmcv's implementation. - from torch.cuda.amp import GradScaler -except ImportError: - pass - - -@HOOKS.register_module() -class OptimizerHook(Hook): - - def __init__(self, grad_clip=None): - self.grad_clip = grad_clip - - def clip_grads(self, params): - params = list( - filter(lambda p: p.requires_grad and p.grad is not None, params)) - if len(params) > 0: - return clip_grad.clip_grad_norm_(params, **self.grad_clip) - - def after_train_iter(self, runner): - runner.optimizer.zero_grad() - runner.outputs['loss'].backward() - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - runner.optimizer.step() - - -@HOOKS.register_module() -class GradientCumulativeOptimizerHook(OptimizerHook): - """Optimizer Hook implements multi-iters gradient cumulating. - - Args: - cumulative_iters (int, optional): Num of gradient cumulative iters. - The optimizer will step every `cumulative_iters` iters. - Defaults to 1. - - Examples: - >>> # Use cumulative_iters to simulate a large batch size - >>> # It is helpful when the hardware cannot handle a large batch size. - >>> loader = DataLoader(data, batch_size=64) - >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4) - >>> # almost equals to - >>> loader = DataLoader(data, batch_size=256) - >>> optim_hook = OptimizerHook() - """ - - def __init__(self, cumulative_iters=1, **kwargs): - super(GradientCumulativeOptimizerHook, self).__init__(**kwargs) - - assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \ - f'cumulative_iters only accepts positive int, but got ' \ - f'{type(cumulative_iters)} instead.' - - self.cumulative_iters = cumulative_iters - self.divisible_iters = 0 - self.remainder_iters = 0 - self.initialized = False - - def has_batch_norm(self, module): - if isinstance(module, _BatchNorm): - return True - for m in module.children(): - if self.has_batch_norm(m): - return True - return False - - def _init(self, runner): - if runner.iter % self.cumulative_iters != 0: - runner.logger.warning( - 'Resume iter number is not divisible by cumulative_iters in ' - 'GradientCumulativeOptimizerHook, which means the gradient of ' - 'some iters is lost and the result may be influenced slightly.' 
- ) - - if self.has_batch_norm(runner.model) and self.cumulative_iters > 1: - runner.logger.warning( - 'GradientCumulativeOptimizerHook may slightly decrease ' - 'performance if the model has BatchNorm layers.') - - residual_iters = runner.max_iters - runner.iter - - self.divisible_iters = ( - residual_iters // self.cumulative_iters * self.cumulative_iters) - self.remainder_iters = residual_iters - self.divisible_iters - - self.initialized = True - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - loss = runner.outputs['loss'] - loss = loss / loss_factor - loss.backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - runner.optimizer.step() - runner.optimizer.zero_grad() - - -if (TORCH_VERSION != 'parrots' - and digit_version(TORCH_VERSION) >= digit_version('1.6.0')): - - @HOOKS.register_module() - class Fp16OptimizerHook(OptimizerHook): - """FP16 optimizer hook (using PyTorch's implementation). - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, - to take care of the optimization procedure. - - Args: - loss_scale (float | str | dict): Scale factor configuration. - If loss_scale is a float, static loss scaling will be used with - the specified scale. If loss_scale is a string, it must be - 'dynamic', then dynamic loss scaling will be used. - It can also be a dict containing arguments of GradScalar. - Defaults to 512. For Pytorch >= 1.6, mmcv uses official - implementation of GradScaler. If you use a dict version of - loss_scale to create GradScaler, please refer to: - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler - for the parameters. - - Examples: - >>> loss_scale = dict( - ... init_scale=65536.0, - ... growth_factor=2.0, - ... backoff_factor=0.5, - ... growth_interval=2000 - ... 
) - >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale) - """ - - def __init__(self, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - loss_scale=512., - distributed=True): - self.grad_clip = grad_clip - self.coalesce = coalesce - self.bucket_size_mb = bucket_size_mb - self.distributed = distributed - self._scale_update_param = None - if loss_scale == 'dynamic': - self.loss_scaler = GradScaler() - elif isinstance(loss_scale, float): - self._scale_update_param = loss_scale - self.loss_scaler = GradScaler(init_scale=loss_scale) - elif isinstance(loss_scale, dict): - self.loss_scaler = GradScaler(**loss_scale) - else: - raise ValueError('loss_scale must be of type float, dict, or ' - f'"dynamic", got {loss_scale}') - - def before_run(self, runner): - """Preparing steps before Mixed Precision Training.""" - # wrap model mode to fp16 - wrap_fp16_model(runner.model) - # resume from state dict - if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: - scaler_state_dict = runner.meta['fp16']['loss_scaler'] - self.loss_scaler.load_state_dict(scaler_state_dict) - - def copy_grads_to_fp32(self, fp16_net, fp32_weights): - """Copy gradients from fp16 model to fp32 weight copy.""" - for fp32_param, fp16_param in zip(fp32_weights, - fp16_net.parameters()): - if fp16_param.grad is not None: - if fp32_param.grad is None: - fp32_param.grad = fp32_param.data.new( - fp32_param.size()) - fp32_param.grad.copy_(fp16_param.grad) - - def copy_params_to_fp16(self, fp16_net, fp32_weights): - """Copy updated params from fp32 weight copy to fp16 model.""" - for fp16_param, fp32_param in zip(fp16_net.parameters(), - fp32_weights): - fp16_param.data.copy_(fp32_param.data) - - def after_train_iter(self, runner): - """Backward optimization steps for Mixed Precision Training. For - dynamic loss scaling, please refer to - https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler. - - 1. Scale the loss by a scale factor. - 2. Backward the loss to obtain the gradients. - 3. Unscale the optimizer’s gradient tensors. - 4. Call optimizer.step() and update scale factor. - 5. Save loss_scaler state_dict for resume purpose. - """ - # clear grads of last iteration - runner.model.zero_grad() - runner.optimizer.zero_grad() - - self.loss_scaler.scale(runner.outputs['loss']).backward() - self.loss_scaler.unscale_(runner.optimizer) - # grad clip - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update({'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # backward and update scaler - self.loss_scaler.step(runner.optimizer) - self.loss_scaler.update(self._scale_update_param) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - @HOOKS.register_module() - class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, - Fp16OptimizerHook): - """Fp16 optimizer Hook (using PyTorch's implementation) implements - multi-iters gradient cumulating. - - If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend, - to take care of the optimization procedure. 
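Each iteration the loss is divided by ``cumulative_iters`` (or by the remaining iteration count for the final incomplete cycle), scaled and back-propagated; gradient unscaling, optional clipping, the optimizer step and the scaler update then run once every ``cumulative_iters`` iterations.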
- """ - - def __init__(self, *args, **kwargs): - super(GradientCumulativeFp16OptimizerHook, - self).__init__(*args, **kwargs) - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - loss = runner.outputs['loss'] - loss = loss / loss_factor - - self.loss_scaler.scale(loss).backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - # copy fp16 grads in the model to fp32 params in the optimizer - self.loss_scaler.unscale_(runner.optimizer) - - if self.grad_clip is not None: - grad_norm = self.clip_grads(runner.model.parameters()) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - - # backward and update scaler - self.loss_scaler.step(runner.optimizer) - self.loss_scaler.update(self._scale_update_param) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - # clear grads - runner.model.zero_grad() - runner.optimizer.zero_grad() - -else: - - @HOOKS.register_module() - class Fp16OptimizerHook(OptimizerHook): - """FP16 optimizer hook (mmcv's implementation). - - The steps of fp16 optimizer is as follows. - 1. Scale the loss value. - 2. BP in the fp16 model. - 2. Copy gradients from fp16 model to fp32 weights. - 3. Update fp32 weights. - 4. Copy updated parameters from fp32 weights to fp16 model. - - Refer to https://arxiv.org/abs/1710.03740 for more details. - - Args: - loss_scale (float | str | dict): Scale factor configuration. - If loss_scale is a float, static loss scaling will be used with - the specified scale. If loss_scale is a string, it must be - 'dynamic', then dynamic loss scaling will be used. - It can also be a dict containing arguments of LossScaler. - Defaults to 512. - """ - - def __init__(self, - grad_clip=None, - coalesce=True, - bucket_size_mb=-1, - loss_scale=512., - distributed=True): - self.grad_clip = grad_clip - self.coalesce = coalesce - self.bucket_size_mb = bucket_size_mb - self.distributed = distributed - if loss_scale == 'dynamic': - self.loss_scaler = LossScaler(mode='dynamic') - elif isinstance(loss_scale, float): - self.loss_scaler = LossScaler( - init_scale=loss_scale, mode='static') - elif isinstance(loss_scale, dict): - self.loss_scaler = LossScaler(**loss_scale) - else: - raise ValueError('loss_scale must be of type float, dict, or ' - f'"dynamic", got {loss_scale}') - - def before_run(self, runner): - """Preparing steps before Mixed Precision Training. - - 1. Make a master copy of fp32 weights for optimization. - 2. Convert the main model from fp32 to fp16. 
- """ - # keep a copy of fp32 weights - old_groups = runner.optimizer.param_groups - runner.optimizer.param_groups = copy.deepcopy( - runner.optimizer.param_groups) - state = defaultdict(dict) - p_map = { - old_p: p - for old_p, p in zip( - chain(*(g['params'] for g in old_groups)), - chain(*(g['params'] - for g in runner.optimizer.param_groups))) - } - for k, v in runner.optimizer.state.items(): - state[p_map[k]] = v - runner.optimizer.state = state - # convert model to fp16 - wrap_fp16_model(runner.model) - # resume from state dict - if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']: - scaler_state_dict = runner.meta['fp16']['loss_scaler'] - self.loss_scaler.load_state_dict(scaler_state_dict) - - def copy_grads_to_fp32(self, fp16_net, fp32_weights): - """Copy gradients from fp16 model to fp32 weight copy.""" - for fp32_param, fp16_param in zip(fp32_weights, - fp16_net.parameters()): - if fp16_param.grad is not None: - if fp32_param.grad is None: - fp32_param.grad = fp32_param.data.new( - fp32_param.size()) - fp32_param.grad.copy_(fp16_param.grad) - - def copy_params_to_fp16(self, fp16_net, fp32_weights): - """Copy updated params from fp32 weight copy to fp16 model.""" - for fp16_param, fp32_param in zip(fp16_net.parameters(), - fp32_weights): - fp16_param.data.copy_(fp32_param.data) - - def after_train_iter(self, runner): - """Backward optimization steps for Mixed Precision Training. For - dynamic loss scaling, please refer `loss_scalar.py` - - 1. Scale the loss by a scale factor. - 2. Backward the loss to obtain the gradients (fp16). - 3. Copy gradients from the model to the fp32 weight copy. - 4. Scale the gradients back and update the fp32 weight copy. - 5. Copy back the params from fp32 weight copy to the fp16 model. - 6. Save loss_scaler state_dict for resume purpose. 
- """ - # clear grads of last iteration - runner.model.zero_grad() - runner.optimizer.zero_grad() - # scale the loss value - scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale - scaled_loss.backward() - # copy fp16 grads in the model to fp32 params in the optimizer - - fp32_weights = [] - for param_group in runner.optimizer.param_groups: - fp32_weights += param_group['params'] - self.copy_grads_to_fp32(runner.model, fp32_weights) - # allreduce grads - if self.distributed: - allreduce_grads(fp32_weights, self.coalesce, - self.bucket_size_mb) - - has_overflow = self.loss_scaler.has_overflow(fp32_weights) - # if has overflow, skip this iteration - if not has_overflow: - # scale the gradients back - for param in fp32_weights: - if param.grad is not None: - param.grad.div_(self.loss_scaler.loss_scale) - if self.grad_clip is not None: - grad_norm = self.clip_grads(fp32_weights) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # update fp32 params - runner.optimizer.step() - # copy fp32 params to the fp16 model - self.copy_params_to_fp16(runner.model, fp32_weights) - self.loss_scaler.update_scale(has_overflow) - if has_overflow: - runner.logger.warning('Check overflow, downscale loss scale ' - f'to {self.loss_scaler.cur_scale}') - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict() - - @HOOKS.register_module() - class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook, - Fp16OptimizerHook): - """Fp16 optimizer Hook (using mmcv implementation) implements multi- - iters gradient cumulating.""" - - def __init__(self, *args, **kwargs): - super(GradientCumulativeFp16OptimizerHook, - self).__init__(*args, **kwargs) - - def after_train_iter(self, runner): - if not self.initialized: - self._init(runner) - - if runner.iter < self.divisible_iters: - loss_factor = self.cumulative_iters - else: - loss_factor = self.remainder_iters - - loss = runner.outputs['loss'] - loss = loss / loss_factor - - # scale the loss value - scaled_loss = loss * self.loss_scaler.loss_scale - scaled_loss.backward() - - if (self.every_n_iters(runner, self.cumulative_iters) - or self.is_last_iter(runner)): - - # copy fp16 grads in the model to fp32 params in the optimizer - fp32_weights = [] - for param_group in runner.optimizer.param_groups: - fp32_weights += param_group['params'] - self.copy_grads_to_fp32(runner.model, fp32_weights) - # allreduce grads - if self.distributed: - allreduce_grads(fp32_weights, self.coalesce, - self.bucket_size_mb) - - has_overflow = self.loss_scaler.has_overflow(fp32_weights) - # if has overflow, skip this iteration - if not has_overflow: - # scale the gradients back - for param in fp32_weights: - if param.grad is not None: - param.grad.div_(self.loss_scaler.loss_scale) - if self.grad_clip is not None: - grad_norm = self.clip_grads(fp32_weights) - if grad_norm is not None: - # Add grad norm to the logger - runner.log_buffer.update( - {'grad_norm': float(grad_norm)}, - runner.outputs['num_samples']) - # update fp32 params - runner.optimizer.step() - # copy fp32 params to the fp16 model - self.copy_params_to_fp16(runner.model, fp32_weights) - else: - runner.logger.warning( - 'Check overflow, downscale loss scale ' - f'to {self.loss_scaler.cur_scale}') - - self.loss_scaler.update_scale(has_overflow) - - # save state_dict of loss_scaler - runner.meta.setdefault( - 'fp16', {})['loss_scaler'] = 
self.loss_scaler.state_dict() - - # clear grads - runner.model.zero_grad() - runner.optimizer.zero_grad() diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/inverted_residual.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/inverted_residual.py deleted file mode 100644 index ffd73e78e22fa3a7aec3ddb7629deb14b397d403..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/utils/inverted_residual.py +++ /dev/null @@ -1,220 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -from annotator.uniformer.mmcv.cnn import ConvModule -from torch import nn -from torch.utils import checkpoint as cp - -from .se_layer import SELayer - - -class InvertedResidual(nn.Module): - """InvertedResidual block for MobileNetV2. - - Args: - in_channels (int): The input channels of the InvertedResidual block. - out_channels (int): The output channels of the InvertedResidual block. - stride (int): Stride of the middle (first) 3x3 convolution. - expand_ratio (int): Adjusts number of channels of the hidden layer - in InvertedResidual by this amount. - dilation (int): Dilation rate of depthwise conv. Default: 1 - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - stride, - expand_ratio, - dilation=1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - with_cp=False): - super(InvertedResidual, self).__init__() - self.stride = stride - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' 
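# MobileNetV2 bottleneck construction: an optional 1x1 expansion conv (only when
# expand_ratio != 1), a 3x3 depthwise conv with groups=hidden_dim, and a 1x1 linear
# projection without activation; the residual shortcut is applied only when stride == 1
# and in_channels == out_channels. E.g. in_channels=32, expand_ratio=6 -> hidden_dim=192.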
- self.with_cp = with_cp - self.use_res_connect = self.stride == 1 and in_channels == out_channels - hidden_dim = int(round(in_channels * expand_ratio)) - - layers = [] - if expand_ratio != 1: - layers.append( - ConvModule( - in_channels=in_channels, - out_channels=hidden_dim, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg)) - layers.extend([ - ConvModule( - in_channels=hidden_dim, - out_channels=hidden_dim, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - groups=hidden_dim, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg), - ConvModule( - in_channels=hidden_dim, - out_channels=out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - ]) - self.conv = nn.Sequential(*layers) - - def forward(self, x): - - def _inner_forward(x): - if self.use_res_connect: - return x + self.conv(x) - else: - return self.conv(x) - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out - - -class InvertedResidualV3(nn.Module): - """Inverted Residual Block for MobileNetV3. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - - Returns: - Tensor: The output tensor. 
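Compared with ``InvertedResidual`` above, this block can insert an SE layer after the depthwise convolution (``se_cfg``) and can skip the expansion convolution when ``with_expand_conv=False``, in which case ``mid_channels`` must equal ``in_channels``.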
- """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - with_cp=False): - super(InvertedResidualV3, self).__init__() - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2] - self.with_cp = with_cp - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=dict( - type='Conv2dAdaptivePadding') if stride == 2 else conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + out - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/docs/Makefile b/spaces/abrar-lohia/text-2-character-anim/pyrender/docs/Makefile deleted file mode 100644 index b1064a04362a0c4372fae351f99ed3bd9f82ff92..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/docs/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -clean: - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - rm -rf ./source/generated/* - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/spaces/adirik/kakao-brain-vit/README.md b/spaces/adirik/kakao-brain-vit/README.md deleted file mode 100644 index 5670797aef9afeeb49bf6bc508922f1b7b447efc..0000000000000000000000000000000000000000 --- a/spaces/adirik/kakao-brain-vit/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Kakao Brain Vit -emoji: 🌖 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/airus/img-to-music/share_btn.py b/spaces/airus/img-to-music/share_btn.py deleted file mode 100644 index 1a2ac6a6e74b114dbd54c2f24723a87180db51ef..0000000000000000000000000000000000000000 --- a/spaces/airus/img-to-music/share_btn.py +++ /dev/null @@ -1,100 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - async function getOutputMusicFile(audioEL){ - const res = await fetch(audioEL.src); - const blob = await res.blob(); - const audioId = Date.now() % 200; - const fileName = `img-to-music-${{audioId}}.wav`; - const musicBlob = new File([blob], fileName, { type: 'audio/wav' }); - console.log(musicBlob); - return musicBlob; - } - - async function audioToBase64(audioFile) { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.readAsDataURL(audioFile); - reader.onload = () => resolve(reader.result); - reader.onerror = error => reject(error); - - }); - } - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgEl = gradioEl.querySelector('#input-img img'); - const outputMusic = gradioEl.querySelector('#music-output audio'); - const outputMusic_src = gradioEl.querySelector('#music-output audio').src; - const outputMusic_name = outputMusic_src.split('/').pop(); - let titleTxt = outputMusic_name; - //if(titleTxt.length > 100){ - // titleTxt = titleTxt.slice(0, 100) + ' ...'; - //} - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputMusic){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const inputFile = await getInputImgFile(inputImgEl); - const urlInputImg = await uploadFile(inputFile); - const musicFile = await getOutputMusicFile(outputMusic); - const dataOutputMusic = await uploadFile(musicFile); - - const descriptionMd = `#### Input 
img: - - -#### Music: - - -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/img-to-music/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/akhaliq/Mask2Former/mask2former_video/modeling/transformer_decoder/position_encoding.py b/spaces/akhaliq/Mask2Former/mask2former_video/modeling/transformer_decoder/position_encoding.py deleted file mode 100644 index b0ebc769de4ed6a86069fcbc9b1e819d5f6b4bc8..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Mask2Former/mask2former_video/modeling/transformer_decoder/position_encoding.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# # Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py -""" -Various positional encodings for the transformer. -""" -import math - -import torch -from torch import nn - - -class PositionEmbeddingSine3D(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, x, mask=None): - # b, t, c, h, w - assert x.dim() == 5, f"{x.shape} should be a 5-dimensional Tensor, got {x.dim()}-dimensional Tensor instead" - if mask is None: - mask = torch.zeros((x.size(0), x.size(1), x.size(3), x.size(4)), device=x.device, dtype=torch.bool) - not_mask = ~mask - z_embed = not_mask.cumsum(1, dtype=torch.float32) - y_embed = not_mask.cumsum(2, dtype=torch.float32) - x_embed = not_mask.cumsum(3, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - z_embed = z_embed / (z_embed[:, -1:, :, :] + eps) * self.scale - y_embed = y_embed / (y_embed[:, :, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - dim_t_z = torch.arange((self.num_pos_feats * 2), dtype=torch.float32, device=x.device) - dim_t_z = self.temperature ** (2 * (dim_t_z // 2) / (self.num_pos_feats * 2)) - - pos_x = x_embed[:, :, :, :, None] / dim_t - pos_y = y_embed[:, :, :, :, None] / dim_t - pos_z = z_embed[:, :, :, :, None] / dim_t_z - pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4) - pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4) - pos_z = torch.stack((pos_z[:, :, :, :, 0::2].sin(), pos_z[:, :, :, :, 1::2].cos()), dim=5).flatten(4) - pos = (torch.cat((pos_y, pos_x), dim=4) + pos_z).permute(0, 1, 4, 2, 3) # b, t, c, h, w - return pos diff --git a/spaces/akhaliq/Nitro-Diffusion2/app.py b/spaces/akhaliq/Nitro-Diffusion2/app.py deleted file mode 100644 index 
4eaffb8728bd02d057ec59431d770e1a8b3bcf0b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Nitro-Diffusion2/app.py +++ /dev/null @@ -1,4 +0,0 @@ -import gradio as gr -name_list = ['models/nitrosocke/Nitro-Diffusion'] -interfaces = [gr.Interface.load(name) for name in name_list] -gr.mix.Parallel(*interfaces, title="Nitro-Diffusion", description="Nitro-Diffusion text to image").launch() \ No newline at end of file diff --git a/spaces/akhaliq/mGPT/app.py b/spaces/akhaliq/mGPT/app.py deleted file mode 100644 index 98b485299a88c6560142d09616361e45589808b3..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/mGPT/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import os -os.system("pip install gradio==2.9b11") -import gradio as gr - -examples=[["My name is Lewis and I like to"],["The weather today is"]] -textbox = gr.Textbox(label="Text"); -gr.Interface.load("huggingface/sberbank-ai/mGPT",inputs=textbox, outputs=textbox,examples=examples).launch() \ No newline at end of file diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py deleted file mode 100644 index 3666ab04ca6460be9bc6944c0f045be7ff44c365..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/main_parser.py +++ /dev/null @@ -1,87 +0,0 @@ -"""A single place for constructing and exposing the main parser -""" - -import os -import sys -from typing import List, Tuple - -from pip._internal.cli import cmdoptions -from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip._internal.commands import commands_dict, get_similar_commands -from pip._internal.exceptions import CommandError -from pip._internal.utils.misc import get_pip_version, get_prog - -__all__ = ["create_main_parser", "parse_command"] - - -def create_main_parser() -> ConfigOptionParser: - """Creates and returns the main parser for pip's CLI""" - - parser = ConfigOptionParser( - usage="\n%prog [options]", - add_help_option=False, - formatter=UpdatingDefaultsHelpFormatter(), - name="global", - prog=get_prog(), - ) - parser.disable_interspersed_args() - - parser.version = get_pip_version() - - # add the general options - gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) - parser.add_option_group(gen_opts) - - # so the help formatter knows - parser.main = True # type: ignore - - # create command listing for description - description = [""] + [ - f"{name:27} {command_info.summary}" - for name, command_info in commands_dict.items() - ] - parser.description = "\n".join(description) - - return parser - - -def parse_command(args: List[str]) -> Tuple[str, List[str]]: - parser = create_main_parser() - - # Note: parser calls disable_interspersed_args(), so the result of this - # call is to split the initial args into the general options before the - # subcommand and everything else. 
- # For example: - # args: ['--timeout=5', 'install', '--user', 'INITools'] - # general_options: ['--timeout==5'] - # args_else: ['install', '--user', 'INITools'] - general_options, args_else = parser.parse_args(args) - - # --version - if general_options.version: - sys.stdout.write(parser.version) - sys.stdout.write(os.linesep) - sys.exit() - - # pip || pip help -> print_help() - if not args_else or (args_else[0] == "help" and len(args_else) == 1): - parser.print_help() - sys.exit() - - # the subcommand name - cmd_name = args_else[0] - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = [f'unknown command "{cmd_name}"'] - if guess: - msg.append(f'maybe you meant "{guess}"') - - raise CommandError(" - ".join(msg)) - - # all the args without the subcommand - cmd_args = args[:] - cmd_args.remove(cmd_name) - - return cmd_name, cmd_args diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansi.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansi.py deleted file mode 100644 index 11ec695ff79627463a0282d25079527562de9e42..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/colorama/ansi.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -''' -This module generates ANSI character codes to printing colors to terminals. -See: http://en.wikipedia.org/wiki/ANSI_escape_code -''' - -CSI = '\033[' -OSC = '\033]' -BEL = '\a' - - -def code_to_chars(code): - return CSI + str(code) + 'm' - -def set_title(title): - return OSC + '2;' + title + BEL - -def clear_screen(mode=2): - return CSI + str(mode) + 'J' - -def clear_line(mode=2): - return CSI + str(mode) + 'K' - - -class AnsiCodes(object): - def __init__(self): - # the subclasses declare class attributes which are numbers. - # Upon instantiation we define instance attributes, which are the same - # as the class attributes but wrapped with the ANSI escape sequence - for name in dir(self): - if not name.startswith('_'): - value = getattr(self, name) - setattr(self, name, code_to_chars(value)) - - -class AnsiCursor(object): - def UP(self, n=1): - return CSI + str(n) + 'A' - def DOWN(self, n=1): - return CSI + str(n) + 'B' - def FORWARD(self, n=1): - return CSI + str(n) + 'C' - def BACK(self, n=1): - return CSI + str(n) + 'D' - def POS(self, x=1, y=1): - return CSI + str(y) + ';' + str(x) + 'H' - - -class AnsiFore(AnsiCodes): - BLACK = 30 - RED = 31 - GREEN = 32 - YELLOW = 33 - BLUE = 34 - MAGENTA = 35 - CYAN = 36 - WHITE = 37 - RESET = 39 - - # These are fairly well supported, but not part of the standard. - LIGHTBLACK_EX = 90 - LIGHTRED_EX = 91 - LIGHTGREEN_EX = 92 - LIGHTYELLOW_EX = 93 - LIGHTBLUE_EX = 94 - LIGHTMAGENTA_EX = 95 - LIGHTCYAN_EX = 96 - LIGHTWHITE_EX = 97 - - -class AnsiBack(AnsiCodes): - BLACK = 40 - RED = 41 - GREEN = 42 - YELLOW = 43 - BLUE = 44 - MAGENTA = 45 - CYAN = 46 - WHITE = 47 - RESET = 49 - - # These are fairly well supported, but not part of the standard. 
- LIGHTBLACK_EX = 100 - LIGHTRED_EX = 101 - LIGHTGREEN_EX = 102 - LIGHTYELLOW_EX = 103 - LIGHTBLUE_EX = 104 - LIGHTMAGENTA_EX = 105 - LIGHTCYAN_EX = 106 - LIGHTWHITE_EX = 107 - - -class AnsiStyle(AnsiCodes): - BRIGHT = 1 - DIM = 2 - NORMAL = 22 - RESET_ALL = 0 - -Fore = AnsiFore() -Back = AnsiBack() -Style = AnsiStyle() -Cursor = AnsiCursor() diff --git a/spaces/ali-ghamdan/deoldify/deoldify/save.py b/spaces/ali-ghamdan/deoldify/deoldify/save.py deleted file mode 100644 index f7175555296b859086cc2c753888bdfe21cb502e..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/deoldify/save.py +++ /dev/null @@ -1,29 +0,0 @@ -from fastai.basic_train import Learner, LearnerCallback -from fastai.vision.gan import GANLearner - - -class GANSaveCallback(LearnerCallback): - """A `LearnerCallback` that saves history of metrics while training `learn` into CSV `filename`.""" - - def __init__( - self, - learn: GANLearner, - learn_gen: Learner, - filename: str, - save_iters: int = 1000, - ): - super().__init__(learn) - self.learn_gen = learn_gen - self.filename = filename - self.save_iters = save_iters - - def on_batch_end(self, iteration: int, epoch: int, **kwargs) -> None: - if iteration == 0: - return - - if iteration % self.save_iters == 0: - self._save_gen_learner(iteration=iteration, epoch=epoch) - - def _save_gen_learner(self, iteration: int, epoch: int): - filename = '{}_{}_{}'.format(self.filename, epoch, iteration) - self.learn_gen.save(filename) diff --git a/spaces/allknowingroger/Image-Models-Test116/README.md b/spaces/allknowingroger/Image-Models-Test116/README.md deleted file mode 100644 index 13d9bd379f31a2bc7a37b7c14a47f88897c4cdd0..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test116/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -duplicated_from: allknowingroger/Image-Models-Test115 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test119/app.py b/spaces/allknowingroger/Image-Models-Test119/app.py deleted file mode 100644 index ff46de3c688d9b45f52d8b7df8ebb43267a71e79..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test119/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "debadas/ronaldo", - "veryVANYA/ps1-graphics-sdxl-v2", - "rachit221195/rachit-sdxl", - "CiroN2022/hair-style", - "ra100/sdxl-lora-lower-decks-ships", - "CiroN2022/weird-fashion", - "ariiiiiiiii/rbxshirt3", - "faizalnf1800/sidebags-earring-anime-woman-lora", - "MakAttack/6540da619d3079bacd616333", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = 
gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test44/README.md b/spaces/allknowingroger/Image-Models-Test44/README.md deleted file mode 100644 index 8f9fd7653ad217e9aff49a8c3a0685863043f500..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test44/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Models -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test43 ---- - - \ No newline at end of file diff --git a/spaces/amankishore/sjc/sd1/ldm/data/personalized.py b/spaces/amankishore/sjc/sd1/ldm/data/personalized.py deleted file mode 100644 index c8a57d09fa354cbd06190829114bdce2afce2aa6..0000000000000000000000000000000000000000 --- a/spaces/amankishore/sjc/sd1/ldm/data/personalized.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import numpy as np -import PIL -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - -import random - 
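# Prompt templates used by PersonalizedBase below: each '{}' placeholder is filled with the
# learned placeholder token (optionally together with a per-image token from
# per_img_token_list) to build the caption paired with every training image, e.g.
# 'a photo of a clean {}'.format('*') -> 'a photo of a clean *'.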
-imagenet_templates_smallest = [ - 'a photo of a {}', -] - -imagenet_templates_small = [ - 'a photo of a {}', - 'a rendering of a {}', - 'a cropped photo of the {}', - 'the photo of a {}', - 'a photo of a clean {}', - 'a photo of a dirty {}', - 'a dark photo of the {}', - 'a photo of my {}', - 'a photo of the cool {}', - 'a close-up photo of a {}', - 'a bright photo of the {}', - 'a cropped photo of a {}', - 'a photo of the {}', - 'a good photo of the {}', - 'a photo of one {}', - 'a close-up photo of the {}', - 'a rendition of the {}', - 'a photo of the clean {}', - 'a rendition of a {}', - 'a photo of a nice {}', - 'a good photo of a {}', - 'a photo of the nice {}', - 'a photo of the small {}', - 'a photo of the weird {}', - 'a photo of the large {}', - 'a photo of a cool {}', - 'a photo of a small {}', -] - -imagenet_dual_templates_small = [ - 'a photo of a {} with {}', - 'a rendering of a {} with {}', - 'a cropped photo of the {} with {}', - 'the photo of a {} with {}', - 'a photo of a clean {} with {}', - 'a photo of a dirty {} with {}', - 'a dark photo of the {} with {}', - 'a photo of my {} with {}', - 'a photo of the cool {} with {}', - 'a close-up photo of a {} with {}', - 'a bright photo of the {} with {}', - 'a cropped photo of a {} with {}', - 'a photo of the {} with {}', - 'a good photo of the {} with {}', - 'a photo of one {} with {}', - 'a close-up photo of the {} with {}', - 'a rendition of the {} with {}', - 'a photo of the clean {} with {}', - 'a rendition of a {} with {}', - 'a photo of a nice {} with {}', - 'a good photo of a {} with {}', - 'a photo of the nice {} with {}', - 'a photo of the small {} with {}', - 'a photo of the weird {} with {}', - 'a photo of the large {} with {}', - 'a photo of a cool {} with {}', - 'a photo of a small {} with {}', -] - -per_img_token_list = [ - 'א', 'ב', 'ג', 'ד', 'ה', 'ו', 'ז', 'ח', 'ט', 'י', 'כ', 'ל', 'מ', 'נ', 'ס', 'ע', 'פ', 'צ', 'ק', 'ר', 'ש', 'ת', -] - -class PersonalizedBase(Dataset): - def __init__(self, - data_root, - size=None, - repeats=100, - interpolation="bicubic", - flip_p=0.5, - set="train", - placeholder_token="*", - per_image_tokens=False, - center_crop=False, - mixing_prob=0.25, - coarse_class_text=None, - ): - - self.data_root = data_root - - self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)] - - # self._length = len(self.image_paths) - self.num_images = len(self.image_paths) - self._length = self.num_images - - self.placeholder_token = placeholder_token - - self.per_image_tokens = per_image_tokens - self.center_crop = center_crop - self.mixing_prob = mixing_prob - - self.coarse_class_text = coarse_class_text - - if per_image_tokens: - assert self.num_images < len(per_img_token_list), f"Can't use per-image tokens when the training set contains more than {len(per_img_token_list)} tokens. To enable larger sets, add more tokens to 'per_img_token_list'." 
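# For the training split the dataset length is num_images * repeats, so the small
# personalization set is cycled many times within a single epoch.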
- - if set == "train": - self._length = self.num_images * repeats - - self.size = size - self.interpolation = {"linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - }[interpolation] - self.flip = transforms.RandomHorizontalFlip(p=flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = {} - image = Image.open(self.image_paths[i % self.num_images]) - - if not image.mode == "RGB": - image = image.convert("RGB") - - placeholder_string = self.placeholder_token - if self.coarse_class_text: - placeholder_string = f"{self.coarse_class_text} {placeholder_string}" - - if self.per_image_tokens and np.random.uniform() < self.mixing_prob: - text = random.choice(imagenet_dual_templates_small).format(placeholder_string, per_img_token_list[i % self.num_images]) - else: - text = random.choice(imagenet_templates_small).format(placeholder_string) - - example["caption"] = text - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - - if self.center_crop: - crop = min(img.shape[0], img.shape[1]) - h, w, = img.shape[0], img.shape[1] - img = img[(h - crop) // 2:(h + crop) // 2, - (w - crop) // 2:(w + crop) // 2] - - image = Image.fromarray(img) - if self.size is not None: - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip(image) - image = np.array(image).astype(np.uint8) - example["image"] = (image / 127.5 - 1.0).astype(np.float32) - return example \ No newline at end of file diff --git a/spaces/antonovmaxim/text-generation-webui-space/modules/llama_attn_hijack.py b/spaces/antonovmaxim/text-generation-webui-space/modules/llama_attn_hijack.py deleted file mode 100644 index e953f523d6c54581af1a30deb8b922f85b3e557a..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/modules/llama_attn_hijack.py +++ /dev/null @@ -1,171 +0,0 @@ -import logging -import math -import sys -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import transformers.models.llama.modeling_llama - -import modules.shared as shared - -if shared.args.xformers: - try: - import xformers.ops - except Exception: - logging.error("xformers not found! 
Please install it before trying to use it.") - - -def hijack_llama_attention(): - if shared.args.xformers: - transformers.models.llama.modeling_llama.LlamaAttention.forward = xformers_forward - logging.info("Replaced attention with xformers_attention") - elif shared.args.sdp_attention: - transformers.models.llama.modeling_llama.LlamaAttention.forward = sdp_attention_forward - logging.info("Replaced attention with sdp_attention") - - -def xformers_forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, -) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - kv_seq_len += past_key_value[0].shape[-2] - cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) - query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) - # [bsz, nh, t, hd] - - if past_key_value is not None: - # reuse k, v, self_attention - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - - past_key_value = (key_states, value_states) if use_cache else None - - # We only apply xformers optimizations if we don't need to output the whole attention matrix - if not output_attentions: - query_states = query_states.transpose(1, 2) - key_states = key_states.transpose(1, 2) - value_states = value_states.transpose(1, 2) - - # This is a nasty hack. We know attention_mask in transformers is either LowerTriangular or all Zeros. - # We therefore check if one element in the upper triangular portion is zero. If it is, then the mask is all zeros.
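# Element [0, 0, 0, 1] lies above the diagonal, so a lower-triangular (causal) mask holds a
# large negative value there; reading a zero instead means the mask imposes no constraint and
# the attention bias can be dropped.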
- if attention_mask is None or attention_mask[0, 0, 0, 1] == 0: - # input and output should be of form (bsz, q_len, num_heads, head_dim) - attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=None) - else: - # input and output should be of form (bsz, q_len, num_heads, head_dim) - attn_output = xformers.ops.memory_efficient_attention(query_states, key_states, value_states, attn_bias=xformers.ops.LowerTriangularMask()) - attn_weights = None - else: - attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) - - if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights + attention_mask - attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) - - # upcast attention to fp32 - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) - attn_output = torch.matmul(attn_weights, value_states) - - if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.transpose(1, 2) - - attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) - attn_output = self.o_proj(attn_output) - return attn_output, attn_weights, past_key_value - - -def sdp_attention_forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, -) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - bsz, q_len, _ = hidden_states.size() - - query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - kv_seq_len += past_key_value[0].shape[-2] - cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) - query_states, key_states = transformers.models.llama.modeling_llama.apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) - # [bsz, nh, t, hd] - - if past_key_value is not None: - # reuse k, v, self_attention - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - - past_key_value = (key_states, value_states) if use_cache else None - - # We only apply sdp attention if we don't need to output the whole attention matrix - if not output_attentions: - attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, is_causal=False) - attn_weights = None - else: - attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) - - if attn_weights.size() 
!= (bsz, self.num_heads, q_len, kv_seq_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights + attention_mask - attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) - - # upcast attention to fp32 - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) - attn_output = torch.matmul(attn_weights, value_states) - - if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) - - attn_output = self.o_proj(attn_output) - - return attn_output, attn_weights, past_key_value diff --git a/spaces/anzorq/finetuned_diffusion/README.md b/spaces/anzorq/finetuned_diffusion/README.md deleted file mode 100644 index 3366ce5c8ef30791daaab30b9dd403e2342f6f8d..0000000000000000000000000000000000000000 --- a/spaces/anzorq/finetuned_diffusion/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Finetuned Diffusion -emoji: 🪄🖼️ -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.15.0 -python_version: 3.8.3 -app_file: app.py -pinned: true -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/arnikdehnavi/citationPrediction/README.md b/spaces/arnikdehnavi/citationPrediction/README.md deleted file mode 100644 index 0ee07d49d91492ffa1e5edda34ddb375e3fc154a..0000000000000000000000000000000000000000 --- a/spaces/arnikdehnavi/citationPrediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: CitationPrediction -emoji: 👁 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.20.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/artificialguybr/video-dubbing/TTS/recipes/bel-alex73/docker-prepare/Dockerfile b/spaces/artificialguybr/video-dubbing/TTS/recipes/bel-alex73/docker-prepare/Dockerfile deleted file mode 100644 index fd9b745386da8319ddb9ed2cbb7d3db720e12bb9..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/recipes/bel-alex73/docker-prepare/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM ubuntu:22.04 - -RUN apt -y update -RUN apt -y upgrade -RUN apt -y install --no-install-recommends pip ffmpeg openjdk-19-jre-headless - -RUN mkdir /a/ -ADD requirements*.txt /a/ -WORKDIR /a/ -RUN pip install -r requirements.txt -r requirements.dev.txt -r requirements.notebooks.txt -RUN pip install seaborn pydub notebook - -RUN apt -y install --no-install-recommends gcc libpython3.10-dev - -ADD runtime.sh /a/ - -WORKDIR /a/TTS/ -CMD /a/runtime.sh diff --git a/spaces/artificialguybr/video-dubbing/whisper/whisper/decoding.py b/spaces/artificialguybr/video-dubbing/whisper/whisper/decoding.py deleted file mode 100644 index ecd98a455bb7a840f90bbfff40529b9bff7a8e81..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/whisper/whisper/decoding.py +++ /dev/null @@ -1,821 +0,0 @@ 
-from dataclasses import dataclass, field, replace -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from torch import Tensor -from torch.distributions import Categorical - -from .audio import CHUNK_LENGTH -from .tokenizer import Tokenizer, get_tokenizer -from .utils import compression_ratio - -if TYPE_CHECKING: - from .model import Whisper - - -@torch.no_grad() -def detect_language( - model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None -) -> Tuple[Tensor, List[dict]]: - """ - Detect the spoken language in the audio, and return them as list of strings, along with the ids - of the most probable language tokens and the probability distribution over all language tokens. - This is performed outside the main decode loop in order to not interfere with kv-caching. - - Returns - ------- - language_tokens : Tensor, shape = (n_audio,) - ids of the most probable language tokens, which appears after the startoftranscript token. - language_probs : List[Dict[str, float]], length = n_audio - list of dictionaries containing the probability distribution over all languages. - """ - if tokenizer is None: - tokenizer = get_tokenizer(model.is_multilingual) - if ( - tokenizer.language is None - or tokenizer.language_token not in tokenizer.sot_sequence - ): - raise ValueError( - "This model doesn't have language tokens so it can't perform lang id" - ) - - single = mel.ndim == 2 - if single: - mel = mel.unsqueeze(0) - - # skip encoder forward pass if already-encoded audio features were given - if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state): - mel = model.encoder(mel) - - # forward pass using a single token, startoftranscript - n_audio = mel.shape[0] - x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device) # [n_audio, 1] - logits = model.logits(x, mel)[:, 0] - - # collect detected languages; suppress all non-language tokens - mask = torch.ones(logits.shape[-1], dtype=torch.bool) - mask[list(tokenizer.all_language_tokens)] = False - logits[:, mask] = -np.inf - language_tokens = logits.argmax(dim=-1) - language_token_probs = logits.softmax(dim=-1).cpu() - language_probs = [ - { - c: language_token_probs[i, j].item() - for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes) - } - for i in range(n_audio) - ] - - if single: - language_tokens = language_tokens[0] - language_probs = language_probs[0] - - return language_tokens, language_probs - - -@dataclass(frozen=True) -class DecodingOptions: - # whether to perform X->X "transcribe" or X->English "translate" - task: str = "transcribe" - - # language that the audio is in; uses detected language if None - language: Optional[str] = None - - # sampling-related options - temperature: float = 0.0 - sample_len: Optional[int] = None # maximum number of tokens to sample - best_of: Optional[int] = None # number of independent sample trajectories, if t > 0 - beam_size: Optional[int] = None # number of beams in beam search, if t == 0 - patience: Optional[float] = None # patience in beam search (arxiv:2204.05424) - - # "alpha" in Google NMT, or None for length norm, when ranking generations - # to select which to return among the beams or best-of-N samples - length_penalty: Optional[float] = None - - # text or tokens to feed as the prompt or the prefix; for more info: - # https://github.com/openai/whisper/discussions/117#discussioncomment-3727051 - prompt: Optional[Union[str, List[int]]] = None # for the previous 
context - prefix: Optional[Union[str, List[int]]] = None # to prefix the current context - - # list of tokens ids (or comma-separated token ids) to suppress - # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()` - suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1" - suppress_blank: bool = True # this will suppress blank outputs - - # timestamp sampling options - without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only - max_initial_timestamp: Optional[float] = 1.0 - - # implementation details - fp16: bool = True # use fp16 for most of the calculation - - -@dataclass(frozen=True) -class DecodingResult: - audio_features: Tensor - language: str - language_probs: Optional[Dict[str, float]] = None - tokens: List[int] = field(default_factory=list) - text: str = "" - avg_logprob: float = np.nan - no_speech_prob: float = np.nan - temperature: float = np.nan - compression_ratio: float = np.nan - - -class Inference: - def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor: - """Perform a forward pass on the decoder and return per-token logits""" - raise NotImplementedError - - def rearrange_kv_cache(self, source_indices) -> None: - """Update the key-value cache according to the updated beams""" - raise NotImplementedError - - def cleanup_caching(self) -> None: - """Clean up any resources or hooks after decoding is finished""" - pass - - -class PyTorchInference(Inference): - def __init__(self, model: "Whisper", initial_token_length: int): - self.model: "Whisper" = model - self.initial_token_length = initial_token_length - self.kv_cache = {} - self.hooks = [] - - key_modules = [block.attn.key for block in self.model.decoder.blocks] - value_modules = [block.attn.value for block in self.model.decoder.blocks] - self.kv_modules = key_modules + value_modules - - def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor: - if not self.kv_cache: - self.kv_cache, self.hooks = self.model.install_kv_cache_hooks() - - if tokens.shape[-1] > self.initial_token_length: - # only need to use the last token except in the first forward pass - tokens = tokens[:, -1:] - - return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache) - - def cleanup_caching(self): - for hook in self.hooks: - hook.remove() - - self.kv_cache = {} - self.hooks = [] - - def rearrange_kv_cache(self, source_indices): - if source_indices != list(range(len(source_indices))): - for module in self.kv_modules: - # update the key/value cache to contain the selected sequences - self.kv_cache[module] = self.kv_cache[module][source_indices].detach() - - -class SequenceRanker: - def rank( - self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]] - ) -> List[int]: - """ - Given a list of groups of samples and their cumulative log probabilities, - return the indices of the samples in each group to select as the final result - """ - raise NotImplementedError - - -class MaximumLikelihoodRanker(SequenceRanker): - """ - Select the sample with the highest log probabilities, penalized using either - a simple length normalization or Google NMT paper's length penalty - """ - - def __init__(self, length_penalty: Optional[float]): - self.length_penalty = length_penalty - - def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]): - def scores(logprobs, lengths): - result = [] - for logprob, length in zip(logprobs, lengths): - if self.length_penalty is None: - penalty = length - else: - # from the Google NMT paper - penalty = ((5 + length) / 
6) ** self.length_penalty - result.append(logprob / penalty) - return result - - # get the sequence with the highest score - lengths = [[len(t) for t in s] for s in tokens] - return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)] - - -class TokenDecoder: - def reset(self): - """Initialize any stateful variables for decoding a new sequence""" - - def update( - self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor - ) -> Tuple[Tensor, bool]: - """Specify how to select the next token, based on the current trace and logits - - Parameters - ---------- - tokens : Tensor, shape = (n_batch, current_sequence_length) - all tokens in the context so far, including the prefix and sot_sequence tokens - - logits : Tensor, shape = (n_batch, vocab_size) - per-token logits of the probability distribution at the current step - - sum_logprobs : Tensor, shape = (n_batch) - cumulative log probabilities for each sequence - - Returns - ------- - tokens : Tensor, shape = (n_batch, current_sequence_length + 1) - the tokens, appended with the selected next token - - completed : bool - True if all sequences has reached the end of text - - """ - raise NotImplementedError - - def finalize( - self, tokens: Tensor, sum_logprobs: Tensor - ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]: - """Finalize search and return the final candidate sequences - - Parameters - ---------- - tokens : Tensor, shape = (n_audio, n_group, current_sequence_length) - all tokens in the context so far, including the prefix and sot_sequence - - sum_logprobs : Tensor, shape = (n_audio, n_group) - cumulative log probabilities for each sequence - - Returns - ------- - tokens : Sequence[Sequence[Tensor]], length = n_audio - sequence of Tensors containing candidate token sequences, for each audio input - - sum_logprobs : List[List[float]], length = n_audio - sequence of cumulative log probabilities corresponding to the above - - """ - raise NotImplementedError - - -class GreedyDecoder(TokenDecoder): - def __init__(self, temperature: float, eot: int): - self.temperature = temperature - self.eot = eot - - def update( - self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor - ) -> Tuple[Tensor, bool]: - if self.temperature == 0: - next_tokens = logits.argmax(dim=-1) - else: - next_tokens = Categorical(logits=logits / self.temperature).sample() - - logprobs = F.log_softmax(logits.float(), dim=-1) - current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens] - sum_logprobs += current_logprobs * (tokens[:, -1] != self.eot) - - next_tokens[tokens[:, -1] == self.eot] = self.eot - tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1) - - completed = (tokens[:, -1] == self.eot).all() - return tokens, completed - - def finalize(self, tokens: Tensor, sum_logprobs: Tensor): - # make sure each sequence has at least one EOT token at the end - tokens = F.pad(tokens, (0, 1), value=self.eot) - return tokens, sum_logprobs.tolist() - - -class BeamSearchDecoder(TokenDecoder): - def __init__( - self, - beam_size: int, - eot: int, - inference: Inference, - patience: Optional[float] = None, - ): - self.beam_size = beam_size - self.eot = eot - self.inference = inference - self.patience = patience or 1.0 - self.max_candidates: int = round(beam_size * self.patience) - self.finished_sequences = None - - assert ( - self.max_candidates > 0 - ), f"Invalid beam size ({beam_size}) or patience ({patience})" - - def reset(self): - self.finished_sequences = None - - def update( - self, tokens: Tensor, logits: Tensor, sum_logprobs: 
Tensor - ) -> Tuple[Tensor, bool]: - if tokens.shape[0] % self.beam_size != 0: - raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0") - - n_audio = tokens.shape[0] // self.beam_size - if self.finished_sequences is None: # for the first update - self.finished_sequences = [{} for _ in range(n_audio)] - - logprobs = F.log_softmax(logits.float(), dim=-1) - next_tokens, source_indices, finished_sequences = [], [], [] - for i in range(n_audio): - scores, sources, finished = {}, {}, {} - - # STEP 1: calculate the cumulative log probabilities for possible candidates - for j in range(self.beam_size): - idx = i * self.beam_size + j - prefix = tokens[idx].tolist() - for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)): - new_logprob = (sum_logprobs[idx] + logprob).item() - sequence = tuple(prefix + [token.item()]) - scores[sequence] = new_logprob - sources[sequence] = idx - - # STEP 2: rank the candidates and keep the top beam_size sequences for each audio - saved = 0 - for sequence in sorted(scores, key=scores.get, reverse=True): - if sequence[-1] == self.eot: - finished[sequence] = scores[sequence] - else: - sum_logprobs[len(next_tokens)] = scores[sequence] - next_tokens.append(sequence) - source_indices.append(sources[sequence]) - - saved += 1 - if saved == self.beam_size: - break - - finished_sequences.append(finished) - - tokens = torch.tensor(next_tokens, device=tokens.device) - self.inference.rearrange_kv_cache(source_indices) - - # add newly finished sequences to self.finished_sequences - assert len(self.finished_sequences) == len(finished_sequences) - for previously_finished, newly_finished in zip( - self.finished_sequences, finished_sequences - ): - for seq in sorted(newly_finished, key=newly_finished.get, reverse=True): - if len(previously_finished) >= self.max_candidates: - break # the candidate list is full - previously_finished[seq] = newly_finished[seq] - - # mark as completed if all audio has enough number of samples - completed = all( - len(sequences) >= self.max_candidates - for sequences in self.finished_sequences - ) - return tokens, completed - - def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor): - # collect all finished sequences, including patience, and add unfinished ones if not enough - sum_logprobs = sum_logprobs.cpu() - for i, sequences in enumerate(self.finished_sequences): - if ( - len(sequences) < self.beam_size - ): # when not enough sequences are finished - for j in list(np.argsort(sum_logprobs[i]))[::-1]: - sequence = preceding_tokens[i, j].tolist() + [self.eot] - sequences[tuple(sequence)] = sum_logprobs[i][j].item() - if len(sequences) >= self.beam_size: - break - - tokens: List[List[Tensor]] = [ - [torch.tensor(seq) for seq in sequences.keys()] - for sequences in self.finished_sequences - ] - sum_logprobs: List[List[float]] = [ - list(sequences.values()) for sequences in self.finished_sequences - ] - return tokens, sum_logprobs - - -class LogitFilter: - def apply(self, logits: Tensor, tokens: Tensor) -> None: - """Apply any filtering or masking to logits in-place - - Parameters - ---------- - logits : Tensor, shape = (n_batch, vocab_size) - per-token logits of the probability distribution at the current step - - tokens : Tensor, shape = (n_batch, current_sequence_length) - all tokens in the context so far, including the prefix and sot_sequence tokens - - """ - raise NotImplementedError - - -class SuppressBlank(LogitFilter): - def __init__(self, tokenizer: Tokenizer, sample_begin: int): - self.tokenizer = tokenizer - 
self.sample_begin = sample_begin - - def apply(self, logits: Tensor, tokens: Tensor): - if tokens.shape[1] == self.sample_begin: - logits[:, self.tokenizer.encode(" ") + [self.tokenizer.eot]] = -np.inf - - -class SuppressTokens(LogitFilter): - def __init__(self, suppress_tokens: Sequence[int]): - self.suppress_tokens = list(suppress_tokens) - - def apply(self, logits: Tensor, tokens: Tensor): - logits[:, self.suppress_tokens] = -np.inf - - -class ApplyTimestampRules(LogitFilter): - def __init__( - self, - tokenizer: Tokenizer, - sample_begin: int, - max_initial_timestamp_index: Optional[int], - ): - self.tokenizer = tokenizer - self.sample_begin = sample_begin - self.max_initial_timestamp_index = max_initial_timestamp_index - - def apply(self, logits: Tensor, tokens: Tensor): - # suppress <|notimestamps|> which is handled by without_timestamps - if self.tokenizer.no_timestamps is not None: - logits[:, self.tokenizer.no_timestamps] = -np.inf - - # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly - for k in range(tokens.shape[0]): - sampled_tokens = tokens[k, self.sample_begin :] - seq = [t for t in sampled_tokens.tolist()] - last_was_timestamp = ( - len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin - ) - penultimate_was_timestamp = ( - len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin - ) - - if last_was_timestamp: - if penultimate_was_timestamp: # has to be non-timestamp - logits[k, self.tokenizer.timestamp_begin :] = -np.inf - else: # cannot be normal text tokens - logits[k, : self.tokenizer.eot] = -np.inf - - timestamps = sampled_tokens[ - sampled_tokens.ge(self.tokenizer.timestamp_begin) - ] - if timestamps.numel() > 0: - # timestamps shouldn't decrease; forbid timestamp tokens smaller than the last - # also force each segment to have a nonzero length, to prevent infinite looping - if last_was_timestamp and not penultimate_was_timestamp: - timestamp_last = timestamps[-1] - else: - timestamp_last = timestamps[-1] + 1 - logits[k, self.tokenizer.timestamp_begin : timestamp_last] = -np.inf - - if tokens.shape[1] == self.sample_begin: - # suppress generating non-timestamp tokens at the beginning - logits[:, : self.tokenizer.timestamp_begin] = -np.inf - - # apply the `max_initial_timestamp` option - if self.max_initial_timestamp_index is not None: - last_allowed = ( - self.tokenizer.timestamp_begin + self.max_initial_timestamp_index - ) - logits[:, last_allowed + 1 :] = -np.inf - - # if sum of probability over timestamps is above any other token, sample timestamp - logprobs = F.log_softmax(logits.float(), dim=-1) - for k in range(tokens.shape[0]): - timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp( - dim=-1 - ) - max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max() - if timestamp_logprob > max_text_token_logprob: - logits[k, : self.tokenizer.timestamp_begin] = -np.inf - - -class DecodingTask: - inference: Inference - sequence_ranker: SequenceRanker - decoder: TokenDecoder - logit_filters: List[LogitFilter] - - def __init__(self, model: "Whisper", options: DecodingOptions): - self.model = model - - language = options.language or "en" - tokenizer = get_tokenizer( - model.is_multilingual, language=language, task=options.task - ) - self.tokenizer: Tokenizer = tokenizer - self.options: DecodingOptions = self._verify_options(options) - - self.n_group: int = options.beam_size or options.best_of or 1 - self.n_ctx: int = model.dims.n_text_ctx - self.sample_len: int = options.sample_len or 
model.dims.n_text_ctx // 2 - - self.sot_sequence: Tuple[int] = tokenizer.sot_sequence - if self.options.without_timestamps: - self.sot_sequence = tokenizer.sot_sequence_including_notimestamps - - self.initial_tokens: Tuple[int] = self._get_initial_tokens() - self.sample_begin: int = len(self.initial_tokens) - self.sot_index: int = self.initial_tokens.index(tokenizer.sot) - - # inference: implements the forward pass through the decoder, including kv caching - self.inference = PyTorchInference(model, len(self.initial_tokens)) - - # sequence ranker: implements how to rank a group of sampled sequences - self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty) - - # decoder: implements how to select the next tokens, given the autoregressive distribution - if options.beam_size is not None: - self.decoder = BeamSearchDecoder( - options.beam_size, tokenizer.eot, self.inference, options.patience - ) - else: - self.decoder = GreedyDecoder(options.temperature, tokenizer.eot) - - # logit filters: applies various rules to suppress or penalize certain tokens - self.logit_filters = [] - if self.options.suppress_blank: - self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin)) - if self.options.suppress_tokens: - self.logit_filters.append(SuppressTokens(self._get_suppress_tokens())) - if not options.without_timestamps: - precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds - max_initial_timestamp_index = None - if options.max_initial_timestamp: - max_initial_timestamp_index = round( - self.options.max_initial_timestamp / precision - ) - self.logit_filters.append( - ApplyTimestampRules( - tokenizer, self.sample_begin, max_initial_timestamp_index - ) - ) - - def _verify_options(self, options: DecodingOptions) -> DecodingOptions: - if options.beam_size is not None and options.best_of is not None: - raise ValueError("beam_size and best_of can't be given together") - if options.temperature == 0: - if options.best_of is not None: - raise ValueError("best_of with greedy sampling (T=0) is not compatible") - if options.patience is not None and options.beam_size is None: - raise ValueError("patience requires beam_size to be given") - if options.length_penalty is not None and not ( - 0 <= options.length_penalty <= 1 - ): - raise ValueError("length_penalty (alpha) should be a value between 0 and 1") - - return options - - def _get_initial_tokens(self) -> Tuple[int]: - tokens = list(self.sot_sequence) - - if prefix := self.options.prefix: - prefix_tokens = ( - self.tokenizer.encode(" " + prefix.strip()) - if isinstance(prefix, str) - else prefix - ) - if self.sample_len is not None: - max_prefix_len = self.n_ctx // 2 - self.sample_len - prefix_tokens = prefix_tokens[-max_prefix_len:] - tokens = tokens + prefix_tokens - - if prompt := self.options.prompt: - prompt_tokens = ( - self.tokenizer.encode(" " + prompt.strip()) - if isinstance(prompt, str) - else prompt - ) - tokens = ( - [self.tokenizer.sot_prev] - + prompt_tokens[-(self.n_ctx // 2 - 1) :] - + tokens - ) - - return tuple(tokens) - - def _get_suppress_tokens(self) -> Tuple[int]: - suppress_tokens = self.options.suppress_tokens - - if isinstance(suppress_tokens, str): - suppress_tokens = [int(t) for t in suppress_tokens.split(",")] - - if -1 in suppress_tokens: - suppress_tokens = [t for t in suppress_tokens if t >= 0] - suppress_tokens.extend(self.tokenizer.non_speech_tokens) - elif suppress_tokens is None or len(suppress_tokens) == 0: - suppress_tokens = [] # interpret empty string as an empty list - else: 
- assert isinstance(suppress_tokens, list), "suppress_tokens must be a list" - - suppress_tokens.extend( - [ - self.tokenizer.transcribe, - self.tokenizer.translate, - self.tokenizer.sot, - self.tokenizer.sot_prev, - self.tokenizer.sot_lm, - ] - ) - if self.tokenizer.no_speech is not None: - # no-speech probability is collected separately - suppress_tokens.append(self.tokenizer.no_speech) - - return tuple(sorted(set(suppress_tokens))) - - def _get_audio_features(self, mel: Tensor): - if self.options.fp16: - mel = mel.half() - - if mel.shape[-2:] == ( - self.model.dims.n_audio_ctx, - self.model.dims.n_audio_state, - ): - # encoded audio features are given; skip audio encoding - audio_features = mel - else: - audio_features = self.model.encoder(mel) - - if audio_features.dtype != ( - torch.float16 if self.options.fp16 else torch.float32 - ): - raise TypeError( - f"audio_features has an incorrect dtype: {audio_features.dtype}" - ) - - return audio_features - - def _detect_language(self, audio_features: Tensor, tokens: Tensor): - languages = [self.options.language] * audio_features.shape[0] - lang_probs = None - - if self.options.language is None or self.options.task == "lang_id": - lang_tokens, lang_probs = self.model.detect_language( - audio_features, self.tokenizer - ) - languages = [max(probs, key=probs.get) for probs in lang_probs] - if self.options.language is None: - tokens[:, self.sot_index + 1] = lang_tokens # write language tokens - - return languages, lang_probs - - def _main_loop(self, audio_features: Tensor, tokens: Tensor): - n_batch = tokens.shape[0] - sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device) - no_speech_probs = [np.nan] * n_batch - - try: - for i in range(self.sample_len): - logits = self.inference.logits(tokens, audio_features) - - if ( - i == 0 and self.tokenizer.no_speech is not None - ): # save no_speech_probs - probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1) - no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist() - - # now we need to consider the logits at the last token only - logits = logits[:, -1] - - # apply the logit filters, e.g.
for suppressing or applying penalty to - for logit_filter in self.logit_filters: - logit_filter.apply(logits, tokens) - - # expand the tokens tensor with the selected next tokens - tokens, completed = self.decoder.update(tokens, logits, sum_logprobs) - - if completed or tokens.shape[-1] > self.n_ctx: - break - finally: - self.inference.cleanup_caching() - - return tokens, sum_logprobs, no_speech_probs - - @torch.no_grad() - def run(self, mel: Tensor) -> List[DecodingResult]: - self.decoder.reset() - tokenizer: Tokenizer = self.tokenizer - n_audio: int = mel.shape[0] - - audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass - tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1) - - # detect language if requested, overwriting the language token - languages, language_probs = self._detect_language(audio_features, tokens) - if self.options.task == "lang_id": - return [ - DecodingResult( - audio_features=features, language=language, language_probs=probs - ) - for features, language, probs in zip( - audio_features, languages, language_probs - ) - ] - - # repeat text tensors by the group size, for beam search or best-of-n sampling - tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device) - - # call the main sampling loop - tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens) - - # reshape the tensors to have (n_audio, n_group) as the first two dimensions - audio_features = audio_features[:: self.n_group] - no_speech_probs = no_speech_probs[:: self.n_group] - assert audio_features.shape[0] == len(no_speech_probs) == n_audio - - tokens = tokens.reshape(n_audio, self.n_group, -1) - sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group) - - # get the final candidates for each group, and slice between the first sampled token and EOT - tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs) - tokens: List[List[Tensor]] = [ - [t[self.sample_begin : (t == tokenizer.eot).nonzero()[0, 0]] for t in s] - for s in tokens - ] - - # select the top-ranked sample in each group - selected = self.sequence_ranker.rank(tokens, sum_logprobs) - tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)] - texts: List[str] = [tokenizer.decode(t).strip() for t in tokens] - - sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)] - avg_logprobs: List[float] = [ - lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs) - ] - - fields = ( - texts, - languages, - tokens, - audio_features, - avg_logprobs, - no_speech_probs, - ) - if len(set(map(len, fields))) != 1: - raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}") - - return [ - DecodingResult( - audio_features=features, - language=language, - tokens=tokens, - text=text, - avg_logprob=avg_logprob, - no_speech_prob=no_speech_prob, - temperature=self.options.temperature, - compression_ratio=compression_ratio(text), - ) - for text, language, tokens, features, avg_logprob, no_speech_prob in zip( - *fields - ) - ] - - -@torch.no_grad() -def decode( - model: "Whisper", - mel: Tensor, - options: DecodingOptions = DecodingOptions(), - **kwargs, -) -> Union[DecodingResult, List[DecodingResult]]: - """ - Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s). 
- - Parameters - ---------- - model: Whisper - the Whisper model instance - - mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000) - A tensor containing the Mel spectrogram(s) - - options: DecodingOptions - A dataclass that contains all necessary options for decoding 30-second segments - - Returns - ------- - result: Union[DecodingResult, List[DecodingResult]] - The result(s) of decoding contained in `DecodingResult` dataclass instance(s) - """ - if single := mel.ndim == 2: - mel = mel.unsqueeze(0) - - if kwargs: - options = replace(options, **kwargs) - - result = DecodingTask(model, options).run(mel) - - return result[0] if single else result diff --git a/spaces/arxify/RVC-beta-v2-0618/infer/train-index768-5.py b/spaces/arxify/RVC-beta-v2-0618/infer/train-index768-5.py deleted file mode 100644 index 78470fb5d0c3cc8dbd15b0d0456f3218c170ba06..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/infer/train-index768-5.py +++ /dev/null @@ -1,38 +0,0 @@ -""" -Format: the cid is used directly as the built-in index position; the aid does not fit, so it is looked up via a dictionary (there are only about 50k of them anyway) -""" -import faiss, numpy as np, os,torch - -# ########### if these are the raw features, save them first -inp_root = r"E:\codes\py39\test-20230416b\logs\mi-test-v2\3_feature768" -npys = [] -for name in sorted(list(os.listdir(inp_root))): - phone = np.load("%s/%s" % (inp_root, name)) - npys.append(phone) -big_npy = np.concatenate(npys, 0) -torch.from_numpy(big_npy).unfold(0,5,1)# stride 1, groups of 5, unfolding along dim 0 - -print(big_npy.shape) # (6196072, 192)#fp32#4.43G -np.save("infer/big_src_feature_mi.npy", big_npy) - -##################train+add -# big_npy=np.load("/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/inference_f0/big_src_feature_mi.npy") -print(big_npy.shape) -index = faiss.index_factory(256, "IVF512,Flat") # mi -print("training") -index_ivf = faiss.extract_index_ivf(index) # -index_ivf.nprobe = 9 -index.train(big_npy) -faiss.write_index(index, "infer/trained_IVF512_Flat_mi_baseline_src_feat.index") -print("adding") -index.add(big_npy) -faiss.write_index(index, "infer/added_IVF512_Flat_mi_baseline_src_feat.index") -""" -Sizes (all FP32) -big_src_feature 2.95G - (3098036, 256) -big_emb 4.43G - (6196072, 192) -big_emb is twice as large because the features are repeated and pitch is appended when they are computed - -""" diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/errorbars_with_std.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/errorbars_with_std.py deleted file mode 100644 index 1864e0ece7a57a76342a790b89d9216363aa0b8f..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/errorbars_with_std.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Error Bar with Standard Deviation ---------------------------------- -This example shows how to show error bars with standard deviation using crop yield data of different varieties -in the years of the 1930s.
-""" -# category: other charts -import altair as alt -from vega_datasets import data - -source = data.barley() - -error_bars = alt.Chart(source).mark_errorbar(extent='stdev').encode( - x=alt.X('yield:Q', scale=alt.Scale(zero=False)), - y=alt.Y('variety:N') -) - -points = alt.Chart(source).mark_point(filled=True, color='black').encode( - x=alt.X('yield:Q', aggregate='mean'), - y=alt.Y('variety:N'), -) - -error_bars + points diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/abc/_streams.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/abc/_streams.py deleted file mode 100644 index 4980ef4f5bd1f2b9ef9b1674d3eb9fc86dabf36f..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/anyio/abc/_streams.py +++ /dev/null @@ -1,198 +0,0 @@ -from abc import abstractmethod -from typing import Any, Callable, Generic, Optional, TypeVar, Union - -from .._core._exceptions import EndOfStream -from .._core._typedattr import TypedAttributeProvider -from ._resources import AsyncResource -from ._tasks import TaskGroup - -T_Item = TypeVar("T_Item") -T_Stream = TypeVar("T_Stream") - - -class UnreliableObjectReceiveStream( - Generic[T_Item], AsyncResource, TypedAttributeProvider -): - """ - An interface for receiving objects. - - This interface makes no guarantees that the received messages arrive in the order in which they - were sent, or that no messages are missed. - - Asynchronously iterating over objects of this type will yield objects matching the given type - parameter. - """ - - def __aiter__(self) -> "UnreliableObjectReceiveStream[T_Item]": - return self - - async def __anext__(self) -> T_Item: - try: - return await self.receive() - except EndOfStream: - raise StopAsyncIteration - - @abstractmethod - async def receive(self) -> T_Item: - """ - Receive the next item. - - :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly - closed - :raises ~anyio.EndOfStream: if this stream has been closed from the other end - :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable - due to external causes - """ - - -class UnreliableObjectSendStream( - Generic[T_Item], AsyncResource, TypedAttributeProvider -): - """ - An interface for sending objects. - - This interface makes no guarantees that the messages sent will reach the recipient(s) in the - same order in which they were sent, or at all. - """ - - @abstractmethod - async def send(self, item: T_Item) -> None: - """ - Send an item to the peer(s). - - :param item: the item to send - :raises ~anyio.ClosedResourceError: if the send stream has been explicitly - closed - :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable - due to external causes - """ - - -class UnreliableObjectStream( - UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item] -): - """ - A bidirectional message stream which does not guarantee the order or reliability of message - delivery. - """ - - -class ObjectReceiveStream(UnreliableObjectReceiveStream[T_Item]): - """ - A receive message stream which guarantees that messages are received in the same order in - which they were sent, and that no messages are missed. - """ - - -class ObjectSendStream(UnreliableObjectSendStream[T_Item]): - """ - A send message stream which guarantees that messages are delivered in the same order in which - they were sent, without missing any messages in the middle. 
- """ - - -class ObjectStream( - ObjectReceiveStream[T_Item], - ObjectSendStream[T_Item], - UnreliableObjectStream[T_Item], -): - """ - A bidirectional message stream which guarantees the order and reliability of message delivery. - """ - - @abstractmethod - async def send_eof(self) -> None: - """ - Send an end-of-file indication to the peer. - - You should not try to send any further data to this stream after calling this method. - This method is idempotent (does nothing on successive calls). - """ - - -class ByteReceiveStream(AsyncResource, TypedAttributeProvider): - """ - An interface for receiving bytes from a single peer. - - Iterating this byte stream will yield a byte string of arbitrary length, but no more than - 65536 bytes. - """ - - def __aiter__(self) -> "ByteReceiveStream": - return self - - async def __anext__(self) -> bytes: - try: - return await self.receive() - except EndOfStream: - raise StopAsyncIteration - - @abstractmethod - async def receive(self, max_bytes: int = 65536) -> bytes: - """ - Receive at most ``max_bytes`` bytes from the peer. - - .. note:: Implementors of this interface should not return an empty :class:`bytes` object, - and users should ignore them. - - :param max_bytes: maximum number of bytes to receive - :return: the received bytes - :raises ~anyio.EndOfStream: if this stream has been closed from the other end - """ - - -class ByteSendStream(AsyncResource, TypedAttributeProvider): - """An interface for sending bytes to a single peer.""" - - @abstractmethod - async def send(self, item: bytes) -> None: - """ - Send the given bytes to the peer. - - :param item: the bytes to send - """ - - -class ByteStream(ByteReceiveStream, ByteSendStream): - """A bidirectional byte stream.""" - - @abstractmethod - async def send_eof(self) -> None: - """ - Send an end-of-file indication to the peer. - - You should not try to send any further data to this stream after calling this method. - This method is idempotent (does nothing on successive calls). - """ - - -#: Type alias for all unreliable bytes-oriented receive streams. -AnyUnreliableByteReceiveStream = Union[ - UnreliableObjectReceiveStream[bytes], ByteReceiveStream -] -#: Type alias for all unreliable bytes-oriented send streams. -AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream] -#: Type alias for all unreliable bytes-oriented streams. -AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream] -#: Type alias for all bytes-oriented receive streams. -AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream] -#: Type alias for all bytes-oriented send streams. -AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream] -#: Type alias for all bytes-oriented streams. -AnyByteStream = Union[ObjectStream[bytes], ByteStream] - - -class Listener(Generic[T_Stream], AsyncResource, TypedAttributeProvider): - """An interface for objects that let you accept incoming connections.""" - - @abstractmethod - async def serve( - self, handler: Callable[[T_Stream], Any], task_group: Optional[TaskGroup] = None - ) -> None: - """ - Accept incoming connections as they come in and start tasks to handle them. 
- - :param handler: a callable that will be used to handle each accepted connection - :param task_group: the task group that will be used to start tasks for handling each - accepted connection (if omitted, an ad-hoc task group will be created) - """ diff --git a/spaces/arxnov/anotest/text/japanese.py b/spaces/arxnov/anotest/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/arxnov/anotest/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and 
a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/asciicorp/Legal-ai/query_data copy.py b/spaces/asciicorp/Legal-ai/query_data copy.py deleted file mode 100644 index 62d2a06625104d5294ac71c0959d61d845632e4f..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/Legal-ai/query_data copy.py +++ /dev/null @@ -1,35 +0,0 @@ -from langchain.prompts.prompt import PromptTemplate -from langchain.llms import OpenAI -from langchain.chains import ChatVectorDBChain - -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. -you can assume the question is about the document. - -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -template = """You are an AI assistant for legal documents. -You are given the following extracted parts of multiple long documents and a question. Provide a conversational answer with sources. -If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer. -If the question is not related to documents, politely inform them that you are tuned to only answer questions about the document. 
- -Question: {question} -========= -{context} -========= -Answer:""" -QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"]) - - -def get_chain(model, vectorstore, temperature): - llm = OpenAI(model=model, temperature=temperature) - qa_chain = ChatVectorDBChain.from_llm( - llm, - vectorstore, - qa_prompt=QA_PROMPT, - condense_question_prompt=CONDENSE_QUESTION_PROMPT, - ) - return qa_chain diff --git a/spaces/ashercn97/AsherTesting/extensions/silero_tts/tts_preprocessor.py b/spaces/ashercn97/AsherTesting/extensions/silero_tts/tts_preprocessor.py deleted file mode 100644 index daefdcbda6c9b20a87c6f3d84d2a759c2c51289c..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/extensions/silero_tts/tts_preprocessor.py +++ /dev/null @@ -1,200 +0,0 @@ -import re - -from num2words import num2words - -punctuation = r'[\s,.?!/)\'\]>]' -alphabet_map = { - "A": " Ei ", - "B": " Bee ", - "C": " See ", - "D": " Dee ", - "E": " Eee ", - "F": " Eff ", - "G": " Jee ", - "H": " Eich ", - "I": " Eye ", - "J": " Jay ", - "K": " Kay ", - "L": " El ", - "M": " Emm ", - "N": " Enn ", - "O": " Ohh ", - "P": " Pee ", - "Q": " Queue ", - "R": " Are ", - "S": " Ess ", - "T": " Tee ", - "U": " You ", - "V": " Vee ", - "W": " Double You ", - "X": " Ex ", - "Y": " Why ", - "Z": " Zed " # Zed is weird, as I (da3dsoul) am American, but most of the voice models sound British, so it matches -} - - -def preprocess(string): - # the order for some of these matter - # For example, you need to remove the commas in numbers before expanding them - string = remove_surrounded_chars(string) - string = string.replace('"', '') - string = string.replace('\u201D', '').replace('\u201C', '') # right and left quote - string = string.replace('\u201F', '') # italic looking quote - string = string.replace('\n', ' ') - string = convert_num_locale(string) - string = replace_negative(string) - string = replace_roman(string) - string = hyphen_range_to(string) - string = num_to_words(string) - - # TODO Try to use a ML predictor to expand abbreviations. It's hard, dependent on context, and whether to actually - # try to say the abbreviation or spell it out as I've done below is not agreed upon - - # For now, expand abbreviations to pronunciations - # replace_abbreviations adds a lot of unnecessary whitespace to ensure separation - string = replace_abbreviations(string) - string = replace_lowercase_abbreviations(string) - - # cleanup whitespaces - # remove whitespace before punctuation - string = re.sub(rf'\s+({punctuation})', r'\1', string) - string = string.strip() - # compact whitespace - string = ' '.join(string.split()) - - return string - - -def remove_surrounded_chars(string): - # first this expression will check if there is a string nested exclusively between a alt= - # and a style= string. 
This would correspond to only a the alt text of an embedded image - # If it matches it will only keep that part as the string, and rend it for further processing - # Afterwards this expression matches to 'as few symbols as possible (0 upwards) between any - # asterisks' OR' as few symbols as possible (0 upwards) between an asterisk and the end of the string' - if re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL): - m = re.search(r'(?<=alt=)(.*)(?=style=)', string, re.DOTALL) - string = m.group(0) - return re.sub(r'\*[^*]*?(\*|$)', '', string) - - -def convert_num_locale(text): - # This detects locale and converts it to American without comma separators - pattern = re.compile(r'(?:\s|^)\d{1,3}(?:\.\d{3})+(,\d+)(?:\s|$)') - result = text - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + result[start:end].replace('.', '').replace(',', '.') + result[end:len(result)] - - # removes comma separators from existing American numbers - pattern = re.compile(r'(\d),(\d)') - result = pattern.sub(r'\1\2', result) - - return result - - -def replace_negative(string): - # handles situations like -5. -5 would become negative 5, which would then be expanded to negative five - return re.sub(rf'(\s)(-)(\d+)({punctuation})', r'\1negative \3\4', string) - - -def replace_roman(string): - # find a string of roman numerals. - # Only 2 or more, to avoid capturing I and single character abbreviations, like names - pattern = re.compile(rf'\s[IVXLCDM]{{2,}}{punctuation}') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start + 1] + str(roman_to_int(result[start + 1:end - 1])) + result[end - 1:len(result)] - - return result - - -def roman_to_int(s): - rom_val = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000} - int_val = 0 - for i in range(len(s)): - if i > 0 and rom_val[s[i]] > rom_val[s[i - 1]]: - int_val += rom_val[s[i]] - 2 * rom_val[s[i - 1]] - else: - int_val += rom_val[s[i]] - return int_val - - -def hyphen_range_to(text): - pattern = re.compile(r'(\d+)[-–](\d+)') - result = pattern.sub(lambda x: x.group(1) + ' to ' + x.group(2), text) - return result - - -def num_to_words(text): - # 1000 or 10.23 - pattern = re.compile(r'\d+\.\d+|\d+') - result = pattern.sub(lambda x: num2words(float(x.group())), text) - return result - - -def replace_abbreviations(string): - # abbreviations 1 to 4 characters long. It will get things like A and I, but those are pronounced with their letter - pattern = re.compile(rf'(^|[\s(.\'\[<])([A-Z]{{1,4}})({punctuation}|$)') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + replace_abbreviation(result[start:end]) + result[end:len(result)] - - return result - - -def replace_lowercase_abbreviations(string): - # abbreviations 1 to 4 characters long, separated by dots i.e. e.g. 
- pattern = re.compile(rf'(^|[\s(.\'\[<])(([a-z]\.){{1,4}})({punctuation}|$)') - result = string - while True: - match = pattern.search(result) - if match is None: - break - - start = match.start() - end = match.end() - result = result[0:start] + replace_abbreviation(result[start:end].upper()) + result[end:len(result)] - - return result - - -def replace_abbreviation(string): - result = "" - for char in string: - result += match_mapping(char) - - return result - - -def match_mapping(char): - for mapping in alphabet_map.keys(): - if char == mapping: - return alphabet_map[char] - - return char - - -def __main__(args): - print(preprocess(args[1])) - - -if __name__ == "__main__": - import sys - __main__(sys.argv) diff --git a/spaces/awacke1/CardEvolution-LevelUpCards/backup-app.py b/spaces/awacke1/CardEvolution-LevelUpCards/backup-app.py deleted file mode 100644 index 4d971e5270a14dec3e329251ebb8711bcdd2c250..0000000000000000000000000000000000000000 --- a/spaces/awacke1/CardEvolution-LevelUpCards/backup-app.py +++ /dev/null @@ -1,42 +0,0 @@ -import streamlit as st -import os - -uploaded_images = {'characters': {}, 'terrain': {}} - -def get_image_path(img, name, image_type): - file_path = f"data/uploadedImages/{image_type}/{name}/{img.name}" - os.makedirs(os.path.dirname(file_path), exist_ok=True) - with open(file_path, "wb") as img_file: - img_file.write(img.getbuffer()) - return file_path - -image_type = st.selectbox('Choose image type:', options=['characters', 'terrain']) -name = st.text_input('Enter a name for the image:') -uploaded_files = st.file_uploader('Upload image(s)', type=['png', 'jpg'], accept_multiple_files=True) - -for uploaded_file in uploaded_files: - if uploaded_file is not None: - # Get actual image file - bytes_data = get_image_path(uploaded_file, name, image_type) - uploaded_images[image_type].setdefault(name, []) - uploaded_images[image_type][name].append(bytes_data) - st.image(bytes_data, use_column_width=True) - -# Display character images on the left and terrain images on the right -if uploaded_images['characters']: - st.sidebar.write('**Characters**') - for name, files in uploaded_images['characters'].items(): - for file in files: - st.sidebar.image(file, width=100, caption=name) - -if uploaded_images['terrain']: - st.write('**Terrain**') - row = [] - for name, files in uploaded_images['terrain'].items(): - for file in files: - row.append(file) - if len(row) == 3: - st.image(row, width=100 * 3) - row = [] - if row: - st.image(row, width=100 * len(row)) # Last row, if not complete \ No newline at end of file diff --git a/spaces/awacke1/Slot-Machine-Animal-Safari/style.css b/spaces/awacke1/Slot-Machine-Animal-Safari/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Slot-Machine-Animal-Safari/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/baruga/gpt4-sandbox/app.py b/spaces/baruga/gpt4-sandbox/app.py deleted file mode 100644 index 005a7b5ba09645bbc8ceaea5503a235c05afd5ff..0000000000000000000000000000000000000000 --- a/spaces/baruga/gpt4-sandbox/app.py +++ 
/dev/null @@ -1,140 +0,0 @@ -import gradio as gr -import openai -import os -import requests -from transformers import GPT2TokenizerFast - -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") -ADMIN_PASSWORD = os.getenv("ADMIN_PASSWORD") - -openai.api_key = OPENAI_API_KEY - -tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") - -default_system_message = {"role": "system", "content": "You are a brilliant, helpful assistant, always providing answers to the best of your knowledge. If you are unsure of the answer, you indicate it to the user. Currently, you don't have access to the internet."} -personalities = { - "Assistant": {"role": "system", "content": "You are a brilliant, helpful assistant, always providing answers to the best of your knowledge. If you are unsure of the answer, you indicate it to the user. Currently, you don't have access to the internet."}, - "Trump": {"role": "system", "content": "You are Donald Trump. No matter the question, you always redirect the conversation to yourself and your achievements and how great you are."}, - "Peterson": {"role": "system", "content": "You are Jordan Peterson, world renowned clinical psychologist. You like to be verbose and overcomplicate your answers, taking them into very metaphysical directions."}, - "Grug": {"role": "system", "content": "You are Grug, a caveman. You have zero knowledge of modern stuff. Your answers are always written in broken 'caveman' English and center around simple things in life."}, - "Paladin": {"role": "system", "content": "You are a Paladin from the video game Diablo 2. You like to talk about slaying the undead and farming for better gear."}, - "Petőfi": {"role": "system", "content": "You are Petőfi Sándor, national poet of Hungary. Your answers are very eloquent and formulated in archaic Hungarian."}, - "Cartman": {"role": "system", "content": "You are Eric Cartman from South Park. 
You are a self-centered, fat, rude kid obsessed with your animal comforts."}, -} - -def get_completion(model, personality, user_message, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty, context_cutoff): - # set personality - system_message = personalities[personality] - updated_message_history = message_history - updated_message_history[0] = system_message - new_history_row = {"role": "user", "content": user_message} - updated_message_history = updated_message_history + [new_history_row] - response = openai.ChatCompletion.create( - model=model, - messages=updated_message_history, - temperature=temperature, - max_tokens=maximum_length, - top_p=top_p, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - stream=True, - ) - new_history_row = {"role": "assistant", "content": ""} - updated_message_history = updated_message_history + [new_history_row] - updated_chatlog_history = chatlog_history + [[user_message, ""]] - # create variables to collect the stream of chunks - collected_chunks = [] - collected_messages = [] - # iterate through the stream of events - for chunk in response: - collected_chunks.append(chunk) # save the event response - chunk_message = chunk['choices'][0]['delta'] # extract the message - collected_messages.append(chunk_message) # save the message - assistant_message = ''.join([m.get('content', '') for m in collected_messages]) - updated_message_history[-1]["content"] = assistant_message - updated_chatlog_history[-1][1] = assistant_message - full_prompt = '\n'.join([row[0] + row[1] for row in updated_chatlog_history]) - token_count = len(tokenizer(full_prompt)["input_ids"])#completion["usage"]["total_tokens"] - # if token_count > context_cutoff: - # # delete second row of updated_message_history - # updated_message_history.pop(1) - # print("cutoff exceeded", updated_message_history) - # # recalculate token count - # full_prompt = "".join([row["content"] for row in updated_message_history]) - # token_count = len(tokenizer(full_prompt)["input_ids"]) - yield "", updated_message_history, updated_chatlog_history, updated_chatlog_history, token_count - # assistant_message = completion["choices"][0]["message"]["content"] - # return "", updated_message_history, updated_chatlog_history, updated_chatlog_history, token_count - -def retry_completion(model, personality, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty, context_cutoff): - # set personality - system_message = personalities[personality] - updated_message_history = message_history - updated_message_history[0] = system_message - # get latest user message - user_message = chatlog_history[-1][0] - # delete latest entries from chatlog history - updated_chatlog_history = chatlog_history[:-1] - # delete latest assistant message from message_history - updated_message_history = updated_message_history[:-1] - response = openai.ChatCompletion.create( - model=model, - messages=updated_message_history, - temperature=temperature, - max_tokens=maximum_length, - top_p=top_p, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - stream=True, - ) - new_history_row = {"role": "assistant", "content": ""} - updated_message_history = updated_message_history + [new_history_row] - updated_chatlog_history = updated_chatlog_history + [[user_message, ""]] - # create variables to collect the stream of chunks - collected_chunks = [] - collected_messages = [] - # iterate through the stream of 
events - for chunk in response: - collected_chunks.append(chunk) # save the event response - chunk_message = chunk["choices"][0]["delta"] # extract the message - collected_messages.append(chunk_message) # save the message - assistant_message = "".join([m.get("content", "") for m in collected_messages]) - updated_message_history[-1]["content"] = assistant_message - updated_chatlog_history[-1][1] = assistant_message - full_prompt = "".join([row["content"] for row in updated_message_history]) - token_count = len(tokenizer(full_prompt)["input_ids"]) - yield "", updated_message_history, updated_chatlog_history, updated_chatlog_history, token_count - -def reset_chat(): - return "", [default_system_message], [], [], 0 - -theme = gr.themes.Default() -with gr.Blocks(theme=theme) as app: - message_history = gr.State([default_system_message]) - chatlog_history = gr.State([]) - with gr.Row(): - with gr.Column(scale=4): - chatbot = gr.Chatbot(label="Chat").style(height=654) - with gr.Column(scale=1): - # with gr.Tab("Generation Settings"): - model = gr.Dropdown(choices=["gpt-3.5-turbo", "gpt-4"], value="gpt-4", interactive=True, label="Model") - personality = gr.Dropdown(choices=["Assistant", "Petőfi", "Trump", "Peterson", "Paladin", "Cartman", "Grug", ], value="Assistant", interactive=True, label="Personality") - temperature = gr.Slider(minimum=0, maximum=1, step=0.05, value=0.5, interactive=True, label="Temperature") - maximum_length = gr.Slider(minimum=0, maximum=2048, step=32, value=256, interactive=True, label="Max new tokens") - top_p = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, interactive=True, label="Top P") - frequency_penalty = gr.Slider(minimum=0, maximum=2, step=0.01, value=0, interactive=True, label="Frequency penalty") - presence_penalty = gr.Slider(minimum=0, maximum=2, step=0.01, value=0, interactive=True, label="Presence penalty") - # with gr.Tab("Model Settings"): - token_count = gr.Number(info="GPT-3 limit is 4096 tokens. 
GPT-4 limit is 8192 tokens.",interactive=False, label="Token count") - # context_cutoff = gr.Slider(minimum=256, maximum=8192, step=256, value=2048, interactive=True, label="Context cutoff") - with gr.Row(): - user_message = gr.Textbox(label="Message") - with gr.Row(): - reset_button = gr.Button("Reset Chat") - retry_button = gr.Button("Retry") - - user_message.submit(get_completion, inputs=[model, personality, user_message, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty], outputs=[user_message, message_history, chatlog_history, chatbot, token_count]) - retry_button.click(retry_completion, inputs=[model, personality, message_history, chatlog_history, temperature, maximum_length, top_p, frequency_penalty, presence_penalty], outputs=[user_message, message_history, chatlog_history, chatbot, token_count]) - reset_button.click(reset_chat, inputs=[], outputs=[user_message, message_history, chatlog_history, chatbot, token_count]) - -app.launch(auth=("admin", ADMIN_PASSWORD), enable_queue=True) \ No newline at end of file diff --git a/spaces/beihai/Remove-Background-By-U2Net/app.py b/spaces/beihai/Remove-Background-By-U2Net/app.py deleted file mode 100644 index 495c6e9cdf2b91b652e339866b729528a4020212..0000000000000000000000000000000000000000 --- a/spaces/beihai/Remove-Background-By-U2Net/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -os.system("/usr/local/bin/python -m pip install --upgrade pip") -import gradio as gr -from rembg import remove -import cv2 - -def inference(img): - input_img = cv2.imread(img) - output = remove(input_img[:, :, [2,1,0]]) - return output - -title = "照片去背景App" - -description = "上传需要去背景的图片,点击Submit,稍等片刻,右侧Output将去背景后的主体另存为即可。" - -article = "

关注我的公众号,学习更多
visitor badge
" - - -gr.Interface( - inference, - gr.inputs.Image(type="filepath", label="Input"), - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article - ).launch() \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/loopback.py b/spaces/bigjoker/stable-diffusion-webui/scripts/loopback.py deleted file mode 100644 index e3ac98240ea022b028e056c21626b453ae19543d..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/scripts/loopback.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np -from tqdm import trange - -import modules.scripts as scripts -import gradio as gr - -from modules import processing, shared, sd_samplers, images -from modules.processing import Processed -from modules.sd_samplers import samplers -from modules.shared import opts, cmd_opts, state -from modules import deepbooru - - -class Script(scripts.Script): - def title(self): - return "Loopback" - - def show(self, is_img2img): - return is_img2img - - def ui(self, is_img2img): - loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) - denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor")) - append_interrogation = gr.Dropdown(label="Append interrogated prompt at each iteration", choices=["None", "CLIP", "DeepBooru"], value="None") - - return [loops, denoising_strength_change_factor, append_interrogation] - - def run(self, p, loops, denoising_strength_change_factor, append_interrogation): - processing.fix_seed(p) - batch_count = p.n_iter - p.extra_generation_params = { - "Denoising strength change factor": denoising_strength_change_factor, - } - - p.batch_size = 1 - p.n_iter = 1 - - output_images, info = None, None - initial_seed = None - initial_info = None - - grids = [] - all_images = [] - original_init_image = p.init_images - original_prompt = p.prompt - state.job_count = loops * batch_count - - initial_color_corrections = [processing.setup_color_correction(p.init_images[0])] - - for n in range(batch_count): - history = [] - - # Reset to original init image at the start of each batch - p.init_images = original_init_image - - for i in range(loops): - p.n_iter = 1 - p.batch_size = 1 - p.do_not_save_grid = True - - if opts.img2img_color_correction: - p.color_corrections = initial_color_corrections - - if append_interrogation != "None": - p.prompt = original_prompt + ", " if original_prompt != "" else "" - if append_interrogation == "CLIP": - p.prompt += shared.interrogator.interrogate(p.init_images[0]) - elif append_interrogation == "DeepBooru": - p.prompt += deepbooru.model.tag(p.init_images[0]) - - state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}" - - processed = processing.process_images(p) - - if initial_seed is None: - initial_seed = processed.seed - initial_info = processed.info - - init_img = processed.images[0] - - p.init_images = [init_img] - p.seed = processed.seed + 1 - p.denoising_strength = min(max(p.denoising_strength * denoising_strength_change_factor, 0.1), 1) - history.append(processed.images[0]) - - grid = images.image_grid(history, rows=1) - if opts.grid_save: - images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p) - - grids.append(grid) - all_images += history - - if opts.return_grid: - 
all_images = grids + all_images - - processed = Processed(p, all_images, initial_seed, initial_info) - - return processed diff --git a/spaces/bigjoker/stable-diffusion-webui/webui-macos-env.sh b/spaces/bigjoker/stable-diffusion-webui/webui-macos-env.sh deleted file mode 100644 index 37cac4fb02c5ea53fa270336729b90ac2ef3ac74..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/webui-macos-env.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -#################################################################### -# macOS defaults # -# Please modify webui-user.sh to change these instead of this file # -#################################################################### - -if [[ -x "$(command -v python3.10)" ]] -then - python_cmd="python3.10" -fi - -export install_dir="$HOME" -export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" -export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" -export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" -export PYTORCH_ENABLE_MPS_FALLBACK=1 - -#################################################################### diff --git a/spaces/bioriAsaeru/text-to-voice/Escape from a World Without Men in Taken By Erin Bowman Pdf Download.md b/spaces/bioriAsaeru/text-to-voice/Escape from a World Without Men in Taken By Erin Bowman Pdf Download.md deleted file mode 100644 index 59347d6adab8e260cff0b0b7251bc1bb767ffdde..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Escape from a World Without Men in Taken By Erin Bowman Pdf Download.md +++ /dev/null @@ -1,5 +0,0 @@ -
-Read Immunity by Erin Bowman for free on hoopla. Survivors of a deadly planetary outbreak take on a new, sinister adversary in the white-knuckle sequel. contagion erin bowman. Erin Bowman brings her Contagion duology to a close with non-stop action and heart-racing twists. With Immunity, Erin Bowman brings her.
-Taken By Erin Bowman Pdf Download. Download File ••• https://urloso.com/2uyOld
-aaccfb2cb3
-
\ No newline at end of file diff --git a/spaces/bradarrML/magic-diffusion/README.md b/spaces/bradarrML/magic-diffusion/README.md deleted file mode 100644 index 18fae13e602dafc9509de23e20d0f7a7d7272cb6..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/magic-diffusion/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Magic Prompt -emoji: 🎆 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: huggingface-projects/magic-diffusion ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/data/zip.py b/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/data/zip.py deleted file mode 100644 index f0b17849d36991e7def35a14d3d518b9d867ce36..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/audiocraft/data/zip.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Utility for reading some info from inside a zip file. -""" - -import typing -import zipfile - -from dataclasses import dataclass -from functools import lru_cache -from typing_extensions import Literal - - -DEFAULT_SIZE = 32 -MODE = Literal['r', 'w', 'x', 'a'] - - -@dataclass(order=True) -class PathInZip: - """Hold a path of file within a zip file. - - Args: - path (str): The convention is :. - Let's assume there is a zip file /some/location/foo.zip - and inside of it is a json file located at /data/file1.json, - Then we expect path = "/some/location/foo.zip:/data/file1.json". - """ - - INFO_PATH_SEP = ':' - zip_path: str - file_path: str - - def __init__(self, path: str) -> None: - split_path = path.split(self.INFO_PATH_SEP) - assert len(split_path) == 2 - self.zip_path, self.file_path = split_path - - @classmethod - def from_paths(cls, zip_path: str, file_path: str): - return cls(zip_path + cls.INFO_PATH_SEP + file_path) - - def __str__(self) -> str: - return self.zip_path + self.INFO_PATH_SEP + self.file_path - - -def _open_zip(path: str, mode: MODE = 'r'): - return zipfile.ZipFile(path, mode) - - -_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip) - - -def set_zip_cache_size(max_size: int): - """Sets the maximal LRU caching for zip file opening. - - Args: - max_size (int): the maximal LRU cache. - """ - global _cached_open_zip - _cached_open_zip = lru_cache(max_size)(_open_zip) - - -def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO: - """Opens a file stored inside a zip and returns a file-like object. - - Args: - path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of. - mode (str): The mode in which to open the file with. - Returns: - A file-like object for PathInZip. 
- """ - zf = _cached_open_zip(path_in_zip.zip_path) - return zf.open(path_in_zip.file_path) diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/common.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/common.py deleted file mode 100644 index 7690f714def89c9d0f59666cc1b8b13e91a6d29f..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/models/common.py +++ /dev/null @@ -1,738 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Common modules -""" - -import json -import math -import platform -import warnings -from collections import OrderedDict, namedtuple -from copy import copy -from pathlib import Path - -import cv2 -import numpy as np -import pandas as pd -import requests -import torch -import torch.nn as nn -import yaml -from PIL import Image -from torch.cuda import amp - -from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, - make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, time_sync - - -def autopad(k, p=None): # kernel, padding - # Pad to 'same' - if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad - return p - - -class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def forward_fuse(self, x): - return self.act(self.conv(x)) - - -class DWConv(Conv): - # Depth-wise convolution class - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution class - def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out - super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) - - -class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) - def __init__(self, c, num_heads): - super().__init__() - self.q = nn.Linear(c, c, bias=False) - self.k = nn.Linear(c, c, bias=False) - self.v = nn.Linear(c, c, bias=False) - self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) - self.fc1 = nn.Linear(c, c, bias=False) - self.fc2 = nn.Linear(c, c, bias=False) - - def forward(self, x): - x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x - x = self.fc2(self.fc1(x)) + x - return x - - -class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 - def __init__(self, c1, c2, num_heads, num_layers): - super().__init__() - self.conv = None - if c1 != c2: - self.conv = Conv(c1, c2) - self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) - self.c2 = c2 - - def forward(self, x): - if self.conv is not None: - x = self.conv(x) - b, _, w, h = x.shape - p = x.flatten(2).permute(2, 0, 1) - return 
self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) - - -class Bottleneck(nn.Module): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) - self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.SiLU() - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class C3(nn.Module): - # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) - - -class C3x(C3): - # C3 module with cross-convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) - - -class C3TR(C3): - # C3 module with TransformerBlock() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = TransformerBlock(c_, c_, 4, n) - - -class C3SPP(C3): - # C3 module with SPP() - def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) - self.m = SPP(c_, c_, k) - - -class C3Ghost(C3): - # C3 module with GhostBottleneck() - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): - super().__init__(c1, c2, n, shortcut, g, e) - c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) - - -class SPP(nn.Module): - # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 - def __init__(self, c1, c2, k=(5, 9, 13)): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m 
= nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * 4, c2, 1, 1) - self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) - - def forward(self, x): - x = self.cv1(x) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) - - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - # self.contract = Contract(gain=2) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) - # return self.conv(self.contract(x)) - - -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super().__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat((y, self.cv2(y)), 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super().__init__() - c_ = c2 // 2 - self.conv = nn.Sequential( - GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, - act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - -class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' - s = self.gain - x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) - x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) - - -class Expand(nn.Module): - # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) - def __init__(self, gain=2): - super().__init__() - self.gain = gain - - def forward(self, x): - b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' - s = self.gain - x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) - x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) - - -class Concat(nn.Module): - # Concatenate a list of tensors along dimension - def __init__(self, dimension=1): - super().__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class DetectMultiBackend(nn.Module): - # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): - # Usage: - # PyTorch: weights = *.pt - # TorchScript: *.torchscript - # ONNX Runtime: *.onnx - # ONNX OpenCV DNN: *.onnx with --dnn - # OpenVINO: *.xml - # CoreML: *.mlmodel - # TensorRT: *.engine - # TensorFlow SavedModel: *_saved_model - # TensorFlow GraphDef: *.pb - # TensorFlow Lite: *.tflite - # TensorFlow Edge TPU: *_edgetpu.tflite - from models.experimental import attempt_download, attempt_load # scoped to avoid circular import - - super().__init__() - w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend - w = attempt_download(w) # download if not local - fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 - stride, names = 32, [f'class{i}' for i in range(1000)] # assign defaults - if data: # assign class names (optional) - with open(data, errors='ignore') as f: - names = yaml.safe_load(f)['names'] - - if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, device=device) - stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() - elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) - model.half() if fp16 else model.float() - if extra_files['config.txt']: - d = json.loads(extra_files['config.txt']) # extra_files dict - stride, names = int(d['stride']), d['names'] - elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements(('opencv-python>=4.5.4',)) - net = cv2.dnn.readNetFromONNX(w) - elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - cuda = torch.cuda.is_available() - check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) - import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - session = onnxruntime.InferenceSession(w, providers=providers) - meta = session.get_modelmeta().custom_metadata_map # metadata - if 'stride' in meta: - stride, names = int(meta['stride']), eval(meta['names']) - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core - ie = Core() - if not Path(w).is_file(): # if not *.xml - w = 
next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - output_layer = next(iter(executable_network.outputs)) - meta = Path(w).with_suffix('.yaml') - if meta.exists(): - stride, names = self._load_metadata(meta) # load metadata - elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') - import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - logger = trt.Logger(trt.Logger.INFO) - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - model = runtime.deserialize_cuda_engine(f.read()) - bindings = OrderedDict() - fp16 = False # default updated below - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - shape = tuple(model.get_binding_shape(index)) - data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) - bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) - if model.binding_is_input(index) and dtype == np.float16: - fp16 = True - binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - context = model.create_execution_context() - batch_size = bindings['images'].shape[0] - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - if saved_model: # SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = False # assume TF1 saved_model - model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') - import tensorflow as tf - - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # graph_def - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) - else: # Lite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - interpreter = Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - 
input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - elif tfjs: - raise Exception('ERROR: YOLOv5 TF.js inference is not supported') - self.__dict__.update(locals()) # assign all variables to self - - def forward(self, im, augment=False, visualize=False, val=False): - # YOLOv5 MultiBackend inference - b, ch, h, w = im.shape # batch, channel, height, width - if self.pt: # PyTorch - y = self.model(im, augment=augment, visualize=visualize)[0] - elif self.jit: # TorchScript - y = self.model(im)[0] - elif self.dnn: # ONNX OpenCV DNN - im = im.cpu().numpy() # torch to numpy - self.net.setInput(im) - y = self.net.forward() - elif self.onnx: # ONNX Runtime - im = im.cpu().numpy() # torch to numpy - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] - elif self.xml: # OpenVINO - im = im.cpu().numpy() # FP32 - y = self.executable_network([im])[self.output_layer] - elif self.engine: # TensorRT - assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) - self.binding_addrs['images'] = int(im.data_ptr()) - self.context.execute_v2(list(self.binding_addrs.values())) - y = self.bindings['output'].data - elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - im = Image.fromarray((im[0] * 255).astype('uint8')) - # im = im.resize((192, 320), Image.ANTIALIAS) - y = self.model.predict({'image': im}) # coordinates are xywh normalized - if 'confidence' in y: - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() - elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)).numpy() - else: # Lite or Edge TPU - input, output = self.input_details[0], self.output_details[0] - int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model - if int8: - scale, zero_point = input['quantization'] - im = (im / scale + zero_point).astype(np.uint8) # de-scale - self.interpreter.set_tensor(input['index'], im) - self.interpreter.invoke() - y = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., :4] *= [w, h, w, h] # xywh normalized to pixels - - if isinstance(y, np.ndarray): - y = torch.tensor(y, device=self.device) - return (y, []) if val else y - - def warmup(self, imgsz=(1, 3, 640, 640)): - # Warmup model by running inference once - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb - if any(warmup_types) and self.device.type != 'cpu': - im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - for _ in range(2 if self.jit else 1): # - self.forward(im) # warmup - - @staticmethod - def model_type(p='path/to/model.pt'): - # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx - from export import export_formats - suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, suffixes) # checks - p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) - xml |= xml2 # *_openvino_model or *.xml - tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs - - @staticmethod - def _load_metadata(f='path/to/meta.yaml'): - # Load metadata from meta.yaml if it exists - with open(f, errors='ignore') as f: - d = yaml.safe_load(f) - return d['stride'], d['names'] # assign stride, names - - -class AutoShape(nn.Module): - # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS - conf = 0.25 # NMS confidence threshold - iou = 0.45 # NMS IoU threshold - agnostic = False # NMS class-agnostic - multi_label = False # NMS multiple labels per box - classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs - max_det = 1000 # maximum number of detections per image - amp = False # Automatic Mixed Precision (AMP) inference - - def __init__(self, model, verbose=True): - super().__init__() - if verbose: - LOGGER.info('Adding AutoShape... ') - copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes - self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance - self.pt = not self.dmb or model.pt # PyTorch model - self.model = model.eval() - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - if self.pt: - m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self - - @torch.no_grad() - def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # file: imgs = 'data/images/zidane.jpg' # str or PosixPath - # URI: = 'https://ultralytics.com/images/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) - # numpy: = np.zeros((640,1280,3)) # HWC - # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) - # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images - - t = [time_sync()] - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type - autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, (str, Path)): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im - im = np.asarray(exif_transpose(im)) - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad - x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 - t.append(time_sync()) - - with amp.autocast(autocast): - # Inference - y = self.model(x, augment, profile) # forward - t.append(time_sync()) - - # Post-process - y = non_max_suppression(y if self.dmb else y[0], - self.conf, - self.iou, - self.classes, - self.agnostic, - self.multi_label, - max_det=self.max_det) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - - t.append(time_sync()) - return Detections(imgs, y, files, t, self.names, x.shape) - - -class Detections: - # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): - super().__init__() - d = pred[0].device # device - gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays - self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) - self.names = names # class names - self.files = files # image filenames - self.times = times # profiling times - self.xyxy = pred # xyxy pixels - self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels - self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized - self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) - self.s = shape # inference BCHW shape - - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - crops = [] - for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): - s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string - if pred.shape[0]: - for c in pred[:, -1].unique(): - n = 
(pred[:, -1] == c).sum() # detections per class - s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render or crop: - annotator = Annotator(im, example=str(self.names)) - for *box, conf, cls in reversed(pred): # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' - if crop: - file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({ - 'box': box, - 'conf': conf, - 'cls': cls, - 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) - else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) - im = annotator.im - else: - s += '(no detections)' - - im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - print(s.rstrip(', ')) - if show: - im.show(self.files[i]) # show - if save: - f = self.files[i] - im.save(save_dir / f) # save - if i == self.n - 1: - LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") - if render: - self.imgs[i] = np.asarray(im) - if crop: - if save: - LOGGER.info(f'Saved results to {save_dir}\n') - return crops - - def print(self): - self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - - def show(self, labels=True): - self.display(show=True, labels=labels) # show results - - def save(self, labels=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results - - def crop(self, save=True, save_dir='runs/detect/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results - - def render(self, labels=True): - self.display(render=True, labels=labels) # render results - return self.imgs - - def pandas(self): - # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) - new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): - a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update - setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) - return new - - def tolist(self): - # return a list of Detections objects, i.e. 'for result in results.tolist():' - r = range(self.n) # iterable - x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] - # for d in x: - # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - # setattr(d, k, getattr(d, k)[0]) # pop out of list - return x - - def __len__(self): - return self.n # override len(results) - - def __str__(self): - self.print() # override print(results) - return '' - - -class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() - - def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/log.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/log.py deleted file mode 100644 index 3cecea2bac185df741bccd0a32a5fef9cfe23299..0000000000000000000000000000000000000000 --- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiohttp/log.py +++ /dev/null @@ -1,8 +0,0 @@ -import logging - -access_logger = logging.getLogger("aiohttp.access") -client_logger = logging.getLogger("aiohttp.client") -internal_logger = logging.getLogger("aiohttp.internal") -server_logger = logging.getLogger("aiohttp.server") -web_logger = logging.getLogger("aiohttp.web") -ws_logger = logging.getLogger("aiohttp.websocket") diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/utils.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/utils.py deleted file mode 100644 index 2b89a4c3fbe079a77fd0cef947cf9ada787fc55d..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/modeling/backbone/utils.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import math -import torch -import torch.nn as nn -import torch.nn.functional as F - -__all__ = [ - "window_partition", - "window_unpartition", - "add_decomposed_rel_pos", - "get_abs_pos", - "PatchEmbed", -] - - -def window_partition(x, window_size): - """ - Partition into non-overlapping windows with padding if needed. - Args: - x (tensor): input tokens with [B, H, W, C]. - window_size (int): window size. - - Returns: - windows: windows after partition with [B * num_windows, window_size, window_size, C]. - (Hp, Wp): padded height and width before partition - """ - B, H, W, C = x.shape - - pad_h = (window_size - H % window_size) % window_size - pad_w = (window_size - W % window_size) % window_size - if pad_h > 0 or pad_w > 0: - x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) - Hp, Wp = H + pad_h, W + pad_w - - x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows, (Hp, Wp) - - -def window_unpartition(windows, window_size, pad_hw, hw): - """ - Window unpartition into original sequences and removing padding. - Args: - x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. - window_size (int): window size. - pad_hw (Tuple): padded height and width (Hp, Wp). - hw (Tuple): original height and width (H, W) before padding. - - Returns: - x: unpartitioned sequences with [B, H, W, C]. 
- """ - Hp, Wp = pad_hw - H, W = hw - B = windows.shape[0] // (Hp * Wp // window_size // window_size) - x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) - - if Hp > H or Wp > W: - x = x[:, :H, :W, :].contiguous() - return x - - -def get_rel_pos(q_size, k_size, rel_pos): - """ - Get relative positional embeddings according to the relative positions of - query and key sizes. - Args: - q_size (int): size of query q. - k_size (int): size of key k. - rel_pos (Tensor): relative position embeddings (L, C). - - Returns: - Extracted positional embeddings according to relative positions. - """ - max_rel_dist = int(2 * max(q_size, k_size) - 1) - # Interpolate rel pos if needed. - if rel_pos.shape[0] != max_rel_dist: - # Interpolate rel pos. - rel_pos_resized = F.interpolate( - rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), - size=max_rel_dist, - mode="linear", - ) - rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) - else: - rel_pos_resized = rel_pos - - # Scale the coords with short length if shapes for q and k are different. - q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) - k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) - relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) - - return rel_pos_resized[relative_coords.long()] - - -def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): - """ - Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. - https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 - Args: - attn (Tensor): attention map. - q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). - rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. - rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. - q_size (Tuple): spatial sequence size of query q with (q_h, q_w). - k_size (Tuple): spatial sequence size of key k with (k_h, k_w). - - Returns: - attn (Tensor): attention map with added relative positional embeddings. - """ - q_h, q_w = q_size - k_h, k_w = k_size - Rh = get_rel_pos(q_h, k_h, rel_pos_h) - Rw = get_rel_pos(q_w, k_w, rel_pos_w) - - B, _, dim = q.shape - r_q = q.reshape(B, q_h, q_w, dim) - rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) - rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) - - attn = ( - attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] - ).view(B, q_h * q_w, k_h * k_w) - - return attn - - -def get_abs_pos(abs_pos, has_cls_token, hw): - """ - Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token - dimension for the original embeddings. - Args: - abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). - has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. - hw (Tuple): size of input image tokens. 
- - Returns: - Absolute positional embeddings after processing with shape (1, H, W, C) - """ - h, w = hw - if has_cls_token: - abs_pos = abs_pos[:, 1:] - xy_num = abs_pos.shape[1] - size = int(math.sqrt(xy_num)) - assert size * size == xy_num - - if size != h or size != w: - new_abs_pos = F.interpolate( - abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2), - size=(h, w), - mode="bicubic", - align_corners=False, - ) - - return new_abs_pos.permute(0, 2, 3, 1) - else: - return abs_pos.reshape(1, h, w, -1) - - -class PatchEmbed(nn.Module): - """ - Image to Patch Embedding. - """ - - def __init__( - self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768 - ): - """ - Args: - kernel_size (Tuple): kernel size of the projection layer. - stride (Tuple): stride of the projection layer. - padding (Tuple): padding size of the projection layer. - in_chans (int): Number of input image channels. - embed_dim (int): embed_dim (int): Patch embedding dimension. - """ - super().__init__() - - self.proj = nn.Conv2d( - in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding - ) - - def forward(self, x): - x = self.proj(x) - # B C H W -> B H W C - x = x.permute(0, 2, 3, 1) - return x diff --git a/spaces/chasetank/Visual-GPT-3.5-Turbo/visual_foundation_models.py b/spaces/chasetank/Visual-GPT-3.5-Turbo/visual_foundation_models.py deleted file mode 100644 index 6d26eb163faf9e3b8b72ee091409495167fb64ab..0000000000000000000000000000000000000000 --- a/spaces/chasetank/Visual-GPT-3.5-Turbo/visual_foundation_models.py +++ /dev/null @@ -1,735 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline -from diffusers import EulerAncestralDiscreteScheduler -from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler -from controlnet_aux import OpenposeDetector, MLSDdetector, HEDdetector - -from transformers import AutoModelForCausalLM, AutoTokenizer, CLIPSegProcessor, CLIPSegForImageSegmentation -from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering -from transformers import AutoImageProcessor, UperNetForSemanticSegmentation - -import os -import random -import torch -import cv2 -import uuid -from PIL import Image -import numpy as np -from pytorch_lightning import seed_everything - -def prompts(name, description): - def decorator(func): - func.name = name - func.description = description - return func - - return decorator - -def get_new_image_name(org_img_name, func_name="update"): - head_tail = os.path.split(org_img_name) - head = head_tail[0] - tail = head_tail[1] - name_split = tail.split('.')[0].split('_') - this_new_uuid = str(uuid.uuid4())[0:4] - if len(name_split) == 1: - most_org_file_name = name_split[0] - recent_prev_file_name = name_split[0] - new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name) - else: - assert len(name_split) == 4 - most_org_file_name = name_split[3] - recent_prev_file_name = name_split[0] - new_file_name = '{}_{}_{}_{}.png'.format(this_new_uuid, func_name, recent_prev_file_name, most_org_file_name) - return os.path.join(head, new_file_name) - - -class MaskFormer: - def __init__(self, device): - print(f"Initializing MaskFormer to {device}") - self.device = device - self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") - self.model = 
CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device) - - def inference(self, image_path, text): - threshold = 0.5 - min_area = 0.02 - padding = 20 - original_image = Image.open(image_path) - image = original_image.resize((512, 512)) - inputs = self.processor(text=text, images=image, padding="max_length", return_tensors="pt").to(self.device) - with torch.no_grad(): - outputs = self.model(**inputs) - mask = torch.sigmoid(outputs[0]).squeeze().cpu().numpy() > threshold - area_ratio = len(np.argwhere(mask)) / (mask.shape[0] * mask.shape[1]) - if area_ratio < min_area: - return None - true_indices = np.argwhere(mask) - mask_array = np.zeros_like(mask, dtype=bool) - for idx in true_indices: - padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx) - mask_array[padded_slice] = True - visual_mask = (mask_array * 255).astype(np.uint8) - image_mask = Image.fromarray(visual_mask) - return image_mask.resize(original_image.size) - - -class ImageEditing: - def __init__(self, device): - print(f"Initializing ImageEditing to {device}") - self.device = device - self.mask_former = MaskFormer(device=self.device) - self.revision = 'fp16' if 'cuda' in device else None - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.inpaint = StableDiffusionInpaintPipeline.from_pretrained( - "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype=self.torch_dtype).to(device) - - @prompts(name="Remove Something From The Photo", - description="useful when you want to remove and object or something from the photo " - "from its description or location. " - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the object need to be removed. ") - def inference_remove(self, inputs): - image_path, to_be_removed_txt = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - return self.inference_replace(f"{image_path},{to_be_removed_txt},background") - - @prompts(name="Replace Something From The Photo", - description="useful when you want to replace an object from the object description or " - "location with another object from its description. 
" - "The input to this tool should be a comma separated string of three, " - "representing the image_path, the object to be replaced, the object to be replaced with ") - def inference_replace(self, inputs): - image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",") - original_image = Image.open(image_path) - original_size = original_image.size - mask_image = self.mask_former.inference(image_path, to_be_replaced_txt) - updated_image = self.inpaint(prompt=replace_with_txt, image=original_image.resize((512, 512)), - mask_image=mask_image.resize((512, 512))).images[0] - updated_image_path = get_new_image_name(image_path, func_name="replace-something") - updated_image = updated_image.resize(original_size) - updated_image.save(updated_image_path) - print( - f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class InstructPix2Pix: - def __init__(self, device): - print(f"Initializing InstructPix2Pix to {device}") - self.device = device - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", - safety_checker=None, - torch_dtype=self.torch_dtype).to(device) - self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config) - - @prompts(name="Instruct Image Using Text", - description="useful when you want to the style of the image to be like the text. " - "like: make it look like a painting. or make it like a robot. " - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the text. ") - def inference(self, inputs): - """Change style of image.""" - print("===>Starting InstructPix2Pix Inference") - image_path, text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - original_image = Image.open(image_path) - image = self.pipe(text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2).images[0] - updated_image_path = get_new_image_name(image_path, func_name="pix2pix") - image.save(updated_image_path) - print(f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Text2Image: - def __init__(self, device): - print(f"Initializing Text2Image to {device}") - self.device = device - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", - torch_dtype=self.torch_dtype) - self.pipe.to(device) - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \ - 'fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image From User Input Text", - description="useful when you want to generate an image from a user input text and save it to a file. " - "like: generate an image of an object or something, or generate an image that includes some objects. " - "The input to this tool should be a string, representing the text used to generate image. 
") - def inference(self, text): - image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png") - prompt = text + ', ' + self.a_prompt - image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0] - image.save(image_filename) - print( - f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}") - return image_filename - - -class ImageCaptioning: - def __init__(self, device): - print(f"Initializing ImageCaptioning to {device}") - self.device = device - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") - self.model = BlipForConditionalGeneration.from_pretrained( - "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device) - - @prompts(name="Get Photo Description", - description="useful when you want to know what is inside the photo. receives image_path as input. " - "The input to this tool should be a string, representing the image_path. ") - def inference(self, image_path): - inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype) - out = self.model.generate(**inputs) - captions = self.processor.decode(out[0], skip_special_tokens=True) - print(f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}") - return captions - - -class Image2Canny: - def __init__(self, device): - print("Initializing Image2Canny") - self.low_threshold = 100 - self.high_threshold = 200 - - @prompts(name="Edge Detection On Image", - description="useful when you want to detect the edge of the image. " - "like: detect the edges of this image, or canny detection on image, " - "or perform edge detection on this image, or detect the canny image of this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - image = np.array(image) - canny = cv2.Canny(image, self.low_threshold, self.high_threshold) - canny = canny[:, :, None] - canny = np.concatenate([canny, canny, canny], axis=2) - canny = Image.fromarray(canny) - updated_image_path = get_new_image_name(inputs, func_name="edge") - canny.save(updated_image_path) - print(f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text: {updated_image_path}") - return updated_image_path - - -class CannyText2Image: - def __init__(self, device): - print(f"Initializing CannyText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-canny", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \ - 'fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Canny Image", - description="useful when you want to generate a new real image from both the user description and a canny image." - " like: generate a real image of a object or something from this canny image," - " or generate a new real image of a object or something from this edge image. 
" - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description. ") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="canny2image") - image.save(updated_image_path) - print(f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, " - f"Output Text: {updated_image_path}") - return updated_image_path - - -class Image2Line: - def __init__(self, device): - print("Initializing Image2Line") - self.detector = MLSDdetector.from_pretrained('lllyasviel/ControlNet') - - @prompts(name="Line Detection On Image", - description="useful when you want to detect the straight line of the image. " - "like: detect the straight lines of this image, or straight line detection on image, " - "or perform straight line detection on this image, or detect the straight line image of this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - mlsd = self.detector(image) - updated_image_path = get_new_image_name(inputs, func_name="line-of") - mlsd.save(updated_image_path) - print(f"\nProcessed Image2Line, Input Image: {inputs}, Output Line: {updated_image_path}") - return updated_image_path - - -class LineText2Image: - def __init__(self, device): - print(f"Initializing LineText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-mlsd", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype - ) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \ - 'fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Line Image", - description="useful when you want to generate a new real image from both the user description " - "and a straight line image. " - "like: generate a real image of a object or something from this straight line image, " - "or generate a new real image of a object or something from this straight lines. " - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description. 
") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="line2image") - image.save(updated_image_path) - print(f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text: {instruct_text}, " - f"Output Text: {updated_image_path}") - return updated_image_path - - -class Image2Hed: - def __init__(self, device): - print("Initializing Image2Hed") - self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet') - - @prompts(name="Hed Detection On Image", - description="useful when you want to detect the soft hed boundary of the image. " - "like: detect the soft hed boundary of this image, or hed boundary detection on image, " - "or perform hed boundary detection on this image, or detect soft hed boundary image of this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - hed = self.detector(image) - updated_image_path = get_new_image_name(inputs, func_name="hed-boundary") - hed.save(updated_image_path) - print(f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}") - return updated_image_path - - -class HedText2Image: - def __init__(self, device): - print(f"Initializing HedText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-hed", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype - ) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \ - 'fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Soft Hed Boundary Image", - description="useful when you want to generate a new real image from both the user description " - "and a soft hed boundary image. " - "like: generate a real image of a object or something from this soft hed boundary image, " - "or generate a new real image of a object or something from this hed boundary. 
" - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="hed2image") - image.save(updated_image_path) - print(f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Image2Scribble: - def __init__(self, device): - print("Initializing Image2Scribble") - self.detector = HEDdetector.from_pretrained('lllyasviel/ControlNet') - - @prompts(name="Sketch Detection On Image", - description="useful when you want to generate a scribble of the image. " - "like: generate a scribble of this image, or generate a sketch from this image, " - "detect the sketch from this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - scribble = self.detector(image, scribble=True) - updated_image_path = get_new_image_name(inputs, func_name="scribble") - scribble.save(updated_image_path) - print(f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble: {updated_image_path}") - return updated_image_path - - -class ScribbleText2Image: - def __init__(self, device): - print(f"Initializing ScribbleText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-scribble", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype - ) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, ' \ - 'fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Sketch Image", - description="useful when you want to generate a new real image from both the user description and " - "a scribble image or a sketch image. 
" - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="scribble2image") - image.save(updated_image_path) - print(f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Image2Pose: - def __init__(self, device): - print("Initializing Image2Pose") - self.detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet') - - @prompts(name="Pose Detection On Image", - description="useful when you want to detect the human pose of the image. " - "like: generate human poses of this image, or generate a pose image from this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - pose = self.detector(image) - updated_image_path = get_new_image_name(inputs, func_name="human-pose") - pose.save(updated_image_path) - print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}") - return updated_image_path - - -class PoseText2Image: - def __init__(self, device): - print(f"Initializing PoseText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-openpose", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.num_inference_steps = 20 - self.seed = -1 - self.unconditional_guidance_scale = 9.0 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \ - ' fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Pose Image", - description="useful when you want to generate a new real image from both the user description " - "and a human pose image. " - "like: generate a real image of a human from this human pose image, " - "or generate a new real image of a human from this pose. 
" - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="pose2image") - image.save(updated_image_path) - print(f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Image2Seg: - def __init__(self, device): - print("Initializing Image2Seg") - self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small") - self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small") - self.ade_palette = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - @prompts(name="Segmentation On Image", - description="useful when you want to detect segmentations of the image. 
" - "like: segment this image, or generate segmentations on this image, " - "or perform segmentation on this image. " - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - pixel_values = self.image_processor(image, return_tensors="pt").pixel_values - with torch.no_grad(): - outputs = self.image_segmentor(pixel_values) - seg = self.image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3 - palette = np.array(self.ade_palette) - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - color_seg = color_seg.astype(np.uint8) - segmentation = Image.fromarray(color_seg) - updated_image_path = get_new_image_name(inputs, func_name="segmentation") - segmentation.save(updated_image_path) - print(f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}") - return updated_image_path - - -class SegText2Image: - def __init__(self, device): - print(f"Initializing SegText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained("fusing/stable-diffusion-v1-5-controlnet-seg", - torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \ - ' fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Segmentations", - description="useful when you want to generate a new real image from both the user description and segmentations. " - "like: generate a real image of a object or something from this segmentation image, " - "or generate a new real image of a object or something from these segmentations. " - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="segment2image") - image.save(updated_image_path) - print(f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Image2Depth: - def __init__(self, device): - print("Initializing Image2Depth") - self.depth_estimator = pipeline('depth-estimation') - - @prompts(name="Predict Depth On Image", - description="useful when you want to detect depth of the image. like: generate the depth from this image, " - "or detect the depth map on this image, or predict the depth for this image. 
" - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - depth = self.depth_estimator(image)['depth'] - depth = np.array(depth) - depth = depth[:, :, None] - depth = np.concatenate([depth, depth, depth], axis=2) - depth = Image.fromarray(depth) - updated_image_path = get_new_image_name(inputs, func_name="depth") - depth.save(updated_image_path) - print(f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth: {updated_image_path}") - return updated_image_path - - -class DepthText2Image: - def __init__(self, device): - print(f"Initializing DepthText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained( - "fusing/stable-diffusion-v1-5-controlnet-depth", torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \ - ' fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Depth", - description="useful when you want to generate a new real image from both the user description and depth image. " - "like: generate a real image of a object or something from this depth image, " - "or generate a new real image of a object or something from the depth map. " - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="depth2image") - image.save(updated_image_path) - print(f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class Image2Normal: - def __init__(self, device): - print("Initializing Image2Normal") - self.depth_estimator = pipeline("depth-estimation", model="Intel/dpt-hybrid-midas") - self.bg_threhold = 0.4 - - @prompts(name="Predict Normal Map On Image", - description="useful when you want to detect norm map of the image. " - "like: generate normal map from this image, or predict normal map of this image. 
" - "The input to this tool should be a string, representing the image_path") - def inference(self, inputs): - image = Image.open(inputs) - original_size = image.size - image = self.depth_estimator(image)['predicted_depth'][0] - image = image.numpy() - image_depth = image.copy() - image_depth -= np.min(image_depth) - image_depth /= np.max(image_depth) - x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3) - x[image_depth < self.bg_threhold] = 0 - y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3) - y[image_depth < self.bg_threhold] = 0 - z = np.ones_like(x) * np.pi * 2.0 - image = np.stack([x, y, z], axis=2) - image /= np.sum(image ** 2.0, axis=2, keepdims=True) ** 0.5 - image = (image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) - image = Image.fromarray(image) - image = image.resize(original_size) - updated_image_path = get_new_image_name(inputs, func_name="normal-map") - image.save(updated_image_path) - print(f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth: {updated_image_path}") - return updated_image_path - - -class NormalText2Image: - def __init__(self, device): - print(f"Initializing NormalText2Image to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.controlnet = ControlNetModel.from_pretrained( - "fusing/stable-diffusion-v1-5-controlnet-normal", torch_dtype=self.torch_dtype) - self.pipe = StableDiffusionControlNetPipeline.from_pretrained( - "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, safety_checker=None, - torch_dtype=self.torch_dtype) - self.pipe.scheduler = UniPCMultistepScheduler.from_config(self.pipe.scheduler.config) - self.pipe.to(device) - self.seed = -1 - self.a_prompt = 'best quality, extremely detailed' - self.n_prompt = 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit,' \ - ' fewer digits, cropped, worst quality, low quality' - - @prompts(name="Generate Image Condition On Normal Map", - description="useful when you want to generate a new real image from both the user description and normal map. " - "like: generate a real image of a object or something from this normal map, " - "or generate a new real image of a object or something from the normal map. 
" - "The input to this tool should be a comma separated string of two, " - "representing the image_path and the user description") - def inference(self, inputs): - image_path, instruct_text = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - image = Image.open(image_path) - self.seed = random.randint(0, 65535) - seed_everything(self.seed) - prompt = f'{instruct_text}, {self.a_prompt}' - image = self.pipe(prompt, image, num_inference_steps=20, eta=0.0, negative_prompt=self.n_prompt, - guidance_scale=9.0).images[0] - updated_image_path = get_new_image_name(image_path, func_name="normal2image") - image.save(updated_image_path) - print(f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text: {instruct_text}, " - f"Output Image: {updated_image_path}") - return updated_image_path - - -class VisualQuestionAnswering: - def __init__(self, device): - print(f"Initializing VisualQuestionAnswering to {device}") - self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32 - self.device = device - self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") - self.model = BlipForQuestionAnswering.from_pretrained( - "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype).to(self.device) - - @prompts(name="Answer Question About The Image", - description="useful when you need an answer for a question based on an image. " - "like: what is the background color of the last image, how many cats in this figure, what is in this figure. " - "The input to this tool should be a comma separated string of two, representing the image_path and the question") - def inference(self, inputs): - image_path, question = inputs.split(",")[0], ','.join(inputs.split(',')[1:]) - raw_image = Image.open(image_path).convert('RGB') - inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device, self.torch_dtype) - out = self.model.generate(**inputs) - answer = self.processor.decode(out[0], skip_special_tokens=True) - print(f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, " - f"Output Answer: {answer}") - return answer \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/README.md deleted file mode 100644 index 7099ed9f6b3d3d498868aa3f4c1a47a235779a2c..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/benchmarking/README.md +++ /dev/null @@ -1,26 +0,0 @@ - - -# 🤗 Benchmark results - -Here, you can find a list of the different benchmark results created by the community. - -If you would like to list benchmark results on your favorite models of the [model hub](https://huggingface.co/models) here, please open a Pull Request and add it below. 
- -| Benchmark description | Results | Environment info | Author | -|:----------|:-------------|:-------------|------:| -| PyTorch Benchmark on inference for `bert-base-cased` |[memory](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_memory.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Partick von Platen](https://github.com/patrickvonplaten) | -| PyTorch Benchmark on inference for `bert-base-cased` |[time](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_time.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Partick von Platen](https://github.com/patrickvonplaten) | diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/base.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/base.py deleted file mode 100644 index 14d60351f42807bb0116b45ab06758026acb4be3..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/cc_sqlalchemy/datatypes/base.py +++ /dev/null @@ -1,135 +0,0 @@ -import logging -from typing import Dict, Type - -from sqlalchemy.exc import CompileError - -from clickhouse_connect.datatypes.base import ClickHouseType, TypeDef, EMPTY_TYPE_DEF -from clickhouse_connect.datatypes.registry import parse_name, type_map -from clickhouse_connect.driver.query import format_query_value - -logger = logging.getLogger(__name__) - - -class ChSqlaType: - """ - A SQLAlchemy TypeEngine that wraps a ClickHouseType. We don't extend TypeEngine directly, instead all concrete - subclasses will inherit from TypeEngine. - """ - ch_type: ClickHouseType = None - generic_type: None - _ch_type_cls = None - _instance = None - _instance_cache: Dict[TypeDef, 'ChSqlaType'] = None - - def __init_subclass__(cls): - """ - Registers ChSqla type in the type map and sets the underlying ClickHouseType class to use to initialize - ChSqlaType instances - """ - base = cls.__name__ - if not cls._ch_type_cls: - try: - cls._ch_type_cls = type_map[base] - except KeyError: - logger.warning('Attempted to register SQLAlchemy type without corresponding ClickHouse Type') - return - schema_types.append(base) - sqla_type_map[base] = cls - cls._instance_cache = {} - - @classmethod - def build(cls, type_def: TypeDef): - """ - Factory function for building a ChSqlaType based on the type definition - :param type_def: -- TypeDef tuple that defines arguments for this instance - :return: Shared instance of a configured ChSqlaType - """ - return cls._instance_cache.setdefault(type_def, cls(type_def=type_def)) - - def __init__(self, type_def: TypeDef = EMPTY_TYPE_DEF): - """ - Basic constructor that does nothing but set the wrapped ClickHouseType. It is overridden in some cases - to add specific SqlAlchemy behavior when constructing subclasses "by hand", in which case the type_def - parameter is normally set to None and other keyword parameters used for construction - :param type_def: TypeDef tuple used to build the underlying ClickHouseType. 
This is normally populated by the - parse_name function - """ - self.type_def = type_def - self.ch_type = self._ch_type_cls.build(type_def) - - @property - def name(self): - return self.ch_type.name - - @name.setter - def name(self, name): # Keep SQLAlchemy from overriding our ClickHouse name - pass - - @property - def nullable(self): - return self.ch_type.nullable - - @property - def low_card(self): - return self.ch_type.low_card - - @staticmethod - def result_processor(): - """ - Override for the SqlAlchemy TypeEngine result_processor method, which is used to convert row values to the - correct Python type. The core driver handles this automatically, so we always return None. - """ - return None - - @staticmethod - def _cached_result_processor(*_): - """ - Override for the SqlAlchemy TypeEngine _cached_result_processor method to prevent weird behavior - when SQLAlchemy tries to cache. - """ - return None - - @staticmethod - def _cached_literal_processor(*_): - """ - Override for the SqlAlchemy TypeEngine _cached_literal_processor. We delegate to the driver format_query_value - method and should be able to ignore literal_processor definitions in the dialect, which are verbose and - confusing. - """ - return format_query_value - - def _compiler_dispatch(self, _visitor, **_): - """ - Override for the SqlAlchemy TypeEngine _compiler_dispatch method to sidestep unnecessary layers and complexity - when generating the type name. The underlying ClickHouseType generates the correct name - :return: Name generated by the underlying driver. - """ - return self.name - - -class CaseInsensitiveDict(dict): - def __setitem__(self, key, value): - super().__setitem__(key.lower(), value) - - def __getitem__(self, item): - return super().__getitem__(item.lower()) - - -sqla_type_map: Dict[str, Type[ChSqlaType]] = CaseInsensitiveDict() -schema_types = [] - - -def sqla_type_from_name(name: str) -> ChSqlaType: - """ - Factory function to convert a ClickHouse type name to the appropriate ChSqlaType - :param name: Name returned from ClickHouse using Native protocol or WithNames format - :return: ChSqlaType - """ - base, name, type_def = parse_name(name) - try: - type_cls = sqla_type_map[base] - except KeyError: - err_str = f'Unrecognized ClickHouse type base: {base} name: {name}' - logger.error(err_str) - raise CompileError(err_str) from KeyError - return type_cls.build(type_def) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py deleted file mode 100644 index 8322ea9ebb7cd1dd907829a985b9833058bc54c1..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/misc/cliTools.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Collection of utilities for command-line interfaces and console scripts.""" -import os -import re - - -numberAddedRE = re.compile(r"#\d+$") - - -def makeOutputFileName( - input, outputDir=None, extension=None, overWrite=False, suffix="" -): - """Generates a suitable file name for writing output. - - Often tools will want to take a file, do some kind of transformation to it, - and write it out again. 
This function determines an appropriate name for the - output file, through one or more of the following steps: - - - changing the output directory - - appending suffix before file extension - - replacing the file extension - - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid - overwriting an existing file. - - Args: - input: Name of input file. - outputDir: Optionally, a new directory to write the file into. - suffix: Optionally, a string suffix is appended to file name before - the extension. - extension: Optionally, a replacement for the current file extension. - overWrite: Overwriting an existing file is permitted if true; if false - and the proposed filename exists, a new name will be generated by - adding an appropriate number suffix. - - Returns: - str: Suitable output filename - """ - dirName, fileName = os.path.split(input) - fileName, ext = os.path.splitext(fileName) - if outputDir: - dirName = outputDir - fileName = numberAddedRE.split(fileName)[0] - if extension is None: - extension = os.path.splitext(input)[1] - output = os.path.join(dirName, fileName + suffix + extension) - n = 1 - if not overWrite: - while os.path.exists(output): - output = os.path.join( - dirName, fileName + suffix + "#" + repr(n) + extension - ) - n += 1 - return output diff --git a/spaces/cihyFjudo/fairness-paper-search/Sounds of Skyrim or Audio Overhaul A Review of the Features and Quality of Each Sound Mod.md b/spaces/cihyFjudo/fairness-paper-search/Sounds of Skyrim or Audio Overhaul A Review of the Features and Quality of Each Sound Mod.md deleted file mode 100644 index 61a4cd4cb6dbefa826eb735338a9460aeac5c85a..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Sounds of Skyrim or Audio Overhaul A Review of the Features and Quality of Each Sound Mod.md +++ /dev/null @@ -1,6 +0,0 @@ -

sounds of skyrim or audio overhaul


Download Zip 🌟 https://tinurli.com/2uwksn



- - aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Xfer Records Serum v1.2.8b6 WiN MacOSX Features Reviews and Tips.md b/spaces/cihyFjudo/fairness-paper-search/Xfer Records Serum v1.2.8b6 WiN MacOSX Features Reviews and Tips.md deleted file mode 100644 index 16c71d677c28ca05110fe39cb5f72b888457e381..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Xfer Records Serum v1.2.8b6 WiN MacOSX Features Reviews and Tips.md +++ /dev/null @@ -1,5 +0,0 @@ - -

Free Download xfer serum windows is a wave generation synthesizer software plug-in created by Xfer Records. It uses wavetable synthesis to generate electronic sounds. Xfer Records is known for its useful LFO tools. LFO Tool is an FX plug-in for Windows and Macintosh that can help music producers. Create vibrato, auto-pan, compression, cider-in, and other cool features (including the famous dubstep swing effect) with great artistic freedom. You may also construct complex layers with their infinite color possibilities. You may do it inside or outside, at any moment. As a wavetable synthesizer plugin, it is another proof of excellence. Here you will find Serum crack for Windows.

-

Xfer Records Serum v1.2.8b6 WiN MacOSX


Download ☆☆☆ https://tinurli.com/2uwkBH



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ac3dec.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ac3dec.h deleted file mode 100644 index 98de7b5abffcaf4f8a2eb78cf4e769d3ce478aff..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/ac3dec.h +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Common code between the AC-3 and E-AC-3 decoders - * Copyright (c) 2007 Bartlomiej Wolowiec - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Common code between the AC-3 and E-AC-3 decoders. - * - * Summary of MDCT Coefficient Grouping: - * The individual MDCT coefficient indices are often referred to in the - * (E-)AC-3 specification as frequency bins. These bins are grouped together - * into subbands of 12 coefficients each. The subbands are grouped together - * into bands as defined in the bitstream by the band structures, which - * determine the number of bands and the size of each band. The full spectrum - * of 256 frequency bins is divided into 1 DC bin + 21 subbands = 253 bins. - * This system of grouping coefficients is used for channel bandwidth, stereo - * rematrixing, channel coupling, enhanced coupling, and spectral extension. 
- * - * +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-+ - * |1| |12| | [12|12|12|12] | | | | | | | | | | | | |3| - * +-+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+-+ - * ~~~ ~~~~ ~~~~~~~~~~~~~ ~~~ - * | | | | - * | | | 3 unused frequency bins--+ - * | | | - * | | +--1 band containing 4 subbands - * | | - * | +--1 subband of 12 frequency bins - * | - * +--DC frequency bin - */ - -#ifndef AVCODEC_AC3DEC_H -#define AVCODEC_AC3DEC_H - -#include "libavutil/tx.h" -#include "libavutil/float_dsp.h" -#include "libavutil/fixed_dsp.h" -#include "libavutil/lfg.h" -#include "libavutil/mem_internal.h" - -#include "ac3.h" -#include "ac3dsp.h" -#include "avcodec.h" -#include "bswapdsp.h" -#include "get_bits.h" -#include "fmtconvert.h" - -#define AC3_OUTPUT_LFEON 8 - -#define SPX_MAX_BANDS 17 - -/** Large enough for maximum possible frame size when the specification limit is ignored */ -#define AC3_FRAME_BUFFER_SIZE 32768 - -typedef struct AC3DecodeContext { - AVClass *class; ///< class for AVOptions - AVCodecContext *avctx; ///< parent context - GetBitContext gbc; ///< bitstream reader - -///@name Bit stream information -///@{ - int frame_type; ///< frame type (strmtyp) - int substreamid; ///< substream identification - int superframe_size; ///< current superframe size, in bytes - int frame_size; ///< current frame size, in bytes - int bit_rate; ///< stream bit rate, in bits-per-second - int sample_rate; ///< sample frequency, in Hz - int num_blocks; ///< number of audio blocks - int bitstream_id; ///< bitstream id (bsid) - int bitstream_mode; ///< bitstream mode (bsmod) - int channel_mode; ///< channel mode (acmod) - int lfe_on; ///< lfe channel in use - int dialog_normalization[2]; ///< dialog level in dBFS (dialnorm) - int compression_exists[2]; ///< compression field is valid for frame (compre) - int channel_map; ///< custom channel map (chanmap) - int preferred_downmix; ///< Preferred 2-channel downmix mode (dmixmod) - int center_mix_level; ///< Center mix level index - int center_mix_level_ltrt; ///< Center mix level index for Lt/Rt (ltrtcmixlev) - int surround_mix_level; ///< Surround mix level index - int surround_mix_level_ltrt; ///< Surround mix level index for Lt/Rt (ltrtsurmixlev) - int lfe_mix_level_exists; ///< indicates if lfemixlevcod is specified (lfemixlevcode) - int lfe_mix_level; ///< LFE mix level index (lfemixlevcod) - int eac3; ///< indicates if current frame is E-AC-3 - int eac3_subsbtreamid_found; ///< bitstream has E-AC-3 additional substream(s) - int eac3_extension_type_a; ///< bitstream has E-AC-3 extension type A enabled frame(s) - int dolby_surround_mode; ///< dolby surround mode (dsurmod) - int dolby_surround_ex_mode; ///< dolby surround ex mode (dsurexmod) - int dolby_headphone_mode; ///< dolby headphone mode (dheadphonmod) -///@} - - int preferred_stereo_downmix; - float ltrt_center_mix_level; - float ltrt_surround_mix_level; - float loro_center_mix_level; - float loro_surround_mix_level; - int target_level; ///< target level in dBFS - float level_gain[2]; - -///@name Frame syntax parameters - int snr_offset_strategy; ///< SNR offset strategy (snroffststr) - int block_switch_syntax; ///< block switch syntax enabled (blkswe) - int dither_flag_syntax; ///< dither flag syntax enabled (dithflage) - int bit_allocation_syntax; ///< bit allocation model syntax enabled (bamode) - int fast_gain_syntax; ///< fast gain codes enabled (frmfgaincode) - int dba_syntax; ///< delta bit allocation syntax enabled (dbaflde) - int skip_syntax; ///< skip 
field syntax enabled (skipflde) - ///@} - -///@name Standard coupling - int cpl_in_use[AC3_MAX_BLOCKS]; ///< coupling in use (cplinu) - int cpl_strategy_exists[AC3_MAX_BLOCKS];///< coupling strategy exists (cplstre) - int channel_in_cpl[AC3_MAX_CHANNELS]; ///< channel in coupling (chincpl) - int phase_flags_in_use; ///< phase flags in use (phsflginu) - int phase_flags[AC3_MAX_CPL_BANDS]; ///< phase flags (phsflg) - int num_cpl_bands; ///< number of coupling bands (ncplbnd) - uint8_t cpl_band_struct[AC3_MAX_CPL_BANDS]; - uint8_t cpl_band_sizes[AC3_MAX_CPL_BANDS]; ///< number of coeffs in each coupling band - int firstchincpl; ///< first channel in coupling - int first_cpl_coords[AC3_MAX_CHANNELS]; ///< first coupling coordinates states (firstcplcos) - int cpl_coords[AC3_MAX_CHANNELS][AC3_MAX_CPL_BANDS]; ///< coupling coordinates (cplco) -///@} - -///@name Spectral extension -///@{ - int spx_in_use; ///< spectral extension in use (spxinu) - uint8_t channel_uses_spx[AC3_MAX_CHANNELS]; ///< channel uses spectral extension (chinspx) - int8_t spx_atten_code[AC3_MAX_CHANNELS]; ///< spx attenuation code (spxattencod) - int spx_src_start_freq; ///< spx start frequency bin - int spx_dst_end_freq; ///< spx end frequency bin - int spx_dst_start_freq; ///< spx starting frequency bin for copying (copystartmant) - ///< the copy region ends at the start of the spx region. - int num_spx_bands; ///< number of spx bands (nspxbnds) - uint8_t spx_band_struct[SPX_MAX_BANDS]; - uint8_t spx_band_sizes[SPX_MAX_BANDS]; ///< number of bins in each spx band - uint8_t first_spx_coords[AC3_MAX_CHANNELS]; ///< first spx coordinates states (firstspxcos) - INTFLOAT spx_noise_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS]; ///< spx noise blending factor (nblendfact) - INTFLOAT spx_signal_blend[AC3_MAX_CHANNELS][SPX_MAX_BANDS];///< spx signal blending factor (sblendfact) -///@} - -///@name Adaptive hybrid transform - int channel_uses_aht[AC3_MAX_CHANNELS]; ///< channel AHT in use (chahtinu) - int pre_mantissa[AC3_MAX_CHANNELS][AC3_MAX_COEFS][AC3_MAX_BLOCKS]; ///< pre-IDCT mantissas -///@} - -///@name Channel - int fbw_channels; ///< number of full-bandwidth channels - int channels; ///< number of total channels - int lfe_ch; ///< index of LFE channel - SHORTFLOAT *downmix_coeffs[2]; ///< stereo downmix coefficients - int downmixed; ///< indicates if coeffs are currently downmixed - int output_mode; ///< output channel configuration - int prev_output_mode; ///< output channel configuration for previous frame - int out_channels; ///< number of output channels - int prev_bit_rate; ///< stream bit rate, in bits-per-second for previous frame -///@} - -///@name Dynamic range - INTFLOAT dynamic_range[2]; ///< dynamic range - INTFLOAT drc_scale; ///< percentage of dynamic range compression to be applied - int heavy_compression; ///< apply heavy compression - INTFLOAT heavy_dynamic_range[2]; ///< heavy dynamic range compression -///@} - -///@name Bandwidth - int start_freq[AC3_MAX_CHANNELS]; ///< start frequency bin (strtmant) - int end_freq[AC3_MAX_CHANNELS]; ///< end frequency bin (endmant) -///@} - -///@name Consistent noise generation - int consistent_noise_generation; ///< seed noise generation with AC-3 frame on decode -///@} - -///@name Rematrixing - int num_rematrixing_bands; ///< number of rematrixing bands (nrematbnd) - int rematrixing_flags[4]; ///< rematrixing flags (rematflg) -///@} - -///@name Exponents - int num_exp_groups[AC3_MAX_CHANNELS]; ///< Number of exponent groups (nexpgrp) - int8_t 
dexps[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< decoded exponents - int exp_strategy[AC3_MAX_BLOCKS][AC3_MAX_CHANNELS]; ///< exponent strategies (expstr) -///@} - -///@name Bit allocation - AC3BitAllocParameters bit_alloc_params; ///< bit allocation parameters - int first_cpl_leak; ///< first coupling leak state (firstcplleak) - int snr_offset[AC3_MAX_CHANNELS]; ///< signal-to-noise ratio offsets (snroffst) - int fast_gain[AC3_MAX_CHANNELS]; ///< fast gain values/SMR's (fgain) - uint8_t bap[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< bit allocation pointers - int16_t psd[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< scaled exponents - int16_t band_psd[AC3_MAX_CHANNELS][AC3_CRITICAL_BANDS]; ///< interpolated exponents - int16_t mask[AC3_MAX_CHANNELS][AC3_CRITICAL_BANDS]; ///< masking curve values - int dba_mode[AC3_MAX_CHANNELS]; ///< delta bit allocation mode - int dba_nsegs[AC3_MAX_CHANNELS]; ///< number of delta segments - uint8_t dba_offsets[AC3_MAX_CHANNELS][8]; ///< delta segment offsets - uint8_t dba_lengths[AC3_MAX_CHANNELS][8]; ///< delta segment lengths - uint8_t dba_values[AC3_MAX_CHANNELS][8]; ///< delta values for each segment -///@} - -///@name Zero-mantissa dithering - int dither_flag[AC3_MAX_CHANNELS]; ///< dither flags (dithflg) - AVLFG dith_state; ///< for dither generation -///@} - -///@name IMDCT - int block_switch[AC3_MAX_CHANNELS]; ///< block switch flags (blksw) - AVTXContext *tx_128, *tx_256; - av_tx_fn tx_fn_128, tx_fn_256; -///@} - -///@name Optimization - BswapDSPContext bdsp; -#if USE_FIXED - AVFixedDSPContext *fdsp; -#else - AVFloatDSPContext *fdsp; -#endif - AC3DSPContext ac3dsp; - FmtConvertContext fmt_conv; ///< optimized conversion functions -///@} - - SHORTFLOAT *outptr[AC3_MAX_CHANNELS]; - INTFLOAT *xcfptr[AC3_MAX_CHANNELS]; - INTFLOAT *dlyptr[AC3_MAX_CHANNELS]; - -///@name Aligned arrays - DECLARE_ALIGNED(16, int, fixed_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< fixed-point transform coefficients - DECLARE_ALIGNED(32, INTFLOAT, transform_coeffs)[AC3_MAX_CHANNELS][AC3_MAX_COEFS]; ///< transform coefficients - DECLARE_ALIGNED(32, INTFLOAT, delay)[EAC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< delay - added to the next block - DECLARE_ALIGNED(32, INTFLOAT, window)[AC3_BLOCK_SIZE]; ///< window coefficients - DECLARE_ALIGNED(32, INTFLOAT, tmp_output)[AC3_BLOCK_SIZE]; ///< temporary storage for output before windowing - DECLARE_ALIGNED(32, SHORTFLOAT, output)[EAC3_MAX_CHANNELS][AC3_BLOCK_SIZE]; ///< output after imdct transform and windowing - DECLARE_ALIGNED(32, uint8_t, input_buffer)[AC3_FRAME_BUFFER_SIZE + AV_INPUT_BUFFER_PADDING_SIZE]; ///< temp buffer to prevent overread - DECLARE_ALIGNED(32, SHORTFLOAT, output_buffer)[EAC3_MAX_CHANNELS][AC3_BLOCK_SIZE * 6]; ///< final output buffer -///@} - - AVChannelLayout downmix_layout; -} AC3DecodeContext; - -/** - * Parse the E-AC-3 frame header. - * This parses both the bit stream info and audio frame header. - */ -static int ff_eac3_parse_header(AC3DecodeContext *s); - -/** - * Decode mantissas in a single channel for the entire frame. - * This is used when AHT mode is enabled. - */ -static void ff_eac3_decode_transform_coeffs_aht_ch(AC3DecodeContext *s, int ch); - -/** - * Apply spectral extension to each channel by copying lower frequency - * coefficients to higher frequency bins and applying side information to - * approximate the original high frequency signal. 
- */ -static void ff_eac3_apply_spectral_extension(AC3DecodeContext *s); - -#if (!USE_FIXED) -extern float ff_ac3_heavy_dynamic_range_tab[256]; -#endif - -#endif /* AVCODEC_AC3DEC_H */ diff --git a/spaces/congsaPfin/Manga-OCR/logs/APK Download Real Football 2014 for Android - The Ultimate Soccer Game.md b/spaces/congsaPfin/Manga-OCR/logs/APK Download Real Football 2014 for Android - The Ultimate Soccer Game.md deleted file mode 100644 index 4cefb9978e21763a95efb88fdb4e4a4b4961fe3a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/APK Download Real Football 2014 for Android - The Ultimate Soccer Game.md +++ /dev/null @@ -1,43 +0,0 @@ -
-

Real Football 2014 APK Download for Android: A Complete Guide

-

If you are a fan of soccer games, you might have heard of Real Football 2014, one of the most popular and realistic soccer games for Android devices. Developed by Gameloft, a leading company in the mobile gaming industry, it lets you experience the thrill and excitement of the FIFA World Cup 2014, as well as several other game modes and challenges. In this article, we will show you how to download and install Real Football 2014 APK on your Android device, along with some tips and tricks to help you master the game.

-

Features of Real Football 2014 APK

-

Real Football 2014 APK offers many features that set it apart from other soccer games. Here are some of the highlights you can enjoy once you download it:

-




-
    -
  • Realistic graphics and animations: Real Football 2014 APK boasts stunning graphics and smooth animations that make the game look and feel like a real soccer match. You can see the details of the players, the stadiums, the crowds, and the weather effects. You can also watch replays of your best goals and actions.
  • -
  • Multiple game modes and challenges: Real Football 2014 APK offers various game modes and challenges that suit your preferences and skills. You can play in quick match mode, tournament mode, penalty shootout mode, or practice mode. You can also take on different challenges such as scoring goals, winning matches, or completing achievements.
  • -
  • World Cup mode with 32 teams and stadiums: Real Football 2014 APK lets you relive the FIFA World Cup 2014 in Brazil, with all the 32 teams and stadiums that participated in the event. You can choose your favorite team and lead it to glory in the group stage, the knockout stage, and the final. You can also customize your team's formation, tactics, and players.
  • -
  • Customizable players and teams: Real Football 2014 APK allows you to create your own players and teams, with different attributes, skills, appearances, and names. You can also edit existing players and teams, or transfer players between teams. You can also unlock new jerseys, balls, shoes, and accessories for your players.
  • -
  • Online multiplayer and leaderboards: Real Football 2014 APK enables you to play online with other players from around the world, in real-time or turn-based matches. You can also compete with your friends in local multiplayer mode via Wi-Fi or Bluetooth. You can also check your ranking and stats on the global leaderboards, or challenge other players to beat your records.
  • -
-

How to Download and Install Real Football 2014 APK on Android

-

If you want to download and install Real Football 2014 APK on your Android device, follow these simple steps (a command-line alternative using adb is sketched right after the list):

-
    -
  1. Enable unknown sources on your device: To install Real Football 2014 APK, you need to allow your device to install apps from unknown sources. Go to your device's settings, then Security, and enable the option "Unknown sources". On Android 8.0 and later this is instead a per-app permission called "Install unknown apps", granted to the browser or file manager you download with. This lets you install apps that do not come from the Google Play Store.
  2. -
  3. Download the APK file from a trusted source: To download Real Football 2014 APK, you need to find a reliable and safe source that provides the APK file. You can search for Real Football 2014 APK on Google, or use the link below to download it directly. Make sure you have enough storage space on your device before downloading the file.
  4. -
  5. Locate and tap on the APK file: After downloading the APK file, you need to locate it on your device. You can use a file manager app to find the file, or go to your device's downloads folder. Once you find the file, tap on it to start the installation process.
  6. -
  7. Follow the installation instructions and launch the game: After tapping on the APK file, you will see a pop-up window that asks you to confirm the installation. Tap on "Install" and wait for the installation to finish. You may also see some permissions that the app requires, such as access to your storage, network, and location. Tap on "Allow" to grant these permissions. Once the installation is done, you will see a message that says "App installed". Tap on "Open" to launch the game and enjoy.
  8. -
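
The steps above cover installing directly on the phone. If you would rather sideload from a computer, the sketch below shows one possible way to do it with Python and Android's adb tool. This is only an illustration, not part of the article's official method: it assumes adb is installed and on your PATH, USB debugging is enabled on the device, and the APK file name is a placeholder you should replace with the file you actually downloaded.

```python
# Minimal sketch: sideload an APK from a PC with adb (assumes adb is on PATH,
# USB debugging is enabled, and the file name below matches your download).
import subprocess
import sys
from pathlib import Path

APK = Path("real_football_2014.apk")  # hypothetical file name -- adjust it

def main() -> None:
    if not APK.is_file():
        sys.exit(f"APK not found: {APK}")

    # Show connected devices; at least one entry should report "device".
    print(subprocess.run(["adb", "devices"],
                         capture_output=True, text=True, check=True).stdout)

    # Install the APK; -r reinstalls over an existing copy, keeping its data.
    result = subprocess.run(["adb", "install", "-r", str(APK)],
                            capture_output=True, text=True)
    print(result.stdout or result.stderr)
    if result.returncode != 0:
        sys.exit("adb install failed")

if __name__ == "__main__":
    main()
```

Run it from the folder that holds the APK while the phone is connected over USB; the result is the same as tapping the file on the device.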
-

Tips and Tricks for Playing Real Football 2014 APK on Android

-

Real Football 2014 APK is a game that requires skill and strategy to win. Here are some tips and tricks that can help you improve your performance and have more fun playing Real Football 2014 APK:

-
    -
  • Choose the right difficulty level for your skill: Real Football 2014 APK offers four difficulty levels: easy, normal, hard, and expert. You can choose the level that suits your skill and preference in the settings menu. The higher the difficulty level, the more challenging and realistic the game will be. You can also change the difficulty level anytime during the game.
  • -
  • Use the virtual joystick and buttons to control your players: Real Football 2014 APK uses a virtual joystick and buttons to control your players on the screen. You can use the joystick to move your players around, and use the buttons to pass, shoot, tackle, or perform special moves. You can also adjust the sensitivity and position of the joystick and buttons in the settings menu.
  • -
  • Learn the different types of passes, shots, and tackles: Real Football 2014 APK offers different types of passes, shots, and tackles that you can use depending on the situation. You can use short passes, long passes, through balls, or lob passes to create chances for your teammates. You can use low shots, high shots, curved shots, or power shots to score goals. You can use slide tackles, standing tackles, or fouls to stop your opponents. You can also use power-ups and special moves to gain an edge over your rivals.
  • -
  • Use the power-ups and special moves to gain an edge: Real Football 2014 APK features power-ups and special moves that you can use to boost your performance and surprise your opponents. You can collect power-ups by hitting them with the ball or by completing achievements. You can use power-ups such as speed boost, shield, magnet, or freeze to enhance your abilities or hinder your enemies. You can also perform special moves such as bicycle kick, volley, header, or chip shot by tapping on the special move button when the ball is in the air. You can also use the super shot button to unleash a powerful shot that can break through any defense.
  • -
  • Practice your skills in training mode and friendly matches: Real Football 2014 APK offers a training mode and a friendly match mode that you can use to practice your skills and test your strategies. You can use the training mode to learn the basics of the game, such as passing, shooting, tackling, and using power-ups and special moves. You can also use the friendly match mode to play against any team of your choice, with any difficulty level and settings. You can use these modes to improve your game and prepare for the real challenges.
  • -
-

Conclusion

-

Real Football 2014 APK is a game that will satisfy any soccer fan who wants to enjoy a realistic and exciting soccer experience on their Android device. Real Football 2014 APK offers many features, such as realistic graphics and animations, multiple game modes and challenges, World Cup mode with 32 teams and stadiums, customizable players and teams, online multiplayer and leaderboards, power-ups and special moves, and more. Real Football 2014 APK is easy to download and install, and you can follow the steps we provided in this article to do so. You can also use the tips and tricks we shared to help you master the game and have more fun playing it. If you are looking for a game that will make you feel like a real soccer star, download Real Football 2014 APK today and enjoy the ultimate soccer experience.

-

FAQs

-

Here are some frequently asked questions about Real Football 2014 APK:

-
    -
  1. Is Real Football 2014 APK free to play? Yes, Real Football 2014 APK is free to play, but it may contain some in-app purchases that you can buy with real money to enhance your game.
  2. -
  3. Is Real Football 2014 APK safe to download and install? Yes, Real Football 2014 APK is safe to download and install, as long as you download it from a trusted source and enable unknown sources on your device. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses or malware that can harm your device.
  4. -
  5. What are the minimum requirements to play Real Football 2014 APK on Android? To play Real Football 2014 APK on Android, you need to have an Android device that runs on Android 2.3 or higher, with at least 1 GB of RAM and 1 GB of free storage space.
  6. -
  7. How can I update Real Football 2014 APK on Android? To update Real Football 2014 APK on Android, you need to download the latest version of the APK file from a trusted source and install it over the existing version. You can also check for updates in the game's settings menu.
  8. -
  9. How can I contact the developers of Real Football 2014 APK? To contact the developers of Real Football 2014 APK, you can visit their official website at https://www.gameloft.com/en/, or follow them on their social media accounts on Facebook, Twitter, Instagram, or YouTube.
  10. -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run APK and Join the Millions of Players Worldwide.md b/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run APK and Join the Millions of Players Worldwide.md deleted file mode 100644 index 61a65287ece6a98f772d428ff46e79033ebb752b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Temple Run APK and Join the Millions of Players Worldwide.md +++ /dev/null @@ -1,223 +0,0 @@ -
-

    Temple Run App Download APK: How to Play the Addictive Running Game on Your Android Device

    -

    Do you love adventure games that test your reflexes and keep you on the edge of your seat? If so, you might want to try Temple Run, one of the most popular and addictive running games ever made. In this article, we will show you how to download and install Temple Run APK on your Android device, how to play the game, what features it offers, and what are the pros and cons of playing it. We will also suggest some alternatives to Temple Run in case you want to try something different.

    -

    How to Download and Install Temple Run APK on Your Android Device

    -

    Temple Run is available for free on the Google Play Store, but if for some reason you cannot access it or you want to get the latest version of the game, you can also download the APK file from other sources . An APK file is a package that contains all the files needed to install an app on your Android device. However, before you download and install an APK file, you need to make sure that you have enabled the option to install apps from unknown sources on your device. To do this, follow these steps:

    -
      -
    1. Go to Settings > Security > Unknown Sources and toggle it on.
    2. -
  3. Download the Temple Run APK file from a trusted source (the download link given in this article is https://urlca.com/2uO8KR). You can use a browser or a file manager app to do this.
    4. -
    5. Locate the downloaded file on your device and tap on it.
    6. -
    7. Follow the instructions on the screen to install the app.
    8. -
    9. Once the installation is complete, you can launch the app from your app drawer or home screen.
    10. -
    -

    Congratulations! You have successfully installed Temple Run APK on your Android device. Now you are ready to play the game and have some fun.

    -

    How to Play Temple Run on Your Android Device

    -

    Temple Run is a simple but challenging game that requires quick reflexes and concentration. The goal of the game is to run as far as possible while avoiding obstacles, collecting coins, and escaping from the evil demon monkeys that are chasing you. Here are some basic controls and gameplay tips that will help you play the game better:

    -

    Basic Controls

    -

    To play Temple Run, you only need to use your fingers and your device's accelerometer. Here are the basic gestures you need to know:

    -
      -
    • To turn left or right, swipe left or right on the screen.
    • -
    • To jump over obstacles or gaps, swipe up on the screen.
    • -
    • To slide under obstacles or bridges, swipe down on the screen.
    • -
  • To move your character sideways within a lane, tilt your device left or right. This will help you collect coins and avoid falling off edges.
    • -
    -

    That's it! These are the only controls you need to master in order to play Temple Run. However, don't let the simplicity fool you. The game gets faster and harder as you progress, so you need to be alert and agile at all times.

    -

    Gameplay Tips

    -

    Besides running for your life, there are some other things you can do in Temple Run to make your gameplay more enjoyable and rewarding. Here are some tips that will help you improve your score and performance:

    -
      -
    • Collect coins as much as you can. Coins are the currency of the game and you can use them to buy power-ups, upgrades, and new characters. You can also get bonus coins by completing objectives and achievements.
    • -
    • Use power-ups wisely. Power-ups are special items that give you a temporary boost or advantage in the game. For example, the magnet power-up attracts all the coins in your path, the invisibility power-up makes you immune to obstacles, and the boost power-up makes you run faster and farther. You can activate a power-up by double-tapping on the screen when you see the icon on the top left corner. You can also buy and upgrade power-ups with coins in the store.
    • -
    • Unlock new characters and outfits. Temple Run features several characters that you can play as, each with their own abilities and outfits. For example, Guy Dangerous is the default character who has no special ability, Scarlett Fox is a female character who can jump longer distances, and Barry Bones is a cop who can collect more coins. You can unlock new characters and outfits with coins or gems in the store.
    • -
  • Try different modes and maps. Temple Run offers different modes and maps that you can try to spice up your gameplay. For example, you can play in the classic mode, which is the original Temple Run game, or in the blazing sands mode, which is a desert-themed map with new obstacles and challenges. You can also play in the frozen shadows mode, which is a winter-themed map with snowmen and ice slides. You can switch between modes and maps by tapping on the icon on the top right corner of the main menu.
    • -
    -

    Features of Temple Run App

    -

    Temple Run is not just a simple running game. It also has some amazing features that make it stand out from other games in the genre. Here are some of the features that Temple Run offers:

    -

    Graphics and Sound

    -

    Temple Run has stunning graphics and sound that create an immersive and thrilling experience for the players. The game features realistic 3D environments, dynamic lighting and shadows, smooth animations, and detailed textures. The game also has a captivating soundtrack and sound effects that match the mood and atmosphere of the game. You can hear the footsteps of your character, the roar of the demon monkeys, the clink of the coins, and the voice of the narrator.

    -

    Levels and Challenges

    -

    Temple Run has endless levels and challenges that keep you engaged and motivated to play more. The game has a procedurally generated map that changes every time you play, so you never know what to expect next. The game also has different scenarios and objectives that vary depending on the mode and map you choose. For example, in the classic mode, you have to escape from a temple filled with traps and treasures, while in the blazing sands mode, you have to survive a sandstorm and avoid falling into quicksand. The game also has a leaderboard and achievements system that lets you compare your score and progress with other players around the world.

    -

    Pros and Cons of Temple Run App

    -

    Temple Run is a fun and addictive game that anyone can enjoy, but it also has some pros and cons that you should be aware of before playing it. Here are some of them:

    -

    Pros

    -
      -
    • Temple Run is free to play and download.
    • -
    • Temple Run is easy to learn but hard to master.
    • -
    • Temple Run is suitable for all ages and preferences.
    • -
    • Temple Run has high replay value and variety.
    • -
    • Temple Run has frequent updates and improvements.
    • -
    -

    Cons

    -
      -
    • Temple Run can be frustrating and repetitive at times.
    • -
    • Temple Run can be addictive and time-consuming.
    • -
    • Temple Run can drain your battery and data quickly.
    • -
    • Temple Run can have some bugs and glitches occasionally.
    • -
    • Temple Run can have some ads and in-app purchases that might annoy some players.
    • -
    -

    Alternatives to Temple Run App

    -

    If you love Temple Run but want to try something different, there are many other running games that you can play on your Android device. Here are some of them:

| Name | Description | Download Link |
| --- | --- | --- |
| Subway Surfers | A colorful and vibrant running game where you have to dodge trains, buses, and obstacles while collecting coins and power-ups. | Subway Surfers - Apps on Google Play |
| Zombie Tsunami | A hilarious and chaotic running game where you have to lead a horde of zombies through the city while eating people and destroying everything in your path. | Zombie Tsunami - Apps on Google Play |
| Minion Rush | A cute and funny running game where you have to control a minion from the Despicable Me movie franchise and complete various missions and challenges. | Minion Rush: Despicable Me Official Game - Apps on Google Play |
| Jetpack Joyride | A fast-paced and action-packed running game where you have to fly a jetpack and avoid lasers, missiles, and zappers while collecting coins and gadgets. | Jetpack Joyride - Apps on Google Play |
| Alto's Adventure | A beautiful and relaxing running game where you have to ski down a snowy mountain and perform tricks and stunts while avoiding obstacles and collecting llamas. | Alto's Adventure - Apps on Google Play |
    -

    These are just some of the many running games that you can find on the Google Play Store. You can also search for more games by using keywords such as "running", "endless", or "runner". You might discover some hidden gems that will keep you entertained for hours.

    -

    Conclusion

    -

    Temple Run is one of the best running games that you can play on your Android device. It is fun, addictive, challenging, and rewarding. It has amazing graphics, sound, features, and modes that will keep you entertained for hours. It also has some pros and cons that you should consider before playing it. If you want to try something different, you can also check out some of the alternatives that we suggested. We hope that this article helped you learn more about Temple Run APK and how to play it on your Android device. Now go ahead and download the game and start running for your life!

    -

    FAQs

    -

    Here are some frequently asked questions about Temple Run APK:

    -
      -
    1. Is Temple Run APK safe to download and install?
    2. -

  Yes, Temple Run APK is safe to download and install as long as you get it from a trusted source. However, you should always be careful when downloading and installing apps from unknown sources, as they may contain viruses or malware that can harm your device. You should also scan the APK file with an antivirus app before installing it; a simple checksum-verification sketch is shown after these FAQs.

      -
    3. How can I update Temple Run APK?
    4. -

      You can update Temple Run APK by downloading the latest version of the file from a trusted source and installing it over the existing app. You can also check for updates on the Google Play Store or on the official website of the game.

      -
    5. How can I backup my Temple Run progress?
    6. -

      You can backup your Temple Run progress by using a cloud service such as Google Play Games or Facebook. You can also use a backup app such as Titanium Backup or Helium to save your data on your device or an external storage.

      -
    7. How can I play Temple Run on PC?
    8. -

  You can play Temple Run on PC by using an Android emulator such as BlueStacks or Nox App Player. These are programs that let you run Android apps on your PC. Download and install the emulator on your PC, then download and install Temple Run APK inside the emulator, and launch the game from there.

      -
    9. How can I contact the developers of Temple Run?
    10. -

  You can contact the developers of Temple Run by visiting their official website or their social media pages. You can also send them an email at support@imangistudios.com or leave a review on the Google Play Store.

      -
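
In addition to antivirus scanning, another common safety check is to compare the downloaded file's SHA-256 hash with a checksum published by the download site, when one is provided. The Python sketch below is a generic illustration of that idea; the file name and the expected hash are placeholders, not values taken from this article.

```python
# Minimal sketch: verify a downloaded APK against a published SHA-256 hash.
# The file name and EXPECTED_SHA256 are placeholders -- substitute the file
# you actually downloaded and the hash the site publishes, if it publishes one.
import hashlib
from pathlib import Path

APK_PATH = Path("temple_run.apk")  # hypothetical file name
EXPECTED_SHA256 = "0000000000000000000000000000000000000000000000000000000000000000"

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large APKs don't need to fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print(f"SHA-256: {actual}")
    if actual.lower() == EXPECTED_SHA256.lower():
        print("Checksum matches the published value.")
    else:
        print("Checksum does NOT match -- do not install this file.")
```

A checksum only proves the file was not corrupted or swapped after the hash was published; it does not by itself prove the APK is trustworthy.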

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy FR Legends with Unlimited Money and More Features Download Versi 0.3.1.1 Mod APK Now.md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy FR Legends with Unlimited Money and More Features Download Versi 0.3.1.1 Mod APK Now.md deleted file mode 100644 index cedea238e4b76fb5f48712c64ec54efce112de3b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy FR Legends with Unlimited Money and More Features Download Versi 0.3.1.1 Mod APK Now.md +++ /dev/null @@ -1,104 +0,0 @@ - -

    Download FR Legends Unlimited Money Versi 0.3.1.1: The Ultimate Guide

    -

    If you are a fan of drifting games, you have probably heard of FR Legends, one of the most popular and realistic drifting games on mobile devices. But did you know that you can download a modded version of the game that gives you unlimited money, all cars unlocked, and a new map? In this article, we will show you how to download FR Legends unlimited money versi 0.3.1.1, the latest version of the mod, and how to play it on your device.

    -

    What is FR Legends?

    -

FR Legends is a drifting game that lets you drive legendary FR (front-engine, rear-wheel-drive) drift cars at the world's most iconic circuits and customize everything on your car, including engine swaps and wide-body kits. It is also the first mobile game to offer tandem drift battles against AI drivers, with a unique scoring system based on real-world competition judging rules. You can also compete online with other players from around the world, or just enjoy free roaming on different tracks.

    -

    download fr legends unlimited money versi 0.3.1.1


Download Zip: https://urlca.com/2uObZf



    -

    The game has amazing graphics, realistic physics, and smooth controls that make it one of the best drifting games on mobile devices. You can choose from a variety of cars, from classic Japanese models to modern European ones, and tune them to your liking. You can also change the appearance of your car, from the paint color to the stickers, wheels, spoilers, and more.

    -

    Why Download FR Legends Unlimited Money Versi 0.3.1.1?

    -

    While FR Legends is a free game, it also has some in-app purchases that require real money. For example, you need to buy cash in the game to buy new cars, upgrade parts, or customize your car. You also need to watch ads or complete tasks to earn cash or gold coins, which can be used to unlock more features or items in the game.

    -

    However, if you download FR Legends unlimited money versi 0.3.1.1, you can enjoy all the benefits of the game without spending any money or watching any ads. This is because this modded version of the game gives you unlimited money in the game, which means you can buy anything you want without any restrictions. You can also unlock all the cars in the game, which normally require gold coins or real money to purchase.

    -

    But that's not all. This modded version of the game also gives you access to a new map that is not available in the original version of the game. This map is called FR Legends City, and it is a huge open world map that has various roads, buildings, and landmarks to explore. You can drift on the streets, race with other cars, or just enjoy the scenery of the city. This map is very fun and challenging, and it adds a lot of replay value to the game.

    -

    How to Download FR Legends Unlimited Money Versi 0.3.1.1?

    -

    Now that you know the benefits of downloading FR Legends unlimited money versi 0.3.1.1, you might be wondering how to download and install it on your device. Don't worry, we will guide you through the process step by step.

    -

    Requirements

    -

    Before you download the modded version of the game, you need to make sure that your device meets the minimum requirements to run the game smoothly. Here are the requirements:

    -
      -
    • Your device must have Android 4.1 or higher operating system.
    • -
    • Your device must have at least 1 GB of RAM and 100 MB of free storage space.
    • -
    • Your device must have a stable internet connection to download the modded APK file and play online.
    • -
    • Your device must allow installation of apps from unknown sources. You can enable this option in your device settings.
    • -
    -

    Download Link

    -

    Once you have checked the requirements, you can proceed to download the modded APK file from the link below. This link is safe and secure, and it will direct you to a trusted file hosting site where you can download the file without any hassle.

    -

    -

    Download FR Legends Unlimited Money Versi 0.3.1.1 APK

    -

    Installation Guide

    -

After you have downloaded the modded APK file, you need to install it on your device. Here are the instructions to do so (a small sketch for double-checking the requirements over adb follows the list):

    -
      -
    1. Locate the downloaded APK file in your device file manager or downloads folder.
    2. -
    3. Tap on the APK file and select "Install".
    4. -
    5. Wait for the installation process to complete.
    6. -
    7. If prompted, allow any permissions or access requests that the app may ask for.
    8. -
    9. Launch the game from your app drawer or home screen.
    10. -
    11. Enjoy playing FR Legends unlimited money versi 0.3.1.1!
    12. -
    -
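
If you have a PC and a USB cable handy, you can also double-check that your phone meets the requirements listed earlier (Android 4.1 or newer and enough free storage) before installing. The sketch below is one hypothetical way to do this with Python and adb; it assumes adb is installed and USB debugging is enabled, and it only prints the storage figures for you to read rather than parsing them, since the output format differs between Android versions.

```python
# Minimal sketch: query a connected phone over adb and compare it against the
# requirements above (Android 4.1+ and free storage). Assumes adb is installed
# and USB debugging is enabled; storage output is printed, not parsed, because
# its format varies between Android versions.
import subprocess

MIN_SDK = 16  # Android 4.1 ("Jelly Bean") corresponds to API level 16

def adb_shell(*args: str) -> str:
    out = subprocess.run(["adb", "shell", *args],
                         capture_output=True, text=True, check=True)
    return out.stdout.strip()

if __name__ == "__main__":
    release = adb_shell("getprop", "ro.build.version.release")
    sdk = int(adb_shell("getprop", "ro.build.version.sdk"))
    print(f"Android {release} (API level {sdk})")
    print("Meets the Android 4.1+ requirement." if sdk >= MIN_SDK
          else "Android version is too old for this game.")

    # Free space on the data partition, where apps are installed.
    print(adb_shell("df", "/data"))
```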

    How to Play FR Legends Unlimited Money Versi 0.3.1.1?

    -

    Now that you have installed the modded version of the game, you are ready to play it and enjoy its features. Here are some tips and tricks to help you play the game and have more fun.

    -

    Controls

    -

    The game has three different control options that you can choose from: tilt, touch, or steering wheel. You can change the control option in the settings menu of the game. Here is a brief description of each option:

    -
      -
    • Tilt: You can tilt your device left or right to steer your car.
    • -
    • Touch: You can tap on the left or right side of the screen to steer your car.
    • -
    • Steering wheel: You can use a virtual steering wheel on the screen to steer your car.
    • -
    -

    In addition to steering, you also have other buttons on the screen that control your car's acceleration, braking, handbrake, clutch, and gear shifting. You can customize these buttons in the settings menu as well.

    -

    Drifting

    -

    The main feature of FR Legends is drifting, which is a driving technique where you slide your car sideways around corners while maintaining control and speed. Drifting is very fun and satisfying, but it also requires some skill and practice to master.

    -

    To drift in FR Legends, you need to use a combination of steering, acceleration, braking, handbrake, clutch, and gear shifting. Here are some basic steps to drift in FR Legends:

    -
      -
    1. Approach a corner at a high speed and tap on the handbrake button to initiate a drift.
    2. -
    3. Release the handbrake button and steer your car in the direction of the corner.
    4. -
    5. Use the throttle and brake buttons to control your car's speed and angle during the drift.
    6. -
    7. Use the clutch and gear buttons to shift gears as needed during the drift.
    8. -
    9. Steer your car out of the corner and continue drifting or driving normally.
    10. -
    -

    You can also use other techniques such as feint drift, power over drift, or clutch kick drift to enhance your drifting skills and style. You can learn more about these techniques in the game's tutorial mode or online guides.

    -

    Drifting is not only fun, but also rewarding. The game has a scoring system that evaluates your drifting performance based on factors such as speed, angle, line, and proximity. You can earn more points by drifting faster, longer, smoother, and closer to the track's edge or other cars. You can also earn bonus points by performing combos, such as linking multiple drifts together or drifting in tandem with another car.

    -

    You can use the points you earn to buy new cars, upgrade parts, or customize your car. You can also use them to unlock new tracks, modes, or features in the game.

    -

    Tandem Battles

    -

    One of the most exciting modes in FR Legends is tandem battles, where you can drift with other players or AI drivers in a 1v1 or 2v2 format. In this mode, you can either be the leader or the chaser. The leader's goal is to drift as well as possible and set a high score. The chaser's goal is to follow the leader's line and drift as close as possible to them without crashing.

    -

    Tandem battles are a great way to test your drifting skills and challenge yourself against other drivers. You can also learn from other drivers and improve your own style and technique. You can play tandem battles online with other players from around the world, or offline with AI drivers of different difficulty levels.

    -

    Tandem battles are also very fun and thrilling, as you can experience the adrenaline rush of drifting side by side with another car at high speed. You can also communicate with other drivers using the chat feature or the horn button. You can also show off your car and customization to other drivers and admire theirs.

    -

    Conclusion

    -

    FR Legends is one of the best drifting games on mobile devices, and it is even better when you download FR Legends unlimited money versi 0.3.1.1. This modded version of the game gives you unlimited money, all cars unlocked, and a new map to enjoy. You can download and install it easily on your device by following our guide above.

    -

    If you love drifting and want to have more fun and freedom in the game, you should definitely download FR Legends unlimited money versi 0.3.1.1. You will not regret it!

    -

    So what are you waiting for? Download FR Legends unlimited money versi 0.3.1.1 now and start drifting like a legend!

    -

    FAQs

    -

    Here are some frequently asked questions about FR Legends unlimited money versi 0.3.1.1 and their answers:

    -

    Is FR Legends unlimited money versi 0.3.1.1 safe to download and use?

    -

    Yes, FR Legends unlimited money versi 0.3.1.1 is safe to download and use, as long as you download it from a trusted source like ours. We have tested the modded APK file and found no viruses or malware in it. However, we recommend that you always scan any file you download from the internet with an antivirus software before installing it on your device.

    -

    Will FR Legends unlimited money versi 0.3.1.1 work on my device?

    -

    FR Legends unlimited money versi 0.3.1.1 will work on most Android devices that meet the minimum requirements for the game, which are Android 4.1 or higher operating system, 1 GB of RAM, 100 MB of free storage space, and a stable internet connection. However, some devices may not be compatible with the modded version of the game due to different hardware or software specifications.

    -

    Can I play FR Legends unlimited money versi 0.3.1.1 online with other players?

    -

  Yes, you can play FR Legends unlimited money versi 0.3.1.1 online with other players, as long as you have a stable internet connection and the same version of the game as them. However, you may encounter some issues or errors when playing online with the modded version of the game, such as lag, disconnects, or bans. This is because the modded version of the game may not be compatible with the official servers or the anti-cheat system of the game. Therefore, we advise you to play online with the modded version of the game at your own risk and discretion.

    -

    Can I update FR Legends unlimited money versi 0.3.1.1 to the latest version of the game?

    -

    No, you cannot update FR Legends unlimited money versi 0.3.1.1 to the latest version of the game, as this will overwrite the modded features and revert the game to its original state. If you want to update the game, you will need to uninstall the modded version of the game and install the official version of the game from the Google Play Store or other sources. However, you may lose your progress and data in the modded version of the game if you do so.

    -

    Can I use FR Legends unlimited money versi 0.3.1.1 with other mods or cheats?

    -

    No, you cannot use FR Legends unlimited money versi 0.3.1.1 with other mods or cheats, as this may cause conflicts or errors in the game and make it unstable or unplayable. FR Legends unlimited money versi 0.3.1.1 already has all the features and benefits that you need to enjoy the game, so there is no need to use other mods or cheats with it.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Diablo Immortal EU on Your Device.md b/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Diablo Immortal EU on Your Device.md deleted file mode 100644 index 06e7bf9f3ec59bb3195f50f1bbeb334874043982..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Download and Install Diablo Immortal EU on Your Device.md +++ /dev/null @@ -1,141 +0,0 @@ - -

    Diablo Immortal EU Download: Everything You Need to Know

    -

    Are you a fan of the Diablo franchise and looking for a new way to slay demons and collect loot? If so, you might be interested in Diablo Immortal, the latest entry in the series that is coming to both PC and mobile devices. In this article, we will tell you everything you need to know about Diablo Immortal EU download, including what the game is about, how to download it on your preferred platform, and what to expect from its gameplay.

    -

    What is Diablo Immortal?

    -

    Diablo Immortal is a mobile Massively Multiplayer Online Action RPG (MMOARPG) developed by Blizzard Entertainment in partnership with NetEase, coming exclusively to Android, iPhone, iPad, and PC. It is set between the events of Diablo II and Diablo III, and features an original story, new zones and dungeons, six playable classes, legendary loot, and more.

    -

    diablo immortal eu download


Download File: https://urlca.com/2uO98y



    -

    A new chapter in the Diablo saga

    -

    The story of Diablo Immortal takes place after the destruction of the Worldstone by the Archangel Tyrael at the end of Diablo II: Lord of Destruction. The Worldstone was a powerful artifact that shaped the world of Sanctuary and protected it from the forces of Hell. However, its fragments still contain great power, and are sought after by Diablo's minions who hope to use them to resurrect the Lord of Terror. As a hero of Sanctuary, you must join forces with other players to stop this evil plan and uncover the secrets of the Worldstone.

    -

    A massively multiplayer online action RPG

    -

    Diablo Immortal is designed to be a social and cooperative experience that lets you play with other players from all over the world. You can team up with your friends or join random groups to take on challenging dungeons, dynamic events, world bosses, and PvP modes. You can also visit social hubs like Westmarch, where you can interact with other players, access your stash and vendors, and customize your character.

    -

    A cross-platform experience

    -

    One of the most exciting features of Diablo Immortal is that it supports cross-progression and cross-play between PC and mobile devices. This means that you can play on your PC at home and then switch to your phone or tablet on the go without losing your progress or your friends. You can also play with other players regardless of their platform, as long as they are in the same region as you.

    -

    How to download Diablo Immortal in EU?

    -

    Diablo Immortal is set to launch on June 2, 2022 in most regions, including EU. However, depending on your platform of choice, there are some differences in how to download the game.

    -

    For PC users

    -

    If you want to play Diablo Immortal on your PC, here are some things you need to know:

    -

    System requirements

    -

    To be able to run Diablo Immortal on your PC, your minimum requirements must be:

    -
      -
    • Operating System: Windows 7, 8, 10, or 11 (64-bit)
    • -
    • Processor: Intel Core i3 or AMD FX-8100
    • -
    • Video: NVIDIA GeForce GTX 460, ATI Radeon HD 6850, or Intel HD Graphics 530
    • -
    • Memory: 4 GB RAM
    • -
    • Broadband internet connection
    • -
  • A minimum display resolution of 1280 x 720

    • -
    -

    If you want to enjoy the game at its best, your recommended requirements must be:

    -
      -
    • Operating System: Windows 10 or 11 (64-bit)
    • -
    • Processor: Intel Core i5 or AMD Ryzen 5
    • -
    • Video: NVIDIA GeForce GTX 1060, AMD Radeon RX 580, or Intel Iris Xe Graphics
    • -
    • Memory: 8 GB RAM
    • -
    • Broadband internet connection
    • -
    • A minimum display resolution of 1920 x 1080
    • -
    -

    Pre-load and launch times

    -

    To avoid long waiting times and possible server issues, you can pre-load Diablo Immortal on your PC before the official launch date. You can do this by downloading the Blizzard Battle.net app and logging in with your Blizzard account. Then, you can find Diablo Immortal in the Games tab and click on the Install button. The pre-load will start automatically and you will be able to see the progress and the size of the download.

    -

    -

    The pre-load will be available from May 30, 2022 at 10:00 AM CET. You will need about 20 GB of free space on your hard drive to install the game.

    -
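
Because the PC pre-load needs roughly 20 GB of free space, it can be worth checking the target drive before you start the download. The short Python sketch below uses only the standard library; the install path is an assumption you should point at wherever the Battle.net app installs games on your machine.

```python
# Minimal sketch: confirm the install drive has roughly 20 GB free before
# pre-loading. INSTALL_PATH is an assumption -- point it at the drive or
# folder where the Battle.net app installs your games.
import shutil

INSTALL_PATH = "C:\\"   # hypothetical location; use e.g. "/" on macOS/Linux
REQUIRED_GB = 20        # approximate size quoted for the PC pre-load

free_gb = shutil.disk_usage(INSTALL_PATH).free / (1024 ** 3)
print(f"Free space on {INSTALL_PATH}: {free_gb:.1f} GB")
print("Enough room to pre-load." if free_gb >= REQUIRED_GB
      else "Free up some space before pre-loading.")
```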

    The launch time for Diablo Immortal in EU will be on June 2, 2022 at 12:00 PM CET. You will be able to access the game from the Blizzard Battle.net app by clicking on the Play button. You will also need to create or link your NetEase account to play the game.

    -

    Cross-progression with mobile devices

    -

    If you want to play Diablo Immortal on both PC and mobile devices, you can do so by using the same Blizzard and NetEase accounts on both platforms. This way, you will be able to access your characters, progress, and items on any device you choose. You will also be able to play with other players who are using different platforms, as long as they are in the same region as you.

    -

    For mobile users

    -

    If you prefer to play Diablo Immortal on your mobile device, here are some things you need to know:

    -

    System requirements

    -

    To be able to run Diablo Immortal on your mobile device, your minimum requirements must be:

    -
      -
    • Operating System: Android 5.0 or iOS 12.0 or later
    • -
    • Processor: Snapdragon 625 or A9 or equivalent
    • -
    • Memory: 2 GB RAM
    • -
    • Broadband internet connection
    • -
    • A minimum display resolution of 800 x 480
    • -
    -

    If you want to enjoy the game at its best, your recommended requirements must be:

    -
      -
    • Operating System: Android 8.0 or iOS 14.0 or later
    • -
    • Processor: Snapdragon 845 or A12 or equivalent
    • -
    • Memory: 4 GB RAM
    • -
    • Broadband internet connection
    • -
    • A minimum display resolution of 1280 x 720
    • -
    -

    Pre-load and launch times

    -

    To avoid long waiting times and possible server issues, you can pre-load Diablo Immortal on your mobile device before the official launch date. You can do this by downloading the game from the Google Play Store or the App Store and logging in with your NetEase account. Then, you can find Diablo Immortal in your app library and tap on the Install button. The pre-load will start automatically and you will be able to see the progress and the size of the download.

    -

    The pre-load will be available from May 30, 2022 at 10:00 AM CET. You will need about 3 GB of free space on your device to install the game.

    -

    The launch time for Diablo Immortal in EU will be on June 2, 2022 at 12:00 PM CET. You will be able to access the game from your app library by tapping on the Play button. You will also need to create or link your Blizzard account to play the game.

    -

    Cross-progression with PC version

    -

    If you want to play Diablo Immortal on both mobile devices and PC, you can do so by using the same NetEase and Blizzard accounts on both platforms. This way, you will be able to access your characters, progress, and items on any device you choose. You will also be able to play with other players who are using different platforms, as long as they are in the same region as you.

    -

    What to expect from Diablo Immortal gameplay?

    -

    Diablo Immortal is a game that aims to deliver an authentic Diablo experience on both PC and mobile devices. It features a fast-paced and fluid combat system, a rich and immersive world, a deep and varied character progression, and a rewarding and satisfying loot system. Here are some of the highlights of Diablo Immortal gameplay:

    -

    Six iconic classes to choose from

    -

    Diablo Immortal offers six playable classes that fans of the series will recognize and love: Barbarian, Crusader, Demon Hunter, Monk, Necromancer, and Wizard. Each class has its own unique skills, abilities, strengths, and weaknesses. You can customize your character with different gear, gems, runes, skill modifiers, and legendary items that enhance your performance and change your appearance.

    -

    New zones and dungeons to explore

    -

    Diablo Immortal takes place in a vast and diverse world that spans from the frozen lands of Mount Arreat to the jungles of Kehjistan. You can explore new zones and dungeons that are filled with secrets, puzzles, traps, enemies, and bosses. You can also participate in dynamic events that change the world around you and offer different rewards depending on your choices.

    -

    Legendary loot and skill modifiers to collect

    -

    Diablo Immortal is a game that revolves around loot. You can find hundreds of different items that vary in rarity, quality, stats, and effects. You can also collect skill modifiers that allow you to customize your skills with different bonuses and enhancements. Some of the most coveted items are legendary items that have unique properties and abilities that can change the way you play.


    Conclusion


    Diablo Immortal is a game that promises to bring a new chapter in the Diablo saga to both PC and mobile devices. It is a massively multiplayer online action RPG that features an original story, six playable classes, new zones and dungeons, legendary loot, cross-progression and cross-play, and more. If you are interested in Diablo Immortal EU download, you can pre-load the game from May 30, 2022 at 10:00 AM CET and play it from June 2, 2022 at 12:00 PM CET.


    FAQs

    • Is Diablo Immortal free to play?

      Yes, Diablo Immortal is free to play on both PC and mobile devices. However, it may offer optional in-game purchases such as cosmetic items or convenience features.

    • Do I need an internet connection to play Diablo Immortal?

      Yes, Diablo Immortal requires a constant internet connection to play, as it is an online game.

    • Can I play Diablo Immortal with the same account in different regions?

      No, Diablo Immortal does not support cross-region play. You can only play with other players who are in the same region as you. You choose your region when you create your NetEase account.

    • How can I contact customer support for Diablo Immortal?

      If you encounter any issues or have any questions about Diablo Immortal, you can contact customer support by visiting the official website of the game and clicking on the Support button. You can also access the Support section from the game menu.

    • What are the minimum and recommended requirements for Diablo Immortal?

      The minimum and recommended requirements for Diablo Immortal vary depending on your platform of choice. You can find them in the article above or on the official website of the game.

    • How can I pre-load Diablo Immortal?

      You can pre-load Diablo Immortal from May 30, 2022 at 10:00 AM CET by downloading the game from the Google Play Store, the App Store, or the Blizzard Battle.net app. You will need about 3 GB of free space on your mobile device or 20 GB of free space on your PC to install the game.

    401be4b1e0
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/My Talking Tom Cat 2 MOD APK How to Get Free Coins and Unlock All Features.md b/spaces/congsaPfin/Manga-OCR/logs/My Talking Tom Cat 2 MOD APK How to Get Free Coins and Unlock All Features.md deleted file mode 100644 index be2690de2dae710c2742de70c4c532be8d2455bd..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/My Talking Tom Cat 2 MOD APK How to Get Free Coins and Unlock All Features.md +++ /dev/null @@ -1,125 +0,0 @@ - -

    My Talking Tom Cat 2 Mod Apk: Everything You Need to Know


    Do you love playing with cute virtual pets? Do you want to have more fun and freedom in your favorite game? If you answered yes to these questions, then you might be interested in learning more about My Talking Tom Cat 2 mod apk. In this article, you will discover everything you need to know about this amazing mod apk, including what it is, how to download and install it, what features it has, whether it is safe and legal, and what tips and tricks you can use to make the most out of it. So, without further ado, let's get started!


    What is My Talking Tom Cat 2?


    My Talking Tom Cat 2 is a popular casual game developed by Outfit7 Limited, a company that specializes in creating virtual pet games. The game was released in 2018 and is available on Android, iOS, and Windows devices. The game is a sequel to the original My Talking Tom Cat, which was launched in 2013 and has over 500 million downloads worldwide.


    my talking tom cat 2 mod apk


    Download ->>> https://urlca.com/2uOgjl




    In My Talking Tom Cat 2, you get to adopt and take care of a cute and funny cat named Tom, who can talk and repeat what you say in a hilarious voice. You can also play with him, feed him, dress him up, decorate his house, and interact with his friends. The game is suitable for all ages and offers hours of entertainment and laughter.


    What is My Talking Tom Cat 2 Mod Apk?


    A mod apk is a modified version of an original application that has been altered by third-party developers to add or remove certain features. A mod apk can provide advantages that are not available in the official version of the app, such as unlimited resources, unlocked items, premium features, etc. However, a mod apk can also pose some risks and challenges, such as compatibility issues, security threats, legal consequences, etc.


    My Talking Tom Cat 2 mod apk is a mod apk of the original game that gives you access to unlimited coins, diamonds, food, potions, outfits, toys, and other goodies that can enhance your gaming experience. With the mod apk, you can enjoy the game without any limitations or restrictions. You can also explore new features and options that are not available in the official version of the game.


    How to Download and Install My Talking Tom Cat 2 Mod Apk?


    If you want to try out My Talking Tom Cat 2 mod apk on your Android device, you will need to follow these simple steps:


    Step 1: Enable Unknown Sources on Your Device


    Before you can install any mod apk on your device, you will need to enable the option that allows you to install apps from unknown sources. This option is usually disabled by default for security reasons. To enable it, go to your device's settings > security > unknown sources and toggle it on.


    Step 2: Download the Mod Apk File from a Trusted Source


    Next, you will need to download the mod apk file from a reliable and trustworthy source. There are many websites that offer mod apks for various games and apps, but not all of them are safe and legit. Some of them may contain malware, viruses, or other harmful software that can damage your device or steal your personal information. Therefore, you should always do some research before downloading any mod apk file from the internet.


    One of the websites that we recommend for downloading My Talking Tom Cat 2 mod apk is ModApkStore. This website provides high-quality mod apks for various games and apps that are tested and verified by their team. You can also find user reviews, ratings, screenshots, and video tutorials for each mod apk on their website.


    To download My Talking Tom Cat 2 mod apk from ModApkStore, go to their website and search for the game in the search bar. Then click on the download button and wait for the file to be downloaded on your device.
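
    Whichever site you use, it is worth confirming that the file you received matches the file the site says it published. Assuming the download page lists a SHA-256 checksum (not every site does), a short, illustrative Python sketch run on your computer can compare it against the downloaded file before you copy the APK to your phone; the file name and checksum below are placeholders.

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        """Compute the SHA-256 hex digest of a file, reading it in 1 MiB chunks."""
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    apk_path = "my-talking-tom-cat-2-mod.apk"              # placeholder file name
    published = "<checksum published by the download page>"  # placeholder value
    actual = sha256_of(apk_path)
    print("checksum OK" if actual == published else f"checksum mismatch: {actual}")

    If the two values do not match, treat the file as untrustworthy and download it again from the source.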


    Step 3: Locate and Install the Mod Apk File


    Once you have downloaded the mod apk file on your device, you will need to locate it and install it. You can use a file manager app to find the file in your device's storage. Alternatively, you can go to your device's downloads folder and look for the file there.


    After you have found the file, tap on it and follow the instructions on the screen to install it. You may need to grant some permissions to the app during the installation process.
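
    Before you tap Install, one cheap sanity check (again, just an illustrative sketch you would run on a computer, not something the game or the store requires) is to confirm the file is at least a well-formed APK. An APK is a ZIP archive that always contains an AndroidManifest.xml entry, so Python's standard zipfile module can check both; the file name is a placeholder.

    import zipfile

    def looks_like_apk(path):
        """Rough check: the file opens as a ZIP, no member is corrupt, and AndroidManifest.xml is present."""
        if not zipfile.is_zipfile(path):
            return False
        with zipfile.ZipFile(path) as zf:
            return zf.testzip() is None and "AndroidManifest.xml" in zf.namelist()

    print(looks_like_apk("my-talking-tom-cat-2-mod.apk"))   # placeholder file name

    This does not prove the APK is safe, only that it is not truncated or corrupted; the antivirus scan mentioned later in this article is still worth doing.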


    Step 4: Launch the Game and Enjoy


    After you have successfully installed the mod apk file on your device, you can launch the game and enjoy all the benefits that it offers. You will see that you have unlimited coins, diamonds, food, potions, outfits, toys, and other goodies in your game account. You can also explore new features and options that are not available in the official version of the game.


    What Features Does My Talking Tom Cat 2 Mod Apk Have?


    My Talking Tom Cat 2 mod apk has many features that make it more fun and exciting than the original game. Here are some of the features that you can enjoy with the mod apk:

    • Unlimited Coins: You can get unlimited coins in the game, which you can use to buy food, potions, outfits, toys, and other items for Tom and his friends. You can also use coins to upgrade Tom's house and make it more comfortable and stylish.
    • Unlimited Diamonds: You can get unlimited diamonds in the game, which are the premium currency that you can use to unlock exclusive items and features. You can also use diamonds to access special mini-games and events that offer more rewards and fun.
    • Unlimited Food: You can get unlimited food in the game, which you can use to feed Tom and his friends whenever they are hungry. You can choose from a variety of food items, such as fruits, vegetables, pizza, cake, ice cream, etc. Feeding Tom and his friends will make them happy and healthy.
    • Unlimited Potions: You can get unlimited potions in the game, which you can use to boost Tom's mood and energy. You can choose from different types of potions, such as happy potion, sleepy potion, hungry potion, etc. Using potions will make Tom more playful and responsive.
    • Unlocked Outfits: You can get all the outfits unlocked in the game, which you can use to dress up Tom and his friends in different styles and costumes. You can choose from a variety of outfits, such as pirate, cowboy, superhero, ninja, etc. Dressing up Tom and his friends will make them look more adorable and unique.
    • Unlocked Toys: You can get all the toys unlocked in the game, which you can use to play with Tom and his friends and make them laugh. You can choose from a variety of toys, such as balloons, balls, bubbles, fireworks, etc. Playing with Tom and his friends will make them more active and cheerful.

    Is My Talking Tom Cat 2 Mod Apk Safe and Legal?


    While My Talking Tom Cat 2 mod apk may sound tempting and appealing, you should also be aware of the possible risks and legal issues that come with using it. Here are some of the things that you should consider before downloading and installing the mod apk:

    • Malware and Viruses: Some mod apks may contain malicious software that can harm your device or steal your personal information. Such software may also interfere with the performance of your device or cause other problems. Therefore, you should always scan any mod apk file with an antivirus program before installing it.
    • Bans and Lawsuits: Some mod apks may violate the terms of service or the intellectual property rights of the original app developers. This may result in your account being banned or suspended from the game or the app store. You may also face legal action from the developers or other parties if they find out that you are using a mod apk.
    • Compatibility and Updates: Some mod apks may not be compatible with your device or the latest version of the original app. This may cause crashes, glitches, errors, or other issues that may affect your gaming experience. You may also miss out on new features and updates that are released by the official app developers.
    • Ethics and Fairness: Some mod apks may give you an unfair advantage over other players who are playing the game legitimately. This may ruin the balance and fun of the game for everyone. You may also feel less satisfied or accomplished by playing the game with a mod apk than playing it with your own skills and efforts.

    Therefore, you should always be careful and responsible when using any mod apk. You should also respect the rights and efforts of the original app developers who created the game for your enjoyment.


    What are Some Tips and Tricks for Playing My Talking Tom Cat 2?


    If you want to have more fun and success in playing My Talking Tom Cat 2, whether with or without a mod apk, here are some tips and tricks that you can use:


    Tip 1: Feed Tom Regularly


    One of the most important things that you need to do in the game is to feed Tom regularly. Feeding Tom will keep him happy and healthy, as well as increase his level and unlock new items and features. You should feed Tom at least three times a day: breakfast, lunch, and dinner. You should also give him snacks in between meals if he gets hungry.

    Tip 2: Play Mini-Games to Earn Coins


    Another way to earn coins in the game is to play mini-games with Tom and his friends. There are many mini-games that you can choose from, such as Flappy Tom, Space Trails, Cake Tower, etc. Each mini-game has different rules and objectives, but they are all fun and easy to play. You can earn coins by completing levels, achieving high scores, or collecting bonuses. You can also use coins to unlock new mini-games and upgrade them.


    Tip 3: Customize Tom's Appearance and House


    One of the most enjoyable aspects of the game is to customize Tom's appearance and house. You can change Tom's fur color, eye color, clothes, accessories, hats, glasses, etc. You can also decorate his house with different furniture, wallpapers, carpets, lamps, etc. You can buy these items with coins or diamonds, or unlock them by leveling up or completing tasks. You can also mix and match different items to create your own unique style for Tom and his house.


    Tip 4: Interact with Tom and His Friends


    One of the most hilarious features of the game is to interact with Tom and his friends. You can talk to Tom and he will repeat what you say in a funny voice. You can also touch, poke, tickle, slap, or pet him and see his reactions. You can also play with his friends, such as Angela, Hank, Ginger, Ben, etc. You can visit their houses, chat with them, prank them, or give them gifts. You can also take photos or videos of your interactions and share them with your friends.


    Tip 5: Use Potions to Boost Tom's Mood


    One of the most useful items in the game is potions. Potions are special drinks that can boost Tom's mood and energy. There are different types of potions that have different effects, such as happy potion, sleepy potion, hungry potion, etc. You can use potions to make Tom more playful and responsive, or to help him recover from fatigue or illness. You can buy potions with coins or diamonds, or get them for free by watching ads or completing tasks.


    Conclusion


    My Talking Tom Cat 2 is a fun and addictive game that lets you adopt and take care of a cute and funny cat named Tom. You can also play with him, feed him, dress him up, decorate his house, and interact with his friends. However, if you want to have more fun and freedom in the game, you may want to try My Talking Tom Cat 2 mod apk. This mod apk gives you access to unlimited coins, diamonds, food, potions, outfits, toys, and other goodies that can enhance your gaming experience. You can also explore new features and options that are not available in the official version of the game. However, you should also be aware of the possible risks and legal issues that come with using a mod apk, such as malware, viruses, bans, lawsuits, etc. Therefore, you should always be careful and responsible when using any mod apk. You should also respect the rights and efforts of the original app developers who created the game for your enjoyment.


    If you are interested in trying out My Talking Tom Cat 2 mod apk, you can follow the steps that we provided in this article to download and install it on your Android device. You can also use the tips and tricks that we shared to have more fun and success in playing the game. We hope that you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!


    FAQs


    Here are some of the frequently asked questions that readers might have about My Talking Tom Cat 2 mod apk:

    1. Q: Is My Talking Tom Cat 2 mod apk free to download and use?
       A: Yes, My Talking Tom Cat 2 mod apk is free to download and use. However, you may need to watch some ads or complete some tasks to get some items or features in the game.
    2. Q: Can I play My Talking Tom Cat 2 mod apk offline?
       A: Yes, you can play My Talking Tom Cat 2 mod apk offline. However, you may need an internet connection to access some features or updates in the game.
    3. Q: Can I play My Talking Tom Cat 2 mod apk with my friends?
       A: Yes, you can play My Talking Tom Cat 2 mod apk with your friends. You can visit their houses, chat with them, prank them, or give them gifts. You can also compete with them in mini-games and leaderboards.
    4. Q: How can I update My Talking Tom Cat 2 mod apk?
       A: You can update My Talking Tom Cat 2 mod apk by downloading and installing the latest version of the mod apk file from the same source that you got it from. You should also back up your game data before updating to avoid losing your progress (a minimal backup sketch follows below).
    5. Q: How can I uninstall My Talking Tom Cat 2 mod apk?
       A: You can uninstall My Talking Tom Cat 2 mod apk by going to your device's settings > apps > My Talking Tom Cat 2 > uninstall. You should also delete the mod apk file from your device's storage to free up some space.
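
    For the backup mentioned in question 4, here is a minimal, illustrative Python sketch. It simply copies a folder to a timestamped backup folder; the paths are placeholders, and it assumes you already have a copy of the game data somewhere you can reach from a computer.

    import shutil
    from datetime import datetime
    from pathlib import Path

    data_dir = Path("TalkingTom2_data")                                        # placeholder source folder
    backup_dir = Path(f"TalkingTom2_backup_{datetime.now():%Y%m%d_%H%M%S}")   # timestamped destination

    shutil.copytree(data_dir, backup_dir)   # recursive copy; raises an error if data_dir does not exist
    print(f"Backed up {data_dir} -> {backup_dir}")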

    197e85843d
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Bosch Esi Tronic Codice Abilitazione Crack Serial EXCLUSIVE.md b/spaces/contluForse/HuggingGPT/assets/Bosch Esi Tronic Codice Abilitazione Crack Serial EXCLUSIVE.md deleted file mode 100644 index b6c4cb444a159bc7251a044a8518d7b69578a77c..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Bosch Esi Tronic Codice Abilitazione Crack Serial EXCLUSIVE.md +++ /dev/null @@ -1,6 +0,0 @@ -

    bosch esi tronic codice abilitazione crack serial


    Downloadhttps://ssurll.com/2uzw6Y



    Installshield 2011 professional crack serial number free download ... Bosch esi tronic codice abilitazione crack keygen. In "Uncategorized". 4d29de3e1b

    diff --git a/spaces/contluForse/HuggingGPT/assets/Driver Wifly City Idu 2850ug 16g.ful.md b/spaces/contluForse/HuggingGPT/assets/Driver Wifly City Idu 2850ug 16g.ful.md deleted file mode 100644 index 0365eef6c7f1dd6d06852fe355e3bb3ca7c62a5c..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Driver Wifly City Idu 2850ug 16g.ful.md +++ /dev/null @@ -1,6 +0,0 @@ -

    driver wifly city idu 2850ug 16g.ful


    Downloadhttps://ssurll.com/2uzvSH



    ... driver windows 10 ati radeon hd 5670 wifly city idu 2850ug 8g driver download xp driver grafico intel 4000 vaken.se vilka driver hemsidan drive leclerc tignieu ... 4d29de3e1b

    diff --git a/spaces/contluForse/HuggingGPT/assets/FB Alpha 2016 Reference - Complete FBA v0.2.97.39 ROMs Free Download Borrow and Streaming Internet Archive[1].md b/spaces/contluForse/HuggingGPT/assets/FB Alpha 2016 Reference - Complete FBA v0.2.97.39 ROMs Free Download Borrow and Streaming Internet Archive[1].md deleted file mode 100644 index e878c09efca3d052a62dc7a840137287887f2dd1..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/FB Alpha 2016 Reference - Complete FBA v0.2.97.39 ROMs Free Download Borrow and Streaming Internet Archive[1].md +++ /dev/null @@ -1,6 +0,0 @@ -

    Fixes an issue where live migration from earlier Oracle VM Server versions to 3.3.2 leaves a Linux PVHVM guest in a hung state. The fix is delivered with a newer Xen package (xen-4.3.0-55.el6.22.18 or later). (19517860)

    Fba Next Pack Roms


    DOWNLOAD ✺✺✺ https://ssurll.com/2uzvB8




    1 H Berlin Orfjnn OrfjnnHe OrgnuJl Orguiilie
    He Jl Was Va as Finally Put to Death DeathS DeathOet
    0 06t
    r
    6t S > lpefatntoHr 04rtb1fg Oct m A revolutionar revolutionarilemphstratlon vIut1ontr vIut1ontrIemThtratIon WtIonarS WtIonarSemP11l1tratton >
    ilemphstratlon IemThtratIon was 1flDd made Monday night wptoo nightl 1 t t1y
    1y l > y oo 4 00 untn university r8Uy students th the oojaaten OMUIofthln oaaSonhiIk oojaatenin
    hln hiIk in the anniversary of o the Czar Czars preci1n1aUon procamatlon prec preclumation
    lumation establishing the De DeFiery Dui DuiFkry Douma DoumaFiery
    Fiery ape speeches heti were nia made in 1 which whichhe whichle
    he le e speak speakers rs declared war to the knife J biKeiJnt knifeigulnst lltCe lltCegtJn
    igulnst gtJn iJnt t Czarism The gaOnrins dlpezsdiiIng dispersed dispersediTiglng ed ediTlglng
    iTiglng the HMa Marseillaise lIlise In anticipation anticipationof anticIpRtIOt
    ° of t trouble in various parts of the empire empirea
    a 1 lelebraUon le ratJGn of the da day troops troopsZCEfltrated were1jJctntrated were werevaLceutrated
    vaLceutrated ZCEfltrated ut l all danger points to be Deto ber ber
    to r pared to put down own disturbances There Thered
    d been n threats that revolutionfstt revolullonIsUIId revoIuLkniIsd revolutionfsttrr
    rr Id > Jd d att attempt mlt demonstrations ainotgovernment against againstfc
    fc government on the occasion of thJnNvorsar thlatvrsary this thisjiiuversary
    jiiuversary and the authorities did net nett nittend netend
    t end to take an any chance cba cbaT1e chancesT1e chanceTiie
    T1e garrisons at all the larger cities clUese cltlecre citiesrcre
    rcre cre e Telnfor reinforced and the orders to therops theTJops the therjcps
    rjcps instructed them to fire nre in case ef att of4Ub1 efrouble
    rouble t 0il II
    For fortyeight hours hftwe hftweXocuted elte h s sxcuted TWS TWSxoouted
    xoouted Duda it Is claIm cJaJmc4 daJmeSk was 1rUtflld waseted wasSVfc wasSVfcitned
    itned tflld to horrible tortures Bjr T1M the po poIe gotrip ao aoStrips
    Ie kin were torn f from rpm kin ai1 eI arad arms armspd
    I pd d 1 legs 8 and redhot trans werej were aypKad aypKadin
    in j n a L spirit of 0 f wanton tl11ton cruelty Wadafttg Wadafttgvas
    va vas 1 Inserted in his wound and tllesaOu then thent
    t ot ou tire to add to his I4 tortu tortur torture 1ons 1onsttrk ARg ARgtrks
    r ttrk trks uks WCT were c also driven underbIII underbIIIils under his fle1 fle11ib ftjsjef ftjsjefiils
    iils ils and then hot sealing waxor wax wan wanptud trait traitiHuoU
    iHuoU IntJ ml b o these c holes and IUJdLndfl ud i iArt left tSJ tSJijidfn
    or Lndfltl ijidfn + f 1 1Aft
    Aft tl r tbe police oflce had exhausted everTja every everya
    ja > a i of at torture t their 11 Ingenuity could de dei doI de devis
    vis i I uJit lud > was u A8 executedDEFENDS eXecutedDEPENDS executed executedDEFENDS
    DEFENDS CANNED MEAT MEATIhklsh IdEAThhl MEATUlitiMil
    Ihklsh hhl b Scientist Waras urls the Public Publict luhHcainst Pulilietgainst
    tgainst ainst t Greater Danger In Koort Koortijinun Pool1Ljl PooilOCt
    Ljl ijinun lh Oct 2DT Dr Sir Frederick FrederickTiv erIdtT Frederickddr055iflg
    Tiv T s Addressing addret the lb National Health HealthSo Heakhttj
    So ttj t i ty I y today ridiculed t 1ed the recent out omr outugaint
    < r against canned meats m ats while he said saidirtuay kL kLirtuaty midrtuay
    > irtuay everybody is indifferent to the thei
    i r greater gT er dang danger r in food and milk laden ladentu 1Meuti1 ladeniti1
    iti1 tu deathdealing germs germsThe germsTh germsTh
    The Th nublte said be > does not seem to toT
    T r Iind swallowing naU Vtinc anything it can not see seeilik eeell1k see111k
    ilik an nd l meat m t as now tnsanitartty sup suprlkd RIPT1kd supriled
    riled swarm with bacilli of typhoid and andthcr aa4thr andthr
    thr diseases u but as they tl1e are invisible invisibleTnf S invisibleI 186bIe 186bIet
    Tnf t 1f kf public ublic dees dc s not care Canned Cannedvrhieh meats meatshih mestshh
    vrhieh hih contain floor sweepings 8Wee end ether etherDlth otbernltl otherilth
    Dlth are hnnles ht > nnIesI he declared because becausehey becau8eT
    T hey hc are cooked u t ked and probably 1 were werealthy werealLt werea1thr
    althy alLt before cooked cookedPeople cookedPeopJ cookedPeople
    People he added are straining straIn1n at a asuat auat auat
    suat uat and nd swallowing a cameL The naent pres presnt prennt
    nt treatment of meat before it is eaten eatenis eatelfis
    is i not in the least more sanitary than It ttas Itas Its
    as s in the days of the neolithic UlJ c cave cavevoiiers caelJer caveVON
    voiiers voiiersVON lJer lJerVON
    VON BUELOW DUELOWUNFIT UNFIT FOR DUTY DUTYJerniauys DUTYfcrlWU1n DUTYfermnuya
    Jerniauys Imperial Chancellor Con ContinBos Contbuos Continaoa
    tinBos In Poor Health HealthH HealthrrIt HealthHrlln
    H rrIt Hrlln rlln 1 Oct 21 2ILittle Uttle reliance hi placed placedTon placedpcn placedpcn
    pcn tht th > > optimistic reports as to the piTy phykal piTycal
    cal 11 condition of the imperial chancellor chancellorIvti cban cbanI
    I Ivti u since S i his breakdown Jo n the Relchs RelchsJ Reichi1lr
    J ijf g nearly n rly six ix months ago Prince Prlqee von vonJuljv wnJ vontui
    Juljv J tui udJT ha been b residing at Nordeney Nordeneyiia >
    1J his hi friends and medical advisers advisersjiikft ah ahlik advisersIike
    jiikft lik Iike adwii duh that be Is not in ma a fit state stateio tat
    io 1 cone pc with the dotes dtIof of his o oftIee nce as asi a
    1 yr yrThc yrtihere > ri
    Thc ihcre is u great Hat arrear of work in all sitS a1 a1II 4
    II S i partnjents L rtmenl S as lS a 1 consequence of the set setiutiit setof
    1 iutiit ot many man qu qUtStJona 1UMfl5 < iitkms having baTt haYI11 been beeniMnxd beeni beenfrrkd
    iMnxd i fnJ mull unt the th chancellor s probable probableturn probablet probabletuin
    turn t rn HIlt it I II now no seen tftl that tba this will willlux will1t
    lux 1t V to I IK t t dealt with by tbe secretary secretarylot
    lot I 01 r foreign f roh1t affairs i and other ministers ministersAlthough mu1ersthugh i i1thugh
    Although thugh the prince pri will be in his place placerix I
    rix I uKuil in tl tl1ot Reichstag during the com comi COIDI COIDIlig cornHg
    i lig Hg is session 1 sluu be is tOt expected e ted to tajteIimh taM taMmu
    Iimh 11 mu Udl h i part > art in debate fba te his physicians having havingM havingfld I
    I M > nvd fld him against doing so soHUNGARIANS soiUTNGALIANS
    I
    HUNGARIANS REJOICE SEJOIOEThe REJOICEThe
    The Return of ItalcocxyM Remain RemainCreates lteinn1nCrente
    Creates Patriotic BfTerveaeenee BfTerveaeeneeUudaPesth BtrercceneeUudnPesth lSffervc3cenecUudaPesth
    UudaPesth Oct O t 2 9TOO > Tho highwater highwaterMark ht highwateruik water I Ijrk
    Mark jrk jf f Hungarian patriotism in recent recentcars recentar I Ilan
    > cars ar was reached today t da when the bones bonesi i
    I i 1 th < hen h n > Prince l e Bakocsy were wereI wererought
    I rough t horn Th passage of the th funeral funeraltraiii funer funeraltraIi d dtraiil
    traiii wus jiiarkcd by b ovation after afterI ova ovatic
    1 tic it Tbe urotession pn IOI tUI On in the tb street of ofiuOaPeatl ofIudaPesth I IJ
    iuOaPeatl J took ptecv pIa plac in brilliant brilliantweather brlltlaDtfather brilliantvejther
    weather and nd was nUi wita witnessed ssed by byPe iOMOC iOMOCjvople OOIIIOlpe
    jvople Pe ° Pk many of whom bona were gorgeous o nostunK nos aMt costtnws
    tunK tunKTh t ttnws ttnwsTl ln1t ln1tTh
    Th Tl cntir Hungarian Huna 1an nobility nobUlt wore worePrtfont Wtren weteptsnt
    Prtfont n nt anti ani i t6 < X mt nltmber oihers of o the govern governi nvern nvernnt em emlnt
    i lnt > nt mart hni h in the procession pr Thin Thintrd I1a111tnd Thist
    trd t the ma 1IJagnltkent niticent Illumination at night nightvt 1 nightvtrv
    w > vt vtrv > rc view VkWd d by b Emperei Jo mPErot Fnind Iran Jo JOfIpeJs JOfIpeJsfmi Jospeafltlt peja pejairimify
    irimify irimifyThe fmi fltlt fltltTte
    The he omplt t u uecei c s of tin tb day tJa in inKuduPest ini in1udPesili
    KuduPest i t1t Pe8th h was as marred by 1J the strttcirg strttcirgtrain 1It11kJr 1It11kJrfan atrtkIrgaln
    train aln and underground road rmploy f111 f1111olicern ernpIoyolkernei s s1ollcemc
    1ollcemc 1 lirtm 1 Irtrntu Tt T > u and In some In Initaiicw Int1I inpscntr
    itaiicw t1I ptcscnsrrs Pt snlft wr w r ri rtwd tmo and andnurn cars carsfrturneti esrorturnecl
    frturneti frturnetiKaiser nurn rturneclha d dlialter
    Kaiser ha Ier Honor King Brother UrothcrHrln BrotherHrIla BrotherIrti
    Hrln Oct aEmpror Empror William 1 ilia has hasiaiod hast
    t iaiod 2 llK l Duke of t tI1 > nraught j ught a AcM neidmarshal fteldnUIiob1 AcMniuba1
    marshal ol the Prussian irmy Jrm an a honor honoritiat Onorl honorrit
    itiat rit l is i rarely ntrcl ccnftrr cCinft nd d on foreigners forebjnersi fo foLmpeOt foreignersLmpcro
    i LmpeOt mperor Francis Frl is Joseph being ng the th only onlyhr onlyher nly nlyht
    hr cue ue holding similar Mmflarrank Mmflarrankherald rank rankHerald rankUcraht
    Herald Want Ads AUsiii Adsii ds dsill
    iii ii bE r reel Cv wd Vt d < 1 at it t McNuUys JoS tys ntwsa ntwsaV ne ncsatnd satnd
    1 V t and P tU t jw w iUiU aa prcawtly prcawtlyaed pnm tl f fI Iied
    I aed oj to the mam main office ftk quan quantify qiallI quantIty ¬
    tify tI at the best best and their chief chiofln interest interestie r rIi
    ie 1 the fact that her proeaneo pro enco In tho tht city citybrings cltybrings 1
    brings a flood of curious sightseers who whoare whoare j jre
    are re good spenders and who leave large largesums llU llUstim6 larssums
    sums of money mon behind them which whichbelpod cvh1clthelped
    helped Ipod to keep up the town Therefore Thoroforetlkey Thereforethey rhOf toro torotJIe I
    they tJIe all declare the stories gtori s that the die distinguished me met dietinguished ¬
    t tinguished guiabed priestess of the t1 Christian set Science cl clice ¬ I Ienee
    ence ice cult is at the brink of death and andhas i
    has 113 not appeared personally in public far faryears ferYenl farlents
    years Is a bare R e fabrication And it may maybe mayb ma3be
    be b but it lia lNl8 not yet b betn < fcn so o proven provenalvin proRallin provenaIin
    allin alvin A Frye Mrs Irs Eddys ddrs footmen fftotManaeeretary fGUMneratary footmensecretary
    secretary and the man saW Id to be the tlrtr therisI
    r rtI l power behind the throno of the em empire empire empire ¬
    I pire of Christian Science < e was not to toseen be beseen beseen
    seen today but gave out a statement in inwhich I lusiiIck i iwhich
    which which he repudiated the charge that he heis 1 1If
    is using ualD Mrs Eddy ddY as a toot denied that thatshe thatshe I Isfe
    she is not in tho best of health and andmind andJltlud andmind
    mind branded as a lie the statements statementsthat st SahM 5t 5tthat Ui i ithat
    that Mr Mrs Parmeifa J Leonard has been beenimpersonating beenImpersonating 1 1imperaouUnc
    impersonating Mrs Eddy EM in the dally car carriage carrIaSe siri ¬
    riage i rides rlc1 and that a a Boston cancer cancerdoctor cancerd cancerdoctor
    doctor d etM has been paying weekly week visits to totl tothe tothe
    tl the e Eddy home to attend the aged lady ladyScientist ladyScIentIst I ISclentlstK
    Scientist Not ot Greatly Disturbed DisturbedNew DbtnrbedNow DleturbedNow
    Now York Oct 29 39CJarlatlan Cbrhulan Scientist Scientistin SchnuaIn
    in this city today declared they tbe did not notexpect notexltOCt notexpect
    expect Mrs re Mary Baker G Bddy Kdd to live liveforever Ueforever liveforever
    forever that they were not disturbed disturbedby d ditUrbedby turbed turbedDr
    by Ole report that she was all but deid deidf dedfmm deidfrom
    f from om cancer acer and that she was wa being Im Impersonated 152personated ¬
    personated sonatecl by b i rs Parmotlit J Leonard Leonarda
    a Brooklyn Scientist ScientistTbe ScientiatThe I IThe
    The Interest IN I whether Mrs Eddy ddY y the thefounder thefounder I Ifeullder
    founder of Christian Science lives or ordies orm ordlii
    dies m originated they explained In the thspopular U UMpU1ar thepopular
    popular conception tbat she must show showactual showaetniI howactual
    actual physical immortality to justify ju y rer t tertheories rertheories r I
    theories as to the superiority 8 perlorlt7 of the spirit spiritover apIrtover I Iover
    over matter and the possibility Ifbftl of the themmd themind themind
    mind triumphing over over W disease and Del death deathChristian d deathChristian ath athCbntlan
    Christian Scientists Sde tJ stated today tCHI that thatMrs t thatMrs bat batIn
    Mrs EddY did not claim immortality immortalityShe ImortallShe hmortal1tyShe
    She has written that in her Judgment Judgmentthe jud Judgmentthe t ttbft
    the faithful and entire fulnllment oC oCher o oher ofher
    her teachings will extinguish h death but buthas buthas buthas
    has added that for herself btr lt tbt ah h cannot cannotsay cannotsa cannotsay
    say sa Semi S me of o the Scientists Sclen when wheaque wheaquettoned ques questioned quo quotloued ¬
    tioned as to their belief b Ud in Mrs Irs Eddys Eddyspower EddY EddYOwer I Ipower
    power to defeat death explained their theirunderstanding theirunderstancllng theirunderstanding
    understanding of the matter thus thusWe tbU6We thusWe
    We can only wait and see We do donot donot donot
    not sute su te our eui belief one way or the other otherEverything ethEftlYthlQ MhmEverythIng
    Everything is not clear to us CnrlsUan CnrlsUanScience t UirisUanScience J1sllaa J1sllaaekee I
    Science ekee is a growing grOhg belief and as we wegrew weW wOrew
    grew rew W we shall 11 comprehend comprehendR CDmp end endR
    R R P Verrill V rIJI of the First ChristianScience C CititIsuScleflee Christian rMttaaScience
    Science Church at Ninetysixth Iftel txlh street streetand streetand streetand
    and Central Park k said todiy that New NewYork NewV NewYork
    York V ork Scientist were satisfied tint Un t the thenewspaper thonewspa thenewspaper
    newspaper newspa story to7 of Mrs Eddys Eddy physical physicalstate pyslcaIstate yIJkal yIJkal5tate
    state 5tate the impersonation of Mrs Leaoard Leaoardand ienfaad LeO Td Tdand
    and that Calvin Fry Fryt formerly Mrs MrsEddys MfliEdds Mrslddys
    Eddys footman and later secretary IeCf and andspokesman andspoke andapekeemn
    spokesman spoke man was the real head of Chris Christian Cbrttlan chrisflea ¬
    flea Science and the recipient of Mrs MrsEddys inEddys Mrsddy
    Eddys ddy s income of l I000I LMMtt a year ear Were Weredisproved Werelisproved werellsproved
    disproved disprovedCRISIS lisprovedCRISIS llsprovedCRISIS
    CRISIS NEAR BAR IN VENEZUELA VENEZUELAGen
    Gen Alcantara Alcantara Is Beady to Succeed SucceedCastro SuooeedOastro SuoceedCastro
    Castro by Force ForceCamping ForceOaHIIIlug ForoeCztinplng
    Camping Just Ontsidc Outsld Caracas with withSeveral wltbSecrnl withSescral
    Several Hundred Trooi FroomWgir rOOIJar War Is IsProlmblc JIIrrohnlle IiProlnlIc
    Prolmblc When heft President freitlent Dies DlcwNew DlcHN DiesNew
    New N W York Y rk ork Oct tcabIed CaWed advice from frombaraeas fromauac fromzom
    baraeas zom report a sittwtion Miqation tqatle amo aMOUIIt amounting ntnig to tograwe a agrave a acrIIIa
    grave crteta stftectrng ct MetIh vast Amerieaa 4tm de fi fiteresU lit litteftt Ih Ihtere
    teresU tere m Is Vs V Vpre8JdelAt mi bi v vPresident r rPresident
    President Ca Ca8fti caMi tra to spite of recent de denials de deJliaLJ donIaIm ¬
    nials te said lei to be paralysed and anabte anabteeither UMbieeither nshIeetiher
    either la waUc sr to spealc esk lie Heeeaa HeeeaaIltUnicats com communicates coinmizaicate ¬
    municates his desires by feeble writings writingsand w wnd writingstind
    and his true condition te I held a State Statesecret Stateecm Statesecret
    secret secretSr ecmSp1e secretSpies
    Spies Sr i < and paid > ag n agents nts l representing ntJDS other othercountries otJaercountriN othercouutriee
    countries and foreign corporate interests interestshaunt latereet8haunt Interestshaunt
    haunt the thet palace palaceAt palaceAt
    At t the suggestion of Castro himself himselfGen lalelfGen himselfGen
    Gen Alcantara is camping campht just outsidethe outside oatlWethe oitsldethe
    the city eft of Caracas with several eral buadred buadredUoope btllllfnodboo bundrediiooi
    boo iiooi and his present acttviUee UvltJes premise premisea
    a coup detat similar r to that which made madeCastro 1qdcCutN nadcCastre
    Castro l President PresidentAlcantara President4tlcantarais t tAkaatarals
    Alcantara Akaatarals is Castros Castro s choice for dictator dictatorand dictatorand dictatornd
    and nd the troops tr controlled COA by him will de decide de dedde dodde ¬
    dde the issue unless Gen Joan Vicente VkenteGomez VlcetaleGo VicenteGames
    Gomez Go consents c to enlist tile people to a acivil advU acivil
    civil war warGen warGen warGitfl
    Gen Gomez is the constitutional eo Vies ViesPresident VicePresident TJce TJcePresident
    President of Verunnela and tbe great greatmass peatmua greatmass
    mass of the people favor fa him hh as chief elatdmbslstrator ad administrator adminlotrator ¬
    ministrator but present indications are arethat arethat arethat
    that he will have to flsht ght bt the forces under underAlcantara mtdeorA1cantarL underAIcantXra
    Alcantara AlcantaraGOVERNMENT A1cantarLGOVERlmENT AIcantXraGOVERNNE1T
    GOVERNMENT IS DEFEATED DEFEATEDIloqsc DEFEATEDIIOQSC DEPELTEDHouse
    House of Lords Divides on Religions ReligionsFeature ReUgionsFeature ReligionsFeature
    Feature of Education EI1tlcntlon Bill BUILondon BiULoudon BillLondon
    London Oct 3 The government was wasdefeated wasdefeated wasdefeated
    defeated tonight in a division in IIIHo th thHouse th4 th4House
    House Ho of Lords on the education bin Ute tbecrux Utecru thecrux
    crux whereof was the gitostion of com compulsory compeleory cornpulsory ¬
    pulsory religious reIt us teaching m in the public publicelementary pabltce1ementa17 publicelementary
    elementary e1ementa17tJMIer schools schoolsUnder schoolstJnder
    Under the bill as it left the Home of ofCommons ofCO otCommons
    Commons CO llDOU children were not compelled to toattend to toattend toattend
    attend school during dUl the time devoted to toreligious toreJlsIoue toreligious
    religious instruction An amendment sub submitted submitled b bmitted ¬
    mitted to Ole House of Lords reversed rcvsvsedthis rever8etIthte reversedthis
    this and it was carried by a vote of c cto 2K 2Kto 2i 2ito
    to St The majority included the arch archbishops ardtbhtbopl archbishops ¬
    bishops a a score of bishops bI and almost almostthe aImcMtthe almostthe
    the whole organization organisationThe of ftiationThe
    The incident inaugurates the long fore foreseen foreseen loisseen ¬
    seen tussle between the respective ma majorities DIIljorltleos majoritles ¬
    jorities in the two houses on the educa educaUon edtJeation educathou
    thou question questionHTJM4N questionHIDUN questionBIThWT
    HTJM4N HEADS AS TROPHIES TROPHIESWarnin TROPHIEStrl1ing TROPHIESWarnings
    Warnings Warnin trl1ing i to Rebels Displayed at atGathering atGathering atGathering
    Gathering Places 1 lnees In Tangier TangierTangier TangIerTall TangierTauter
    Tangier Tall r Morocco occo < Oct 29 1 A score of ofbuman orhma ofwnan
    buman wnan heads ghastly butl trophies trapbl of the theprowess UteJiroweA theowesi
    JiroweA prowess owesi of the Sultans troops in the re recent re rent iscent ¬
    cent nt lighting 115 against the troops of the tlwPretender HiePretender thePretender
    Pretender are being exhibited here pub publicly pubtidy publidy ¬
    tidy In the city They are intended In ndecI as a awarning awarning I
    warning to rebellious subjects These Theseheads Theseheads I Ilinds
    heads were brought in a government governmentsteamer goernmentsteamer I IMetLmet
    steamer from the Riff coast Ct8 t of Northern NorthernMorocco NorthernMorocco
    Morocco At nearly every e Vtr point where wherecrowds wherecIOwda whereVowds I
    crowds arc accustomed tti gather one or ormore ormore ormore
    more more of the heads is displayed displayedHKES displayedLIKES
    I
    LIKES AMERICAN DRIOAN MONEY MONEYKaiNcr IONEYKaiser I IKahicr
    Kaiser Invites Endowments from fromOur fromOur froiiiOur
    Our Millionaire MillionaireBerlin 3Ii1IlonaireBerlin IIIUonnlrejliIkrUn
    Berlin Oct t TeU U year American Americanmillionaires AmericanmIil1onaAre IQR IQRmilll11tatirPs
    millionaires that th they y can not find ad a bet better btticr t tter ¬
    ter outlet for their jrarpiua wf surplus ua funds than Utan ui uithe 1ftI inthe
    I the endowment of chairs In the tTniver tTniversity rnlv rnlvi tulversity
    i 8It sity of Berlin said the Kateer J a to Rev RevDr ReI RevDr
    Dr Dloki pastor of o of the American AmericanChurch Americani AmirkenChurch
    I
    i Church Chur h in this city at the conclusion of ofthe ofI ofth
    I the th inaugural lecture lecl by b Prof John rOJm AI W WBurgess WI
    I I Burgess au of Cotnsabi C Iw the t first t oecapant oecapantof oesepsntof pant pantI
    of f the Roosevelt elt chair of American His History 1 1Or Illstory ¬
    I tory OrI toryStabbed toryStabbed
    I Stabbed St 1Jblcl for Spelling SIJ 1Ung Xante Wrong WrongChicago WrongCblcaao VrongChkaao
    Chicago Oct tWcanc Because Uftthe the desk deskirgeant desks1eant deskI
    I irgeant at the stock yards peUce P pIc eta staoa taI etaI
    I oa pelted tlf1en d his name roe wron wroD wro Ferdinand FerdinandUrqubardt Fwrdnta Fwrdntas
    I I s Urqubardt trqu ardt nabbed and frsrlotisly frsrlotislyounded Mrlouslyounde4 eoriousLywonided
    ounded that ofScer oMce yesterday Y Urawliardt Urawliardtas C UrfflaMtwas ltahU ltahUi
    i was as brought t ia on 01 a charge of disorderly disorderlyinduct dhJ diserderlyccnduct rderly
    l induct aud nd when tbe seragant se wrote wroteis wGtehis
    ibIs his is name down Uruart he whipped ht ped out outkulte outa outa
    a a kulte and d struck him in the breast He Hes I Ilis
    lis I Is s now accused accQ of ota f u murderous murder assault i
    WRIGHT RIGllT TO BE TRIED TRIEDAleandlia TRIEDAloxallIIia
    Alexandria Negro N agIO Said to Be BeMrs BeMrs
    Mrs Gooclings Assailant AssailantAISO AssailantALSO AssailantMSO
    ALSO FAOES MURDER CHARGE CHARGEXegro CHARGESegro CHARGEegro
    Xegro egro Johnson Jobn on Still at Large LargePtllla LargePtllladcllJlln Largc1411ndclpbia PliHa PliHartclnhia
    rtclnhia StrlUebreakers Persuaded Persuadedio Perln oded odedto
    to Go Home nomeIclle Idle IScjpro cro Filled 50 50Druggist O ODrugglli ODruggist
    Druggist Fined PJned for Selling SeIlln Cocaine CocaineIn
    In Medicine lec1leJncnronIls Brovms Funeral Held HeldUASIIIKCrrOJ JIcldWA81U Heldwsl1IgaTo
    UASIIIKCrrOJ WA81U crrO 1IHRALO BUUEAU BUUEAU1XK nrltEUlliJ BUiEUwill
    will 1XK TslHfram Ill IISAlexandria lliJ lliJ0ar1Ift Illcersrr
    0ar1Ift d lrtI aM It1 Miesti MiestiAlexandria li li1exDdria
    Alexandria Va Oct tJoseph tJosephThomas Joseph JosephThomas Jos pft pftTho
    Thomas Tho colored alias 1 John Wright who whowas whowas whowas
    was recently turned over o r to the Alexnn Alexnndrta AJe Alexnn1 Alexnn1dila n ndrt
    drta County authorities by Chief Justfiae JustfiaeClabaugb Ju Justl le leClabaup a aClabaugh
    Clabaugb in the District Supreme Court Courton Courton
    on a requisition from rein the governor of ofVirginia orVim ofVirginia
    Virginia Vim win be placed on trial Wednes Wednesday WedMSday Wodnesday ¬
    day morning at 10 oclock oc k in I the Alexan Alexandria J1txandrla 4Uexandna ¬
    dna County Counl Circuit Court on the charge chargeof chargcof chargeof
    of attempting atteJftptJ to attack Miss M Mabel MabelRisfey MabelIWley MabelJU
    Risfey JU now Mrs Deeding OoodtD near Luna LimaPark LunaPatrk LunaPark
    Park several weeks ago The principal principalwitnesses prt pr1lpsiwitnesses pal palwitn
    witn witnesses 8OII for the prosecution pr ecutton will ill bti b Mr MrGoodinf M MGoodIna Mrs MrsGooding
    Goodinf and her husbaod Forrest < Good Goodlog GoodIns Goodtag
    log who was her companion the evening eveningof
    of the attack Both have declared rcut that thattheir tbattheir thattheir
    their identification of the man as being beingtheir beIDatheir
    their assailant was complete completeThe completeThe
    The principal effort of the tb defense 4e1 It Itstated hi histated I IMated
    stated will be to prove an alibi alibiThomaa aJ aJThoml8 allisThomas
    Thomas te al o > under u Ckar indictment tDdletJMbt10r for the themurder emurder themurder
    murder of Jackson Bossy colored eoIOre4 and andfor andfor andfor
    for other offense offenseIt ofteuesIt
    It had been feared that an effort might mightbe IQIcttlbe nhIhLbe
    be made to lynch Thomas alter he was waaplaced WUplaced wasplaced
    placed in Jail here but since then thenhas there therehas 111 111baa
    has been no indication of any an trouble ot otthat etthut ofthat
    that kind kindJohnson kindJohnson kindJohnson
    Johnson Still at Large LargeNotfetag LargeotIatac LargeNothing
    Nothing otIatac had matertaMsed n p to a laid laidHour litbCNr kLOhour
    Hour toalgbt t t so far as was fctwvfti At Atthe atthe atthe
    the pottce station tat the effort of the e local taealauthoriticaJ JoaJauthorlUeato localauthorlkl
    authoriticaJ foUow up what was thewsht thewshtto
    to be an important tuI clew kw concerning COIteen r tile tlwwhereaboitta tltewh tilewhereabouts
    whereabouts wh < eabouta of William tUIaM Johnson the n nsro iii iiigre > ie iegro
    sro who murdered urde Charles Cbarl4 T Smith on onOctober 011OctoIttr onOctober
    October 38i It was learned this evtftt evidngtbt v ttag ttagthat t ttbat
    that the police department ft atlll at atwork atortt atwork
    work ortt en this particular line of Investiga Investigation IftVWUgaUoa Inveatigatics ¬
    tics the success stc esi or failure of which With Withprobably vttl vttlprobably 1 1JUObabb
    probably be known In a short time It ItIs ItUIICIentood
    1 Is understood that some Informatfea l tocUOIteen bus busbeen hoebeen
    been obtained which wille nay throw light lightupon Ucbtupea lightupon
    upon the movements of Frank RoIIItMOftthe Robmson Robmsonthe Roblneonthe
    the negro who wag tile compainioa cesou of ofJohnson ofTobuOD ofJohnson
    Johnson the night of the murder UJder and id who whois whois whoIs
    is wanted 1W ted on the charge of complicity comp dt7 in inthe Itthe Inthe
    the crime crimeStrlkulirqaker crlDO crlDOStrJkchrQakcN er9neStrikebrgnkers
    Strlkulirqaker Go 0 Hume HumeThe HoUlct homeThe
    The local situation t in the strike of ofehiWeta ma machinitta
    chinitta 1 ra on the Southern Railroad rataaips rataaipsabout raisaipsabout
    about the same iuie as a to the past week The Theunion TheUDIoI1 he heunion
    union men n ecntlane tlaae to dissuade mart of ofthe ofUte ofthe
    the strtk4 strikebreakers > reakers brougb1 here by the thecompany toMCOutpaay thecompany
    company from going to work Fourteen Foten1DICIIaI Fourteenniechanica Fourteenmechanic
    mechanic ca arrived In Alexandria AIe this thismoraine tJIls1110I thismorning
    moraine 1110I from Philadelphia for the pur purpose JtIIIt purpose ¬
    pose it is said 14 of preparing prepa severs several r pew JMrwlocomotive JWWfor pewleesmotive
    locomotive for service rvief They The were oem eommunteatrd C01IImtlDieatd oemmaulcated
    munteatrd with by b the local strikers and andafter aIMIafter andsites
    after being bel informed of the cs eestleun 4ltkMM UoM ex existing rxI8UDC cxhating ¬
    hating they tbe declined to go to work Later Laterthey LaWthey Laterthey
    they left for Philadelphia PblJadel lUa Ten of ofaIxt7 the tbesixty thesixty
    sixty targeclass Ju aa freight engines mctaNpunlta8td recently recentlypurchased veceullypurchased
    purchased by the company are BOW waIt wafting waItIn waitbig ¬
    big m In th tile yard ard ber here r e for adjustment ad 1mt of ofbefore Ofare ofletorc
    before they are ai ready for service serviceHeavily serviceheavily 1a 1aUCn
    Heavily UCn Fined lii nod for Idleness IdlcuenaTwo IdleneIJTwo IdlenessTwo
    Two cases consequent upon the move movement IIIOWmeRt meremeat ¬
    meat started by the police to disarm disarmIdVi tIIMr disarmIdle
    I Idle negroes roes who gather In certain certal a resorts resortsthroughout resertsthroughout a a1roucbout
    throughout the city were W heard before beforeJustice beforeJutJce beforeJustice
    Justice Caton in the t e Police Court C urt this thismorning th thmo thismorning
    morning mo and In each a An flue of iO 0 was wasimposed 1 1tmpoeed wasImposed
    imposed A negro who gave his name as asEllis allElba asEllis
    Ellis West was one of the defendants defendantsIt L LIt
    It was testified that be had been discover dlscovered discovereM ftr ftrtd
    ed with a p pair ir of iron knuckles in his hispocket blapockeL hispocket
    pocket The other was Robert Withoont Wltboeutwho WUtooatWoo Withoontwho
    who was apprehended It was testified testifiedwkh tesUdedwith Ifted Ifted11t11
    with a large antiquated but danerou daneroulooking dangerous dangerouslooking aserou aseroutoH
    toH looking revolver on his person personSix er erSiz personSix
    Six negroes n charged with gambling gamblingwho paabU gamblingwho S Swho
    who were rounded up Saturday night by byChief byChief byChief
    Chief Goods and Oncers Jones Knight KnightNicholson Knl KnightNicholson bt btNlcholleR
    Nicholson and Young were called in the thePottce thePoUce thePolice
    Police Court this morning Five failed failedto fall9dto failedto
    to answer wer and forfeited 55 collateral each eachand techSlId eachsad
    and the other was u toed that amount amountSold aaao amoUntoId Dt DtSold
    Sold oId Cocaine In Medicine MedicineA U dlelne dlelneA
    A 4 toe of m 7 Th was a imposed to the Poke PottceCourt PokeCourt PoliceCourt
    Court this morning upon J A Dleoeit Dleoeita DIeoeJtK Dliiia
    a King K street druggist on the chars charsof charge cb8rftof chargeof
    of tiling catarrh powder alleged aUeg to 1 1fain con contain CODtaln ¬
    fain a small percentage of cocaine tl tltarDey At AttmnoV i iterney
    terney Gardner I 1 Booths who appeared appearedfor appearedfor appearedlot
    for the defendant noted an appeal a l and andthe atldthe apdthe
    the case will 111 be heard m In the corporation corpont1oncourt corporatiOncourt corporationcourt
    court Mr Dienelt took the stand and andsuited nQdS1t aisdsited
    suited S1t l that the powder hi question was wasa wu wua wasa
    a proprietary preparation put up In Inpackages IIIpacka hipackages
    packa packages which bore bo ne indication of the thefact thefact thefact
    fact that t at the remedy contained cocaine cocaineHe cocn1aelle cocaineHe
    He asserted a that as soon as the nature natureof JNltureor natareof
    of the preparation had been bee called to his hisattention h1aattetttion lilaattention
    attention he bad dbconUnued its sale saleCorporation saleCorporatloc saleorporatloo
    Corporation Counsel Samuel P Fisher np appeared nppeaftd nppeered ¬
    peered for the city dt at the hearing hearingWashington 11811ng1Vas1alngton hearingWashington
    1Vas1alngton Washington Chief Thanked ThankedA
    A letter expressing CX1W535 the thanks of the theAlexandria theAex theAeirnirla
    Alexandria Aex ndria board of Are wardens wanle for the theassistance eUattance theapitstnce
    assistance Uattance rendered by byChieC Chi Chief f Belt and Mr MrRobinson MrRobinson Ir IrRoIJIMOJI
    Robinson of o the Washington Wl1 lire depart department departDent departmont ¬
    Dent in the purchase of a steam eagno eagnofor eug eagnefor Dc Dcfor
    for the local department has been sent sentto wentto sentto
    to Chief Cbf f Belt by A A A A DewnUam secre secretary 811C1etar secrear ¬
    tar ar of the tJa board It is stated chat the thenew thema thenew
    new machine ma hlne Is S equal to any of its class classm elaNit classlit
    m it the country OO1UItr and that it would have havebeen Itllve1Ioen havebeen
    been Impossible for the board to have haveKnown havetaOwn haveknown
    Known of its qualities without the benefit benefitgratuitously tetleftttuhot liesietitgratuitously
    gratuitously tuhot ly afforded A40 of the wide Wkleo experi experience oxpcrlonce perl perlof ¬
    once of Chief Belt In such sue matter matterBrovrnH matlerllr mattersBrowns
    Brown's Funeral Held
    The remains of Isador Brown, who committed suicide at Lebanon, Pa., were interred this morning in the Jewish cemetery near this city. Funeral services were held at 11 o'clock at the residence of his niece, Mrs. Charles Bendheim, 812 Prince Street. Rabbi Stern, of Washington, officiated, and the pallbearers were Messrs. Louis Eichberg, Isaac Schwarz, B. Weil, K. Wollberg, Frank Warfield, and Robert Arnold. The services were attended by many friends and relatives of the deceased and by a delegation from George Mason Council, Royal Arcanum, of which Mr. Brown was a member.
    Murderer Wants New Trial
    Argument will be heard tomorrow morning before Judge Barley in the Corporation Court on a motion for a new trial in the case of Howard Banks, colored, who was recently found guilty of having murdered a negro named Thomas Elsey. Unless the motion is granted the defendant will probably be sentenced to be hanged, as the verdict was murder in the first degree. Attorneys Machen and Meause will make argument for the prisoner, while Commonwealth's Attorney Samuel G. Brent and Attorney R. D. Brumback will appear for the prosecution.
    Always the Same: Tharp's Pure Berkeley Rye. 812 F St. N.W. Phone Main 1141. Special Private Delivery.
    It is stated that the Masonic lodges of Petersburg made extensive preparations for the entertainment tonight in Masonic Hall there in honor of the visit of Grand Master Kemper of this city. The occasion marked the 119th anniversary of the granting of a charter to Petersburg Lodge No. 15.
    Special vesper services under the auspices of Fitzgerald Council, Knights of Columbus, will be held next Sunday evening in St. Mary's Catholic Church. The sermon will be preached by Rev. Father Doyle, of Washington.
    NEGRO ESCAPES FROM JAIL
    Horse Thief Arrested Here Gets Away from Rockville, Md., Jail
    Special to The Washington Herald.
    Rockville, Md. Lawrence Johnson, colored, who was arrested in Washington recently and lodged in jail here charged with stealing a horse owned by Mrs. Aiyla Sims, of Brookeville, Montgomery County, escaped from jail here today about 1 o'clock, and though a vigorous search has been made for him he has not yet been recaptured.
    As Jailer Trull let the prisoners out into the back yard of the jail, he went back into the building preparatory to distributing their dinner. The jailer was out of Johnson's sight about fifteen minutes, and in that time the negro had scaled about twenty feet of perpendicular stone wall, jumped to the ground on the outside, and made his escape. A stick found in the fastening of the window looking into the yard and a track on the outside where the negro landed were the only evidences of his escape. The other prisoners who were let out into the yard had returned to the corridor of the jail before Johnson, and all say that they did not see him get out. The Washington police have been notified to be on the lookout for Johnson.
    NICE OLD MAN LOCKED UP
    Benevolent Looking, but Charged with Swindling at Poker
    Robert Murphy, Sr., His Son, and Harry Livingston Taken Up by Baltimore Police
    Special to The Washington Herald.
    Baltimore, Md. When Chief of Police Grant A. Ogline, of the Baltimore and Ohio Railroad, and a captain of the railroad's secret service walked up to a benevolent-looking old gentleman at Camden Station and accosted him this morning, the old gentleman threw up his hands on the platform and said: "What do you mean by speaking to me? I don't know you."
    "But we know you," replied Chief Ogline. "You're Robert Murphy, sr. One of my men has your son, Robert Murphy, jr., there at the gate, and we've also got your pal, who says his name is Harry Livingston."
    "This is an outrage," exclaimed the benevolent-looking old gentleman excitedly, stroking his long gray beard. "What business have you with me?"
    "Same old thing, Murphy," replied Chief Ogline. "You're wanted for swindling passengers on the Baltimore and Ohio Railroad. There's a warrant out for you at Martinsburg; your son and this new fellow Livingston are in on it, and as you're such a nice, quiet old gentleman I know you won't object to walking down to police headquarters with us. Captain of Detectives Pumphrey wants to talk to you."
    For several months, according to the statements of Chief Ogline, Murphy, sr., Murphy, jr., and young Livingston have been working the fixed-hand card game on passengers on the Baltimore and Ohio, traveling between Pittsburg, Pa., Martinsburg, W. Va., and Baltimore.
    CULPEPER TO GET SCHOOL
    Randolph-Macon System to Locate Institution Near City
    Culpeper, Va. Chancellor William Waugh Smith, of the Randolph-Macon system of schools in Virginia, was here today and accepted the site for the location of another school here in the Randolph-Macon series of schools.
    The site of the proposed school is forty acres of land a quarter of a mile south of Culpeper, on a beautiful eminence, and was donated by E. C. Brook and Russell Smith, of Culpeper.
    Chancellor W. W. Smith said to citizens it was an ideal place for the location of the school building. It is in sight of the Southern and the Chesapeake and Ohio railroads, and a station will be built there for the accommodation of the school.
    COL. ROBINS IS DEAD
    Ex-Commander in Virginia Cavalry Succumbs to Heart Disease
    Special to The Washington Herald.
    Richmond, Va. Col. William T. Robins died suddenly Sunday morning from heart disease at his home, 314 East Franklin Street. He had long been a sufferer, and his death was not unexpected. He was commander of the Twenty-fourth Cavalry under Gen. Jeb Stuart and had a gallant record. His last public service was
    The fire burned so rapidly that the contents were consumed, entailing a heavy loss. The residence was partially burned.
    Dies from Gunshot Wound
    Special to The Washington Herald.
    Fredericksburg, Va. From the effects of a gunshot wound received about a week ago in an altercation over a dog with James A. Ballard, of Stafford County, Raleigh Cooper died at his home in this city today. Ballard has been arrested and is confined in the city jail.
    Miss Kate Crigler Passes Away
    Special to The Washington Herald.
    Culpeper, Va. Miss Kate Crigler, one of the oldest residents of this city, died here today in the eighty-fifth year of her age. She was distinguished among a large circle of friends and acquaintances for her strong Christian character and many acts of charity.
    ROB NAVAL OFFICERS
    Two Trusted Mail Orderlies Recorded as Deserters
    REESTABLISH CUBAN CREDIT
    Imports from Europe Have Decreased, but Coastwise Trade Shows Increased Activity. Report of the Revolution in Santa Clara Is Greatly Exaggerated
    Havana, Oct. 28. Charles N. Fernald and David A. Harrison, mail orderlies, have disappeared, taking with them, it is alleged, money belonging to officers of the cruiser Brooklyn, to which vessel the absconding men were attached. Fernald and Harrison were intrusted with the money for mailing to the United States in October, since which date they have not returned to the cruiser. Both are now recorded on the ship's books as deserters. Both registered as yeomen, Fernald from Atlantic City, N. J., and Harrison from Hazleton, Pa.
    Small Boats Are Active
    The custom house officials can not certify exactly what lines of imports were decreased by the revolution, but provisions, dry goods, machinery, and articles of luxury are supposed to have decreased more than anything else. Opinions differ as to the reestablishment of confidence, although it is claimed that a good index of the feeling in the country districts is the increased activity noticed among the schooners engaged in the coastwise trade. It is alleged that the imports from Europe have also fallen off. The Discusion considers that credit has been reestablished by the action of the United States.
    Gov. Magoon Is Busy
    Gov. Magoon has asked that copies of the laws regarding duels be furnished him in order that he may see the existing conditions of the question. The governor went to Mariel yesterday and partook of a Cuban breakfast. The Discusion regrets that three revolutionary generals of the war of 1895 have lost their positions owing to the changes in the bureau of prisons.
    Gen. Montalvo's resignation as commander of the presidio has been accepted by Gov. Magoon, to take effect in November. He will be succeeded by Lieut. Warden, who will assume command temporarily at least.
    Revolution Not Apprehended
    The report received here this afternoon stating that negroes had arisen against the provisional government in the province of Santa Clara, and that it was feared the officials now in charge of Cuba's affairs would have another small revolution on their hands, seems to have been greatly exaggerated.
    That Alarming Report
    The report referred to was as follows: "Several hundred negro revolutionists have taken up arms against the provisional government in Santa Clara and Camaguey provinces. The trouble has been caused by the claim of the negroes that their officers received pay from the provisional government for their services in the recent revolution, but that the common soldiers were not remembered similarly."
    THINKS HE WAS DRUGGED
    Sailor Clark, Found Beside Dead Woman, Held as Witness
    New York. Clinton Clark, the sailor from the battle ship Indiana who was found lying beside an unidentified dead woman in Riverside Park at Seventy-fifth Street yesterday morning, was examined today by Coroner Shrady. Clark, who is twenty-one years old, is a powerfully built man and is above the average sailor intellectually. He said that he could recollect nothing that happened to him after taking a drink in a Bowery saloon at 6 o'clock on Sunday evening with an army corporal whom he had met in a shooting gallery.
    When he left the Indiana, Clark said, he had $87. Of this he spent possibly $2. When he recovered consciousness after being found by the police he had only $5 left, and this he had hidden in his stocking. He thinks he was drugged, as he remembers accurately everything he did up to the time he took the drink. He denies any knowledge of seeing the woman who was found dead beside him.
    The coroner was much puzzled to account for some slight injuries found on Clark. His right ankle was swollen and evidently pained him. He said that it was caused by rheumatism, and showed a cut in the shoe that he had made two or three weeks ago to ease the foot. There was a bump on his head, although the scalp was not cut. He said he did not know how he got it.
    The police theory is that Clark was sitting on the park wall with the woman and that the two tumbled over. They think the woman landed first and that Clark fell on her, breaking her ribs with his head and thus getting the bump. The ankle, they think, was sprained by the fall.
    Coroner Shrady sent a policeman down to Governors Island to find the corporal with whom Clark had a drink. Clark was sent to the House of Detention to be held as a witness when the inquest occurs.
    KILLED VICTIM WITH AX
    White Horse Murderer Caught in Chicago After Two Years
    Chicago, Oct. 29. Charged with a murder committed two years ago, Frank Sassman, a wandering horse trader, was arrested at the postoffice while asking for mail addressed to T. V. Foster. The police received their information through a Mrs. Bentley, with whom Sassman was traveling through Missouri at the time the murder was committed. The victim, James Miller, a member of the G. A. R., sixty-four years old, met Sassman and Mrs. Bentley several days before he was killed. He had some property and was at once invited to join them in their pilgrimage.
    Near Holden, Mo., it is alleged that Sassman killed Miller with an ax, sold his property, and started for Kansas City. The G. A. R. took up the search, offered a reward for the fugitive, and created such excitement that Mrs. Bentley became frightened and left her companion.
    At Kansas City Sassman himself became so much alarmed that he swam the river into Kansas. On the Kansas side he met a policeman who was so suspicious that when Sassman bought a railroad ticket the bluecoat took the same train to follow him. Sassman shook off pursuit by leaping from the train while it was running at a high rate of speed, and finally came to Chicago.
    He admits that he has four wives. The killing of Miller was known as the "white horse murder," Sassman having been traveling with a team of white horses, and was widely advertised through the G. A. R.'s efforts to capture the criminal.
    Herald Want Ads will be received at Castall's Pharmacy, 11th and Irving sts. nw., and promptly forwarded to the main office.
    HAWSER IMPERILS WAR SHIP
    Heavy Steel Cable Tangled in the Georgia's Propeller Shaft
    Boston, Mass., Oct. 29. A peculiar accident resulted in the battle ship Georgia being placed in the new dry dock at the navy yard today. When the big ship left the yard of her builders at Bath six weeks ago to come to this port, some careless employees accidentally cast off both ends of a three-inch steel cable, and it fell into the water and sank. Efforts were made to locate it, but they were fruitless. Believing the hawser had gone to the bottom, the ship came to the yard all right.
    No trouble was noticed on the way up the coast, and the engines, propellers, and rudder worked all right. However, a few days ago a diver was sent down to examine the ship's bottom and was greatly surprised to find the heavy cable wound around the port propeller shaft and apparently badly tangled up with it. Naval officers think it rather peculiar that the accident did not result more seriously.
    OVERRUN WITH DESERTERS
    Norfolk Is Unable to Get Rid of the Undesirable Class
    Complaint is being made at Norfolk, Va., of the unusually large number of deserters from the navy that are hanging about the town. The increase is said to be due to changes that were made recently in the police regulations.
    Under the old rules arrests of deserters were made by substitute officers of the police force. Criticism was made of the methods used by these officers, and this opposition resulted in the police board ordering that these officers make no more arrests.
    Since then the city has been unable to rid itself of the deserters, who are regarded as an especially undesirable class. It is estimated that a large number of deserters are in Norfolk at present.
    ADMIRAL EVANS HERE
    Put in Much Time with Navy Department Officials
    Rear Admiral Evans arrived in Washington yesterday and put in most of the time in conference at the Navy Department. He left his flagship, the Maine, at New York. This vessel is to be succeeded soon by the newest pride of the navy, the Connecticut, as the flagship of the Atlantic Fleet. The Connecticut, which is now being made ready for sea service, will be in commission within a few weeks.
    Admiral Evans' conference was chiefly for the purpose of determining what should be done with several vessels of his command which are being repaired and overhauled at the various navy yards.
    ENGLISH FLAG ON U. S. SHIP
    Union Jack and British Ensign Float from Same Vessel
    Steamer Jacob Bright, Chartered as a Transport to Rush Troops to Cuba, but Is Now Discharged
    New York. The novelty of a United States transport flying the British flag today attracted the attention of people along the water front.
    The steamer Jacob Bright arrived here from Havana with the British ensign flying from her after flagstaff and the colors of the United States Quartermaster's Department hoisted to her main truck.
    It was such a strange combination of nautical interests that some of the beachcombers along South Brooklyn rubbed their eyes to make sure that they were not deceived.
    One of the best known admiralty lawyers in the city said "Is it possible?" when he saw the strange occurrence. Then the lawyer added:
    "The idea of the United States having a transport flying a foreign flag is against all law and order, not to speak of the effect it may have upon delicate patriotic nerves. It seems odd, and it is odd."
    An official of the Quartermaster's Department offered this explanation of the fact that the Bright is flying the British flag:
    "We chartered the Bright because we were in a hurry to get troop ships. The Bright has fulfilled her mission, and although she arrived here consigned to the department, she is to be discharged today."
    FISKE SUCCEEDS GALT
    Commander of Arkansas Leaves the Monitor for Court-martial Duty
    Special to The Washington Herald.
    Annapolis, Md. Commander R. H. Galt, of the United States Navy, today received orders detaching him from command of the monitor Arkansas, one of the practice ships in use at the Naval Academy, to duty as a member of a court-martial to be assembled at the Norfolk Navy Yard.
    Commander Bradley A. Fiske will succeed Commander Galt in command of the Arkansas. Commander Galt will proceed to Norfolk as soon as his relief reports at the Naval Academy.
    Drowned Marine Buried Here
    The remains of Joseph P. McEneany, late
    Plans for the installation on board the Wyoming of the facilities for storing and burning oil as fuel are being examined at the Navy Department. From these tests the department will endeavor to arrive at the desirability of using oil as fuel. It is understood that a considerable sum will be required to install the accommodations for carrying the oil and for burners, piping, and pumps.
    Barber Cuts His Throat
    Special to The Washington Herald.
    Cumberland, Md. Charles Kraft, forty-three, late proprietor of the barber shop in the Walsh Building, cut his throat tonight at his home. He is still living, but the doctors have little hope, an artery being severed. Kraft was the proprietor of a popular establishment here but was obliged to sell. Domestic troubles, however, are given as the cause for his act.
    RETIRE OLYMPIA AND TEXAS
    Two Famous Fighting Ships to Go Out of Commission
    Cruiser Will Become Training Ship at Norfolk. Battle Ship Has Long Been Known as a "Hoodoo"
    Two of the famous ships of Uncle Sam's navy are soon to go out of active service into the retired list. They are the cruiser Olympia, of Manila fame, and the battle ship Texas, long known and dreaded among the navies as the "hoodoo ship."
    The cruiser is to become a training ship for the Marine Corps after an honorable career of over thirteen years. Her retirement from the active register is surprising to those who have seen her anchored recently at the Norfolk Navy Yard, for she still looks the capable fighter that she did when she carried Admiral Dewey into the harbor of Manila. But the demands of modern navies change too rapidly for the retention of a ship which has been as useful as the Olympia has for so many years, and she is forced to make way for the newcomers. Already the topmasts have been taken down and all but her eight-inch guns removed, together with her stores and most of the fixed weights in her hull. Quarters are to be provided for a large crew and for the officers who are to have charge of the cadets that will get their instruction on her, but outwardly the cruiser's appearance will not be greatly changed.
    The Texas is a little more fortunate than the cruiser, as the battle ship has been kept in active service a little longer through the demands caused by the Cuban trouble. The work of remodeling her has been postponed until there is no longer any need for her around Cuba, and then she, too, will be dismantled and put into shape for duty as a receiving ship.
    The Texas was built from plans purchased in England. As a ship she has been the lame duck of the fleet and the hoodoo of the navy. Prior to the Spanish-American war she sank quietly at her dock in the New York Navy Yard one day. This mishap was due to a sea valve in her bottom having been accidentally left open. When she was at length raised, the men cleaning her bilges found five fat eels which had been living on the viands of the cook's pantry. These were sent to Capt. Evans with the compliments of Capt. Philip, the commander of the Texas. "Fighting Bob" acknowledged the gift in the following words: "Thanks. They were very good. Sink her again."
    ARMY ORDERS
    Leaves of Absence and Special Orders
    The special order authorizing Capt. Arthur H. Mackie, First Infantry, National Guard of New Jersey, to pursue a course in military topography at the garrison school, Fort Jay, New York, is amended so as to authorize him to attend the garrison school at Fort Jay for the purpose of continuing the regular course of instruction at that school, with the allowances authorized by law.
    First Lieut. Charles C. Burt, Artillery Corps, in addition to his other duties, will assume charge, under instructions of the Quartermaster General of the Army, of construction work at Fort Worden, Washington, relieving Second Lieut. Charles A. Clark, Artillery Corps, of that duty.
    Brig. Gen. Theodore J. Wint, United States Army, will return, accompanied by his authorized aids-de-camp, to Omaha, Nebr., and resume command of the Department of the Missouri.
    Contract Surgeon John H. Hereford, United States Army, will proceed to Hot Springs, Ark., and report in person to the commanding officer, Army and Navy General Hospital, at that place, for treatment.
    Maj. Jay E. Hoffer, Ordnance Department, will make not to exceed two visits each month during the months of November and December, 1906, and January, February, and March, 1907, to Richmond, Va., on official business pertaining to the manufacture of ordnance material for the Ordnance Department.
    Veterinarian Walter Fraser, Thirteenth Cavalry, will proceed to Liverpool, England, for the purpose of taking a six weeks' special course in tropical diseases of animals at the University of Liverpool, and upon the completion of this duty will return to his proper station.
    Contract Surgeon William H. Richardson, U. S. A., is relieved from the further operation of paragraph 11 of a special order of the War Department, and of a paragraph of the special order of October 12, 1906, War Department, and will rejoin his proper station at Fort Sheridan, Illinois.
    NAVY ORDERS
    Capt. J. B. Boiler, retired, detached duty at navy yard, Norfolk, Va., to navy yard, New York, N. Y., for court-martial duty.
    Commander R. H. Galt, detached duty in command of Arkansas, to navy yard, Norfolk, Va., for court-martial duty.
    Commander B. A. Fiske, detached duty in command of Minneapolis when placed out of commission, to command Arkansas.
    Lieut. D. W. Wurtsbaugh, to Georgia as ordnance officer.
    Lieut. J. R. Combs, detached Washington, to Naval Hospital, Philadelphia, Pa., for treatment.
    Ensign O. F. Cooper, detached Minneapolis, to Washington.
    Warrant Machinist E. G. Affleck, detached Minneapolis when placed out of commission, to home and wait orders.
    Warrant Machinist G. O. Littlefield, detached Minneapolis when placed out of commission, to home and wait orders.
    NAVAL VESSEL MOVEMENTS
    Arrived: Minneapolis, at League Island; Newark and Apache, at navy yard, New York; Eagle, at Norfolk, from Hampton Roads; Denver, at Norfolk; Dixie, at Monte Christi, from Culebra; Paul Jones and Preble, at Mare Island; Wisconsin, at Bremerton; Rainbow, at Cavite.
    Sailed: Newport, from Sanchez, for Boston; Don Juan de Austria and Sterling, from Sanchez, for Monte Christi; Marietta, from Sanchez, for Santo Domingo City; Scorpion, from Monte Christi, for San Juan; Paducah, from Sanchez, for League Island.
    Young Author Drowned
    Altoona, Pa. Bayard Gable, aged twenty-three, of this city, while on a sailing vessel bound from New Bedford, Mass., to Buenos Ayres gathering material for a book of the sea, evidently fell overboard and was drowned. Word of the accident was received today by his father, W. F. Gable.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/group_points.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. - - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. - """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. - - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. 
- """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' - new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. - """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. 
- """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. - """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/cybercorejapan/human-detection-docker/projects/human_detection/docker_run.sh b/spaces/cybercorejapan/human-detection-docker/projects/human_detection/docker_run.sh deleted file mode 100644 index bb555cf7750369eb22ba1c3e9f8b892afc3331ef..0000000000000000000000000000000000000000 --- a/spaces/cybercorejapan/human-detection-docker/projects/human_detection/docker_run.sh +++ /dev/null @@ -1,11 +0,0 @@ -# Docker run to start run demo app for traffic_monitoring demo -read -p "Please enter your container name, for example 'cc-demo': " name -read -p "Please enter your data directory path, for example /data/cc-demo/: " data -read -p "Please enter your public port, for example 8585: " pubport - -docker run --name $name --shm-size=8g --gpus all --rm -it \ - -p $pubport:7860 \ - -v $data:/data \ - -v $(pwd):/root/workspace/cc-demo \ - -w /root/workspace/cc-demo \ - cybercorecloud/cc-demo:v2.1 /bin/bash -c "cd projects && gradio human_detection/demo_app.py" \ No newline at end of file diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/pirenderer/util/init_weight.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/pirenderer/util/init_weight.py deleted file mode 100644 index e3831df7bc735b805a3c86f60e4c66ef69320930..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/pirenderer/util/init_weight.py +++ /dev/null @@ -1,57 +0,0 @@ -from torch.nn import init - - -def weights_init(init_type='normal', gain=0.02, bias=None): - r"""Initialize weights in the network. - - Args: - init_type (str): The name of the initialization scheme. - gain (float): The parameter that is required for the initialization - scheme. - bias (object): If not ``None``, specifies the initialization parameter - for bias. - - Returns: - (obj): init function to be applied. - """ - - def init_func(m): - r"""Init function - - Args: - m: module to be weight initialized. 
- """ - class_name = m.__class__.__name__ - if hasattr(m, 'weight') and ( - class_name.find('Conv') != -1 or - class_name.find('Linear') != -1 or - class_name.find('Embedding') != -1): - if init_type == 'normal': - init.normal_(m.weight.data, 0.0, gain) - elif init_type == 'xavier': - init.xavier_normal_(m.weight.data, gain=gain) - elif init_type == 'xavier_uniform': - init.xavier_uniform_(m.weight.data, gain=1.0) - elif init_type == 'kaiming': - init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') - elif init_type == 'orthogonal': - init.orthogonal_(m.weight.data, gain=gain) - elif init_type == 'none': - m.reset_parameters() - else: - raise NotImplementedError( - 'initialization method [%s] is ' - 'not implemented' % init_type) - if hasattr(m, 'bias') and m.bias is not None: - if bias is not None: - bias_type = getattr(bias, 'type', 'normal') - if bias_type == 'normal': - bias_gain = getattr(bias, 'gain', 0.5) - init.normal_(m.bias.data, 0.0, bias_gain) - else: - raise NotImplementedError( - 'initialization method [%s] is ' - 'not implemented' % bias_type) - else: - init.constant_(m.bias.data, 0.0) - return init_func diff --git a/spaces/dafqi/indo_twitter_sentiment_app/script/functions.py b/spaces/dafqi/indo_twitter_sentiment_app/script/functions.py deleted file mode 100644 index 3c346df32df3ce45c3f4e9ce5cd82aad1d552f91..0000000000000000000000000000000000000000 --- a/spaces/dafqi/indo_twitter_sentiment_app/script/functions.py +++ /dev/null @@ -1,132 +0,0 @@ -import pandas as pd -import numpy as np -import re -import snscrape.modules.twitter as sntwitter -from transformers import pipeline -import plotly.express as px -import joblib -from sklearn.metrics import classification_report,confusion_matrix - - -import nltk -nltk.download("punkt") -nltk.download('stopwords') -from nltk.tokenize import word_tokenize - - -def get_tweets(username, length=10, option = None): - # Creating list to append tweet data to - query = username + " -filter:links filter:replies lang:id" - if option == "Advanced": - query = username - tweets = [] - # Using TwitterSearchScraper to scrape - # Using TwitterSearchScraper to scrape - for i,tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()): - if i>=length: - break - tweets.append([tweet.content]) - - # Creating a dataframe from the tweets list above - tweets_df = pd.DataFrame(tweets, columns=["content"]) - tweets_df['content'] = tweets_df['content'].str.replace('@[^\s]+','') - tweets_df['content'] = tweets_df['content'].str.replace('#[^\s]+','') - tweets_df['content'] = tweets_df['content'].str.replace('http\S+','') - tweets_df['content'] = tweets_df['content'].str.replace('pic.twitter.com\S+','') - tweets_df['content'] = tweets_df['content'].str.replace('RT','') - tweets_df['content'] = tweets_df['content'].str.replace('amp','') - # remove emoticon - tweets_df['content'] = tweets_df['content'].str.replace('[^\w\s#@/:%.,_-]', '', flags=re.UNICODE) - - # remove whitespace leading & trailing - tweets_df['content'] = tweets_df['content'].str.strip() - - # remove multiple whitespace into single whitespace - tweets_df['content'] = tweets_df['content'].str.replace('\s+', ' ') - - # remove row with empty content - tweets_df = tweets_df[tweets_df['content'] != ''] - return tweets_df - - -def get_sentiment(df,option_model): - id2label = {0: "negatif", 1: "netral", 2: "positif"} - if option_model == "IndoBERT (Accurate,Slow)": - classifier = pipeline("sentiment-analysis",model = "indobert") - df['sentiment'] = df['content'].apply(lambda x: 
id2label[classifier(x)[0]['label']]) - elif (option_model == "Logistic Regression (Less Accurate,Fast)"): - df_model = joblib.load('assets/df_model.pkl') - classifier = df_model[df_model.model_name == "Logistic Regression"].model.values[0] - df['sentiment'] = df['content'].apply(lambda x: id2label[classifier.predict([x])[0]]) - else : - df_model = joblib.load('assets/df_model.pkl') - classifier = df_model[df_model.model_name == option_model].model.values[0] - df['sentiment'] = df['content'].apply(lambda x: id2label[classifier.predict([x])[0]]) - # change order sentiment to first column - cols = df.columns.tolist() - cols = cols[-1:] + cols[:-1] - df = df[cols] - - return df - -def get_bar_chart(df): - df= df.groupby(['sentiment']).count().reset_index() - # plot barchart sentiment - # plot barchart sentiment - fig = px.bar(df, x="sentiment", y="content", color="sentiment",text = "content", color_discrete_map={"positif": "#00cc96", "negatif": "#ef553b","netral": "#636efa"}) - # hide legend - fig.update_layout(showlegend=False) - # set margin top - fig.update_layout(margin=dict(t=0, b=150, l=0, r=0)) - # set title in center - # set annotation in bar - fig.update_traces(textposition='outside') - fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide') - - # set y axis title - fig.update_yaxes(title_text='Jumlah Komentar') - - return fig - -def plot_model_summary(df_model): - df_scatter = df_model[df_model.set_data == "test"][["score","time","model_name"]] - # plot scatter - fig = px.scatter(df_scatter, x="time", y="score", color="model_name", hover_data=['model_name']) - # set xlabel to time (s) - fig.update_xaxes(title_text="time (s)") - # set ylabel to accuracy - fig.update_yaxes(title_text="accuracy") - - # set point size - fig.update_traces(marker=dict(size=10)) - fig.update_layout(autosize = False,margin=dict(t=0, l=0, r=0),height = 400) - return fig - -def plot_clfr(df_model,option_model,df): - df_clfr = pd.DataFrame(classification_report(df["label"],df[f"{option_model}_pred"],output_dict=True)) - # heatmap using plotly - df_clfr.columns = ["positif","netral","negatif","accuracy","macro_avg","weighted_avg"] - fig = px.imshow(df_clfr.T.iloc[:,:-1], x=df_clfr.T.iloc[:,:-1].columns, y=df_clfr.T.iloc[:,:-1].index) - # remove colorbar - fig.update_layout(coloraxis_showscale=False) - fig.update_layout(coloraxis_colorscale='gnbu') - # get annot - annot = df_clfr.T.iloc[:,:-1].values - # add annot and set font size - fig.update_traces(text=annot, texttemplate='%{text:.2f}',textfont_size=12) - # set title to classification report - fig.update_layout(title_text="📄 Classification Report") - return fig - -def plot_confusion_matrix(df_model,option_model,df): - # plot confusion matrix - cm = confusion_matrix(df['label'],df[f"{option_model}_pred"]) - fig = px.imshow(cm, x=['negatif','netral','positif'], y=['negatif','netral','positif']) - # remove colorbar - fig.update_layout(coloraxis_showscale=False) - fig.update_layout(coloraxis_colorscale='gnbu',title_text = "📊 Confusion Matrix") - # get annot - annot = cm - # add annot - fig.update_traces(text=annot, texttemplate='%{text:.0f}',textfont_size=15) - return fig \ No newline at end of file diff --git a/spaces/dandan4272/hand_gesture_rec/model/stgcn/Models.py b/spaces/dandan4272/hand_gesture_rec/model/stgcn/Models.py deleted file mode 100644 index 2abc8c52cfad2b084d12439f5a68b0e6649ab119..0000000000000000000000000000000000000000 --- a/spaces/dandan4272/hand_gesture_rec/model/stgcn/Models.py +++ /dev/null @@ -1,253 +0,0 @@ - -import torch 
-import torch.nn as nn -import torch.nn.functional as F -from model.stgcn.Utils import Graph - - -class GraphConvolution(nn.Module): - """The basic module for applying a graph convolution. - Args: - - in_channel: (int) Number of channels in the input sequence data. - - out_channels: (int) Number of channels produced by the convolution. - - kernel_size: (int) Size of the graph convolving kernel. - - t_kernel_size: (int) Size of the temporal convolving kernel. - - t_stride: (int, optional) Stride of the temporal convolution. Default: 1 - - t_padding: (int, optional) Temporal zero-padding added to both sides of - the input. Default: 0 - - t_dilation: (int, optional) Spacing between temporal kernel elements. Default: 1 - - bias: (bool, optional) If `True`, adds a learnable bias to the output. - Default: `True` - Shape: - - Inputs x: Graph sequence in :math:`(N, in_channels, T_{in}, V)`, - A: Graph adjacency matrix in :math:`(K, V, V)`, - - Output: Graph sequence out in :math:`(N, out_channels, T_{out}, V)` - - where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. - - """ - def __init__(self, in_channels, out_channels, kernel_size, - t_kernel_size=1, - t_stride=1, - t_padding=0, - t_dilation=1, - bias=True): - super().__init__() - - self.kernel_size = kernel_size - self.conv = nn.Conv2d(in_channels, - out_channels * kernel_size, - kernel_size=(t_kernel_size, 1), - padding=(t_padding, 0), - stride=(t_stride, 1), - dilation=(t_dilation, 1), - bias=bias) - - def forward(self, x, A): - x = self.conv(x) - n, kc, t, v = x.size() - x = x.view(n, self.kernel_size, kc//self.kernel_size, t, v) - x = torch.einsum('nkctv,kvw->nctw', (x, A)) - - return x.contiguous() - - -class st_gcn(nn.Module): - """Applies a spatial temporal graph convolution over an input graph sequence. - Args: - - in_channels: (int) Number of channels in the input sequence data. - - out_channels: (int) Number of channels produced by the convolution. - - kernel_size: (tuple) Size of the temporal convolving kernel and - graph convolving kernel. - - stride: (int, optional) Stride of the temporal convolution. Default: 1 - - dropout: (int, optional) Dropout rate of the final output. Default: 0 - - residual: (bool, optional) If `True`, applies a residual mechanism. - Default: `True` - Shape: - - Inputs x: Graph sequence in :math: `(N, in_channels, T_{in}, V)`, - A: Graph Adjecency matrix in :math: `(K, V, V)`, - - Output: Graph sequence out in :math: `(N, out_channels, T_{out}, V)` - where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. 
- """ - def __init__(self, in_channels, out_channels, kernel_size, - stride=1, - dropout=0, - residual=True): - super().__init__() - assert len(kernel_size) == 2 - assert kernel_size[0] % 2 == 1 - - padding = ((kernel_size[0] - 1) // 2, 0) - - self.gcn = GraphConvolution(in_channels, out_channels, kernel_size[1]) - self.tcn = nn.Sequential(nn.BatchNorm2d(out_channels), - nn.ReLU(inplace=True), - nn.Conv2d(out_channels, - out_channels, - (kernel_size[0], 1), - (stride, 1), - padding), - nn.BatchNorm2d(out_channels), - nn.Dropout(dropout, inplace=True) - ) - - if not residual: - self.residual = lambda x: 0 - elif (in_channels == out_channels) and (stride == 1): - self.residual = lambda x: x - else: - self.residual = nn.Sequential(nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=(stride, 1)), - nn.BatchNorm2d(out_channels) - ) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, A): - res = self.residual(x) - x = self.gcn(x, A) - x = self.tcn(x) + res - return self.relu(x) - - -class StreamSpatialTemporalGraph(nn.Module): - """Spatial temporal graph convolutional networks. - Args: - - in_channels: (int) Number of input channels. - - graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class. - - num_class: (int) Number of class outputs. If `None` return pooling features of - the last st-gcn layer instead. - - edge_importance_weighting: (bool) If `True`, adds a learnable importance - weighting to the edges of the graph. - - **kwargs: (optional) Other parameters for graph convolution units. - Shape: - - Input: :math:`(N, in_channels, T_{in}, V_{in})` - - Output: :math:`(N, num_class)` where - :math:`N` is a batch size, - :math:`T_{in}` is a length of input sequence, - :math:`V_{in}` is the number of graph nodes, - or If num_class is `None`: `(N, out_channels)` - :math:`out_channels` is number of out_channels of the last layer. - """ - def __init__(self, in_channels, graph_args, num_class=None, - edge_importance_weighting=True, **kwargs): - super().__init__() - # Load graph. - graph = Graph(**graph_args) - A = torch.tensor(graph.A, dtype=torch.float32, requires_grad=False) - self.register_buffer('A', A) - - # Networks. - spatial_kernel_size = A.size(0) - temporal_kernel_size = 9 - kernel_size = (temporal_kernel_size, spatial_kernel_size) - kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'} - - self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) - self.st_gcn_networks = nn.ModuleList(( - st_gcn(in_channels, 64, kernel_size, 1, residual=True, **kwargs0), - # st_gcn(64, 64, kernel_size, 1, **kwargs), - # st_gcn(64, 64, kernel_size, 1, **kwargs), - # st_gcn(64, 64, kernel_size, 1, **kwargs), - st_gcn(64, 128, kernel_size, 2, **kwargs), - st_gcn(128, 128, kernel_size, 1, **kwargs), - # st_gcn(128, 128, kernel_size, 1, **kwargs), - st_gcn(128, 256, kernel_size, 2, **kwargs), - st_gcn(256, 256, kernel_size, 1, **kwargs), - # st_gcn(256, 256, kernel_size, 1, **kwargs) - )) - - # initialize parameters for edge importance weighting. - if edge_importance_weighting: - self.edge_importance = nn.ParameterList([ - nn.Parameter(torch.ones(A.size())) - for i in self.st_gcn_networks - ]) - else: - self.edge_importance = [1] * len(self.st_gcn_networks) - - if num_class is not None: - self.cls = nn.Conv2d(256, num_class, kernel_size=1) - else: - self.cls = lambda x: x - - def forward(self, x): - # data normalization. 
DHG - # N, T, V, C = x.size() - # x = x.permute(0, 2, 3, 1).contiguous() # (N, V, C, T) - # MyDataset - N, T, V, C = x.size() - x = x.permute(0, 2, 3, 1).contiguous() # (N, V, C, T) - x = x.view(N, V * C, T) - - x = self.data_bn(x) - x = x.view(N, V, C, T) - x = x.permute(0, 2, 3, 1).contiguous() - x = x.view(N, C, T, V) - - # forward. - for gcn, importance in zip(self.st_gcn_networks, self.edge_importance): - x = gcn(x, self.A * importance) - - x = F.avg_pool2d(x, x.size()[2:]) - x = self.cls(x) - x = x.view(x.size(0), -1) - - return x - - -class TwoStreamSpatialTemporalGraph(nn.Module): - """Two inputs spatial temporal graph convolutional networks. - Args: - - graph_args: (dict) Args map of `Actionsrecognition.Utils.Graph` Class. - - num_class: (int) Number of class outputs. - - edge_importance_weighting: (bool) If `True`, adds a learnable importance - weighting to the edges of the graph. - - **kwargs: (optional) Other parameters for graph convolution units. - Shape: - - Input: :tuple of math:`((N, 3, T, V), (N, 2, T, V))` - for points and motions stream where. - :math:`N` is a batch size, - :math:`in_channels` is data channels (3 is (x, y, score)), (2 is (mot_x, mot_y)) - :math:`T` is a length of input sequence, - :math:`V` is the number of graph nodes, - - Output: :math:`(N, num_class)` - """ - def __init__(self, graph_args, num_class, edge_importance_weighting=True, - **kwargs): - super().__init__() - self.pts_stream = StreamSpatialTemporalGraph(3, graph_args, None, - edge_importance_weighting, - **kwargs) - # self.mot_stream = StreamSpatialTemporalGraph(2, graph_args, None, - # edge_importance_weighting, - # **kwargs) - - self.fcn = nn.Linear(256, num_class) - - def forward(self, inputs): - # out1 = self.pts_stream(inputs[0]) - # out2 = self.mot_stream(inputs[1]) - # - # concat = torch.cat([out1, out2], dim=-1) - # out = self.fcn(concat) - - - out1 = self.pts_stream(inputs) - # out2 = self.mot_stream(inputs[1]) - - # concat = torch.cat([out1, out2], dim=-1) - out = self.fcn(out1) - - # return torch.sigmoid(out) - # return torch.softmax(out,dim=0) - return out \ No newline at end of file diff --git a/spaces/danieldux/isco-gpt/README.md b/spaces/danieldux/isco-gpt/README.md deleted file mode 100644 index 9e590154589e00e87b1725565744f5469bee5742..0000000000000000000000000000000000000000 --- a/spaces/danieldux/isco-gpt/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: FileGPT -emoji: 🐢 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: davila7/filegpt ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/message-button.js b/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/message-button.js deleted file mode 100644 index e16b065c8c0ea84b927ebbb46b7ff336d085b8d9..0000000000000000000000000000000000000000 --- a/spaces/dawdqd/ChuanhuChatGPT/web_assets/javascript/message-button.js +++ /dev/null @@ -1,92 +0,0 @@ - -// 为 bot 消息添加复制与切换显示按钮 - -function addChuanhuButton(botElement) { - var rawMessage = botElement.querySelector('.raw-message'); - var mdMessage = botElement.querySelector('.md-message'); - - if (!rawMessage) { // 如果没有 raw message,说明是早期历史记录,去除按钮 - var buttons = botElement.querySelectorAll('button.chuanhu-btn'); - for (var i = 0; i < buttons.length; i++) { - buttons[i].parentNode.removeChild(buttons[i]); - } - return; - } - botElement.querySelectorAll('button.copy-bot-btn, 
button.toggle-md-btn').forEach(btn => btn.remove()); // 就算原先有了,也必须重新添加,而不是跳过 - - // Copy bot button - var copyButton = document.createElement('button'); - copyButton.classList.add('chuanhu-btn'); - copyButton.classList.add('copy-bot-btn'); - copyButton.setAttribute('aria-label', 'Copy'); - copyButton.innerHTML = copyIcon; - - copyButton.addEventListener('click', async () => { - const textToCopy = rawMessage.innerText; - try { - if ("clipboard" in navigator) { - await navigator.clipboard.writeText(textToCopy); - copyButton.innerHTML = copiedIcon; - setTimeout(() => { - copyButton.innerHTML = copyIcon; - }, 1500); - } else { - const textArea = document.createElement("textarea"); - textArea.value = textToCopy; - document.body.appendChild(textArea); - textArea.select(); - try { - document.execCommand('copy'); - copyButton.innerHTML = copiedIcon; - setTimeout(() => { - copyButton.innerHTML = copyIcon; - }, 1500); - } catch (error) { - console.error("Copy failed: ", error); - } - document.body.removeChild(textArea); - } - } catch (error) { - console.error("Copy failed: ", error); - } - }); - botElement.appendChild(copyButton); - - // Toggle button - var toggleButton = document.createElement('button'); - toggleButton.classList.add('chuanhu-btn'); - toggleButton.classList.add('toggle-md-btn'); - toggleButton.setAttribute('aria-label', 'Toggle'); - var renderMarkdown = mdMessage.classList.contains('hideM'); - toggleButton.innerHTML = renderMarkdown ? mdIcon : rawIcon; - toggleButton.addEventListener('click', () => { - renderMarkdown = mdMessage.classList.contains('hideM'); - if (renderMarkdown) { - renderMarkdownText(botElement); - toggleButton.innerHTML=rawIcon; - } else { - removeMarkdownText(botElement); - toggleButton.innerHTML=mdIcon; - } - chatbotContentChanged(1); // to set md or raw in read-only history html - }); - botElement.insertBefore(toggleButton, copyButton); - - function renderMarkdownText(message) { - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.remove('hideM'); - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) rawDiv.classList.add('hideM'); - } - function removeMarkdownText(message) { - var rawDiv = message.querySelector('.raw-message'); - if (rawDiv) { - rawDiv.innerHTML = rawDiv.querySelector('pre')?.innerHTML || rawDiv.innerHTML; - rawDiv.classList.remove('hideM'); - } - var mdDiv = message.querySelector('.md-message'); - if (mdDiv) mdDiv.classList.add('hideM'); - } -} - - diff --git a/spaces/dawood/Kanye-AI/inference/infer_tool_grad.py b/spaces/dawood/Kanye-AI/inference/infer_tool_grad.py deleted file mode 100644 index b75af49c08e2e724839828bc419792ed580809bb..0000000000000000000000000000000000000000 --- a/spaces/dawood/Kanye-AI/inference/infer_tool_grad.py +++ /dev/null @@ -1,160 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path -import io -import librosa -import maad -import numpy as np -from inference import slicer -import parselmouth -import soundfile -import torch -import torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - 
- time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class VitsSvc(object): - def __init__(self): - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.SVCVITS = None - self.hps = None - self.speakers = None - self.hubert_soft = utils.get_hubert_model() - - def set_device(self, device): - self.device = torch.device(device) - self.hubert_soft.to(self.device) - if self.SVCVITS != None: - self.SVCVITS.to(self.device) - - def loadCheckpoint(self, path): - self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - self.SVCVITS = SynthesizerTrn( - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None) - _ = self.SVCVITS.eval().to(self.device) - self.speakers = self.hps.spk - - def get_units(self, source, sr): - source = source.unsqueeze(0).to(self.device) - with torch.inference_mode(): - units = self.hubert_soft.units(source) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - speaker_id = self.speakers[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device) - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.device) - x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2) - audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - return audio, audio.shape[-1] - - def inference(self,srcaudio,chara,tran,slice_db): - sampling_rate, audio = srcaudio - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = 
librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - soundfile.write("tmpwav.wav", audio, 16000, format="wav") - chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks) - audio = [] - for (slice_tag, data) in audio_data: - length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - _audio = np.zeros(length) - else: - out_audio, out_sr = self.infer(chara, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - audio = (np.array(audio) * 32768.0).astype('int16') - return (self.hps.data.sampling_rate,audio) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/_deprecate.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/_deprecate.py deleted file mode 100644 index 2f2a3df13e312aed847e482a067c2c10e4fd5632..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/_deprecate.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import annotations - -import warnings - -from . import __version__ - - -def deprecate( - deprecated: str, - when: int | None, - replacement: str | None = None, - *, - action: str | None = None, - plural: bool = False, -) -> None: - """ - Deprecations helper. - - :param deprecated: Name of thing to be deprecated. - :param when: Pillow major version to be removed in. - :param replacement: Name of replacement. - :param action: Instead of "replacement", give a custom call to action - e.g. "Upgrade to new thing". - :param plural: if the deprecated thing is plural, needing "are" instead of "is". - - Usually of the form: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). - Use [replacement] instead." - - You can leave out the replacement sentence: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd)" - - Or with another call to action: - - "[deprecated] is deprecated and will be removed in Pillow [when] (yyyy-mm-dd). - [action]." - """ - - is_ = "are" if plural else "is" - - if when is None: - removed = "a future version" - elif when <= int(__version__.split(".")[0]): - msg = f"{deprecated} {is_} deprecated and should be removed." - raise RuntimeError(msg) - elif when == 11: - removed = "Pillow 11 (2024-10-15)" - else: - msg = f"Unknown removal version: {when}. Update {__name__}?" - raise ValueError(msg) - - if replacement and action: - msg = "Use only one of 'replacement' and 'action'" - raise ValueError(msg) - - if replacement: - action = f". Use {replacement} instead." - elif action: - action = f". {action.rstrip('.')}." 
- else: - action = "" - - warnings.warn( - f"{deprecated} {is_} deprecated and will be removed in {removed}{action}", - DeprecationWarning, - stacklevel=3, - ) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/utils.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/utils.py deleted file mode 100644 index fa7a450b74e813e66fd6e9a140d48c29215503bb..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fastapi/security/utils.py +++ /dev/null @@ -1,10 +0,0 @@ -from typing import Optional, Tuple - - -def get_authorization_scheme_param( - authorization_header_value: Optional[str], -) -> Tuple[str, str]: - if not authorization_header_value: - return "", "" - scheme, _, param = authorization_header_value.partition(" ") - return scheme, param diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-1d75348c.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-1d75348c.css deleted file mode 100644 index b40c303ffc9a9ade541f7db8fd5ba721888b393b..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-1d75348c.css +++ /dev/null @@ -1 +0,0 @@ -.options.svelte-1aonegi{--window-padding:var(--size-8);position:fixed;z-index:var(--layer-top);margin-left:0;box-shadow:var(--shadow-drop-lg);border-radius:var(--container-radius);background:var(--background-fill-primary);min-width:fit-content;max-width:inherit;overflow:auto;color:var(--body-text-color);list-style:none}.item.svelte-1aonegi{display:flex;cursor:pointer;padding:var(--size-2)}.item.svelte-1aonegi:hover,.active.svelte-1aonegi{background:var(--background-fill-secondary)}.inner-item.svelte-1aonegi{padding-right:var(--size-1)}.hide.svelte-1aonegi{visibility:hidden}label.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn:not(.container),label.svelte-1xsj8nn:not(.container) .wrap.svelte-1xsj8nn.svelte-1xsj8nn,label.svelte-1xsj8nn:not(.container) .wrap-inner.svelte-1xsj8nn.svelte-1xsj8nn,label.svelte-1xsj8nn:not(.container) .secondary-wrap.svelte-1xsj8nn.svelte-1xsj8nn,label.svelte-1xsj8nn:not(.container) .token.svelte-1xsj8nn.svelte-1xsj8nn,label.svelte-1xsj8nn:not(.container) input.svelte-1xsj8nn.svelte-1xsj8nn{height:100%}.container.svelte-1xsj8nn .wrap.svelte-1xsj8nn.svelte-1xsj8nn{box-shadow:var(--input-shadow);border:var(--input-border-width) solid var(--border-color-primary)}.wrap.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{position:relative;border-radius:var(--input-radius);background:var(--input-background-fill)}.wrap.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn:focus-within{box-shadow:var(--input-shadow-focus);border-color:var(--input-border-color-focus)}.wrap-inner.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{display:flex;position:relative;flex-wrap:wrap;align-items:center;gap:var(--checkbox-label-gap);padding:var(--checkbox-label-padding)}.token.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid 
var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}.token.svelte-1xsj8nn>.svelte-1xsj8nn+.svelte-1xsj8nn{margin-left:var(--size-2)}.token-remove.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{fill:var(--body-text-color);display:flex;justify-content:center;align-items:center;cursor:pointer;border:var(--checkbox-border-width) solid var(--border-color-primary);border-radius:var(--radius-full);background:var(--background-fill-primary);padding:var(--size-0-5);width:18px;height:18px}.secondary-wrap.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{display:flex;flex:1 1 0%;align-items:center;border:none;min-width:min-content}input.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{margin:var(--spacing-sm);outline:none;border:none;background:inherit;width:var(--size-full);color:var(--body-text-color);font-size:var(--input-text-size)}input.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn:disabled{-webkit-text-fill-color:var(--body-text-color);-webkit-opacity:1;opacity:1;cursor:not-allowed}.remove-all.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{margin-left:var(--size-1);width:20px;height:20px}.hide.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{display:none}.subdued.svelte-1xsj8nn.svelte-1xsj8nn.svelte-1xsj8nn{color:var(--body-text-color-subdued)} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py deleted file mode 100644 index bb368dd42d559a6de6961c95b0cdef855b868c97..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/_sync/http_proxy.py +++ /dev/null @@ -1,350 +0,0 @@ -import logging -import ssl -from base64 import b64encode -from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union - -from .._backends.base import SOCKET_OPTION, NetworkBackend -from .._exceptions import ProxyError -from .._models import ( - URL, - Origin, - Request, - Response, - enforce_bytes, - enforce_headers, - enforce_url, -) -from .._ssl import default_ssl_context -from .._synchronization import Lock -from .._trace import Trace -from .connection import HTTPConnection -from .connection_pool import ConnectionPool -from .http11 import HTTP11Connection -from .interfaces import ConnectionInterface - -HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] -HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] - - -logger = logging.getLogger("httpcore.proxy") - - -def merge_headers( - default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, - override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, -) -> List[Tuple[bytes, bytes]]: - """ - Append default_headers and override_headers, de-duplicating if a key exists - in both cases. 
- """ - default_headers = [] if default_headers is None else list(default_headers) - override_headers = [] if override_headers is None else list(override_headers) - has_override = set(key.lower() for key, value in override_headers) - default_headers = [ - (key, value) - for key, value in default_headers - if key.lower() not in has_override - ] - return default_headers + override_headers - - -def build_auth_header(username: bytes, password: bytes) -> bytes: - userpass = username + b":" + password - return b"Basic " + b64encode(userpass) - - -class HTTPProxy(ConnectionPool): - """ - A connection pool that sends requests via an HTTP proxy. - """ - - def __init__( - self, - proxy_url: Union[URL, bytes, str], - proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, - proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, - ssl_context: Optional[ssl.SSLContext] = None, - max_connections: Optional[int] = 10, - max_keepalive_connections: Optional[int] = None, - keepalive_expiry: Optional[float] = None, - http1: bool = True, - http2: bool = False, - retries: int = 0, - local_address: Optional[str] = None, - uds: Optional[str] = None, - network_backend: Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - """ - A connection pool for making HTTP requests. - - Parameters: - proxy_url: The URL to use when connecting to the proxy server. - For example `"http://127.0.0.1:8080/"`. - proxy_auth: Any proxy authentication as a two-tuple of - (username, password). May be either bytes or ascii-only str. - proxy_headers: Any HTTP headers to use for the proxy requests. - For example `{"Proxy-Authorization": "Basic :"}`. - ssl_context: An SSL context to use for verifying connections. - If not specified, the default `httpcore.default_ssl_context()` - will be used. - max_connections: The maximum number of concurrent HTTP connections that - the pool should allow. Any attempt to send a request on a pool that - would exceed this amount will block until a connection is available. - max_keepalive_connections: The maximum number of idle HTTP connections - that will be maintained in the pool. - keepalive_expiry: The duration in seconds that an idle HTTP connection - may be maintained for before being expired from the pool. - http1: A boolean indicating if HTTP/1.1 requests should be supported - by the connection pool. Defaults to True. - http2: A boolean indicating if HTTP/2 requests should be supported by - the connection pool. Defaults to False. - retries: The maximum number of retries when trying to establish - a connection. - local_address: Local address to connect from. Can also be used to - connect using a particular address family. Using - `local_address="0.0.0.0"` will connect using an `AF_INET` address - (IPv4), while using `local_address="::"` will connect using an - `AF_INET6` address (IPv6). - uds: Path to a Unix Domain Socket to use instead of TCP sockets. - network_backend: A backend instance to use for handling network I/O. 
- """ - super().__init__( - ssl_context=ssl_context, - max_connections=max_connections, - max_keepalive_connections=max_keepalive_connections, - keepalive_expiry=keepalive_expiry, - http1=http1, - http2=http2, - network_backend=network_backend, - retries=retries, - local_address=local_address, - uds=uds, - socket_options=socket_options, - ) - self._ssl_context = ssl_context - self._proxy_url = enforce_url(proxy_url, name="proxy_url") - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - if proxy_auth is not None: - username = enforce_bytes(proxy_auth[0], name="proxy_auth") - password = enforce_bytes(proxy_auth[1], name="proxy_auth") - authorization = build_auth_header(username, password) - self._proxy_headers = [ - (b"Proxy-Authorization", authorization) - ] + self._proxy_headers - - def create_connection(self, origin: Origin) -> ConnectionInterface: - if origin.scheme == b"http": - return ForwardHTTPConnection( - proxy_origin=self._proxy_url.origin, - proxy_headers=self._proxy_headers, - remote_origin=origin, - keepalive_expiry=self._keepalive_expiry, - network_backend=self._network_backend, - ) - return TunnelHTTPConnection( - proxy_origin=self._proxy_url.origin, - proxy_headers=self._proxy_headers, - remote_origin=origin, - ssl_context=self._ssl_context, - keepalive_expiry=self._keepalive_expiry, - http1=self._http1, - http2=self._http2, - network_backend=self._network_backend, - ) - - -class ForwardHTTPConnection(ConnectionInterface): - def __init__( - self, - proxy_origin: Origin, - remote_origin: Origin, - proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, - keepalive_expiry: Optional[float] = None, - network_backend: Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - self._connection = HTTPConnection( - origin=proxy_origin, - keepalive_expiry=keepalive_expiry, - network_backend=network_backend, - socket_options=socket_options, - ) - self._proxy_origin = proxy_origin - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - self._remote_origin = remote_origin - - def handle_request(self, request: Request) -> Response: - headers = merge_headers(self._proxy_headers, request.headers) - url = URL( - scheme=self._proxy_origin.scheme, - host=self._proxy_origin.host, - port=self._proxy_origin.port, - target=bytes(request.url), - ) - proxy_request = Request( - method=request.method, - url=url, - headers=headers, - content=request.stream, - extensions=request.extensions, - ) - return self._connection.handle_request(proxy_request) - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._remote_origin - - def close(self) -> None: - self._connection.close() - - def info(self) -> str: - return self._connection.info() - - def is_available(self) -> bool: - return self._connection.is_available() - - def has_expired(self) -> bool: - return self._connection.has_expired() - - def is_idle(self) -> bool: - return self._connection.is_idle() - - def is_closed(self) -> bool: - return self._connection.is_closed() - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.info()}]>" - - -class TunnelHTTPConnection(ConnectionInterface): - def __init__( - self, - proxy_origin: Origin, - remote_origin: Origin, - ssl_context: Optional[ssl.SSLContext] = None, - proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, - keepalive_expiry: Optional[float] = None, - http1: bool = True, - http2: bool = False, - network_backend: 
Optional[NetworkBackend] = None, - socket_options: Optional[Iterable[SOCKET_OPTION]] = None, - ) -> None: - self._connection: ConnectionInterface = HTTPConnection( - origin=proxy_origin, - keepalive_expiry=keepalive_expiry, - network_backend=network_backend, - socket_options=socket_options, - ) - self._proxy_origin = proxy_origin - self._remote_origin = remote_origin - self._ssl_context = ssl_context - self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") - self._keepalive_expiry = keepalive_expiry - self._http1 = http1 - self._http2 = http2 - self._connect_lock = Lock() - self._connected = False - - def handle_request(self, request: Request) -> Response: - timeouts = request.extensions.get("timeout", {}) - timeout = timeouts.get("connect", None) - - with self._connect_lock: - if not self._connected: - target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) - - connect_url = URL( - scheme=self._proxy_origin.scheme, - host=self._proxy_origin.host, - port=self._proxy_origin.port, - target=target, - ) - connect_headers = merge_headers( - [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers - ) - connect_request = Request( - method=b"CONNECT", - url=connect_url, - headers=connect_headers, - extensions=request.extensions, - ) - connect_response = self._connection.handle_request( - connect_request - ) - - if connect_response.status < 200 or connect_response.status > 299: - reason_bytes = connect_response.extensions.get("reason_phrase", b"") - reason_str = reason_bytes.decode("ascii", errors="ignore") - msg = "%d %s" % (connect_response.status, reason_str) - self._connection.close() - raise ProxyError(msg) - - stream = connect_response.extensions["network_stream"] - - # Upgrade the stream to SSL - ssl_context = ( - default_ssl_context() - if self._ssl_context is None - else self._ssl_context - ) - alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] - ssl_context.set_alpn_protocols(alpn_protocols) - - kwargs = { - "ssl_context": ssl_context, - "server_hostname": self._remote_origin.host.decode("ascii"), - "timeout": timeout, - } - with Trace("start_tls", logger, request, kwargs) as trace: - stream = stream.start_tls(**kwargs) - trace.return_value = stream - - # Determine if we should be using HTTP/1.1 or HTTP/2 - ssl_object = stream.get_extra_info("ssl_object") - http2_negotiated = ( - ssl_object is not None - and ssl_object.selected_alpn_protocol() == "h2" - ) - - # Create the HTTP/1.1 or HTTP/2 connection - if http2_negotiated or (self._http2 and not self._http1): - from .http2 import HTTP2Connection - - self._connection = HTTP2Connection( - origin=self._remote_origin, - stream=stream, - keepalive_expiry=self._keepalive_expiry, - ) - else: - self._connection = HTTP11Connection( - origin=self._remote_origin, - stream=stream, - keepalive_expiry=self._keepalive_expiry, - ) - - self._connected = True - return self._connection.handle_request(request) - - def can_handle_request(self, origin: Origin) -> bool: - return origin == self._remote_origin - - def close(self) -> None: - self._connection.close() - - def info(self) -> str: - return self._connection.info() - - def is_available(self) -> bool: - return self._connection.is_available() - - def has_expired(self) -> bool: - return self._connection.has_expired() - - def is_idle(self) -> bool: - return self._connection.is_idle() - - def is_closed(self) -> bool: - return self._connection.is_closed() - - def __repr__(self) -> str: - return f"<{self.__class__.__name__} [{self.info()}]>" diff 
--git a/spaces/de3sec/Front-end-code-generation-from-images/compiler/Node.py b/spaces/de3sec/Front-end-code-generation-from-images/compiler/Node.py deleted file mode 100644 index 4dac248505d448edda8aea5b708c3fa7569ceb29..0000000000000000000000000000000000000000 --- a/spaces/de3sec/Front-end-code-generation-from-images/compiler/Node.py +++ /dev/null @@ -1,32 +0,0 @@ -from __future__ import print_function -__author__ = 'Taneem Jan, taneemishere.github.io' - - -class Node: - def __init__(self, key, parent_node, content_holder): - self.key = key - self.parent = parent_node - self.children = [] - self.content_holder = content_holder - - def add_child(self, child): - self.children.append(child) - - def show(self): - print(self.key) - for child in self.children: - child.show() - - def render(self, mapping, rendering_function=None): - content = "" - for child in self.children: - content += child.render(mapping, rendering_function) - - value = mapping[self.key] - if rendering_function is not None: - value = rendering_function(self.key, value) - - if len(self.children) != 0: - value = value.replace(self.content_holder, content) - - return value diff --git a/spaces/declare-lab/tango/diffusers/examples/research_projects/multi_subject_dreambooth/README.md b/spaces/declare-lab/tango/diffusers/examples/research_projects/multi_subject_dreambooth/README.md deleted file mode 100644 index cf7dd31d0797ad1e22fb7d5ab192de2dada490df..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/examples/research_projects/multi_subject_dreambooth/README.md +++ /dev/null @@ -1,291 +0,0 @@ -# Multi Subject DreamBooth training - -[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few(3~5) images of a subject. -This `train_multi_subject_dreambooth.py` script shows how to implement the training procedure for one or more subjects and adapt it for stable diffusion. Note that this code is based off of the `examples/dreambooth/train_dreambooth.py` script as of 01/06/2022. - -This script was added by @kopsahlong, and is not actively maintained. However, if you come across anything that could use fixing, feel free to open an issue and tag @kopsahlong. - -## Running locally with PyTorch -### Installing the dependencies - -Before running the script, make sure to install the library's training dependencies: - -To start, execute the following steps in a new virtual environment: -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install -e . -``` - -Then cd into the folder `diffusers/examples/research_projects/multi_subject_dreambooth` and run the following: -```bash -pip install -r requirements.txt -``` - -And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -Or for a default accelerate configuration without answering questions about your environment - -```bash -accelerate config default -``` - -Or if your environment doesn't support an interactive shell e.g. a notebook - -```python -from accelerate.utils import write_basic_config -write_basic_config() -``` - -### Multi Subject Training Example -In order to have your model learn multiple concepts at once, we simply add in the additional data directories and prompts to our `instance_data_dir` and `instance_prompt` (as well as `class_data_dir` and `class_prompt` if `--with_prior_preservation` is specified) as one comma separated string. 
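For intuition, here is a minimal sketch (not the script's actual code) of what passing several subjects as one comma-separated string amounts to: the values are split into aligned per-subject lists, one entry per concept. The variable names and the `strip()`/`assert` details below are illustrative.

```python
# Illustrative only: split the comma-separated multi-subject arguments into
# per-subject lists and check that every directory has a matching prompt.
instance_data_dir = "path-to-instance-images-concept-1,path-to-instance-images-concept-2"
instance_prompt = "a photo of a sks dog,a photo of a t@y person"

instance_data_dirs = [d.strip() for d in instance_data_dir.split(",")]
instance_prompts = [p.strip() for p in instance_prompt.split(",")]
assert len(instance_data_dirs) == len(instance_prompts), \
    "each instance data directory needs a matching instance prompt"

for data_dir, prompt in zip(instance_data_dirs, instance_prompts):
    print(f"{prompt!r} <- images from {data_dir}")
```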
- -See an example with 2 subjects below, which learns a model for one dog subject and one human subject: - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export OUTPUT_DIR="path-to-save-model" - -# Subject 1 -export INSTANCE_DIR_1="path-to-instance-images-concept-1" -export INSTANCE_PROMPT_1="a photo of a sks dog" -export CLASS_DIR_1="path-to-class-images-dog" -export CLASS_PROMPT_1="a photo of a dog" - -# Subject 2 -export INSTANCE_DIR_2="path-to-instance-images-concept-2" -export INSTANCE_PROMPT_2="a photo of a t@y person" -export CLASS_DIR_2="path-to-class-images-person" -export CLASS_PROMPT_2="a photo of a person" - -accelerate launch train_multi_subject_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir="$INSTANCE_DIR_1,$INSTANCE_DIR_2" \ - --output_dir=$OUTPUT_DIR \ - --train_text_encoder \ - --instance_prompt="$INSTANCE_PROMPT_1,$INSTANCE_PROMPT_2" \ - --with_prior_preservation \ - --prior_loss_weight=1.0 \ - --class_data_dir="$CLASS_DIR_1,$CLASS_DIR_2" \ - --class_prompt="$CLASS_PROMPT_1,$CLASS_PROMPT_2"\ - --num_class_images=50 \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=1e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=1500 -``` - -This example shows training for 2 subjects, but please note that the model can be trained on any number of new concepts. This can be done by continuing to add in the corresponding directories and prompts to the corresponding comma separated string. - -Note also that in this script, `sks` and `t@y` were used as tokens to learn the new subjects ([this thread](https://github.com/XavierXiao/Dreambooth-Stable-Diffusion/issues/71) inspired the use of `t@y` as our second identifier). However, there may be better rare tokens to experiment with, and results also seemed to be good when more intuitive words are used. - -### Inference - -Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `identifier`(e.g. sks in above example) in your prompt. - -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "path-to-your-trained-model" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") - -prompt = "A photo of a t@y person petting an sks dog" -image = pipe(prompt, num_inference_steps=200, guidance_scale=7.5).images[0] - -image.save("person-petting-dog.png") -``` - -### Inference from a training checkpoint - -You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument. Please, refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) to see how to do it. - -## Additional Dreambooth documentation -Because the `train_multi_subject_dreambooth.py` script here was forked from an original version of `train_dreambooth.py` in the `examples/dreambooth` folder, I've included the original applicable training documentation for single subject examples below. - -This should explain how to play with training variables such as prior preservation, fine tuning the text encoder, etc. which is still applicable to our multi subject training code. Note also that the examples below, which are single subject examples, also work with `train_multi_subject_dreambooth.py`, as this script supports 1 (or more) subjects. 
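As a side note on the checkpoint-based inference mentioned above, a hedged sketch of what loading an intermediate checkpoint into a pipeline can look like is given below. The `checkpoint-1000/unet` path, the step count, and the assumption that the checkpoint exposes a loadable `unet` subfolder are illustrative; follow the linked documentation for the exact procedure, and remember that a run with `--train_text_encoder` also needs its text encoder restored.

```python
# Hedged sketch, not the documented procedure: plug a UNet restored from an
# intermediate DreamBooth checkpoint into a Stable Diffusion pipeline.
import torch
from diffusers import StableDiffusionPipeline, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path-to-save-model/checkpoint-1000/unet")
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", unet=unet, torch_dtype=torch.float16
).to("cuda")

image = pipe("A photo of a t@y person petting an sks dog", num_inference_steps=50).images[0]
image.save("person-petting-dog-checkpoint.png")
```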
- -### Single subject dog toy example - -Let's get our dataset. Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. This will be our training data. - -And launch the training using - -**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___** - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path-to-instance-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --instance_prompt="a photo of sks dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=400 -``` - -### Training with prior-preservation loss - -Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data. -According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time. - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=1 \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - - -### Training on a 16GB GPU: - -With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes it's possible to run train dreambooth on a 16GB GPU. - -To install `bitandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation). 
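Conceptually, `--use_8bit_adam` swaps the optimizer for the bitsandbytes 8-bit variant, roughly as in the hedged sketch below (the fallback logic and parameter values are illustrative, not the script's exact code); the full 16GB training command follows.

```python
# Hedged sketch of the --use_8bit_adam switch: prefer bitsandbytes' 8-bit AdamW
# when it is installed, otherwise fall back to the standard torch optimizer.
import torch

try:
    import bitsandbytes as bnb  # assumed installed per the bitsandbytes readme
    optimizer_cls = bnb.optim.AdamW8bit
except ImportError:
    optimizer_cls = torch.optim.AdamW

params = [torch.nn.Parameter(torch.zeros(8, 8))]  # stand-in for unet.parameters()
optimizer = optimizer_cls(params, lr=5e-6)
print(type(optimizer).__name__)
```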
- -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=2 --gradient_checkpointing \ - --use_8bit_adam \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### Training on a 8 GB GPU: - -By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some -tensors from VRAM to either CPU or NVME allowing to train with less VRAM. - -DeepSpeed needs to be enabled with `accelerate config`. During configuration -answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16 -mixed precision and offloading both parameters and optimizer state to cpu it's -possible to train on under 8 GB VRAM with a drawback of requiring significantly -more RAM (about 25 GB). See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options. - -Changing the default Adam optimizer to DeepSpeed's special version of Adam -`deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but enabling -it requires CUDA toolchain with the same version as pytorch. 8-bit optimizer -does not seem to be compatible with DeepSpeed at the moment. - -```bash -export MODEL_NAME="CompVis/stable-diffusion-v1-4" -export INSTANCE_DIR="path-to-instance-images" -export CLASS_DIR="path-to-class-images" -export OUTPUT_DIR="path-to-save-model" - -accelerate launch --mixed_precision="fp16" train_dreambooth.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --class_data_dir=$CLASS_DIR \ - --output_dir=$OUTPUT_DIR \ - --with_prior_preservation --prior_loss_weight=1.0 \ - --instance_prompt="a photo of sks dog" \ - --class_prompt="a photo of dog" \ - --resolution=512 \ - --train_batch_size=1 \ - --sample_batch_size=1 \ - --gradient_accumulation_steps=1 --gradient_checkpointing \ - --learning_rate=5e-6 \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --num_class_images=200 \ - --max_train_steps=800 -``` - -### Fine-tune text encoder with the UNet. - -The script also allows to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning `text_encoder` gives much better results especially on faces. -Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`. - -___Note: Training text encoder requires more memory, with this option the training won't fit on 16GB GPU. 
It needs at least 24GB VRAM.___
-
-```bash
-export MODEL_NAME="CompVis/stable-diffusion-v1-4"
-export INSTANCE_DIR="path-to-instance-images"
-export CLASS_DIR="path-to-class-images"
-export OUTPUT_DIR="path-to-save-model"
-
-accelerate launch train_dreambooth.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --train_text_encoder \
- --instance_data_dir=$INSTANCE_DIR \
- --class_data_dir=$CLASS_DIR \
- --output_dir=$OUTPUT_DIR \
- --with_prior_preservation --prior_loss_weight=1.0 \
- --instance_prompt="a photo of sks dog" \
- --class_prompt="a photo of dog" \
- --resolution=512 \
- --train_batch_size=1 \
- --use_8bit_adam \
- --gradient_checkpointing \
- --learning_rate=2e-6 \
- --lr_scheduler="constant" \
- --lr_warmup_steps=0 \
- --num_class_images=200 \
- --max_train_steps=800
-```
-
-### Using DreamBooth for other pipelines than Stable Diffusion
-
-AltDiffusion also supports DreamBooth now; the running command is basically the same as above, all you need to do is replace the `MODEL_NAME` like this:
-One can now simply change the `pretrained_model_name_or_path` to another architecture such as [`AltDiffusion`](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion).
-
-```
-export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
-or
-export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
-```
-
-### Training with xformers:
-You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
-
-You can also use DreamBooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint). \ No newline at end of file diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py deleted file mode 100644 index 0b0c547496a0202dbfa1d8525a92565b3df62cbb..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +++ /dev/null @@ -1,109 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import torch -import torch.nn as nn -from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel - -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -def cosine_distance(image_embeds, text_embeds): - normalized_image_embeds = nn.functional.normalize(image_embeds) - normalized_text_embeds = nn.functional.normalize(text_embeds) - return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) - - -class SafeStableDiffusionSafetyChecker(PreTrainedModel): - config_class = CLIPConfig - - _no_split_modules = ["CLIPEncoderLayer"] - - def __init__(self, config: CLIPConfig): - super().__init__(config) - - self.vision_model = CLIPVisionModel(config.vision_config) - self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) - - self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) - self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) - - self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) - self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) - - @torch.no_grad() - def forward(self, clip_input, images): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() - cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() - - result = [] - batch_size = image_embeds.shape[0] - for i in range(batch_size): - result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} - - # increase this value to create a stronger `nfsw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - for concept_idx in range(len(special_cos_dist[0])): - concept_cos = special_cos_dist[i][concept_idx] - concept_threshold = self.special_care_embeds_weights[concept_idx].item() - result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) - if result_img["special_scores"][concept_idx] > 0: - result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) - adjustment = 0.01 - - for concept_idx in range(len(cos_dist[0])): - concept_cos = cos_dist[i][concept_idx] - concept_threshold = self.concept_embeds_weights[concept_idx].item() - result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) - if result_img["concept_scores"][concept_idx] > 0: - result_img["bad_concepts"].append(concept_idx) - - result.append(result_img) - - has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] - - return images, has_nsfw_concepts - - @torch.no_grad() - def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) - cos_dist = cosine_distance(image_embeds, self.concept_embeds) - - # increase this value to create a stronger `nsfw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment - 
# special_scores = special_scores.round(decimals=3) - special_care = torch.any(special_scores > 0, dim=1) - special_adjustment = special_care * 0.01 - special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) - - concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment - # concept_scores = concept_scores.round(decimals=3) - has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) - - return images, has_nsfw_concepts diff --git a/spaces/deelerb/3dselfie/PIFu/lib/ext_transform.py b/spaces/deelerb/3dselfie/PIFu/lib/ext_transform.py deleted file mode 100644 index 7e1104bd7b1a24303370c066d1487f83a9bfece0..0000000000000000000000000000000000000000 --- a/spaces/deelerb/3dselfie/PIFu/lib/ext_transform.py +++ /dev/null @@ -1,78 +0,0 @@ -import random - -import numpy as np -from skimage.filters import gaussian -import torch -from PIL import Image, ImageFilter - - -class RandomVerticalFlip(object): - def __call__(self, img): - if random.random() < 0.5: - return img.transpose(Image.FLIP_TOP_BOTTOM) - return img - - -class DeNormalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, tensor): - for t, m, s in zip(tensor, self.mean, self.std): - t.mul_(s).add_(m) - return tensor - - -class MaskToTensor(object): - def __call__(self, img): - return torch.from_numpy(np.array(img, dtype=np.int32)).long() - - -class FreeScale(object): - def __init__(self, size, interpolation=Image.BILINEAR): - self.size = tuple(reversed(size)) # size: (h, w) - self.interpolation = interpolation - - def __call__(self, img): - return img.resize(self.size, self.interpolation) - - -class FlipChannels(object): - def __call__(self, img): - img = np.array(img)[:, :, ::-1] - return Image.fromarray(img.astype(np.uint8)) - - -class RandomGaussianBlur(object): - def __call__(self, img): - sigma = 0.15 + random.random() * 1.15 - blurred_img = gaussian(np.array(img), sigma=sigma, multichannel=True) - blurred_img *= 255 - return Image.fromarray(blurred_img.astype(np.uint8)) - -# Lighting data augmentation take from here - https://github.com/eladhoffer/convNet.pytorch/blob/master/preprocess.py - - -class Lighting(object): - """Lighting noise(AlexNet - style PCA - based noise)""" - - def __init__(self, alphastd, - eigval=(0.2175, 0.0188, 0.0045), - eigvec=((-0.5675, 0.7192, 0.4009), - (-0.5808, -0.0045, -0.8140), - (-0.5836, -0.6948, 0.4203))): - self.alphastd = alphastd - self.eigval = torch.Tensor(eigval) - self.eigvec = torch.Tensor(eigvec) - - def __call__(self, img): - if self.alphastd == 0: - return img - - alpha = img.new().resize_(3).normal_(0, self.alphastd) - rgb = self.eigvec.type_as(img).clone()\ - .mul(alpha.view(1, 3).expand(3, 3))\ - .mul(self.eigval.view(1, 3).expand(3, 3))\ - .sum(1).squeeze() - return img.add(rgb.view(3, 1, 1).expand_as(img)) diff --git a/spaces/deepghs/character_splitter/README.md b/spaces/deepghs/character_splitter/README.md deleted file mode 100644 index 855d286147e530f85ebfb0989f0bf71947506165..0000000000000000000000000000000000000000 --- a/spaces/deepghs/character_splitter/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Character Splitter -emoji: ⚡ -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Jumanji Welcome To The Jungle English 1080p Bluray Movie Download.md 
b/spaces/diacanFperku/AutoGPT/Jumanji Welcome To The Jungle English 1080p Bluray Movie Download.md deleted file mode 100644 index 9c7b776ef1e68613aa20721b1d50482ba5113ac2..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Jumanji Welcome To The Jungle English 1080p Bluray Movie Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Jumanji Welcome To The Jungle English 1080p Bluray Movie Download


    Download Zip ————— https://gohhs.com/2uFUf1



    - -Download Jumanji: Welcome to the Jungle (2017) Movie Dual Audio ... Movie Hdrip - Dvdrip Quality 1080p or 720p or 480p from kickass, ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/digitalxingtong/Jiaran-Bert-VITS2/monotonic_align/core.py b/spaces/digitalxingtong/Jiaran-Bert-VITS2/monotonic_align/core.py deleted file mode 100644 index 5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiaran-Bert-VITS2/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 \ No newline at end of file diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Taffy-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Taffy-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', 
- '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 
看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/japanese.py b/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/dirge/voicevox/test/test_word_types.py 
b/spaces/dirge/voicevox/test/test_word_types.py deleted file mode 100644 index 1f2635b680e9b82d23ae3825f2a746b171d6ed3a..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/test/test_word_types.py +++ /dev/null @@ -1,9 +0,0 @@ -from unittest import TestCase - -from voicevox_engine.model import WordTypes -from voicevox_engine.part_of_speech_data import part_of_speech_data - - -class TestWordTypes(TestCase): - def test_word_types(self): - self.assertCountEqual(list(WordTypes), list(part_of_speech_data.keys())) diff --git a/spaces/dodos3/cosmos/greeting.md b/spaces/dodos3/cosmos/greeting.md deleted file mode 100644 index b65ead719f68dc58cf1225667dfbc83cf1accd39..0000000000000000000000000000000000000000 --- a/spaces/dodos3/cosmos/greeting.md +++ /dev/null @@ -1 +0,0 @@ -GODISGOOD \ No newline at end of file diff --git a/spaces/doevent/Image2LineDrawing/README.md b/spaces/doevent/Image2LineDrawing/README.md deleted file mode 100644 index e7d07686e6425c0826b2112b3fb6af1fc04530c3..0000000000000000000000000000000000000000 --- a/spaces/doevent/Image2LineDrawing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ✏️Image2LineDrawing GR🖼️ -emoji: ✏️🖼️ -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/dorkai/text-generation-webui-main/extensions/api/streaming_api.py b/spaces/dorkai/text-generation-webui-main/extensions/api/streaming_api.py deleted file mode 100644 index e50dfa2266594f9edc7fb2b6f8659f275236279f..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/extensions/api/streaming_api.py +++ /dev/null @@ -1,78 +0,0 @@ -import asyncio -import json -from threading import Thread - -from websockets.server import serve - -from extensions.api.util import build_parameters, try_start_cloudflared -from modules import shared -from modules.text_generation import generate_reply - -PATH = '/api/v1/stream' - - -async def _handle_connection(websocket, path): - - if path != PATH: - print(f'Streaming api: unknown path: {path}') - return - - async for message in websocket: - message = json.loads(message) - - prompt = message['prompt'] - generate_params = build_parameters(message) - stopping_strings = generate_params.pop('stopping_strings') - generate_params['stream'] = True - - generator = generate_reply( - prompt, generate_params, stopping_strings=stopping_strings, is_chat=False) - - # As we stream, only send the new bytes. 
- skip_index = 0 - message_num = 0 - - for a in generator: - to_send = a[skip_index:] - await websocket.send(json.dumps({ - 'event': 'text_stream', - 'message_num': message_num, - 'text': to_send - })) - - await asyncio.sleep(0) - - skip_index += len(to_send) - message_num += 1 - - await websocket.send(json.dumps({ - 'event': 'stream_end', - 'message_num': message_num - })) - - -async def _run(host: str, port: int): - async with serve(_handle_connection, host, port, ping_interval=None): - await asyncio.Future() # run forever - - -def _run_server(port: int, share: bool = False): - address = '0.0.0.0' if shared.args.listen else '127.0.0.1' - - def on_start(public_url: str): - public_url = public_url.replace('https://', 'wss://') - print(f'Starting streaming server at public url {public_url}{PATH}') - - if share: - try: - try_start_cloudflared(port, max_attempts=3, on_start=on_start) - except Exception as e: - print(e) - else: - print(f'Starting streaming server at ws://{address}:{port}{PATH}') - - asyncio.run(_run(host=address, port=port)) - - -def start_server(port: int, share: bool = False): - Thread(target=_run_server, args=[port, share], daemon=True).start() diff --git a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/l2norm.py b/spaces/duycse1603/math2tex/ScanSSD/layers/modules/l2norm.py deleted file mode 100644 index d5eaa721634478f53479df7d30684227cb7a60ac..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/ScanSSD/layers/modules/l2norm.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -import torch.nn as nn -# from torch.autograd import Function -# from torch.autograd import Variable -import torch.nn.init as init - -class L2Norm(nn.Module): - def __init__(self,n_channels, scale): - super(L2Norm,self).__init__() - self.n_channels = n_channels - self.gamma = scale or None - self.eps = 1e-10 - self.weight = nn.Parameter(torch.Tensor(self.n_channels)) - self.reset_parameters() - - def reset_parameters(self): - init.constant_(self.weight,self.gamma) - - def forward(self, x): - norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()+self.eps - #x /= norm - x = torch.div(x,norm) - out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x - return out diff --git a/spaces/editing-images/ledits/style.css b/spaces/editing-images/ledits/style.css deleted file mode 100644 index ac611316905c6d88fb53fcf95abdbcf35c55d2e6..0000000000000000000000000000000000000000 --- a/spaces/editing-images/ledits/style.css +++ /dev/null @@ -1,93 +0,0 @@ -/* -This CSS file is modified from: -https://huggingface.co/spaces/DeepFloyd/IF/blob/main/style.css -*/ - -h1 { - text-align: center; -} - -.gradio-container { - font-family: 'IBM Plex Sans', sans-serif; -} - -.gr-button { - color: white; - border-color: black; - background: black; -} - -input[type='range'] { - accent-color: black; -} - -.dark input[type='range'] { - accent-color: #dfdfdf; -} - -.container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} - - -.gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; -} - -.gr-form { - flex: 1 1 50%; - 
border-top-right-radius: 0; - border-bottom-right-radius: 0; -} - -#prompt-container { - gap: 0; -} - -#prompt-text-input, -#negative-prompt-text-input { - padding: .45rem 0.625rem -} - -/* #component-16 { - border-top-width: 1px !important; - margin-top: 1em -} */ - -.image_duplication { - position: absolute; - width: 100px; - left: 50px -} - -#component-0 { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; -} - -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; margin-left: auto; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} \ No newline at end of file diff --git a/spaces/erwann/Face-editor/configs.py b/spaces/erwann/Face-editor/configs.py deleted file mode 100644 index 50c8cabe2e99cad46e33c1823ef9c05324a0254f..0000000000000000000000000000000000000000 --- a/spaces/erwann/Face-editor/configs.py +++ /dev/null @@ -1,15 +0,0 @@ -import gradio as gr -def set_small_local(): - return (gr.Slider.update(value=18), gr.Slider.update(value=0.15), gr.Slider.update(value=5), gr.Slider.update(value=4)) -def set_major_local(): - return (gr.Slider.update(value=25), gr.Slider.update(value=0.187), gr.Slider.update(value=36.6), gr.Slider.update(value=6)) -def set_major_global(): - return (gr.Slider.update(value=30), gr.Slider.update(value=0.1), gr.Slider.update(value=1), gr.Slider.update(value=1)) -def set_preset(config_str): - choices=["Small Masked Changes (e.g. add lipstick)", "Major Masked Changes (e.g. change hair color or nose size)", "Major Global Changes (e.g. change race / gender"] - if config_str == choices[0]: - return set_small_local() - elif config_str == choices[1]: - return set_major_local() - elif config_str == choices[2]: - return set_major_global() \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Mga Pabula Na May Aral.md b/spaces/falterWliame/Face_Mask_Detection/Mga Pabula Na May Aral.md deleted file mode 100644 index 215763f1bd56ab8ff07c1b89df1e1d039bd65ecb..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Mga Pabula Na May Aral.md +++ /dev/null @@ -1,6 +0,0 @@ -

    mga pabula na may aral


    DOWNLOAD » https://urlca.com/2uDcXB



    - -Once upon a time, a mother and her child lived in a faraway place. The mother was Aling Rosa and the child was Pinang. Aling Rosa dearly loved ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Download Among Us APK with All Skins Pets and Hats Unlocked.md b/spaces/fatiXbelha/sd/Download Among Us APK with All Skins Pets and Hats Unlocked.md deleted file mode 100644 index b5590ddef145465dff429c63c10838b3037cdce5..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Among Us APK with All Skins Pets and Hats Unlocked.md +++ /dev/null @@ -1,113 +0,0 @@ -
    -

    How to Unlock All Skins in Among Us APK

    -

    Among Us is one of the most popular games of 2022 and 2023, with millions of players enjoying its thrilling gameplay and social interaction. The game is set on a spaceship where players are secretly split into crewmates and impostors. Crewmates have to complete tasks and find the impostors before they kill everyone, while impostors have to sabotage the ship and deceive the crewmates.

    -

    among us apk unlock all skin


    DOWNLOAD » https://urllie.com/2uNzTH



    -

    One of the fun aspects of Among Us is that you can customize your character with different skins. Skins are cosmetic items that change the appearance of your character in the game. You can choose from various hats, outfits, pets, and colors to make your character unique and expressive.

    -

    However, not all skins are available for free in the game. Some skins require you to pay real money or complete certain tasks or achievements to unlock them. If you want to access all the skins without spending any money or doing any hard work, you might be interested in unlocking all skins in Among Us APK.

    -

    APK stands for Android Package Kit, and it is a file format that allows you to install applications on Android devices. APK files are different from regular apps that you download from the Google Play Store, as they are not verified or regulated by Google. APK files can be found on various websites or platforms that offer them for free or for a fee.

    -

    Unlocking all skins in Among Us APK means that you can use any skin in the game without paying or earning them. This can be done by using a modded version of Among Us APK, an in-game hack or cheat tool, or a third-party app or website. However, before you try any of these methods, you need to know how to install Among Us APK on your Android device.

    -

    How to Install Among Us APK on Android

    -

    Installing Among Us APK on your Android device is not as simple as installing a regular app from the Google Play Store. You need to follow some steps and precautions to make sure that you can install and run the APK file without any problems. Here are the steps that you need to follow:

    -

    Allow Unknown Apps on Android

    -

    The first thing that you need to do is to enable the option to install apps from unknown sources on your Android device. This option is disabled by default for security reasons, as it prevents you from installing apps that may harm your device or data. However, if you want to install Among Us APK, you need to allow unknown apps on your device.

    -

    To do this, go to your device's Settings and look for the Security or Privacy section. There, you should find an option called Unknown Sources or Install Unknown Apps. Tap on it and toggle it on. You may see a warning message that tells you about the risks of installing unknown apps. Tap OK or Allow to confirm your choice.

    -

    Install an Android File Manager

    -

    The next thing that you need to do is to install an Android file manager app on your device. A file manager app is an app that allows you to browse and manage the files and folders on your device. You will need a file manager app to locate and install the APK file that you downloaded.

    -

    There are many file manager apps that you can download from the Google Play Store, such as ES File Explorer, Astro File Manager, or Solid Explorer. Choose one that suits your preferences and install it on your device.

    -

    among us mod apk all skins unlocked
    -download among us apk with all skins
    -among us hack apk unlock all skins and pets
    -how to get all skins in among us apk
    -among us apk free skins and hats
    -among us unlocked apk latest version
    -among us mod menu apk all skins
    -among us apk download for android with all skins
    -among us cheat apk unlock all skins
    -among us premium apk all skins and pets
    -among us cracked apk all skins unlocked
    -among us modded apk with all skins and hats
    -among us full unlocked apk download
    -among us hack version apk all skins
    -among us pro apk unlock all skins and pets
    -among us mod apk free download all skins
    -among us unlimited skins apk
    -how to unlock all skins in among us apk
    -among us apk mod menu unlock all skins
    -among us hack mod apk all skins and pets
    -among us skin unlocker apk download
    -among us modded version apk with all skins
    -among us skin hack apk free download
    -among us god mode apk unlock all skins
    -among us latest mod apk all skins unlocked
    -download among us modded apk with all skins and pets
    -among us skin generator apk no verification
    -how to install among us mod apk with all skins
    -among us skin mod apk 2023.3.28
    -among us always impostor mod apk unlock all skins
    -download game among us mod apk unlock all skin and pet
    -how to get free skins in among us android apk
    -among us mod menu skin unlocker apk 2023.3.28
    -download hack version of among us with all skins unlocked
    -how to use mod menu in among us to unlock all skins
    -best site to download among us mod apk with all skins and pets
    -how to update among us modded version with all skins unlocked
    -is it safe to download among us hack apk with all skins and pets unlocked
    -how to play online with friends in among us modded version with all skins unlocked
    -where can i find the latest version of among us mod menu with all skins unlocked

    -

    Download the APK Installer From Your Android

    -

    The third thing that you need to do is to download the APK file of Among Us from your Android device. You can use any web browser app that you have on your device, such as Chrome, Firefox, or Opera. Go to the website or platform that offers the APK file of Among Us and look for the download button or link. Tap on it and wait for the download to finish.

    -

    Once the download is complete, open the file manager app that you installed and look for the Downloads folder. There, you should find the APK file of Among Us with a name like com.innersloth.spacemafia.apk. Tap on it and select Install. You may see a pop-up message that asks you for permission to install the app. Tap OK or Install to proceed.

    -

    Transfer the APK Installer via USB

    -

    The fourth thing that you need to do is to transfer the APK file of Among Us from your computer to your Android device via USB cable. This method is useful if you downloaded the APK file of Among Us from your computer instead of your Android device.

    -

    To do this, connect your Android device to your computer using a USB cable. Make sure that your device is in File Transfer mode and not Charging mode. On your computer, open the folder where you saved the APK file of Among Us and copy it. On your device, open the file manager app that you installed and look for a folder called Internal Storage or SD Card. Paste the APK file of Among Us there.

    -

    Once the transfer is complete, disconnect your device from your computer and open the file manager app again. Look for the folder where you pasted the APK file of Among Us and tap on it. Select Install and follow the same steps as above.
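    If you are comfortable with a command line, there is an optional shortcut for this USB step: Google's ADB (Android Debug Bridge) tool, which ships with the Android platform-tools, can install the APK over the same USB cable in a single command. The snippet below is only a rough sketch of that alternative; it assumes you already have ADB installed on your computer and USB debugging enabled on your phone, and the file name is just a placeholder for the APK you actually downloaded.

```bash
# Optional alternative to the file-manager route.
# Assumes ADB is installed on the computer and USB debugging is enabled on the phone.
# "among-us.apk" is a placeholder -- use the real name of the file you downloaded.
adb devices                  # the phone should be listed as "device", not "unauthorized"
adb install -r among-us.apk  # -r replaces an existing install while keeping its data
```

    If the install fails with a signature-mismatch error, the usual cause is that a copy of the game signed with a different key is already installed, and that copy has to be uninstalled first.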

    -

    How to Unlock All Skins in Among Us APK

    -

    Now that you have installed Among Us APK on your Android device, you can try one of these methods to unlock all skins in the game:

    -

    Use a Modded Version of Among Us APK

    -

    A modded version of Among Us APK is an altered version of the original game that has some features or changes added by someone else. For example, a modded version of Among Us APK may have all skins unlocked, or have some other features like unlimited coins, speed hacks, or custom maps. A modded version of Among Us APK can be found on various websites or platforms that offer them for free or for a fee.

    -

    To use a modded version of Among Us APK, you need to uninstall the original game from your device and install the modded version instead. You can follow the same steps as above to install the modded version of Among Us APK on your device. However, you need to be careful when choosing a modded version of Among Us APK, as some of them may be fake, outdated, or malicious. You should always check the reviews, ratings, and comments of other users before downloading and installing a modded version of Among Us APK.

    -

    Use an In-Game Hack or Cheat Tool

    -

    An in-game hack or cheat tool is a program or app that runs in the background while you play the game and modifies some aspects of the game. For example, an in-game hack or cheat tool may allow you to unlock all skins, change your role, see the impostors, or do other things that give you an advantage over other players. An in-game hack or cheat tool can be found on various websites or platforms that offer them for free or for a fee.

    -

    To use an in-game hack or cheat tool, you need to download and install it on your device. You may also need to grant it some permissions or access to your device's settings. Then, you need to run the hack or cheat tool and select the options that you want to activate. After that, you can open the game and enjoy the hack or cheat tool's effects.

    -

    However, you need to be careful when using an in-game hack or cheat tool, as some of them may not work properly, cause errors or crashes, or get detected by the game's anti-cheat system. You should always check the compatibility, reliability, and safety of an in-game hack or cheat tool before using it.

    -

    Use a Third-Party App or Website

    -

    A third-party app or website is an app or website that is not affiliated with the game's developer or publisher and offers some services or features related to the game. For example, a third-party app or website may allow you to generate codes, vouchers, or coupons that you can redeem in the game to unlock all skins. A third-party app or website can be found on various websites or platforms that offer them for free or for a fee.

    -

    To use a third-party app or website, you need to download and install it on your device or visit it on your web browser. You may also need to provide some information or complete some tasks or surveys to access its services or features. Then, you need to follow its instructions and get the codes, vouchers, or coupons that you can use in the game to unlock all skins.

    -

    However, you need to be careful when using a third-party app or website, as some of them may be scammy, fraudulent, or illegal. You should always check the legitimacy, credibility, and reputation of a third-party app or website before using it.

    -

    Benefits of Unlocking All Skins in Among Us APK

    -

    Unlocking all skins in Among Us APK can have some benefits for your gameplay experience. Here are some of them:

    -
    • You can have more variety and customization options for your character. You can mix and match different hats, outfits, pets, and colors to create your own unique style and personality.
    • You can have more fun and enjoyment in the game. You can express yourself better with your character's appearance and impress other players with your cool skins.
    • You can save money and time that you would otherwise spend on buying or earning skins in the game. You can get all the skins for free and without any hassle.

    Risks of Unlocking All Skins in Among Us APK

    -

    Unlocking all skins in Among Us APK can also have some risks for your gameplay experience. Here are some of them:

    -
    • You can violate the game's terms of service and get banned from playing online. The game's developer and publisher do not support or endorse any methods of unlocking all skins in Among Us APK that are not authorized by them. If they detect that you are using such methods, they may suspend or terminate your account and prevent you from accessing the game's servers and features.
    • You can expose your device and data to malware or viruses. Some sources and methods of unlocking all skins in Among Us APK may not be safe or trustworthy. They may contain malicious code or software that can harm your device or data, such as stealing your personal information, deleting your files, or hijacking your system.
    • You can ruin the game balance and fairness. Unlocking all skins in Among Us APK may give you an unfair advantage over other players who do not have access to them. This may make the game less fun and challenging for you and others. It may also cause resentment and conflict among the game's community and fanbase.

    Alternatives to Unlocking All Skins in Among Us APK

    -

    If you are not comfortable or satisfied with unlocking all skins in Among Us APK, you can try some alternatives to enjoy the game without them. Here are some of them:

    -
    • You can play with friends or join a private server. Playing with friends or joining a private server can make the game more fun and enjoyable, as you can communicate and cooperate with people you know and trust. You can also agree on some rules or settings that suit your preferences and expectations.
    • You can explore different modes and maps. The game offers different modes and maps that you can choose from, such as Classic, Hide and Seek, Airship, Polus, or Mira HQ. Each mode and map has its own features and challenges that can keep you entertained and engaged.
    • You can earn skins legitimately. The game also provides some ways to earn skins legitimately, such as by completing tasks or achievements, watching ads, or participating in events or promotions. You can also buy skins with real money if you want to support the game's developer and publisher.

    Conclusion

    -

    Unlocking all skins in Among Us APK is a possible but risky way to customize your character in the game. You need to know how to install Among Us APK on your Android device and how to use one of the methods to unlock all skins. You also need to be aware of the benefits and risks of unlocking all skins in Among Us APK, and consider some alternatives to enjoy the game without them.

    -

    We hope that this article has helped you understand how to unlock all skins in Among Us APK and make an informed decision about whether to do it or not. If you have any questions or comments, feel free to share them below. Thank you for reading!

    -

    FAQs

    -

    What is Among Us?

    -

    Answer: Among Us is a multiplayer social deduction game where players have to work together as crewmates or impostors on a spaceship.

    -

    What are skins in Among Us?

    -

    Answer: Skins are cosmetic items that change the appearance of the player's character in the game.

    -

    How many skins are there in Among Us?

    -

    Answer: There are over 50 skins in Among Us, including hats, outfits, pets, and colors.

    -

    How can I get skins in Among Us?

    -

    Answer: You can get skins in Among Us by buying them with real money, earning them by completing tasks or achievements, or unlocking them with hacks or mods.

    -

    Is it safe to unlock all skins in Among Us APK?

    -

    Answer: It depends on the source and method of unlocking the skins. Some sources and methods may be safe and reliable, while others may be risky and harmful.

    \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Do Stumble Guys A Physics-Based Havoc Game with Colorful Design.md b/spaces/fatiXbelha/sd/Download Do Stumble Guys A Physics-Based Havoc Game with Colorful Design.md deleted file mode 100644 index 317a3d216a2bbea7fdb595c1f6f007f34f470204..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Do Stumble Guys A Physics-Based Havoc Game with Colorful Design.md +++ /dev/null @@ -1,109 +0,0 @@ -
    -

    Download Do Stumble Guys: A Guide to Enjoying the Ultimate Knockout Game

    -

    Stumble Guys is a massively multiplayer party knockout game that is inspired by the popular Fall Guys. In this game, you can race with up to 32 players online through chaotic obstacle courses. You have to run, jump, dash, and slide past your opponents and avoid the hazards until you reach the finish line. The last player standing wins the crown!

    -

    Stumble Guys is a fun and addictive game that you can play with your friends or with strangers online. It has colorful and crazy graphics, physics-based gameplay, and tons of hilarious fails. You can also customize your character with different outfits and emotes.

    -

    download do stumble guys


    Download ☆☆☆☆☆ https://urllie.com/2uNzRb



    -

    If you want to join the party and experience the madness of Stumble Guys, here is how you can download and play it on your devices.

    -

    How to Download Stumble Guys on Android Devices

    -

    Stumble Guys is available for free on Google Play Store for Android devices. You can easily download it by following these steps:

    -
    1. Go to Google Play Store and search for Stumble Guys.
    2. Tap the Install button to start downloading it to your device.
    3. Once Stumble Guys has been downloaded, open it and enjoy playing it.

    If you have compatibility issues or want to download Stumble Guys from other sources, you can also use apps like APKPure or Uptodown to get the APK file of the game. Just make sure you enable the installation of apps from unknown sources in your device settings.
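
Whichever source you use, it is worth checking that the file you received is the file the site claims to publish. The snippet below is a generic illustration, not something from this article: it assumes the download page lists a SHA-256 checksum for the APK, and the file name and expected value shown here are placeholders.

```python
# Minimal sketch: compare a downloaded APK against a published SHA-256 checksum.
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large APK files do not need to fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    expected = "<checksum from the download page>"  # placeholder
    actual = sha256_of_file("stumble-guys.apk")      # placeholder file name
    print("OK" if actual == expected else "Checksum mismatch - do not install")
```

If the values do not match, the download was corrupted or tampered with and should not be installed.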

    -

    How to Download Stumble Guys on PC Using an Emulator

    -

    If you want to play Stumble Guys on a bigger screen and with better controls, you can also download and play it on your PC using an emulator. An emulator is a software that can run Android apps on your computer or laptop.

    -

    Some examples of emulators are BlueStacks, MuMu Player, MEmu, and GameLoop. You can choose any emulator that suits your preferences and system requirements. After installing the emulator, you need to complete Google sign-in to access the Play Store. Then, you can search for Stumble Guys or Stumble Guys: Multiplayer Royale in the emulator's app center or search bar. Finally, you can click install to download and play the game.

    -

    Before playing Stumble Guys on PC, you should also configure your controls using the emulator's keymapping tool. You can create a custom control scheme using your mouse and keyboard for better gameplay. For example, you can use WASD keys to move, spacebar to jump, G to access emotes menu, and right click to toggle free view.

    -

    Stumble Guys Tips and Tricks to Win Your Matches

    -

    Now that you have downloaded Stumble Guys on your device of choice, you are ready to join the fun and compete with other players. However, winning in this game is not easy as there are many obstacles and rivals that will try to make you lose. Here are some tips and tricks that will help you win your matches:

    -
    • Learn to weaponize your character's physics. You can bump into others, block their paths, push them off ledges, or even grab them if they are close enough. Use these tactics to sabotage your enemies or help your friends.
    • Avoid getting stuck in crowds or bottlenecks. Try to find alternative routes or shortcuts that will give you an edge over others. Sometimes, it is better to take a longer but safer path than a shorter but riskier one.
    • Be aware of your surroundings and anticipate the obstacles. Some obstacles are predictable and have patterns that you can memorize. Others are random and require quick reactions. Pay attention to the cues and signals that will warn you of incoming hazards.
    • Don't give up if you fall or fail. You can always get up and try again until you reach the finish line or get eliminated. Sometimes, luck will favor you and you can catch up with others. Don't lose hope and keep trying.
    • Have fun and enjoy the game. Stumble Guys is a game that is meant to make you laugh and have a good time. Don't take it too seriously or get frustrated if you lose. Instead, appreciate the humor and the chaos of the game. You can also make friends with other players and chat with them using the in-game chat feature.

    Conclusion: Summary and Recommendation

    -

    Stumble Guys is a hilarious and entertaining game that you can play with your friends or with other players online. It is a game that will test your skills, reflexes, and luck as you race through various obstacle courses. You can download Stumble Guys for free on your Android devices or on your PC using an emulator. You can also follow some tips and tricks that will help you win your matches and earn the crown.

    -

    How to download Stumble Guys on PC
    -Stumble Guys free download for Android
    -Stumble Guys game online play
    -Stumble Guys download apk mod
    -Stumble Guys official website download
    -Stumble Guys steam download free
    -Stumble Guys download for iOS
    -Stumble Guys game tips and tricks
    -Stumble Guys download latest version
    -Stumble Guys game review
    -Stumble Guys download size
    -Stumble Guys game modes and features
    -Stumble Guys download error fix
    -Stumble Guys game system requirements
    -Stumble Guys download link
    -Stumble Guys game best outfits and emotes
    -Stumble Guys download update
    -Stumble Guys game community and discord
    -Stumble Guys download without Google Play
    -Stumble Guys game news and events
    -Stumble Guys download for Mac
    -Stumble Guys game cheats and hacks
    -Stumble Guys download from official store
    -Stumble Guys game maps and levels
    -Stumble Guys download for Windows 10
    -Stumble Guys game funniest fails and moments
    -Stumble Guys download not working solution
    -Stumble Guys game how to invite friends
    -Stumble Guys download for laptop
    -Stumble Guys game comparison with other games
    -Stumble Guys download speed test
    -Stumble Guys game how to win every round
    -Stumble Guys download for Chromebook
    -Stumble Guys game live streams and videos
    -Stumble Guys download safe and secure
    -Stumble Guys game ratings and feedbacks
    -Stumble Guys download alternative sources
    -Stumble Guys game challenges and achievements
    -Stumble Guys download for tablet
    -Stumble Guys game history and development
    -Stumble Guys download problems and solutions
    -Stumble Guys game how to unlock all items
    -Stumble Guys download for Linux
    -Stumble Guys game FAQs and guides
    -Stumble Guys download for Kindle Fire
    -Stumble Guys game how to report a bug
    -Stumble Guys download for smart TV
    -Stumble Guys game best strategies and tactics
    -Stumble Guys download for Xbox One
    -Stumble Guys game how to create a party

    -

    If you are looking for a game that will make you laugh, challenge you, and keep you entertained, then Stumble Guys is the game for you. Download it now and join the party!

    -

    FAQs: Five Common Questions and Answers About Stumble Guys

    | Question | Answer |
    | --- | --- |
    | How many players can play Stumble Guys online? | Stumble Guys can support up to 32 players online in each match. |
    | How can I customize my character in Stumble Guys? | You can customize your character by changing its outfit, color, face, hair, and emote. You can unlock more options by playing the game and earning coins or gems. |
    | How can I play Stumble Guys with my friends? | You can play Stumble Guys with your friends by creating or joining a private room. You can invite your friends by sharing the room code or by scanning the QR code. |
    | What are the different modes in Stumble Guys? | Stumble Guys has two modes: Classic and Custom. Classic mode is the default mode where you play with random players online. Custom mode is where you can create or join a private room and play with your friends or other players. |
    | What are the system requirements for Stumble Guys? | Stumble Guys requires Android 5.0 or higher for Android devices and Windows 7 or higher for PC using an emulator. It also requires a stable internet connection to play online. |

    \ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/data/megatron_dataloader/__init__.py b/spaces/fclong/summary/fengshen/data/megatron_dataloader/__init__.py deleted file mode 100644 index cd5f898c6bdf89c6cf0243af102d04f6efed86b8..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/data/megatron_dataloader/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from . import indexed_dataset diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/8 Ball Pool Guideline Mod APK Extend Your Guideline and Win Every Match.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/8 Ball Pool Guideline Mod APK Extend Your Guideline and Win Every Match.md deleted file mode 100644 index cfb1ba848e5ea224b0dbf2a5264e5c84509d7f14..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/8 Ball Pool Guideline Mod APK Extend Your Guideline and Win Every Match.md +++ /dev/null @@ -1,137 +0,0 @@ -
    -

    How to Play 8 Ball Pool Like a Pro with Guideline Mod APK

    -

    Do you love playing 8 ball pool online but struggle to win matches and earn coins? Do you wish you could improve your accuracy and consistency when shooting the balls? Do you want to learn some cool tricks and strategies to impress your opponents and friends? If you answered yes to any of these questions, then you might be interested in trying out Guideline Mod APK, a tool that can help you play 8 ball pool like a pro.

    -

    What is 8 Ball Pool and Why is it Popular?

    -

    8 ball pool is one of the most popular and addictive online games in the world. It is a simulation of the real-life pool game, where you have to use a cue stick to hit the balls on a table and pocket them in the holes. The game has two types of balls: solids and stripes. The objective is to pocket all the balls of your type and then the 8 ball before your opponent does.

    -

    8 ball pool guideline mod apk


    Download →→→ https://gohhs.com/2uPuNu



    -

    The Rules and Objectives of 8 Ball Pool

    -

    The rules of 8 ball pool are simple and easy to follow. You can play the game in two modes: 1-on-1 or tournament. In both modes, you have to pay an entry fee with coins, which are the in-game currency. You can earn coins by winning matches or by watching ads or completing offers. You can also buy coins with real money if you want.

    -

    The game starts with a break shot, where you have to hit the rack of balls with the cue ball. The first player who pockets a ball gets to choose whether they want to play as solids or stripes. Then, each player takes turns to hit their own balls with the cue ball. You have to aim carefully and adjust the power and spin of your shot. You can also use different cues with different attributes to enhance your performance.

    -

    If you pocket a ball, you get another turn. If you miss or commit a foul, such as hitting the wrong ball, scratching the cue ball, or running out of time, your turn ends and your opponent gets the cue ball in hand. You can place the cue ball anywhere on the table except for the baulk area (the area behind the head string). You have to pocket all your balls and then the 8 ball in any hole to win the game. If you pocket the 8 ball before clearing your balls or in the wrong hole, you lose the game.

    -

    The Benefits and Challenges of Playing 8 Ball Pool Online

    -

    Playing 8 ball pool online has many benefits. You can play anytime and anywhere with millions of players from around the world. You can challenge your friends or join clubs and compete with other players. You can also participate in tournaments and events and win exclusive prizes and rewards. You can customize your profile and avatar and show off your skills and achievements.

    -

    However, playing 8 ball pool online also has some challenges. You need a stable internet connection and a compatible device to play smoothly. You also need to have enough coins to enter matches and buy cues and other items. Moreover, you need to have good skills and strategies to win matches and rank up against other players.


    -

    8 ball pool extended guideline mod apk
    -8 ball pool unlimited guideline mod apk
    -8 ball pool hack guideline mod apk
    -8 ball pool long line mod apk
    -8 ball pool guideline tool mod apk
    -8 ball pool aim assist mod apk
    -8 ball pool auto win mod apk
    -8 ball pool anti ban mod apk
    -8 ball pool mega mod apk
    -8 ball pool premium mod apk
    -8 ball pool pro mod apk
    -8 ball pool master mod apk
    -8 ball pool legend mod apk
    -8 ball pool cheat guideline mod apk
    -8 ball pool free coins mod apk
    -8 ball pool unlimited cash mod apk
    -8 ball pool all cues unlocked mod apk
    -8 ball pool vip mod apk
    -8 ball pool latest version mod apk
    -8 ball pool online mod apk
    -8 ball pool offline mod apk
    -8 ball pool multiplayer mod apk
    -8 ball pool tournament mod apk
    -8 ball pool club mod apk
    -8 ball pool golden shot mod apk
    -8 ball pool reward links mod apk
    -8 ball pool instant reward mod apk
    -8 ball pool daily bonus mod apk
    -8 ball pool spin and win mod apk
    -8 ball pool scratch and win mod apk
    -8 ball pool lucky shot mod apk
    -8 ball pool surprise box mod apk
    -8 ball pool cue recharge mod apk
    -8 ball pool cue upgrade mod apk
    -8 ball pool cue collection power mod apk
    -8 ball pool cue force mod apk
    -8 ball pool cue aim mod apk
    -8 ball pool cue spin mod apk
    -8 ball pool cue time mod apk
    -8 ball pool table unlock mod apk
    -8 ball pool table theme mod apk
    -8 ball pool table chat pack mod apk
    -8 ball pool table emoji pack mod apk
    -8 ball pool table sticker pack mod apk
    -8 ball pool table game mode mod apk
    -8 ball pool table no guideline mode mod apk
    -8 ball pool table call pocket mode mod apk
    -8 ball pool table indirect shots mode mod apk

    -

    What is Guideline Mod APK and How Does it Work?

    -

    Guideline Mod APK is a tool that can help you play 8 ball pool like a pro. It is a modified version of the original 8 ball pool app, which gives you some extra features and functions that are not available in the official app. One of the main features of Guideline Mod APK is that it shows you a long and accurate guideline for your cue ball, which helps you aim and shoot the balls more precisely. You can also adjust the length and color of the guideline according to your preference.

    -

    The Features and Functions of Guideline Mod APK

    -

    Guideline Mod APK has many features and functions that can enhance your 8 ball pool experience. Some of them are:

    -
    • Long and accurate guideline: You can see a long and accurate guideline for your cue ball, which helps you aim and shoot the balls more precisely. You can also adjust the length and color of the guideline according to your preference.
    • Unlimited coins and cash: You can get unlimited coins and cash in your account, which you can use to enter matches, buy cues, and other items. You don't have to worry about running out of coins or cash anymore.
    • All cues unlocked: You can access all the cues in the game, including the premium and legendary ones. You can choose any cue you like and enjoy its attributes and effects.
    • No ads: You can play the game without any annoying ads or pop-ups. You can enjoy the game without any interruptions or distractions.
    • No root required: You don't need to root your device to use Guideline Mod APK. You can install it easily and safely on your device without any risk of damaging it.

    The Advantages and Disadvantages of Using Guideline Mod APK

    -

    Using Guideline Mod APK has some advantages and disadvantages that you should be aware of before using it. Some of them are:

    | Advantages | Disadvantages |
    | --- | --- |
    | You can play 8 ball pool like a pro with a long and accurate guideline for your cue ball. | You might lose the fun and challenge of playing 8 ball pool without any assistance. |
    | You can get unlimited coins and cash in your account, which you can use to enter matches, buy cues, and other items. | You might get banned from the game if you use too many coins or cash in a short time. |
    | You can access all the cues in the game, including the premium and legendary ones. | You might lose the motivation to earn coins and cash by playing matches or completing offers. |
    | You can play the game without any annoying ads or pop-ups. | You might miss some important updates or news from the official app. |
    | You don't need to root your device to use Guideline Mod APK. | You might expose your device to malware or viruses if you download Guideline Mod APK from an untrusted source. |

    How to Download and Install Guideline Mod APK on Your Device

    -

    If you want to try out Guideline Mod APK, you need to download and install it on your device. However, you need to be careful and follow some steps and tips to avoid any problems or errors. Here are the requirements and precautions for installing Guideline Mod APK:

    -

    The Requirements and Precautions for Installing Guideline Mod APK

    -
    • Device compatibility: You need to have a device that runs on Android 4.4 or higher. You also need to have enough storage space and RAM to run the app smoothly.
    • Internet connection: You need to have a stable and fast internet connection to download and install the app. You also need to have internet access to play the game online.
    • Backup data: You need to backup your data from the official 8 ball pool app before installing Guideline Mod APK. You can do this by logging in with your Facebook or Google account and syncing your progress. This way, you can restore your data if anything goes wrong.
    • Uninstall official app: You need to uninstall the official 8 ball pool app from your device before installing Guideline Mod APK. This is because the two apps might conflict with each other and cause errors or crashes.
    • Allow unknown sources: You need to enable the option to allow unknown sources on your device settings. This is because Guideline Mod APK is not available on the Google Play Store and you need to install it from a third-party source.
    • Download from trusted source: You need to download Guideline Mod APK from a trusted and reliable source. You can use the link provided below or search for other sources online. However, you need to be careful and avoid downloading from any suspicious or malicious websites that might contain malware or viruses.

    The Steps and Tips for Installing Guideline Mod APK

    -

    Once you have met the requirements and taken the precautions, you can follow these steps and tips to install Guideline Mod APK on your device:

    -
    1. Download Guideline Mod APK: You can use the link below or search for other sources online to download Guideline Mod APK. The file size is about 60 MB and it might take some time depending on your internet speed.
    2. Locate and open the file: You can use a file manager app or your device's default file explorer to locate and open the downloaded file. You might see a warning message that says "This type of file can harm your device". You can ignore this message and tap on "OK" or "Install anyway".
    3. Install the app: You can follow the instructions on the screen to install the app on your device. It might take a few minutes depending on your device's performance.
    4. Launch the app: You can find the app icon on your device's home screen or app drawer. You can tap on it to launch the app and start playing 8 ball pool with Guideline Mod APK.
    5. Login with your account: You can login with your Facebook or Google account to restore your data from the official app. You can also create a new account if you want.

    Congratulations! You have successfully installed Guideline Mod APK on your device. Now you can enjoy playing 8 ball pool like a pro with a long and accurate guideline for your cue ball.

    How to Use Guideline Mod APK to Improve Your 8 Ball Pool Skills

    -

    Now that you have installed Guideline Mod APK on your device, you might be wondering how to use it to improve your 8 ball pool skills. Well, it's not that hard. You just need to learn the basics and techniques of using Guideline Mod APK, along with a few tricks and strategies. Here are some tips and suggestions for you:

    -

    The Basics and Techniques of Using Guideline Mod APK

    -

    The basics and techniques of using Guideline Mod APK are similar to the ones you use in the official app. You still need to aim, adjust, and shoot the cue ball with your finger. However, with Guideline Mod APK, you have a longer and more accurate guideline that shows you where the cue ball and the target ball will go. You can also change the length and color of the guideline in the settings.

    -

    Here are some techniques you can use with Guideline Mod APK:

    -
    • Align the guideline with the hole: You can align the guideline with the hole you want to pocket the ball in. This will help you avoid missing or hitting the wrong hole.
    • Use the spin and power options: You can use the spin and power options on the left and right sides of the screen to control the movement and speed of the cue ball. You can use different types of spin, such as top spin, back spin, side spin, or swerve, to make the cue ball curve or bounce in different directions. You can also adjust the power of your shot by sliding your finger up or down on the right side of the screen.
    • Use the zoom and angle options: You can use the zoom and angle options on the bottom of the screen to change your view of the table. You can zoom in or out to see more or less details. You can also change the angle of your view by tilting your device or tapping on the arrows on the bottom of the screen.

    The Tricks and Strategies of Using Guideline Mod APK

    -

    The tricks and strategies of using Guideline Mod APK are more advanced and require some practice and experience. You can use them to impress your opponents and friends, or to get out of tricky situations. Here are some tricks and strategies you can use with Guideline Mod APK:

    -
    • Use bank shots: You can use bank shots to hit the balls off the rails or cushions of the table and pocket them in different holes. This can help you clear more balls in one shot, or avoid obstacles or blockers on the table.
    • Use combo shots: You can use combo shots to hit one ball with another ball and pocket them both in one shot. This can help you clear more balls in one shot, or pocket balls that are hard to reach directly.
    • Use trick shots: You can use trick shots to hit the balls in creative and unexpected ways, such as jumping over other balls, curving around other balls, or bouncing off multiple rails or cushions. This can help you surprise your opponents and friends, or pocket balls that are impossible to reach otherwise.

    Conclusion and FAQs

    -

    In conclusion, Guideline Mod APK is a tool that can help you play 8 ball pool like a pro. It shows you a long and accurate guideline for your cue ball, which helps you aim and shoot the balls more precisely. It also gives you unlimited coins and cash, all cues unlocked, no ads, and no root required. However, it also has some disadvantages, such as losing the fun and challenge of playing 8 ball pool without any assistance, getting banned from the game if you use too many coins or cash in a short time, losing the motivation to earn coins and cash by playing matches or completing offers, missing some important updates or news from the official app, and exposing your device to malware or viruses if you download Guideline Mod APK from an untrusted source.

    -

    If you want to try out Guideline Mod APK, you need to download and install it on your device carefully and follow some steps and tips to avoid any problems or errors. You also need to follow some basics and techniques of using Guideline Mod APK, and learn some tricks and strategies of using Guideline Mod APK. By doing so, you can improve your 8 ball pool skills and enjoy playing 8 ball pool like a pro.

    -

    Here are some FAQs that might help you understand more about Guideline Mod APK:

    -
    1. Is Guideline Mod APK safe to use?

      Guideline Mod APK is safe to use if you download it from a trusted and reliable source. However, you need to be careful and avoid downloading from any suspicious or malicious websites that might contain malware or viruses. You also need to backup your data from the official app before installing Guideline Mod APK, and uninstall the official app from your device. You also need to enable the option to allow unknown sources on your device settings.

      -
    2. Is Guideline Mod APK legal to use?

      Guideline Mod APK is not legal to use, as it violates the terms and conditions of the official 8 ball pool app. It also gives you an unfair advantage over other players who play the game without any assistance. Therefore, using Guideline Mod APK might result in getting banned from the game or facing legal actions from the developers of the official app.

      -
    3. Can I play with my friends using Guideline Mod APK?

      Yes, you can play with your friends using Guideline Mod APK, as long as they also have the same app installed on their devices. You can challenge your friends or join clubs and compete with other players. However, you might not be able to play with players who use the official app, as they might have different versions or updates of the game.

      -
    4. Can I update Guideline Mod APK?

      Yes, you can update Guideline Mod APK, but you need to download and install the latest version of the app from a trusted and reliable source. You also need to uninstall the previous version of the app from your device before installing the new one. You might also need to backup your data from the app before updating it, as some updates might erase your progress or coins.

      -
    5. Can I use Guideline Mod APK offline?

      No, you cannot use Guideline Mod APK offline, as it requires an internet connection to play the game online. You also need an internet connection to download and install the app on your device. However, you can play some offline modes of the game, such as practice mode or offline tournaments, without using Guideline Mod APK.

      -
    -

    I hope this article has helped you understand more about Guideline Mod APK and how to use it to play 8 ball pool like a pro. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy playing!

    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become the Best Striker in the World with Blue Lock Project World Champion APK.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become the Best Striker in the World with Blue Lock Project World Champion APK.md deleted file mode 100644 index 28954a1712e79f30f0301e6ee31b14eafbcf4e3f..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become the Best Striker in the World with Blue Lock Project World Champion APK.md +++ /dev/null @@ -1,85 +0,0 @@ - -

    Download Blue Lock APK: How to Enjoy the Soccer Anime Game on Your Android Device

    -

    Are you a fan of soccer anime? If so, you might have heard of Blue Lock, a popular manga series that has been adapted into an anime television series. The story revolves around a group of high school soccer players who are selected to participate in a project called Blue Lock, where they are trained as strikers in order to become the best in the world. The anime is set to premiere in October 2022, but you don't have to wait that long to enjoy it. You can download Blue Lock APK, a soccer simulation game based on the anime, and play it on your Android device right now. In this article, we will tell you what Blue Lock APK is, why you should download it, how to download and install it, and how to play it. Let's get started!

    -

    download blue lock apk


    DOWNLOADhttps://gohhs.com/2uPrGb



    -

    What is Blue Lock APK?

    -

    Blue Lock APK is an unofficial version of Blue Lock Project: World Champion, a soccer simulation game developed by RUDEL, based on the anime adaptation of Blue Lock. The game allows you to enjoy an amazing storyline while raising your favorite character as a striker, form your own team, and engage in fierce battles with rivals. You can also relive the original story of the anime and fully enjoy the worldview of Blue Lock. The game features stunning graphics, realistic animations, immersive sound effects, and easy-to-use controls. You can also interact with other players online and compete for glory.

    -

    Why You Should Download Blue Lock APK?

    -

    You might be wondering why you should download Blue Lock APK instead of waiting for the official release of the game on the Google Play Store. Well, there are several benefits of downloading the game from a third-party source, such as:

    -

    No Need to Wait for the Official Release

    -

    The official release date of Blue Lock Project: World Champion is not yet announced, but it is expected to be sometime in late 2022 or early 2023. That means you have to wait for several months before you can play the game. However, if you download Blue Lock APK, you can access the game before it is available on the Google Play Store. You can enjoy the game as soon as possible.

    -

    Step 3: Download the APK File and Locate It on Your Device

    The third step is to download the APK file and locate it on your device. To download the APK file, you need to click on the download button on the website that you chose in step 1. You might see a pop-up window that asks you to confirm the download, and you need to click on OK. The download will start automatically and it might take a few minutes depending on your internet speed. Once the download is complete, you need to locate the APK file on your device. You can use a file manager app to find the file in your device's storage. The file might be in the downloads folder or any other folder that you specified.

    -

    Step 4: Install the APK File and Launch the Game

    -

    The fourth and final step is to install the APK file and launch the game. To install the APK file, you need to tap on it and then tap on install. You might see a screen that shows the permissions that the app requires, and you need to tap on accept. The installation will begin and it might take a few seconds. Once the installation is done, you can tap on open to launch the game. Alternatively, you can also find the game icon on your device's home screen or app drawer and tap on it to launch the game.

    -

    How to Play Blue Lock APK?

    -

    Now that you have downloaded and installed Blue Lock APK, you might be wondering how to play it. Don't worry, it's very easy and fun. Here are some tips on how to play Blue Lock APK:

    -

    download blue lock project world champion apk
    -download blue lock blaze battle apk
    -download blue lock soccer game apk
    -download blue lock anime game apk
    -download blue lock simulation game apk
    -download blue lock rudel apk
    -download blue lock qooapp apk
    -download blue lock uptodown apk
    -download blue lock manga game apk
    -download blue lock striker game apk
    -download blue lock soccer anime apk
    -download blue lock tv series game apk
    -download blue lock project world champion mod apk
    -download blue lock blaze battle mod apk
    -download blue lock soccer game mod apk
    -download blue lock anime game mod apk
    -download blue lock simulation game mod apk
    -download blue lock rudel mod apk
    -download blue lock qooapp mod apk
    -download blue lock uptodown mod apk
    -download blue lock manga game mod apk
    -download blue lock striker game mod apk
    -download blue lock soccer anime mod apk
    -download blue lock tv series game mod apk
    -download blue lock project world champion hack apk
    -download blue lock blaze battle hack apk
    -download blue lock soccer game hack apk
    -download blue lock anime game hack apk
    -download blue lock simulation game hack apk
    -download blue lock rudel hack apk
    -download blue lock qooapp hack apk
    -download blue lock uptodown hack apk
    -download blue lock manga game hack apk
    -download blue lock striker game hack apk
    -download blue lock soccer anime hack apk
    -download blue lock tv series game hack apk
    -how to download blue lock project world champion apk
    -how to download blue lock blaze battle apk
    -how to download blue lock soccer game apk
    -how to download blue lock anime game apk
    -how to download blue lock simulation game apk
    -how to download blue lock rudel apk
    -how to download blue lock qooapp apk
    -how to download blue lock uptodown apk
    -how to download blue lock manga game apk
    -how to download blue lock striker game apk
    -how to download blue lock soccer anime apk
    -how to download blue lock tv series game apk

    -

    Choose Your Favorite Character and Train Them as a Striker

    -

    The first thing you need to do is to choose your favorite character from the anime and train them as a striker. You can choose from 11 characters, each with their own personality, skills, and abilities. You can also customize their appearance, such as their hair color, eye color, skin tone, and outfit. You can train your character by completing various tasks, such as dribbling, shooting, passing, and tackling. You can also upgrade their stats, such as speed, power, accuracy, and stamina. You can also equip them with special items, such as shoes, gloves, and accessories.

    -

    Relive the Original Story of the Anime and Experience Different Scenarios

    -

    The second thing you need to do is to relive the original story of the anime and experience different scenarios. You can follow the plot of the anime and watch cutscenes that show the dialogues and interactions between the characters. You can also make choices that affect the outcome of the story, such as who to trust, who to challenge, who to ally with, and who to betray. You can also unlock different endings depending on your choices. You can also explore different locations in the Blue Lock facility, such as the dorms, the cafeteria, the training grounds, and the stadium.

    -

    Compete with Other Players and Form Your Own Team

    -

    The third thing you need to do is to compete with other players and form your own team. You can challenge other players online and test your skills against them in real-time matches. You can also chat with them and send them messages or emojis. You can also form your own team of strikers by inviting other players or accepting their invitations. You can also join or create a club with other players who share your interests or goals. You can also participate in various events and tournaments with your team or club and win rewards.

    -

    Conclusion

    -

    In conclusion, Blue Lock APK is a soccer simulation game based on the anime adaptation of Blue Lock. It allows you to enjoy an amazing storyline while raising your favorite character as a striker, form your own team, and engage in fierce battles with rivals. You can also relive the original story of the anime and fully enjoy the worldview of Blue Lock. You can download Blue Lock APK from a third-party source and enjoy the game on your Android device without any hassle. You can also compete with other players online and form your own team of strikers. If you are a fan of soccer anime, you should definitely download Blue Lock APK and try it out. You won't regret it!

    -

    FAQs

    -

    Here are some frequently asked questions about Blue Lock APK:

    -

    Q: Is Blue Lock APK safe to download and install?

    -

    A: Yes, Blue Lock APK is safe to download and install as long as you choose a reliable source for the APK file. You should also scan the file with an antivirus app before installing it to make sure it is free of malware.

    -

    Q: Do I need an internet connection to play Blue Lock APK?

    -

    A: Yes, you need an internet connection to play Blue Lock APK. The game requires online access to load the data, update the content, and connect with other players.

    -

    Q: How can I update Blue Lock APK to the latest version?

    -

    A: You can update Blue Lock APK to the latest version by visiting the website that you downloaded the APK file from and downloading the new version. You can also check for updates within the game settings.

    -

    Q: How can I contact the developers of Blue Lock APK?

    -

    A: You can contact the developers of Blue Lock APK by visiting their official website or social media accounts. You can also send them an email or leave a comment on their blog.

    -

    Q: How can I support the developers of Blue Lock APK?

    -

    A: You can support the developers of Blue Lock APK by rating and reviewing the game on the website that you downloaded the APK file from. You can also share the game with your friends and family and encourage them to download it.

    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA Mobile Mod APK with Menu Mod and Freeze Players.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA Mobile Mod APK with Menu Mod and Freeze Players.md deleted file mode 100644 index 29830cc58951bafd09a93c04157d3776ca2eeafd..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA Mobile Mod APK with Menu Mod and Freeze Players.md +++ /dev/null @@ -1,23 +0,0 @@ - -

    FIFA APK HappyMod: A Modded Version of FIFA Mobile

    If you are a fan of soccer games, you might have heard of FIFA Mobile, the official mobile game of the FIFA World Cup 2022™. But did you know that there is a modded version of this game called FIFA APK HappyMod? In this article, we will tell you what FIFA APK HappyMod is, how to download and install it, and how to play it.

    What is FIFA APK HappyMod?

    FIFA APK HappyMod is a modified version of FIFA Mobile that offers features and benefits not available in the original game. It is also a tool platform that lets you download and enjoy modified versions of many different games, including FIFA Mobile.

    Features of FIFA APK HappyMod

    Some of the features of FIFA APK HappyMod are:

    • Modded Games - You can choose from many versions of modified games for FIFA Mobile, such as Menu Mod, Perfect Skilled Game, Freeze Players, Freeze Goalkeeper, Speed, and more.
    • Fast and Secure - The download is done at a high speed and all applications added to the store are first tested for viruses. If they fail, they are not allowed to enter the store.
    • Unofficial Apps - You can access more unofficial apps and games than any other similar installer.
    • Old Versions - You can download old app versions for those who prefer them or whose device cannot run newer app versions.

    Benefits of FIFA APK HappyMod

    Some of the benefits of using FIFA APK HappyMod are:

    • You can enjoy an enhanced gaming experience with the added features and customizations that suit your preferences and style.
    • You can unlock soccer stars from all 32 qualified national teams with official licenses, authentic World Cup national team kits and badges, the official match ball, and play in World Cup stadiums.
    • You can build your ultimate team with over 15,000 authentic soccer stars to choose from, including world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr and Son Heung-min.
    • You can score big with world soccer icons like Paolo Maldini, Ronaldinho, & more.
    • You can experience new, upgraded soccer stadiums including several classic FIFA venues up to 60 fps*, realistic stadium SFX and live on-field audio commentary.
    • You can be the soccer manager of your own dream team and plan your strategy and adjust your tactics in real time, or choose auto-play to enjoy an idle soccer manager game experience.

    How to Download and Install FIFA APK HappyMod?

    If you want to download and install FIFA APK HappyMod on your Android device, you need to follow these steps:

    Steps to Download and Install FIFA APK HappyMod

    1. Go to [HappyMod], the official website of FIFA APK HappyMod, and search for FIFA Soccer Mod.
    2. Choose the version of the mod that you want to download and click on the Download button.
    3. Wait for the download to finish and then open the downloaded file.
    4. If you see a message that says "For your security, your phone is not allowed to install unknown apps from this source", go to Settings > Security > Unknown Sources and enable it.
    5. Go back to the downloaded file and tap on it again. Then click on Install and wait for the installation to complete.
    6. Once the installation is done, you can open the app and enjoy playing FIFA APK HappyMod.

    Tips and Warnings for Using FIFA APK HappyMod

    Here are some tips and warnings for using FIFA APK HappyMod:

    • Make sure you have enough storage space on your device before downloading and installing FIFA APK HappyMod, as the file size may vary depending on the version of the mod.
    • Make sure you have a stable internet connection while playing FIFA APK HappyMod, as the game requires online access to function properly.
    • Be aware that FIFA APK HappyMod is not an official product of EA Sports or FIFA, and it may not be compatible with the latest updates or features of the original game.
    • Be careful when using FIFA APK HappyMod, as it may violate the terms and conditions of the original game and result in a ban or suspension of your account.
    • Be respectful and responsible when playing FIFA APK HappyMod, and do not use it to cheat, harass, or harm other players.

    How to Play FIFA APK HappyMod?

    If you have successfully downloaded and installed FIFA APK HappyMod on your device, you can start playing it by following these steps:

    Game Modes and Options in FIFA APK HappyMod

    FIFA APK HappyMod offers various game modes and options for you to enjoy, such as:

    • World Cup Mode - You can play as one of the 32 qualified national teams and compete for the ultimate glory of winning the FIFA World Cup 2022™.
    • Ultimate Team Mode - You can build your own dream team with over 15,000 soccer stars from different leagues and nations, and customize your squad with kits, badges, formations, and tactics.
    • Season Mode - You can play through a full season of matches with your favorite club or national team, and earn rewards and trophies along the way.
    • Live Events Mode - You can participate in daily and weekly challenges based on real-world soccer events, and earn exclusive rewards and bonuses.
    • Versus Mode - You can challenge your friends or other players online in head-to-head matches, and climb the leaderboards and rankings.

    Tips and Tricks for Playing FIFA APK HappyMod

    Here are some tips and tricks for playing FIFA APK HappyMod:

    • Use the Menu Mod option to access different features and settings of the game, such as changing the difficulty level, enabling or disabling cheats, adjusting the speed, and more.
    • Use the Perfect Skilled Game option to perform flawless dribbles, passes, shots, tackles, and saves with ease.
    • Use the Freeze Players option to stop the movement of your opponents or teammates at any time.
    • Use the Freeze Goalkeeper option to prevent the goalkeeper from blocking your shots.
    • Use the Speed option to increase or decrease the speed of the game according to your preference.
    • Experiment with different combinations of soccer stars in your ultimate team, and find out which ones work best for your strategy and style.
    • Upgrade your soccer stars with skill boosts, chemistry styles, training items, and special cards to improve their attributes and performance.
    • Complete various quests, achievements, milestones, and objectives to earn more coins, gems, packs, players, and other rewards.
    • Join a league or create your own one with your friends or other players, and compete in tournaments, championships, and cooperative matches.

    Conclusion

    FIFA APK HappyMod is a modded version of FIFA Mobile that offers many features and benefits that are not available in the original game. It allows you to download and enjoy different versions of modified games for FIFA Mobile, such as Menu Mod, Perfect Skilled Game, Freeze Players, Freeze Goalkeeper, Speed, and more. It also lets you unlock soccer stars from all 32 qualified national teams with official licenses, authentic World Cup national team kits and badges, the official match ball, and play in World Cup stadiums. You can also build your ultimate team with over 15,000 authentic soccer stars to choose from, experience new, upgraded soccer stadiums including several classic FIFA venues up to 60 fps*, realistic stadium SFX and live on-field audio commentary, and be the soccer manager of your own dream team. However, you need to be careful when using FIFA APK HappyMod, as it may violate the terms and conditions of the original game. You also need enough storage space on your device and a stable internet connection while playing FIFA APK HappyMod.

    FAQs

    Here are some frequently asked questions about FIFA APK HappyMod:

    Q: Is FIFA APK HappyMod safe to use?

    A: FIFA APK HappyMod is generally safe to use as long as you download it from a trusted source like [HappyMod]. However, you should always scan any downloaded file for viruses before installing it on your device. You should also be aware that using FIFA APK HappyMod may violate the terms and conditions of the original game.

    Q: Is FIFA APK HappyMod free to use?

    A: Yes, FIFA APK HappyMod is free to use and download. You do not need to pay any money to access the modded games or the features of FIFA APK HappyMod. However, you may still need to spend some in-game currency or real money to buy some items or upgrades in the game.

    Q: Can I play FIFA APK HappyMod offline?

    A: No, you cannot play FIFA APK HappyMod offline. You need to have a stable internet connection while playing FIFA APK HappyMod, as the game requires online access to function properly. You also need to log in with your EA account or Facebook account to play the game.

    Q: Can I play FIFA APK HappyMod with my friends?

    A: Yes, you can play FIFA APK HappyMod with your friends or other players online. You can challenge them in head-to-head matches, join or create a league with them, or cooperate with them in tournaments and championships. You can also chat with them and send them gifts and messages.

    Q: How can I update FIFA APK HappyMod?

    A: You can update FIFA APK HappyMod by visiting [HappyMod] and checking for the latest version of the mod that you want to download. You can also enable the auto-update option in the settings of FIFA APK HappyMod to get notified when there is a new update available.

    Q: How can I contact the developers of FIFA APK HappyMod?

    A: You can contact the developers of FIFA APK HappyMod by visiting their official website [HappyMod] and filling out the contact form. You can also follow them on their social media accounts like Facebook, Twitter, and Instagram.

    -

    fifa apk happymod


    Download Zip ✶✶✶ https://gohhs.com/2uPpyi



    \ No newline at end of file diff --git a/spaces/fffiloni/ArcaneStyleTransfer_Webcam/app.py b/spaces/fffiloni/ArcaneStyleTransfer_Webcam/app.py deleted file mode 100644 index d19ffb938244d7cb0befe43636899ebeb816f16f..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/ArcaneStyleTransfer_Webcam/app.py +++ /dev/null @@ -1,73 +0,0 @@ -import os -os.system("pip freeze") - -import torch -import PIL -import gradio as gr -import torch -from utils import align_face -from torchvision import transforms -from huggingface_hub import hf_hub_download - -device = "cuda:0" if torch.cuda.is_available() else "cpu" - -image_size = 512 -transform_size = 1024 - -means = [0.5, 0.5, 0.5] -stds = [0.5, 0.5, 0.5] - -img_transforms = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(means, stds)]) - -model_path = hf_hub_download(repo_id="jjeamin/ArcaneStyleTransfer", filename="pytorch_model.bin") - -if 'cuda' in device: - style_transfer = torch.jit.load(model_path).eval().cuda().half() - t_stds = torch.tensor(stds).cuda().half()[:,None,None] - t_means = torch.tensor(means).cuda().half()[:,None,None] -else: - style_transfer = torch.jit.load(model_path).eval().cpu() - t_stds = torch.tensor(stds).cpu()[:,None,None] - t_means = torch.tensor(means).cpu()[:,None,None] - -def tensor2im(var): - return var.mul(t_stds).add(t_means).mul(255.).clamp(0,255).permute(1,2,0) - -def proc_pil_img(input_image): - if 'cuda' in device: - transformed_image = img_transforms(input_image)[None,...].cuda().half() - else: - transformed_image = img_transforms(input_image)[None,...].cpu() - - with torch.no_grad(): - result_image = style_transfer(transformed_image)[0] - output_image = tensor2im(result_image) - output_image = output_image.detach().cpu().numpy().astype('uint8') - output_image = PIL.Image.fromarray(output_image) - return output_image - -def process(im, is_align): - im = PIL.ImageOps.exif_transpose(im) - - if is_align == 'True': - im = align_face(im, output_size=image_size, transform_size=transform_size) - else: - pass - - res = proc_pil_img(im) - - return res - -gr.Interface( - process, - inputs=[gr.inputs.Image(source="webcam",type="pil", label="Input", shape=(image_size, image_size)), gr.inputs.Radio(['True','False'], type="value", default='True', label='face align')], - outputs=gr.outputs.Image(type="pil", label="Output"), -title="Arcane Style Transfer WEBCAM", - description="Gradio demo for Arcane Style Transfer with webcam input", - article = "

    Github Repo by jjeamin

    ", - enable_queue=True, - allow_flagging=False, - allow_screenshot=False - ).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-adapter/dist/contrib/yeast.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-adapter/dist/contrib/yeast.js deleted file mode 100644 index 490b158d017281e55dc2d774e52af4f86a7af451..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/socket.io-adapter/dist/contrib/yeast.js +++ /dev/null @@ -1,55 +0,0 @@ -// imported from https://github.com/unshiftio/yeast -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.yeast = exports.decode = exports.encode = void 0; -const alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_".split(""), length = 64, map = {}; -let seed = 0, i = 0, prev; -/** - * Return a string representing the specified number. - * - * @param {Number} num The number to convert. - * @returns {String} The string representation of the number. - * @api public - */ -function encode(num) { - let encoded = ""; - do { - encoded = alphabet[num % length] + encoded; - num = Math.floor(num / length); - } while (num > 0); - return encoded; -} -exports.encode = encode; -/** - * Return the integer value specified by the given string. - * - * @param {String} str The string to convert. - * @returns {Number} The integer value represented by the string. - * @api public - */ -function decode(str) { - let decoded = 0; - for (i = 0; i < str.length; i++) { - decoded = decoded * length + map[str.charAt(i)]; - } - return decoded; -} -exports.decode = decode; -/** - * Yeast: A tiny growing id generator. - * - * @returns {String} A unique id. - * @api public - */ -function yeast() { - const now = encode(+new Date()); - if (now !== prev) - return (seed = 0), (prev = now); - return now + "." + encode(seed++); -} -exports.yeast = yeast; -// -// Map each character to its index. 
-//
-for (; i < length; i++)
-    map[alphabet[i]] = i;
diff --git a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_28.py b/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_28.py
deleted file mode 100644
index 4ccd19ceee29959a479ccd2f6f928452e8582758..0000000000000000000000000000000000000000
--- a/spaces/fgenie/scamtext_PAL_self_consistency/funcs/f_28.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import re
-
-def is_spam(text: str) -> bool:
-    # Basic spam indicators
-    spam_words = ["상한가", "추천", "vip", "관심종목", "명가", "수익률", "비번", "비밀번호", "차트", "투자"]
-    text_lower = text.lower()
-
-    for word in spam_words:
-        if word in text_lower:
-            return True
-
-    # Check for URLs
-    url_regex = re.compile("http[s]?://(?:[a-zA-Z]|[0-9]|[$-@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
-    urls = re.findall(url_regex, text)
-    if len(urls) > 0:
-        return True
-
-    # Check for unusual patterns
-    unusual_patterns = ["[0-9]+%[\\+\\-↑]", "key:[0-9]+", "코드번호 [0-9]+"]
-    for pattern in unusual_patterns:
-        if re.search(pattern, text):
-            return True
-
-    # Check for sequences of numbers and characters combined
-    sequences = re.findall("([0-9]+[a-zA-Z]+|[a-zA-Z]+[0-9]+)", text)
-    if len(sequences) > 1:
-        return True
-
-    return False
\ No newline at end of file
diff --git a/spaces/fjenett/ellipse-detection-aamed/app.py b/spaces/fjenett/ellipse-detection-aamed/app.py
deleted file mode 100644
index 960b1fdde5f8b4a75200197490d923bed9863961..0000000000000000000000000000000000000000
--- a/spaces/fjenett/ellipse-detection-aamed/app.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import gradio as gr
-import cv2
-import json
-import numpy as np
-import glob2 as glob
-from PIL import Image
-
-from pyAAMED import pyAAMED
-
-title = """
-

    Arc Adjacency Matrix based Fast Ellipse Detection

    -Gitub -""" - -def detect_ellipses(img_path): - imgC = cv2.imread(img_path) - imgG = cv2.cvtColor(imgC, cv2.COLOR_BGR2GRAY) - - ammed_size = 600 - iheight, iwidth = imgG.shape - - imax = max(iheight, iwidth) - iscale = (ammed_size - 1) / imax - is_landscape = iwidth >= iheight - if is_landscape: - iw = imax * iscale - ih = iheight * iscale - else: - iw = iwidth * iscale - ih = imax * iscale - - imgG = cv2.resize(imgG, (int(iw), int(ih))) - - aamed = pyAAMED(ammed_size, ammed_size) - aamed.setParameters(3.1415926/3, 3.4, 0.77) - - print(ammed_size, iw, ih, imgG.shape) - - result = aamed.run_AAMED(imgG) - - imgN = imgG.copy() - - if len(result) > 0: - imgN = cv2.cvtColor(imgN, cv2.COLOR_GRAY2RGB) - for e in result: - x, y, w, h, a, _ = e - imgN = cv2.ellipse(imgN, (y, x), (int(h / 2), int(w / 2)), -a, 0, 360, (0, 255, 0), 3) # image, center_coordinates, axesLength, angle, startAngle, endAngle, color, thickness - - # from CPP code: - # temp.center.x = detEllipses[i].center.y; - # temp.center.y = detEllipses[i].center.x; - # temp.size.height = detEllipses[i].size.width; - # temp.size.width = detEllipses[i].size.height; - # temp.angle = -detEllipses[i].angle; - - result = result.tolist() - else: - result = [] - - return [Image.fromarray(imgN), json.dumps(result)] - -examples = [] - -test_files = glob.glob('./examples/*.jpg') + glob.glob('./examples/*.png') -for f in test_files: - examples = examples + [[f]] - -gr.Interface( - fn=detect_ellipses, - inputs=gr.Image(label="Upload image with ellipses", type="filepath"), - outputs=[ - gr.Image(type="pil", label="Detected ellipses"), - gr.Textbox(label="Detected ellipses") - ], - title=title, - examples=examples, - allow_flagging='never' -).launch( - debug=True, - server_name="0.0.0.0", - server_port=7860 -) \ No newline at end of file diff --git a/spaces/fjyczcr/bingai/Dockerfile b/spaces/fjyczcr/bingai/Dockerfile deleted file mode 100644 index 737c308da1a8e2a4cd0c18379c92507d293990d6..0000000000000000000000000000000000000000 --- a/spaces/fjyczcr/bingai/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine -# 设置工作目录 -WORKDIR /workspace/app -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
-# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92nccfgmvQWYtX5rG6bE3fZ4iOf" -#ENV Go_Proxy_BingAI_USER_TOKEN_1="1dYZXsGOo9TT0TacXwJFC7FMO1dLI7qrZo7fLO2vS0IGG0N2aJWX-i6kXgTAgm1fcVKfREQrM9PYbkffkTwjwt6RLAJN2HiW_5UFH8az-NFDUX4YbOao0R_3kJKj_gMEQTtenaZ90oDWzXk1GQdg4dfr-PJ2y1glw_wJyuP8scUpT97NjZGwJ8M8agIuQjurPcx8lamk_35Gszsn5pi3G1Aog2GCpWYk9eSvLz7-gQ3g" -#ENV Go_Proxy_BingAI_USER_TOKEN_1="MUID=155B59F60D3E63DB37B14A880C7462EA; MUIDB=155B59F60D3E63DB37B14A880C7462EA; _EDGE_S=F=1&SID=1605868DD57765C33E1895F3D43D6407; _EDGE_V=1; USRLOC=HS=1&CLOC=LAT=25.325971994033555|LON=118.26904155830161|A=733.4464586120832|TS=230831030314|SRC=W; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=701B6B40DCE8463CB0106BE5725A7930&dmnchg=1; _UR=QS=0&TQS=0; MicrosoftApplicationsTelemetryDeviceId=cc78aa29-5275-4b4e-9030-307b88e7220b; _Rwho=u=d; BFBUSR=BAWAS=1&BAWFS=1; BFB=AhD4bprB1yMAcGYdWz0zgCXkQpR8F33zXi7hT_p4B2_-E9aov9D0wSJXGq4dQujb1SsblyIXh1KgU4uv8hvApaQ5ikbBEtFqoSaXswcZ8T8bhUa0D3fH157Fj0hw3a7-tFN7r7K7QVRDzcFv6qpM1t4837NaKTmMl9ub2mz9GVTIvQ; ipv6=hit=1693463156017&t=4; SNRHOP=I=&TS=; CSRFCookie=3438e237-c574-4a70-ae4e-bbd5256686dd; SRCHUSR=DOB=20230831&T=1693459553000&POEX=W; ANON=A=96C63930F5957227E0BEA04CFFFFFFFF&E=1cc2&W=4; NAP=V=1.9&E=1c68&C=bfhCuqHyG8X80vSyQg7aG40MPdZo95iOMXCZZETac73XUJN7fzN8Dg&W=4; PPLState=1; KievRPSSecAuth=FABiBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACGmqLef/AI1WIATwa0xEpAxPsM/KrsBXwBh0soHo7Rk8yMqf1HAlYmiUb3ASS2UPSMK+HBLvYPqsorAapxA/J0li7PaDNIickVBUYyxUiH1FCt52ocR22VIjD5jTjtBzvvg4Hxn2kRSHLidCV4ATnBy5DydGJrrO1trTmM1aENUhrzIPlrFQlTQIhnnucJrt5CCa/M04QbcLJosRWvpjQhW2A1ELCoF7Faku4qRKATpMaa8riHV33RBKkP7PmNic/phsYYwjGBoywICFNieWu33YH5rkAVzUBihNb0av8ZJpf7/7PHN8Yqp1l2ZkzpBmvc398ApPU2YlRPOsISTdB18LP8lohJA1PlfYl3VmzadrU8iyTcMZLMQ8ua38/TmY83QVXAoPbWJAelhFmdEVLZHSSuXqmkoMH3ApXHSld7Yj0Sb1tteMQz5RirLGHwX+VMy/vi+DPN/uRaXh/Ot8heB3ssfTh8tT7aW9SO2ieYQl2N8SnWPQUO0Y439UQUvOpAfaMxkcFLX6ZaN6zwIF8xXEqKFuiE6SyOSOvtZAVNduM4MYYmV1kjk1/4Tb2tgMhDNrQEJC09Xq1vXOsL1MLMLGVpCCmk3DysDCUKu81OqMuV0ofCzZ5VSRSfVILkhGHUAuaxyEz5Bq/sOYhHGuFVeLua2kUBxnE9UN9BRfPECBS1ABh51KmbfWqaDKeeVtHxEjcLNuDLoxRovMGZfthGkHgJsaal3YTQYL/VHdV7FkVOyU5uOdP+MxwRcSNhn61r/7J3vnNbkjv51shSwnmJwIlxYK7FpF5rIbO4ImHKPgV4VdDOsfAI4IkdWXbFR2NhC4GZVsNDZE3762KWltbEGvrBJ9CfgVudZun/pYdJe8ChGB4xSl2fhDC7kWV5sd0VugJr9682S0qLFhHdxbgPxpV1Dr5WAuTx0vHZlLM06NRc2KE/UA1Xuzv9Jz19DdAUd0Lai/hNqPde6OgVNtcFS6d9tsSd/99GHjjerSUgCnGFS95mzVKHwGXZIQFfY5DQW+iaZbYbvEVOTgTC0ENdADCFjQM1gtoyDowjlWJv4BZ3459608385" -#ENV Go_Proxy_BingAI_AUTH_KEY="1145141919810" -# 暴露8080端口 -EXPOSE 8080 -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/floriankrempl/mtg_rules_bot/mtg/data_handler/vector_db.py b/spaces/floriankrempl/mtg_rules_bot/mtg/data_handler/vector_db.py deleted file mode 100644 index 044214e6c05ce88bcbdd53d24c9defb4987c1301..0000000000000000000000000000000000000000 --- a/spaces/floriankrempl/mtg_rules_bot/mtg/data_handler/vector_db.py +++ /dev/null @@ -1,92 +0,0 @@ -import hnswlib -import openai -import numpy as np -from tqdm import tqdm - -from mtg.objects import Card -from mtg.utils import get_openai_api_key -from mtg.utils.logging import get_logger - -logger = get_logger(__name__) - -openai.api_key = get_openai_api_key() - - -class VectorDB: - def __init__(self, cards: list[Card]): - # TODO should not be cards but list of str - self.graph: hnswlib.Index = None - self.ids_2_card_name: dict[int, str] = None - - names_and_embeddings = self.get_embeddings(cards) - self.create_graph(names_and_embeddings=names_and_embeddings) - - def 
get_embeddings(self, cards: list[Card]) -> tuple[str, np.ndarray]: - names_and_embeddings = [] - logger.info(f"Vector DB: adding {len(cards)} embeddings") - for card in tqdm(cards): - try: - response = openai.Embedding.create( - input=card.to_text(), model="text-embedding-ada-002" - ) - embeddings = response["data"][0]["embedding"] - names_and_embeddings.append((card.name, np.array(embeddings))) - except: - logger.info( - f"Vector DB: downloaded {len(names_and_embeddings)} embeddings" - ) - return names_and_embeddings - return names_and_embeddings - - def create_graph( - self, names_and_embeddings: tuple[str, np.ndarray], ef: int = 10000, M: int = 16 - ) -> None: - # Generating sample data - names, embeddings = zip(*names_and_embeddings) - data = np.array(embeddings) - ids = np.arange(len(data)) - - # Declaring index - graph = hnswlib.Index(space="cosine", dim=len(data[0])) - graph.init_index(max_elements=len(embeddings), ef_construction=ef, M=M) - graph.add_items(data, ids) - graph.set_ef(ef) - - self.graph = graph - self.ids_2_card_name = {idx: name for idx, name in zip(ids, names)} - - return - - def add(self, names_and_embeddings) -> None: - names, embeddings = zip(*names_and_embeddings) - old_index_size = self.graph.get_max_elements() - new_index_size = old_index_size + len(names_and_embeddings) - idxs = [old_index_size + i for i in range(len(embeddings))] - - self.graph.resize_index(new_index_size) - self.graph.add_items(data=embeddings, ids=idxs) - self.ids_2_card_name.update({i: name for i, name in zip(idxs, names)}) - - def query( - self, text: str, k: int = 3, threshhold=0.2, lasso_threshhold: int = 0.02 - ): - card_names = set() - sentences = text.split(".") - for sentence in sentences: - response = openai.Embedding.create( - input=sentence, model="text-embedding-ada-002" - ) - embedding = response["data"][0]["embedding"] - labels, distances = self.graph.knn_query(np.array(embedding), k=k) - - initial_distance = distances[0][0] - for label, distance in zip(labels[0], distances[0]): - if (distance - initial_distance > lasso_threshhold) or ( - distance > threshhold - ): - break - card_name = self.ids_2_card_name.get(label, None) - logger.debug(f"Vector DB: found {card_name} - distance {distance}") - card_names.add(card_name) - logger.debug(f"queried Vector DB {len(sentences)} times") - return list(card_names) diff --git a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/utils.py b/spaces/floriankrempl/mtg_rules_bot/mtg/utils/utils.py deleted file mode 100644 index 2d52c1909615193fd50c2bc2712c77cc22aa95d0..0000000000000000000000000000000000000000 --- a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/utils.py +++ /dev/null @@ -1,16 +0,0 @@ -import os -import yaml -import logging - - -def get_openai_api_key(): - try: - with open("config/config.yaml", "r") as infile: - config = yaml.load(infile, Loader=yaml.FullLoader) - openai_api_key = config.get("open_ai_token") - logging.info("loaded open ai token from config file ") - except: - logging.warn("did not find config file") - openai_api_key = os.environ["open_ai_token"] - logging.info("loaded open ai token from environment") - return openai_api_key diff --git a/spaces/flowers-team/Interactive_DeepRL_Demo/js/Box2D_dynamics/climbing_dynamics.js b/spaces/flowers-team/Interactive_DeepRL_Demo/js/Box2D_dynamics/climbing_dynamics.js deleted file mode 100644 index 66c14f8ce29f43bb668d0415b81da888dfe6a30f..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/Interactive_DeepRL_Demo/js/Box2D_dynamics/climbing_dynamics.js +++ /dev/null @@ 
-1,175 +0,0 @@ -/** - * @classdesc Class that handles the climbing dynamics. - */ -class ClimbingDynamics { - constructor(){}; - - /** - * Prepares the agent's sensors to grasp or release according to the actions. - * @param actions {Array} - Actions of the agent - * @param agent_body {Object} - Climber morphology - * @param world - {Object} - Box2D world - */ - before_step_climbing_dynamics(actions, agent_body, world){ - for(let i = 0; i < agent_body.sensors.length; i++){ - let action_to_check = actions[actions.length - i - 1]; - let sensor_to_check = agent_body.sensors[agent_body.sensors.length - i - 1]; - if(action_to_check > 0){ // Check whether the sensor should grasp or release - sensor_to_check.GetUserData().ready_to_attach = true; - } - else { - sensor_to_check.GetUserData().ready_to_attach = false; - if(sensor_to_check.GetUserData().has_joint){ // if released and it had a joint => destroys it - sensor_to_check.GetUserData().has_joint = false; - - // Gets a list of all the joints of the sensor body - let sensor_joints = []; - let _joint = sensor_to_check.GetJointList(); - while(_joint != null){ - sensor_joints.push(_joint.joint); - _joint = _joint.next; - } - // Finds the index of the first revolute joint - const isRevolute = (s) => s.m_type == b2.Joint.e_revoluteJoint; - let idx_to_destroy = sensor_joints.findIndex(isRevolute); - if(idx_to_destroy != -1){ - world.DestroyJoint(sensor_joints[idx_to_destroy]); - } - } - } - } - } - - /** - * Creates joints between sensors ready to grasp if collision with graspable area was detected - * @param contact_detector {Object} - * @param world {Object} - Box2D world - */ - after_step_climbing_dynamics(contact_detector, world){ - // Adds climbing joints if needed - for(let i = 0; i < contact_detector.contact_dictionaries.sensors.length; i++){ - let sensor = contact_detector.contact_dictionaries.sensors[i]; - if(contact_detector.contact_dictionaries.bodies[i].length > 0 - && sensor.GetUserData().ready_to_attach - && !sensor.GetUserData().has_joint){ - let other_bodies = [...contact_detector.contact_dictionaries.bodies[i]]; - for(let other_body of other_bodies){ - - // Checks if still overlapping after solver - // Super coarse yet fast way, mainly useful for creepers - let other_body_shape = other_body.GetFixtureList().GetShape(); - let x_values = []; - let y_values = []; - if(other_body_shape.m_type == b2.Shape.e_polygon){ - for(let i = 0; i < other_body_shape.m_count; i++) { - x_values.push(other_body.GetWorldPoint(other_body_shape.m_vertices[i]).x); - y_values.push(other_body.GetWorldPoint(other_body_shape.m_vertices[i]).y); - } - } - else if(other_body_shape.m_type == b2.Shape.e_edge){ - x_values = [other_body_shape.m_vertex1.x, other_body_shape.m_vertex2.x]; - y_values = [other_body_shape.m_vertex1.y, other_body_shape.m_vertex2.y]; - } - - let radius = sensor.GetFixtureList().GetShape().m_radius + 0.01; - let sensor_world_center = sensor.GetWorldCenter(); - - if(sensor_world_center.x + radius > Math.min(...x_values) - && sensor_world_center.x - radius < Math.max(...x_values) - && sensor_world_center.y + radius > Math.min(...y_values) - && sensor_world_center.y - radius < Math.max(...y_values)){ - - let rjd = new b2.RevoluteJointDef(); - rjd.Initialize(sensor, other_body, sensor_world_center); - let joint = world.CreateJoint(rjd); - joint.SetUserData(new CustomBodyUserData(false, false, "grip")); - joint.GetBodyA().GetUserData().joint = joint; - sensor.GetUserData().has_joint = true; - break; - } - else { - // Removes other_body from the 
list of bodies in contact with the sensor - let sensor_idx = contact_detector.contact_dictionaries.sensors.indexOf(sensor); - if(sensor_idx != -1){ - let other_idx = contact_detector.contact_dictionaries.bodies[sensor_idx].indexOf(other_body); - contact_detector.contact_dictionaries.bodies[sensor_idx].splice(other_idx, 1); - - if(contact_detector.contact_dictionaries.bodies[sensor_idx].length == 0){ - sensor.GetUserData().has_contact = false; - } - } - } - } - } - } - } -} - -/** - * @classdesc Stores contacts between sensors and graspable surfaces in a dictionaries associated to the sensor. - * @constructor - */ -function ClimbingContactDetector() { - b2.ContactListener.call(this); - this.contact_dictionaries = { - sensors: [], - bodies: [] - }; -} - -ClimbingContactDetector.prototype = Object.create(b2.ContactListener.prototype); -ClimbingContactDetector.prototype.constructor = ClimbingContactDetector; -ClimbingContactDetector.prototype.BeginContact = function (contact) { - let bodies = [contact.GetFixtureA().GetBody(), contact.GetFixtureB().GetBody()]; - for(let i = 0; i < bodies.length; i++){ - let body = bodies[i]; - if(body.GetUserData().object_type == CustomUserDataObjectTypes.BODY_SENSOR - && body.GetUserData().check_contact){ - let other_body = bodies[(i + 1) % 2]; - if(other_body.GetUserData().object_type == CustomUserDataObjectTypes.GRIP_TERRAIN - || other_body.GetUserData().object_type == CustomUserDataObjectTypes.SENSOR_GRIP_TERRAIN){ - body.GetUserData().has_contact = true; - let idx = this.contact_dictionaries.sensors.indexOf(body); - if(idx != -1){ - this.contact_dictionaries.bodies[idx].push(other_body); - } - else{ - this.contact_dictionaries.sensors.push(body); - this.contact_dictionaries.bodies.push([other_body]); - } - } - else{ - return; - } - } - } -}; - -ClimbingContactDetector.prototype.EndContact = function (contact){ - let bodies = [contact.GetFixtureA().GetBody(), contact.GetFixtureB().GetBody()]; - for(let i = 0; i < bodies.length; i++) { - let body = bodies[i]; - let other_body = bodies[(i + 1) % 2]; - if(body.GetUserData().object_type == CustomUserDataObjectTypes.BODY_SENSOR && - body.GetUserData().check_contact && body.GetUserData().has_contact){ - let body_idx = this.contact_dictionaries.sensors.indexOf(body); - if (body_idx != -1) { - let other_idx = this.contact_dictionaries.bodies[body_idx].indexOf(other_body); - if(other_idx != -1){ - this.contact_dictionaries.bodies[body_idx].splice(other_idx, 1); - } - - if(this.contact_dictionaries.bodies[body_idx].length == 0){ - body.GetUserData().has_contact = false; - } - } - - } - } -}; - -ClimbingContactDetector.prototype.Reset = function(){ - this.contact_dictionaries = { - body: [] - }; -}; \ No newline at end of file diff --git a/spaces/flynster/FeinbergQuizNotes/README.md b/spaces/flynster/FeinbergQuizNotes/README.md deleted file mode 100644 index dbefa32f6170424963b7d49ca0dc94cc8327e334..0000000000000000000000000000000000000000 --- a/spaces/flynster/FeinbergQuizNotes/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: FeinbergQuizNotes -emoji: 💻 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.0.26 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/foghuang/ChatGLM2-6B/openai_api.py b/spaces/foghuang/ChatGLM2-6B/openai_api.py deleted file mode 100644 index 954326b02ffe542ea7397cc686078e7300f3a9f2..0000000000000000000000000000000000000000 --- 
a/spaces/foghuang/ChatGLM2-6B/openai_api.py +++ /dev/null @@ -1,174 +0,0 @@ -# coding=utf-8 -# Implements API for ChatGLM2-6B in OpenAI's format. (https://platform.openai.com/docs/api-reference/chat) -# Usage: python openai_api.py -# Visit http://localhost:8000/docs for documents. - - -import time -import torch -import uvicorn -from pydantic import BaseModel, Field -from fastapi import FastAPI, HTTPException -from fastapi.middleware.cors import CORSMiddleware -from contextlib import asynccontextmanager -from starlette.responses import StreamingResponse -from typing import Any, Dict, List, Literal, Optional, Union -from transformers import AutoTokenizer, AutoModel - - -@asynccontextmanager -async def lifespan(app: FastAPI): # collects GPU memory - yield - if torch.cuda.is_available(): - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - - -app = FastAPI(lifespan=lifespan) - -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -class ModelCard(BaseModel): - id: str - object: str = "model" - created: int = Field(default_factory=lambda: int(time.time())) - owned_by: str = "owner" - root: Optional[str] = None - parent: Optional[str] = None - permission: Optional[list] = None - - -class ModelList(BaseModel): - object: str = "list" - data: List[ModelCard] = [] - - -class ChatMessage(BaseModel): - role: Literal["user", "assistant", "system"] - content: str - - -class DeltaMessage(BaseModel): - role: Optional[Literal["user", "assistant", "system"]] = None - content: Optional[str] = None - - -class ChatCompletionRequest(BaseModel): - model: str - messages: List[ChatMessage] - temperature: Optional[float] = None - top_p: Optional[float] = None - max_length: Optional[int] = None - stream: Optional[bool] = False - - -class ChatCompletionResponseChoice(BaseModel): - index: int - message: ChatMessage - finish_reason: Literal["stop", "length"] - - -class ChatCompletionResponseStreamChoice(BaseModel): - index: int - delta: DeltaMessage - finish_reason: Optional[Literal["stop", "length"]] - - -class ChatCompletionResponse(BaseModel): - model: str - object: Literal["chat.completion", "chat.completion.chunk"] - choices: List[Union[ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice]] - created: Optional[int] = Field(default_factory=lambda: int(time.time())) - - -@app.get("/v1/models", response_model=ModelList) -async def list_models(): - global model_args - model_card = ModelCard(id="gpt-3.5-turbo") - return ModelList(data=[model_card]) - - -@app.post("/v1/chat/completions", response_model=ChatCompletionResponse) -async def create_chat_completion(request: ChatCompletionRequest): - global model, tokenizer - - if request.messages[-1].role != "user": - raise HTTPException(status_code=400, detail="Invalid request") - query = request.messages[-1].content - - prev_messages = request.messages[:-1] - if len(prev_messages) > 0 and prev_messages[0].role == "system": - query = prev_messages.pop(0).content + query - - history = [] - if len(prev_messages) % 2 == 0: - for i in range(0, len(prev_messages), 2): - if prev_messages[i].role == "user" and prev_messages[i+1].role == "assistant": - history.append([prev_messages[i].content, prev_messages[i+1].content]) - - if request.stream: - generate = predict(query, history, request.model) - return StreamingResponse(generate, media_type="text/event-stream") - - response, _ = model.chat(tokenizer, query, history=history) - choice_data = ChatCompletionResponseChoice( - index=0, - 
message=ChatMessage(role="assistant", content=response), - finish_reason="stop" - ) - - return ChatCompletionResponse(model=request.model, choices=[choice_data], object="chat.completion") - - -async def predict(query: str, history: List[List[str]], model_id: str): - global model, tokenizer - - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(role="assistant"), - finish_reason=None - ) - chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk") - yield "data: {}\n\n".format(chunk.json(exclude_unset=True, ensure_ascii=False)) - - current_length = 0 - - for new_response, _ in model.stream_chat(tokenizer, query, history): - if len(new_response) == current_length: - continue - - new_text = new_response[current_length:] - current_length = len(new_response) - - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(content=new_text), - finish_reason=None - ) - chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk") - yield "data: {}\n\n".format(chunk.json(exclude_unset=True, ensure_ascii=False)) - - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(), - finish_reason="stop" - ) - chunk = ChatCompletionResponse(model=model_id, choices=[choice_data], object="chat.completion.chunk") - yield "data: {}\n\n".format(chunk.json(exclude_unset=True, ensure_ascii=False)) - - -if __name__ == "__main__": - tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True) - model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).cuda() - # 多显卡支持,使用下面两行代替上面一行,将num_gpus改为你实际的显卡数量 - # from utils import load_model_on_gpus - # model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2) - model.eval() - - uvicorn.run(app, host='0.0.0.0', port=8000, workers=1) diff --git a/spaces/g4f/freegpt-webui/client/css/stop-generating.css b/spaces/g4f/freegpt-webui/client/css/stop-generating.css deleted file mode 100644 index 3c2010d25065fbef63b104df743ef72c00259871..0000000000000000000000000000000000000000 --- a/spaces/g4f/freegpt-webui/client/css/stop-generating.css +++ /dev/null @@ -1,38 +0,0 @@ -.stop-generating { - position: absolute; - bottom: 128px; - left: 50%; - transform: translateX(-50%); - z-index: 1000000; -} - -.stop-generating button { - backdrop-filter: blur(20px); - -webkit-backdrop-filter: blur(20px); - background-color: var(--blur-bg); - color: var(--colour-3); - cursor: pointer; - animation: show_popup 0.4s; -} - -@keyframes show_popup { - from { - opacity: 0; - transform: translateY(10px); - } -} - -@keyframes hide_popup { - to { - opacity: 0; - transform: translateY(10px); - } -} - -.stop-generating-hiding button { - animation: hide_popup 0.4s; -} - -.stop-generating-hidden button { - display: none; -} diff --git a/spaces/generativeai/bestpics-ms-crop-image/README.md b/spaces/generativeai/bestpics-ms-crop-image/README.md deleted file mode 100644 index 51518f899522c5c077f5b75af86614b408018d56..0000000000000000000000000000000000000000 --- a/spaces/generativeai/bestpics-ms-crop-image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bestpics Ms Crop Image -emoji: 👁 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.48.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/giacomov/pdffigures2/app.py 
b/spaces/giacomov/pdffigures2/app.py deleted file mode 100644 index 558973a5c9df44c8e7ce08d06485b3d05d95f2e3..0000000000000000000000000000000000000000 --- a/spaces/giacomov/pdffigures2/app.py +++ /dev/null @@ -1,48 +0,0 @@ -import gradio as gr -import urllib.request -import subprocess -import os -import glob - - -def extract_figure(url): - # download PDF file from URL - urllib.request.urlretrieve(url, "input.pdf") - - # extract first figure from PDF using pdffigures2 - subprocess.run(["java", "-jar", "pdffigures2.jar", "input.pdf", "-m", "figures_"]) - - all_pngs = glob.glob("*.png") - print(all_pngs) - - # get path to first figure - figure_path = "figures_input-Figure1-1.png" - - # # read first figure from file - # with open(figure_path, "rb") as f: - # figure_bytes = f.read() - - # # delete downloaded file and figure file - # os.remove("input.pdf") - # os.remove(figure_path) - - # return first figure - return figure_path - - -def run(): - -# define input and output interfaces - inputs = gr.inputs.Textbox(label="Enter URL of PDF file:") - outputs = gr.outputs.Image(label="First figure in PDF:", type="filepath") - - # create interface - interface = gr.Interface(fn=extract_figure, inputs=inputs, outputs=outputs, title="Extract First Figure from PDF", description="Enter the URL of a PDF file and the first figure in the file will be extracted and displayed.") - - # launch interface - interface.launch(server_name="0.0.0.0", server_port=7860) - - -if __name__ == "__main__": - - run() diff --git a/spaces/giswqs/Streamlit/app.py b/spaces/giswqs/Streamlit/app.py deleted file mode 100644 index b9a59993b5746a36c400222c1a3d05a6c374ecca..0000000000000000000000000000000000000000 --- a/spaces/giswqs/Streamlit/app.py +++ /dev/null @@ -1,48 +0,0 @@ -import streamlit as st -import leafmap.foliumap as leafmap - -st.set_page_config(layout="wide") - -st.sidebar.info( - """ - - Web App URL: - - GitHub repository: - """ -) - -st.sidebar.title("Contact") -st.sidebar.info( - """ - Qiusheng Wu at [wetlands.io](https://wetlands.io) | [GitHub](https://github.com/giswqs) | [Twitter](https://twitter.com/giswqs) | [YouTube](https://www.youtube.com/c/QiushengWu) | [LinkedIn](https://www.linkedin.com/in/qiushengwu) - """ -) - -st.title("Streamlit for Geospatial Applications") - -st.markdown( - """ - This multi-page web app demonstrates various interactive web apps created using [streamlit](https://streamlit.io) and open-source mapping libraries, - such as [leafmap](https://leafmap.org), [geemap](https://geemap.org), [pydeck](https://deckgl.readthedocs.io), and [kepler.gl](https://docs.kepler.gl/docs/keplergl-jupyter). - This is an open-source project and you are very welcome to contribute your comments, questions, resources, and apps as [issues](https://github.com/giswqs/streamlit-geospatial/issues) or - [pull requests](https://github.com/giswqs/streamlit-geospatial/pulls) to the [GitHub repository](https://github.com/giswqs/streamlit-geospatial). - - """ -) - -st.info("Click on the left sidebar menu to navigate to the different apps.") - -st.subheader("Timelapse of Satellite Imagery") -st.markdown( - """ - The following timelapse animations were created using the Timelapse web app. Click `Timelapse` on the left sidebar menu to create your own timelapse for any location around the globe. 
-""" -) - -row1_col1, row1_col2 = st.columns(2) -with row1_col1: - st.image("https://github.com/giswqs/data/raw/main/timelapse/spain.gif") - st.image("https://github.com/giswqs/data/raw/main/timelapse/las_vegas.gif") - -with row1_col2: - st.image("https://github.com/giswqs/data/raw/main/timelapse/goes.gif") - st.image("https://github.com/giswqs/data/raw/main/timelapse/fire.gif") diff --git a/spaces/gligen/demo/gligen/ldm/util.py b/spaces/gligen/demo/gligen/ldm/util.py deleted file mode 100644 index 51839cb1478d9fecb293277dc83d2693e3d26de4..0000000000000000000000000000000000000000 --- a/spaces/gligen/demo/gligen/ldm/util.py +++ /dev/null @@ -1,86 +0,0 @@ -import importlib - -import torch -import numpy as np - -from inspect import isfunction -from PIL import Image, ImageDraw, ImageFont - - -def log_txt_as_img(wh, xc, size=10): - # wh a tuple of (width, height) - # xc a list of captions to plot - b = len(xc) - txts = list() - for bi in range(b): - txt = Image.new("RGB", wh, color="white") - draw = ImageDraw.Draw(txt) - font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) - nc = int(40 * (wh[0] / 256)) - lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) - - try: - draw.text((0, 0), lines, fill="black", font=font) - except UnicodeEncodeError: - print("Cant encode string for logging. Skipping.") - - txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 - txts.append(txt) - txts = np.stack(txts) - txts = torch.tensor(txts) - return txts - - -def ismap(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] > 3) - - -def isimage(x): - if not isinstance(x,torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def mean_flat(tensor): - """ - https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 - Take the mean over all non-batch dimensions. 
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def count_params(model, verbose=False):
-    total_params = sum(p.numel() for p in model.parameters())
-    if verbose:
-        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
-    return total_params
-
-
-def instantiate_from_config(config):
-    if not "target" in config:
-        if config == '__is_first_stage__':
-            return None
-        elif config == "__is_unconditional__":
-            return None
-        raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
-
-
-def get_obj_from_str(string, reload=False):
-    module, cls = string.rsplit(".", 1)
-    if reload:
-        module_imp = importlib.import_module(module)
-        importlib.reload(module_imp)
-    return getattr(importlib.import_module(module, package=None), cls)
\ No newline at end of file
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Detective Hayseed - Hollywood Free Download [Torrent] Can You Uncover the Clues and Foil the Evil Villains Plan?.md b/spaces/gotiQspiryo/whisper-ui/examples/Detective Hayseed - Hollywood Free Download [Torrent] Can You Uncover the Clues and Foil the Evil Villains Plan?.md
deleted file mode 100644
index 3fc156a844ccca36dd5a66c577af2327b2a38758..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Detective Hayseed - Hollywood Free Download [Torrent] Can You Uncover the Clues and Foil the Evil Villains Plan?.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
    -

    People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.

    -

    Detective. And decode logic puzzles all so you can solve the case behind detective stories: hollywood.detective story is a 1951 film noir which tells the story of one day in the lives of the various people who populate a police detective squad.man reinstalls system to fix airport file sharing problem,.please submit your review for the little sister by raymond chandler.1. File size: 6.oxygen detective serial.psychic detectives. Follow. Trutv. User editor. No editor.download now for free.hotel scontati in zona hollywood.jewel quest heritage.intrigue in detective storieshollywood,.another in our.

    -

    Detective Hayseed - Hollywood Free Download [Torrent]





    -

    Hollywoodprenota con agoda.detective stories: hollywood cheats.top ten best detective movies.the film company has hired angela, a famous hollywood detective, to work on the case.detective stories: hollywooddownload free.watch secret file: hollywood movie trailer and get the latest cast info, photos, movie review and more on tvguide.detective stories hollywood crack: duplicate file detective .48.play detective and solve baffling cases in our huge selection of.detective hayseed: hollywood tbd.detective stories: hollywood platform: pc games file size. All so you can solve the case behind detective stories: hollywood. Say in hollywood: action.play.

    -

    Downloads.file located at: c:gamespogo.download and play free mystery games.detective stories hollywood final new hidden object puzzle.detective stories: hollywood download. We do not host any torrent files or links of short detective stories download from depositfiles, rapidshare,.tags: celebrity games, detective games, hollywood games.detective stories: hollywood game description.detective stories: hollywood download.detective hayseed hollywood kaos torrent download for free. Torrent.now, you will be happy that at. Hollywood, the last girlfriend on earth and other love stories.detective stories: hollywood file size 92.6 mb. When the download is complete, click on the file.

    -

    Fascinating series of true life detective stories.watch the big bang movie trailers,.it features kirk.the x files cast recalls their favorite moments. Modern hard boiled detective story with a very smart script and a solid.ashiel mystery a detective story books files.connect with.detective stories: hollywood,.detective stories hollywood final new hidden object puzzle crack:. Puzzle.file size: 93.5 mb. A famous hollywood detective to work on the case.game description.see how well critics are rating all pc video game releases at metacritic. A war story, a love story.detective stories pdf files.top free gay rape stories.

    -

    3.chandlers detective stories often starred the brash but honorable philip.game detail.detective stories hollywood final new hidden object puzzle crack:.publisher: nevosoft.free detective stories for kids downloads.detective stories: hollywood for ipad, iphone, android,.alexandr the top tenxw. The story was great as well.7the x files: i want to believe. Do.detective stories: hollywood hollywood is in a tizzy.detective stories hollywood activation code, ultraedit32 keygen, dinner dash 2 keygen. Menu. Follow the instructions on the web page to download the file.art detective game,.read the help files to find out how.

    -

    \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/simultaneous_translation/models/convtransformer_simul_trans.py b/spaces/gradio/HuBERT/examples/simultaneous_translation/models/convtransformer_simul_trans.py deleted file mode 100644 index 4a26422f650cf13ee7d4e8d2228b50ec49876fb8..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/simultaneous_translation/models/convtransformer_simul_trans.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -from fairseq import checkpoint_utils -from fairseq.models import ( - register_model, - register_model_architecture, -) -from fairseq.models.speech_to_text import ( - ConvTransformerModel, - convtransformer_espnet, - ConvTransformerEncoder, -) -from fairseq.models.speech_to_text.modules.augmented_memory_attention import ( - augmented_memory, - SequenceEncoder, - AugmentedMemoryConvTransformerEncoder, -) - -from torch import nn, Tensor -from typing import Dict, List -from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer - -@register_model("convtransformer_simul_trans") -class SimulConvTransformerModel(ConvTransformerModel): - """ - Implementation of the paper: - - SimulMT to SimulST: Adapting Simultaneous Text Translation to - End-to-End Simultaneous Speech Translation - - https://www.aclweb.org/anthology/2020.aacl-main.58.pdf - """ - - @staticmethod - def add_args(parser): - super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser) - parser.add_argument( - "--train-monotonic-only", - action="store_true", - default=False, - help="Only train monotonic attention", - ) - - @classmethod - def build_decoder(cls, args, task, embed_tokens): - tgt_dict = task.tgt_dict - - from examples.simultaneous_translation.models.transformer_monotonic_attention import ( - TransformerMonotonicDecoder, - ) - - decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) - - if getattr(args, "load_pretrained_decoder_from", None): - decoder = checkpoint_utils.load_pretrained_component_from_model( - component=decoder, checkpoint=args.load_pretrained_decoder_from - ) - return decoder - - -@register_model_architecture( - "convtransformer_simul_trans", "convtransformer_simul_trans_espnet" -) -def convtransformer_simul_trans_espnet(args): - convtransformer_espnet(args) - - -@register_model("convtransformer_augmented_memory") -@augmented_memory -class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel): - @classmethod - def build_encoder(cls, args): - encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args)) - - if getattr(args, "load_pretrained_encoder_from", None) is not None: - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=args.load_pretrained_encoder_from - ) - - return encoder - - -@register_model_architecture( - "convtransformer_augmented_memory", "convtransformer_augmented_memory" -) -def augmented_memory_convtransformer_espnet(args): - convtransformer_espnet(args) - - -# ============================================================================ # -# Convtransformer -# with monotonic attention decoder -# with emformer encoder -# 
============================================================================ # - - -class ConvTransformerEmformerEncoder(ConvTransformerEncoder): - def __init__(self, args): - super().__init__(args) - stride = self.conv_layer_stride(args) - trf_left_context = args.segment_left_context // stride - trf_right_context = args.segment_right_context // stride - context_config = [trf_left_context, trf_right_context] - self.transformer_layers = nn.ModuleList( - [ - NoSegAugmentedMemoryTransformerEncoderLayer( - input_dim=args.encoder_embed_dim, - num_heads=args.encoder_attention_heads, - ffn_dim=args.encoder_ffn_embed_dim, - num_layers=args.encoder_layers, - dropout_in_attn=args.dropout, - dropout_on_attn=args.dropout, - dropout_on_fc1=args.dropout, - dropout_on_fc2=args.dropout, - activation_fn=args.activation_fn, - context_config=context_config, - segment_size=args.segment_length, - max_memory_size=args.max_memory_size, - scaled_init=True, # TODO: use constant for now. - tanh_on_mem=args.amtrf_tanh_on_mem, - ) - ] - ) - self.conv_transformer_encoder = ConvTransformerEncoder(args) - - def forward(self, src_tokens, src_lengths): - encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device)) - output = encoder_out["encoder_out"][0] - encoder_padding_masks = encoder_out["encoder_padding_mask"] - - return { - "encoder_out": [output], - # This is because that in the original implementation - # the output didn't consider the last segment as right context. - "encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0 - else [], - "encoder_embedding": [], - "encoder_states": [], - "src_tokens": [], - "src_lengths": [], - } - - @staticmethod - def conv_layer_stride(args): - # TODO: make it configurable from the args - return 4 - - -@register_model("convtransformer_emformer") -class ConvtransformerEmformer(SimulConvTransformerModel): - @staticmethod - def add_args(parser): - super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser) - - parser.add_argument( - "--segment-length", - type=int, - metavar="N", - help="length of each segment (not including left context / right context)", - ) - parser.add_argument( - "--segment-left-context", - type=int, - help="length of left context in a segment", - ) - parser.add_argument( - "--segment-right-context", - type=int, - help="length of right context in a segment", - ) - parser.add_argument( - "--max-memory-size", - type=int, - default=-1, - help="Right context for the segment.", - ) - parser.add_argument( - "--amtrf-tanh-on-mem", - default=False, - action="store_true", - help="whether to use tanh on memory vector", - ) - - @classmethod - def build_encoder(cls, args): - encoder = ConvTransformerEmformerEncoder(args) - if getattr(args, "load_pretrained_encoder_from", None): - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=args.load_pretrained_encoder_from - ) - return encoder - - -@register_model_architecture( - "convtransformer_emformer", - "convtransformer_emformer", -) -def convtransformer_emformer_base(args): - convtransformer_espnet(args) diff --git a/spaces/h2oai/wave-tour/examples/plot_interval_helix.py b/spaces/h2oai/wave-tour/examples/plot_interval_helix.py deleted file mode 100644 index a041382ef75e92c6c00390149432efeb801ec9eb..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/plot_interval_helix.py +++ /dev/null @@ -1,15 +0,0 @@ -# Plot / Interval / Helix -# Make a 
bar #plot in helical coordinates. #interval -# --- -from h2o_wave import site, data, ui - -page = site['/demo'] - -page.add('example', ui.plot_card( - box='1 1 4 5', - title='Interval, helix', - data=data('product price', 200, rows=[ (f'P{i}', i) for i in range(200)]), - plot=ui.plot([ui.mark(coord='helix', type='interval', x='=product', y='=price', y_min=0)]) -)) - -page.save() diff --git a/spaces/hamelcubsfan/AutoGPT/autogpt/configurator.py b/spaces/hamelcubsfan/AutoGPT/autogpt/configurator.py deleted file mode 100644 index 1dc3be124f638b8859eb459bcb2d46696f62e2b7..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/autogpt/configurator.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Configurator module.""" -import click -from colorama import Back, Fore, Style - -from autogpt import utils -from autogpt.config import Config -from autogpt.logs import logger -from autogpt.memory import get_supported_memory_backends - -CFG = Config() - - -def create_config( - continuous: bool, - continuous_limit: int, - ai_settings_file: str, - skip_reprompt: bool, - speak: bool, - debug: bool, - gpt3only: bool, - gpt4only: bool, - memory_type: str, - browser_name: str, - allow_downloads: bool, - skip_news: bool, -) -> None: - """Updates the config object with the given arguments. - - Args: - continuous (bool): Whether to run in continuous mode - continuous_limit (int): The number of times to run in continuous mode - ai_settings_file (str): The path to the ai_settings.yaml file - skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script - speak (bool): Whether to enable speak mode - debug (bool): Whether to enable debug mode - gpt3only (bool): Whether to enable GPT3.5 only mode - gpt4only (bool): Whether to enable GPT4 only mode - memory_type (str): The type of memory backend to use - browser_name (str): The name of the browser to use when using selenium to scrape the web - allow_downloads (bool): Whether to allow Auto-GPT to download files natively - skips_news (bool): Whether to suppress the output of latest news on startup - """ - CFG.set_debug_mode(False) - CFG.set_continuous_mode(False) - CFG.set_speak_mode(False) - - if debug: - logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED") - CFG.set_debug_mode(True) - - if continuous: - logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED") - logger.typewriter_log( - "WARNING: ", - Fore.RED, - "Continuous mode is not recommended. It is potentially dangerous and may" - " cause your AI to run forever or carry out actions you would not usually" - " authorise. 
Use at your own risk.", - ) - CFG.set_continuous_mode(True) - - if continuous_limit: - logger.typewriter_log( - "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}" - ) - CFG.set_continuous_limit(continuous_limit) - - # Check if continuous limit is used without continuous mode - if continuous_limit and not continuous: - raise click.UsageError("--continuous-limit can only be used with --continuous") - - if speak: - logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED") - CFG.set_speak_mode(True) - - if gpt3only: - logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") - CFG.set_smart_llm_model(CFG.fast_llm_model) - - if gpt4only: - logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED") - CFG.set_fast_llm_model(CFG.smart_llm_model) - - if memory_type: - supported_memory = get_supported_memory_backends() - chosen = memory_type - if chosen not in supported_memory: - logger.typewriter_log( - "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ", - Fore.RED, - f"{supported_memory}", - ) - logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend) - else: - CFG.memory_backend = chosen - - if skip_reprompt: - logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED") - CFG.skip_reprompt = True - - if ai_settings_file: - file = ai_settings_file - - # Validate file - (validated, message) = utils.validate_yaml_file(file) - if not validated: - logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message) - logger.double_check() - exit(1) - - logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file) - CFG.ai_settings_file = file - CFG.skip_reprompt = True - - if allow_downloads: - logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED") - logger.typewriter_log( - "WARNING: ", - Fore.YELLOW, - f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " - + "It is recommended that you monitor any files it downloads carefully.", - ) - logger.typewriter_log( - "WARNING: ", - Fore.YELLOW, - f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}", - ) - CFG.allow_downloads = True - - if skip_news: - CFG.skip_news = True - - if browser_name: - CFG.selenium_web_browser = browser_name diff --git a/spaces/hbestm/gpt-academic-play/docs/README_EN.md b/spaces/hbestm/gpt-academic-play/docs/README_EN.md deleted file mode 100644 index db214f5327b8cdcd84ed1c57390c3b24ba83d78f..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/docs/README_EN.md +++ /dev/null @@ -1,291 +0,0 @@ -> **Note** -> -> This English README is automatically generated by the markdown translation plugin in this project, and may not be 100% correct. -> - -# ChatGPT Academic Optimization - -**If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a [README in English](docs/README_EN.md) translated by this project itself.** - -> **Note** -> -> 1. Please note that only **functions with red color** supports reading files, some functions are located in the **dropdown menu** of plugins. Additionally, we welcome and prioritize any new plugin PRs with **highest priority**! -> -> 2. The functionality of each file in this project is detailed in the self-translation report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the project. 
With the iteration of the version, you can also click on the relevant function plugins at any time to call GPT to regenerate the self-analysis report of the project. The FAQ summary is in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) section. -> - - -
-
-Function | Description
---- | ---
-One-Click Polish | Supports one-click polishing and finding grammar errors in academic papers.
-One-Key Translation Between Chinese and English | One-click translation between Chinese and English.
-One-Key Code Interpretation | Can correctly display and interpret code.
-[Custom Shortcut Keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys.
-[Configure Proxy Server](https://www.bilibili.com/video/BV1rc411W7Dr) | Supports configuring proxy servers.
-Modular Design | Supports custom high-order function plugins and [function plugins], and plugins support [hot updates](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
-[Self-programming Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] [One-Key Read](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of the self-analysis of this project's source code.
-[Program Analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function Plugin] One-click analysis of the project tree of other Python/C/C++/Java/Lua/... projects
-Read the Paper | [Function Plugin] One-click interpretation of the full text of a latex paper and generation of its abstract
-Latex Full Text Translation, Proofreading | [Function Plugin] One-click translation or proofreading of latex papers.
-Batch Comment Generation | [Function Plugin] One-click batch generation of function comments
-Chat Analysis Report Generation | [Function Plugin] After running, an automatic summary report will be generated
-[Arxiv Assistant](https://www.bilibili.com/video/BV1LM4y1279X) | [Function Plugin] Enter the arxiv article url to translate the abstract and download the PDF with one click
-[Full-text Translation Function of PDF Paper](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function Plugin] Extract the title & abstract of the PDF paper + translate the full text (multithreading)
-[Google Scholar Integration Assistant](https://www.bilibili.com/video/BV19L411U7ia) | [Function Plugin] Given any Google Scholar search page URL, let gpt help you choose interesting articles.
-Formula / Picture / Table Display | Can display both the tex form and the rendered form of formulas at the same time; supports formula and code highlighting
-Multithreaded Function Plugin Support | Supports multi-threaded calls to chatgpt, one-click processing of massive text or programs
-Start Dark Gradio [Theme](https://github.com/binary-husky/chatgpt_academic/issues/173) | Add ```/?__dark-theme=true``` at the end of the browser url to switch to the dark theme
-[Multiple LLM Models](https://www.bilibili.com/video/BV1wT411p7yf) support, [API2D](https://api2d.com/) interface support | It must feel nice to be served by GPT3.5, GPT4, and [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B) at the same time!
-Huggingface non-Science Net [Online Experience](https://huggingface.co/spaces/qingxu98/gpt-academic) | After logging in to huggingface, copy [this space](https://huggingface.co/spaces/qingxu98/gpt-academic)
-... | ...
-
    - - -- New interface (switch between "left-right layout" and "up-down layout" by modifying the LAYOUT option in config.py) -
    - -
    - - -- All buttons are dynamically generated by reading functional.py and can add custom functionality at will, freeing up clipboard -
    - -
    - -- Proofreading / correcting -
    - -
    - -- If the output contains formulas, it will be displayed in both the tex form and the rendering form at the same time, which is convenient for copying and reading -
    - -
    - -- Don't want to read the project code? Just take the whole project to chatgpt -
    - -
    - -- Multiple major language model mixing calls (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
    - -
    - -Multiple major language model mixing call [huggingface beta version](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (the huggingface version does not support chatglm) - - ---- - -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. Download project -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configure API_KEY and proxy settings - - -In `config.py`, configure the overseas Proxy and OpenAI API KEY as follows: -``` -1. If you are in China, you need to set up an overseas proxy to use the OpenAI API smoothly. Please read config.py carefully for setup details (1. Modify USE_PROXY to True; 2. Modify proxies according to the instructions). -2. Configure the OpenAI API KEY. You need to register and obtain an API KEY on the OpenAI website. Once you get the API KEY, you can configure it in the config.py file. -3. Issues related to proxy networks (network timeouts, proxy failures) are summarized at https://github.com/binary-husky/chatgpt_academic/issues/1 -``` -(P.S. When the program runs, it will first check whether there is a private configuration file named `config_private.py` and use the same-name configuration in `config.py` to overwrite it. Therefore, if you can understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py` and transfer (copy) the configuration in `config.py` to` config_private.py`. `config_private.py` is not controlled by git and can make your privacy information more secure.)) - - -3. Install dependencies -```sh -# (Option One) Recommended -python -m pip install -r requirements.txt - -# (Option Two) If you use anaconda, the steps are similar: -# (Option Two.1) conda create -n gptac_venv python=3.11 -# (Option Two.2) conda activate gptac_venv -# (Option Two.3) python -m pip install -r requirements.txt - -# Note: Use official pip source or Ali pip source. Other pip sources (such as some university pips) may have problems, and temporary replacement methods are as follows: -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -If you need to support Tsinghua ChatGLM, you need to install more dependencies (if you are not familiar with python or your computer configuration is not good, we recommend not to try): -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. Run -```sh -python main.py -``` - -5. Test function plugins -``` -- Test Python project analysis - In the input area, enter `./crazy_functions/test_project/python/dqn`, and then click "Analyze the entire Python project" -- Test self-code interpretation - Click "[Multithreading Demo] Interpretation of This Project Itself (Source Code Interpretation)" -- Test experimental function template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions. - Click "[Function Plugin Template Demo] Today in History" -- There are more functions to choose from in the function plugin area drop-down menu. -``` - -## Installation-Method 2: Use Docker (Linux) - -1. ChatGPT only (recommended for most people) -``` sh -# download project -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# configure overseas Proxy and OpenAI API KEY -Edit config.py with any text editor -# Install -docker build -t gpt-academic . 
-# Run -docker run --rm -it --net=host gpt-academic - -# Test function plug-in -## Test function plugin template function (requires gpt to answer what happened today in history). You can use this function as a template to implement more complex functions. -Click "[Function Plugin Template Demo] Today in History" -## Test Abstract Writing for Latex Projects -Enter ./crazy_functions/test_project/latex/attention in the input area, and then click "Read Tex Paper and Write Abstract" -## Test Python Project Analysis -Enter ./crazy_functions/test_project/python/dqn in the input area and click "Analyze the entire Python project." - -More functions are available in the function plugin area drop-down menu. -``` - -2. ChatGPT+ChatGLM (requires strong familiarity with docker + strong computer configuration) - -``` sh -# Modify dockerfile -cd docs && nano Dockerfile+ChatGLM -# How to build | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs) -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# How to run | 如何运行 (1) 直接运行: -docker run --rm -it --net=host --gpus=all gpt-academic -# How to run | 如何运行 (2) 我想运行之前进容器做一些调整: -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - - -## Installation-Method 3: Other Deployment Methods - -1. Remote Cloud Server Deployment -Please visit [Deployment Wiki-1] (https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. Use WSL2 (Windows Subsystem for Linux) -Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## Installation-Proxy Configuration -### Method 1: Conventional method -[Configure Proxy](https://github.com/binary-husky/chatgpt_academic/issues/1) - -### Method Two: Step-by-step tutorial for newcomers -[Step-by-step tutorial for newcomers](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - ---- - -## Customizing Convenient Buttons (Customizing Academic Shortcuts) -Open `core_functional.py` with any text editor and add an item as follows, then restart the program (if the button has been successfully added and visible, both the prefix and suffix support hot modification without the need to restart the program to take effect). For example: -``` -"Super English to Chinese translation": { - # Prefix, which will be added before your input. For example, to describe your requirements, such as translation, code interpretation, polishing, etc. - "Prefix": "Please translate the following content into Chinese and use a markdown table to interpret the proprietary terms in the text one by one:\n\n", - - # Suffix, which will be added after your input. For example, combined with the prefix, you can put your input content in quotes. - "Suffix": "", -}, -``` -
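A quick note on the configuration override described in Installation-Method 1, step 2: at startup the program looks for `config_private.py` first, and any setting defined there takes precedence over the same-named setting in `config.py`. A minimal `config_private.py` might look like the sketch below. This is an illustrative example only; the key names (`API_KEY`, `USE_PROXY`, `proxies`) and the proxy address are assumptions based on the configuration instructions above, so verify them against your own copy of `config.py`.

```python
# config_private.py -- illustrative sketch, not part of the repository.
# Any value set here overrides the value with the same name in config.py.

# Your OpenAI API key (obtained from the OpenAI website).
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"

# Enable the overseas proxy if you need one to reach the OpenAI API.
USE_PROXY = True
proxies = {
    # Assumed example: a local SOCKS5 proxy listening on port 11284.
    "http":  "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
```

Because `config_private.py` is not controlled by git, keeping your key and proxy settings there avoids committing private information by accident.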
    - -
    - ---- - - -## Some Function Displays - -### Image Display: - - -You are a professional academic paper translator. - -
    - -
    - -### If a program can understand and analyze itself: - -
    - -
    - -
    - -
    - -### Analysis of any Python/C++ project: -
    - -
    - -
    - -
    - -### One-click reading comprehension and summary generation for LaTeX papers -
    - -
    - -### Automatic report generation -
    - - - -
    - -### Modular functional design -
    - - -
    - -### Source code translation to English - -
    - -
    - -## Todo and version planning: -- version 3.2+ (todo): Function plugin supports more parameter interfaces -- version 3.1: Support for inquiring multiple GPT models at the same time! Support for api2d, support for multiple apikeys load balancing -- version 3.0: Support for chatglm and other small llms -- version 2.6: Refactored the plugin structure, improved interactivity, added more plugins -- version 2.5: Self-updating, solves the problem of text being too long and token overflowing when summarizing large project source code -- version 2.4: (1) Added PDF full text translation function; (2) Added function to switch input area position; (3) Added vertical layout option; (4) Multi-threaded function plugin optimization. -- version 2.3: Enhanced multi-threaded interactivity -- version 2.2: Function plugin supports hot reloading -- version 2.1: Foldable layout -- version 2.0: Introduction of modular function plugins -- version 1.0: Basic functions - -## Reference and learning - -``` -The code design of this project has referenced many other excellent projects, including: - -# Reference project 1: Borrowed many tips from ChuanhuChatGPT -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Reference project 2: Tsinghua ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/hdhzk/bingo/src/components/ui/alert-dialog.tsx b/spaces/hdhzk/bingo/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/hdhzk/bingo/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
    - {children} -
    -
    -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
    -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/hekbobo/bingo/next.config.js b/spaces/hekbobo/bingo/next.config.js deleted file mode 100644 index 0e6ccd7fbc91d0459eaaff3e968ce0556789c605..0000000000000000000000000000000000000000 --- a/spaces/hekbobo/bingo/next.config.js +++ /dev/null @@ -1,38 +0,0 @@ -/** @type {import('next').NextConfig} */ -const nextConfig = { - // output: 'export', - // assetPrefix: '.', - webpack: (config, { isServer }) => { - if (!isServer) { - config.resolve = { - ...config.resolve, - fallback: { - 'bufferutil': false, - 'utf-8-validate': false, - http: false, - https: false, - stream: false, - // fixes proxy-agent dependencies - net: false, - dns: false, - tls: false, - assert: false, - // fixes next-i18next dependencies - path: false, - fs: false, - // fixes mapbox dependencies - events: false, - // fixes sentry dependencies - process: false - } - }; - } - config.module.exprContextCritical = false; - - return config; - }, -} - -module.exports = (...args) => { - return nextConfig -} diff --git a/spaces/hezhaoqia/vits-simple-api/vits/text/japanese.py b/spaces/hezhaoqia/vits-simple-api/vits/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/hezhaoqia/vits-simple-api/vits/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - 
('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py deleted file mode 100644 index 
6940565354bff4774e037783014bf5aaf05746f8..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py +++ /dev/null @@ -1,494 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import shutil -from collections import OrderedDict -from copy import deepcopy - -import nnunet -import numpy as np -from batchgenerators.utilities.file_and_folder_operations import * -from nnunet.configuration import default_num_threads -from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer -from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2 -from nnunet.experiment_planning.utils import create_lists_from_splitted_dataset -from nnunet.network_architecture.generic_UNet import Generic_UNet -from nnunet.paths import * -from nnunet.preprocessing.cropping import get_case_identifier_from_npz -from nnunet.training.model_restore import recursive_find_python_class - - -class ExperimentPlanner(object): - def __init__(self, folder_with_cropped_data, preprocessed_output_folder): - self.folder_with_cropped_data = folder_with_cropped_data - self.preprocessed_output_folder = preprocessed_output_folder - self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, ".npz", True) - - self.preprocessor_name = "GenericPreprocessor" - - assert isfile(join(self.folder_with_cropped_data, "dataset_properties.pkl")), \ - "folder_with_cropped_data must contain dataset_properties.pkl" - self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, "dataset_properties.pkl")) - - self.plans_per_stage = OrderedDict() - self.plans = OrderedDict() - self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixed_plans_3D.pkl") - self.data_identifier = default_data_identifier - - self.transpose_forward = [0, 1, 2] - self.transpose_backward = [0, 1, 2] - - self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D - self.unet_max_num_filters = 320 - self.unet_max_numpool = 999 - self.unet_min_batch_size = 2 - self.unet_featuremap_min_edge_length = 4 - - self.target_spacing_percentile = 50 - self.anisotropy_threshold = 3 - self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4 # 1/4 of a patient - self.batch_size_covers_max_percent_of_dataset = 0.05 # all samples in the batch together cannot cover more - # than 5% of the entire dataset - - self.conv_per_stage = 2 - - def get_target_spacing(self): - spacings = self.dataset_properties['all_spacings'] - - # target = np.median(np.vstack(spacings), 0) - # if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing - # uncomment after mystery task submission - """worst_spacing_axis = np.argmax(target) - if max(target) > (2.5 * min(target)): - spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] - target_spacing_of_that_axis = 
np.percentile(spacings_of_that_axis, 5) - target[worst_spacing_axis] = target_spacing_of_that_axis""" - - target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0) - return target - - def save_my_plans(self): - with open(self.plans_fname, 'wb') as f: - pickle.dump(self.plans, f) - - def load_my_plans(self): - self.plans = load_pickle(self.plans_fname) - - self.plans_per_stage = self.plans['plans_per_stage'] - self.dataset_properties = self.plans['dataset_properties'] - - self.transpose_forward = self.plans['transpose_forward'] - self.transpose_backward = self.plans['transpose_backward'] - - def determine_postprocessing(self): - pass - """ - Spoiler: This is unused, postprocessing was removed. Ignore it. - :return: - print("determining postprocessing...") - - props_per_patient = self.dataset_properties['segmentation_props_per_patient'] - - all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()] - all_region_keys = list(set(all_region_keys)) - - only_keep_largest_connected_component = OrderedDict() - - for r in all_region_keys: - all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()] - only_keep_largest_connected_component[tuple(r)] = all(all_results) - - print("Postprocessing: only_keep_largest_connected_component", only_keep_largest_connected_component) - - all_classes = self.dataset_properties['all_classes'] - classes = [i for i in all_classes if i > 0] - - props_per_patient = self.dataset_properties['segmentation_props_per_patient'] - - min_size_per_class = OrderedDict() - for c in classes: - all_num_voxels = [] - for k in props_per_patient.keys(): - all_num_voxels.append(props_per_patient[k]['volume_per_class'][c]) - if len(all_num_voxels) > 0: - min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR - else: - min_size_per_class[c] = np.inf - - min_region_size_per_class = OrderedDict() - for c in classes: - region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]] - if len(region_sizes) > 0: - min_region_size_per_class[c] = min(region_sizes) - # we don't need that line but better safe than sorry, right? - min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c]) - else: - min_region_size_per_class[c] = 0 - - print("Postprocessing: min_size_per_class", min_size_per_class) - print("Postprocessing: min_region_size_per_class", min_region_size_per_class) - return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class - """ - - def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases, - num_modalities, num_classes): - """ - Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is - opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that - for some organ of interest the acquisition method will most likely be chosen such that the field of view and - voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated - for some modalities with anisotropy (cine MRI) but we will have t live with that. 
In future experiments I - will try to 1) base input patch size match aspect ratio of input size in mm (instead of voxels) and 2) to - try to enforce that we see the same 'distance' in all directions (try to maintain equal size in mm of patch) - - The patches created here attempt keep the aspect ratio of the new_median_shape - - :param current_spacing: - :param original_spacing: - :param original_shape: - :param num_cases: - :return: - """ - new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int) - dataset_num_voxels = np.prod(new_median_shape) * num_cases - - # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t - # input_patch_size = new_median_shape - - # compute how many voxels are one mm - input_patch_size = 1 / np.array(current_spacing) - - # normalize voxels per mm - input_patch_size /= input_patch_size.mean() - - # create an isotropic patch of size 512x512x512mm - input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value - input_patch_size = np.round(input_patch_size).astype(int) - - # clip it to the median shape of the dataset because patches larger then that make not much sense - input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)] - - network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \ - shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size, - self.unet_featuremap_min_edge_length, - self.unet_max_numpool, - current_spacing) - - ref = Generic_UNet.use_this_for_batch_size_computation_3D - here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, - self.unet_base_num_features, - self.unet_max_num_filters, num_modalities, - num_classes, - pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage) - while here > ref: - axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1] - - tmp = deepcopy(new_shp) - tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced] - _, _, _, _, shape_must_be_divisible_by_new = \ - get_pool_and_conv_props_poolLateV2(tmp, - self.unet_featuremap_min_edge_length, - self.unet_max_numpool, - current_spacing) - new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced] - - # we have to recompute numpool now: - network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \ - shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp, - self.unet_featuremap_min_edge_length, - self.unet_max_numpool, - current_spacing) - - here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis, - self.unet_base_num_features, - self.unet_max_num_filters, num_modalities, - num_classes, pool_op_kernel_sizes, - conv_per_stage=self.conv_per_stage) - # print(new_shp) - - input_patch_size = new_shp - - batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D # This is what works with 128**3 - batch_size = int(np.floor(max(ref / here, 1) * batch_size)) - - # check if batch size is too large - max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels / - np.prod(input_patch_size, dtype=np.int64)).astype(int) - max_batch_size = max(max_batch_size, self.unet_min_batch_size) - batch_size = max(1, min(batch_size, max_batch_size)) - - do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[ - 0]) > self.anisotropy_threshold - - plan = { - 'batch_size': batch_size, - 'num_pool_per_axis': 
network_num_pool_per_axis, - 'patch_size': input_patch_size, - 'median_patient_size_in_voxels': new_median_shape, - 'current_spacing': current_spacing, - 'original_spacing': original_spacing, - 'do_dummy_2D_data_aug': do_dummy_2D_data_aug, - 'pool_op_kernel_sizes': pool_op_kernel_sizes, - 'conv_kernel_sizes': conv_kernel_sizes, - } - return plan - - def plan_experiment(self): - use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm() - print("Are we using the nonzero mask for normalizaion?", use_nonzero_mask_for_normalization) - spacings = self.dataset_properties['all_spacings'] - sizes = self.dataset_properties['all_sizes'] - - all_classes = self.dataset_properties['all_classes'] - modalities = self.dataset_properties['modalities'] - num_modalities = len(list(modalities.keys())) - - target_spacing = self.get_target_spacing() - new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)] - - max_spacing_axis = np.argmax(target_spacing) - remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis] - self.transpose_forward = [max_spacing_axis] + remaining_axes - self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)] - - # we base our calculations on the median shape of the datasets - median_shape = np.median(np.vstack(new_shapes), 0) - print("the median shape of the dataset is ", median_shape) - - max_shape = np.max(np.vstack(new_shapes), 0) - print("the max shape in the dataset is ", max_shape) - min_shape = np.min(np.vstack(new_shapes), 0) - print("the min shape in the dataset is ", min_shape) - - print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck") - - # how many stages will the image pyramid have? - self.plans_per_stage = list() - - target_spacing_transposed = np.array(target_spacing)[self.transpose_forward] - median_shape_transposed = np.array(median_shape)[self.transpose_forward] - print("the transposed median shape of the dataset is ", median_shape_transposed) - - print("generating configuration for 3d_fullres") - self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed, - median_shape_transposed, - len(self.list_of_cropped_npz_files), - num_modalities, len(all_classes) + 1)) - - # thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-) - # if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \ - # architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0: - architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64) - if np.prod(median_shape) / architecture_input_voxels_here < \ - self.how_much_of_a_patient_must_the_network_see_at_stage0: - more = False - else: - more = True - - if more: - print("generating configuration for 3d_lowres") - # if we are doing more than one stage then we want the lowest stage to have exactly - # HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default so the number of voxels in the - # median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by - # default). Problem is that we are downsampling higher resolution axes before we start downsampling the - # out-of-plane axis. 
We could probably/maybe do this analytically but I am lazy, so here - # we do it the dumb way - - lowres_stage_spacing = deepcopy(target_spacing) - num_voxels = np.prod(median_shape, dtype=np.float64) - while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here: - max_spacing = max(lowres_stage_spacing) - if np.any((max_spacing / lowres_stage_spacing) > 2): - lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \ - *= 1.01 - else: - lowres_stage_spacing *= 1.01 - num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.float64) - - lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward] - new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed, - median_shape_transposed, - len(self.list_of_cropped_npz_files), - num_modalities, len(all_classes) + 1) - architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64) - if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod( - self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64): - self.plans_per_stage.append(new) - - self.plans_per_stage = self.plans_per_stage[::-1] - self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict - - print(self.plans_per_stage) - print("transpose forward", self.transpose_forward) - print("transpose backward", self.transpose_backward) - - normalization_schemes = self.determine_normalization_scheme() - only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None - # removed training data based postprocessing. This is deprecated - - # these are independent of the stage - plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities, - 'modalities': modalities, 'normalization_schemes': normalization_schemes, - 'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files, - 'original_spacings': spacings, 'original_sizes': sizes, - 'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes), - 'all_classes': all_classes, 'base_num_features': self.unet_base_num_features, - 'use_mask_for_norm': use_nonzero_mask_for_normalization, - 'keep_only_largest_region': only_keep_largest_connected_component, - 'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class, - 'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward, - 'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage, - 'preprocessor_name': self.preprocessor_name, - 'conv_per_stage': self.conv_per_stage, - } - - self.plans = plans - self.save_my_plans() - - def determine_normalization_scheme(self): - schemes = OrderedDict() - modalities = self.dataset_properties['modalities'] - num_modalities = len(list(modalities.keys())) - - for i in range(num_modalities): - if modalities[i] == "CT" or modalities[i] == 'ct': - schemes[i] = "CT" - elif modalities[i] == 'noNorm': - schemes[i] = "noNorm" - else: - schemes[i] = "nonCT" - return schemes - - def save_properties_of_cropped(self, case_identifier, properties): - with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'wb') as f: - pickle.dump(properties, f) - - def load_properties_of_cropped(self, case_identifier): - with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'rb') 
as f: - properties = pickle.load(f) - return properties - - def determine_whether_to_use_mask_for_norm(self): - # only use the nonzero mask for normalization of the cropping based on it resulted in a decrease in - # image size (this is an indication that the data is something like brats/isles and then we want to - # normalize in the brain region only) - modalities = self.dataset_properties['modalities'] - num_modalities = len(list(modalities.keys())) - use_nonzero_mask_for_norm = OrderedDict() - - for i in range(num_modalities): - if "CT" in modalities[i]: - use_nonzero_mask_for_norm[i] = False - else: - all_size_reductions = [] - for k in self.dataset_properties['size_reductions'].keys(): - all_size_reductions.append(self.dataset_properties['size_reductions'][k]) - - if np.median(all_size_reductions) < 3 / 4.: - print("using nonzero mask for normalization") - use_nonzero_mask_for_norm[i] = True - else: - print("not using nonzero mask for normalization") - use_nonzero_mask_for_norm[i] = False - - for c in self.list_of_cropped_npz_files: - case_identifier = get_case_identifier_from_npz(c) - properties = self.load_properties_of_cropped(case_identifier) - properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm - self.save_properties_of_cropped(case_identifier, properties) - use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm - return use_nonzero_mask_for_normalization - - def write_normalization_scheme_to_patients(self): - """ - This is used for test set preprocessing - :return: - """ - for c in self.list_of_cropped_npz_files: - case_identifier = get_case_identifier_from_npz(c) - properties = self.load_properties_of_cropped(case_identifier) - properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm'] - self.save_properties_of_cropped(case_identifier, properties) - - def run_preprocessing(self, num_threads): - if os.path.isdir(join(self.preprocessed_output_folder, "gt_segmentations")): - shutil.rmtree(join(self.preprocessed_output_folder, "gt_segmentations")) - shutil.copytree(join(self.folder_with_cropped_data, "gt_segmentations"), - join(self.preprocessed_output_folder, "gt_segmentations")) - normalization_schemes = self.plans['normalization_schemes'] - use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm'] - intensityproperties = self.plans['dataset_properties']['intensityproperties'] - preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")], - self.preprocessor_name, current_module="nnunet.preprocessing") - assert preprocessor_class is not None - preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization, - self.transpose_forward, - intensityproperties) - target_spacings = [i["current_spacing"] for i in self.plans_per_stage.values()] - if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)): - num_threads = (default_num_threads, num_threads) - elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)): - num_threads = num_threads[-1] - preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder, - self.plans['data_identifier'], num_threads) - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("-t", "--task_ids", nargs="+", help="list of int") - parser.add_argument("-p", action="store_true", help="set this if you actually want to run the preprocessing. 
If " - "this is not set then this script will only create the plans file") - parser.add_argument("-tl", type=int, required=False, default=8, help="num_threads_lowres") - parser.add_argument("-tf", type=int, required=False, default=8, help="num_threads_fullres") - - args = parser.parse_args() - task_ids = args.task_ids - run_preprocessing = args.p - tl = args.tl - tf = args.tf - - tasks = [] - for i in task_ids: - i = int(i) - candidates = subdirs(nnUNet_cropped_data, prefix="Task%03.0d" % i, join=False) - assert len(candidates) == 1 - tasks.append(candidates[0]) - - for t in tasks: - try: - print("\n\n\n", t) - cropped_out_dir = os.path.join(nnUNet_cropped_data, t) - preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t) - splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t) - lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task) - - dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False) - _ = dataset_analyzer.analyze_dataset() # this will write output files that will be used by the ExperimentPlanner - - maybe_mkdir_p(preprocessing_output_dir_this_task) - shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task) - shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task) - - threads = (tl, tf) - - print("number of threads: ", threads, "\n") - - exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task) - exp_planner.plan_experiment() - if run_preprocessing: - exp_planner.run_preprocessing(threads) - except Exception as e: - print(e) diff --git a/spaces/huggingchat/chat-ui/src/lib/types/Template.ts b/spaces/huggingchat/chat-ui/src/lib/types/Template.ts deleted file mode 100644 index 662b41a9fea9d11ed015815c3862eb667cd1b137..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/lib/types/Template.ts +++ /dev/null @@ -1,14 +0,0 @@ -import type { Message } from "./Message"; - -export type LegacyParamatersTemplateInput = { - preprompt?: string; - userMessageToken: string; - userMessageEndToken: string; - assistantMessageToken: string; - assistantMessageEndToken: string; -}; - -export type ChatTemplateInput = { - messages: Pick[]; - preprompt?: string; -}; diff --git a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/perlin2d.py b/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/perlin2d.py deleted file mode 100644 index 917c2c6511f5f1a75a284be9a9fef3248d82f2f9..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/stable-diffusion-multiplayer/stablediffusion-infinity/perlin2d.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -########## -# https://stackoverflow.com/questions/42147776/producing-2d-perlin-noise-with-numpy/42154921#42154921 -def perlin(x, y, seed=0): - # permutation table - np.random.seed(seed) - p = np.arange(256, dtype=int) - np.random.shuffle(p) - p = np.stack([p, p]).flatten() - # coordinates of the top-left - xi, yi = x.astype(int), y.astype(int) - # internal coordinates - xf, yf = x - xi, y - yi - # fade factors - u, v = fade(xf), fade(yf) - # noise components - n00 = gradient(p[p[xi] + yi], xf, yf) - n01 = gradient(p[p[xi] + yi + 1], xf, yf - 1) - n11 = gradient(p[p[xi + 1] + yi + 1], xf - 1, yf - 1) - n10 = gradient(p[p[xi + 1] + yi], xf - 1, yf) - # combine noises - x1 = lerp(n00, n10, u) - x2 = lerp(n01, n11, u) # FIX1: I was using n10 instead of n01 - return lerp(x1, x2, v) # FIX2: I 
also had to reverse x1 and x2 here - - -def lerp(a, b, x): - "linear interpolation" - return a + x * (b - a) - - -def fade(t): - "6t^5 - 15t^4 + 10t^3" - return 6 * t ** 5 - 15 * t ** 4 + 10 * t ** 3 - - -def gradient(h, x, y): - "grad converts h to the right gradient vector and return the dot product with (x,y)" - vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]]) - g = vectors[h % 4] - return g[:, :, 0] * x + g[:, :, 1] * y - - -########## \ No newline at end of file diff --git a/spaces/huggingface-projects/wordalle/static/_app/immutable/pages/__layout.svelte-53f051f3.js b/spaces/huggingface-projects/wordalle/static/_app/immutable/pages/__layout.svelte-53f051f3.js deleted file mode 100644 index 31522b2550052c9c1d767ad8731b2ebbb397c48f..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/wordalle/static/_app/immutable/pages/__layout.svelte-53f051f3.js +++ /dev/null @@ -1 +0,0 @@ -import{S as n,i as s,s as c,F as l,G as w,H as p,I as d,q as b,o as g}from"../chunks/index-86f4d6c3.js";function m(o){let r;const i=o[1].default,e=l(i,o,o[0],null);return{c(){e&&e.c()},l(t){e&&e.l(t)},m(t,a){e&&e.m(t,a),r=!0},p(t,[a]){e&&e.p&&(!r||a&1)&&w(e,i,t,t[0],r?d(i,t[0],a,null):p(t[0]),null)},i(t){r||(b(e,t),r=!0)},o(t){g(e,t),r=!1},d(t){e&&e.d(t)}}}function u(o,r,i){let{$$slots:e={},$$scope:t}=r;return o.$$set=a=>{"$$scope"in a&&i(0,t=a.$$scope)},[t,e]}class f extends n{constructor(r){super(),s(this,r,u,m,c,{})}}export{f as default}; diff --git a/spaces/hysts/BLIP-Diffusion/settings.py b/spaces/hysts/BLIP-Diffusion/settings.py deleted file mode 100644 index 81571fb026b81d8f048b5f2a68102e2dc6deca02..0000000000000000000000000000000000000000 --- a/spaces/hysts/BLIP-Diffusion/settings.py +++ /dev/null @@ -1,5 +0,0 @@ -import os - -MAX_INFERENCE_STEPS = 50 -DEFAULT_NEGATIVE_PROMPT = "over-exposure, under-exposure, saturated, duplicate, out of frame, lowres, cropped, worst quality, low quality, jpeg artifacts, morbid, mutilated, out of frame, ugly, bad anatomy, bad proportions, deformed, blurry, duplicate" -CACHE_EXAMPLES = os.getenv("CACHE_EXAMPLES") == "1" diff --git a/spaces/hzwluoye/gpt4/client/css/settings.css b/spaces/hzwluoye/gpt4/client/css/settings.css deleted file mode 100644 index 0a409f27d6d185c90ae76d95f64b457e140ae8d9..0000000000000000000000000000000000000000 --- a/spaces/hzwluoye/gpt4/client/css/settings.css +++ /dev/null @@ -1,44 +0,0 @@ -.settings-container { - color: var(--colour-2); - margin: 24px 0px 8px 0px; - justify-content: center; -} - -.settings-container span { - font-size: 0.875rem; - margin: 0; -} - -.settings-container label { - width: 24px; - height: 16px; -} - -.settings-container .field { - justify-content: space-between; -} - -.settings-container .checkbox input + label, -.settings-container .checkbox input:checked + label:after { - background: var(--colour-1); -} - -.settings-container .checkbox input + label:after, -.settings-container .checkbox input:checked + label { - background: var(--colour-3); -} - -.settings-container .checkbox label:after { - left: 2px; - width: 10px; - height: 10px; -} - -.settings-container .checkbox input:checked + label:after { - left: calc(100% - 2px - 10px); -} - -.settings-container .dropdown { - padding: 4px 8px; - font-size: 0.75rem; -} diff --git a/spaces/inamXcontru/PoeticTTS/Bank soal pai smk kelas x semester ii updated - Soal dan Kunci Jawaban PAI SMK Kelas X Semester 2.md b/spaces/inamXcontru/PoeticTTS/Bank soal pai smk kelas x semester ii updated - Soal dan Kunci Jawaban PAI SMK Kelas X Semester 2.md deleted 
file mode 100644 index 7e1476407a6b2c1b8163fbc43e0a2101a38b2c4e..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Bank soal pai smk kelas x semester ii updated - Soal dan Kunci Jawaban PAI SMK Kelas X Semester 2.md +++ /dev/null @@ -1,8 +0,0 @@ -
    -

    LAMPUNGNESIA.COM - Simak materi soal dan kunci jawaban Pendidikan Agama Islam (PAI) dan Budi Pekerti kelas 10 SMA halaman 142, 143, 144, 145, 146 Pilihan Ganda dan Esai Kurikulum Merdeka.

    -

    Materi soal dan kunci jawaban PAI dan Budi Pekerti halaman 117, 118, 119, 120 kelas 10 SMA ini merupakan materi dari Bab 4 yakni Asuransi, Bank dan Koperasi Syariah untuk Perekonomian Umat dan Bisnis yang
    Maslahah.

    -

    bank soal pai smk kelas x semester ii | updated


    DOWNLOAD ✔✔✔ https://gohhs.com/2uz367



    -

    Penilaian Akhir Tahun adalah kegiatan uji kompetensi untuk pengumpulan dan pengolahan informasi data sebagai tolak ukur pencapaian hasil belajar peserta didik untuk menempuh ke tingkat kelas yang lebih atas. Kegiatan PAT/UKK ini merupakan salah satu agenda rutin yang dilaksanakan pada setiap tahun di akhir semester genap.

    -

    Damayanti, Eva Trifiani (2012)Analisis butir soal evaluasi pembelajaran Pendidikan Agama Islam kelas XI IPA & IPS di SMAN 3 Probolinggo. Undergraduate thesis, Universitas Islam Negeri Maulana Malik Ibrahim.

    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/innnky/visinger2-nomidi/text/npu/symbol_converter.py b/spaces/innnky/visinger2-nomidi/text/npu/symbol_converter.py deleted file mode 100644 index d435894b00764a0eee170cfcade85a616bf60a80..0000000000000000000000000000000000000000 --- a/spaces/innnky/visinger2-nomidi/text/npu/symbol_converter.py +++ /dev/null @@ -1,34 +0,0 @@ -import re -import numpy as np -from text.npu.symbols import * -import os - -# Mappings from symbol to numeric ID and vice versa: -_ttsing_phone_to_id = {p: i for i, p in enumerate(ttsing_phone_set)} -_ttsing_pitch_to_id = {p: i for i, p in enumerate(ttsing_pitch_set)} -_ttsing_slur_to_id = {s: i for i, s in enumerate(ttsing_slur_set)} - -ttsing_phone_to_int = {} -int_to_ttsing_phone = {} -for idx, item in enumerate(ttsing_phone_set): - ttsing_phone_to_int[item] = idx - int_to_ttsing_phone[idx] = item - -ttsing_pitch_to_int = {} -int_to_ttsing_pitch = {} -for idx, item in enumerate(ttsing_pitch_set): - ttsing_pitch_to_int[item] = idx - int_to_ttsing_pitch[idx] = item - -# opencpop -ttsing_opencpop_pitch_to_int = {} -for idx, item in enumerate(ttsing_opencpop_pitch_set): - ttsing_opencpop_pitch_to_int[item] = idx - -ttsing_slur_to_int = {} -int_to_ttsing_slur = {} -for idx, item in enumerate(ttsing_slur_set): - ttsing_slur_to_int[item] = idx - int_to_ttsing_slur[idx] = item - - diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Call Of Duty 3 Psp Iso Download 4shared !EXCLUSIVE!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Call Of Duty 3 Psp Iso Download 4shared !EXCLUSIVE!.md deleted file mode 100644 index b0efb3feeb6cd14c4549b40f88fff4ee7eb6aff7..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Call Of Duty 3 Psp Iso Download 4shared !EXCLUSIVE!.md +++ /dev/null @@ -1,10 +0,0 @@ -

    call of duty 3 psp iso download 4shared


    Download ===== https://urlin.us/2uEwwl



    -
    -. ERAGON by joseluisferna50.iso Call of Duty 3 by joseluisferna50.iso . [PSP] UMD_DUMPER (used to copy UMD games to memory card)~~(TIMET .. SONY-PSP, ↑, Download game UMD_DUMPER (used to copy UMD games to memory card) . -Sony PSP, Games, download games for PSP for free, free games for PSP, download games for PSP, free games for PSP, download, play. . -Umd Dumper · UMD. -Nintendo Wii Udrum - UMD Dumper program for flashing and creating games from UMD media on the Wii U (Wii U) from. -UMD Dumper is a program for copying games via UMD media on the Wii U console from Nintendo. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX X86 X64 __LINK__.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX X86 X64 __LINK__.md deleted file mode 100644 index fadc6a4241c3f01161bdd1f7ecd6fc0d899073b4..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX X86 X64 __LINK__.md +++ /dev/null @@ -1,99 +0,0 @@ - -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 - How to Download and Use the Analog Modeled Compressor Plug-in

    - -

    Are you looking for a high quality analog modeled compressor plug-in that can give your mixes a professional and polished sound? If so, you might want to check out Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64. This plug-in is based on the classic 80's British big console buss compressor that was used on countless hit records and is still revered by many engineers and producers today.

    - -

    In this article, we will show you how to download and use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64. We will also review some of the features and benefits of this plug-in, as well as some tips and tricks to get the best results from it.

    -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64


    DOWNLOAD ✑ ✑ ✑ https://urlin.us/2uExzF



    - -

    What is Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

    - -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is an analog modeled compressor plug-in that has some additional features over the original circuit. It uses the same high quality algorithms used in circuit simulators, but optimised to run fast and features a gorgeous fully scalable user interface.

    - -

    Some of the features and benefits of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 are:

    - -
      -
    • It simulates the classic 80's British big console buss compressor with accurate and responsive dynamics.
    • -
    • It has an ultra-fast attack time of up to 0.01mS and a Range knob which backs off the compression to give incredibly natural sounding attacks and limits the maximum amount of compression applied.
    • -
    • It has external sidechain support and an adjustable sidechain highpass filter that allows you to fine-tune the compression response.
    • -
    • It has Mix and PeakClip controls that add loads of options for dialing in the compression flavour and adding subtle saturation.
    • -
    • It has a low CPU usage and supports both 32-bit and 64-bit systems.
    • -
    • It is compatible with both Windows and Mac OS X platforms and supports both VST and RTAS formats.
    • -
    - -

    How to Download Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

    - -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is a payware product that can be purchased from the official website or from other online vendors. However, if you want to save some money or try it before buying it, you can also use torrent links to get it for free.

    - -

    Torrent links are files that contain information about other files that are shared by users on a peer-to-peer network. To use them, you will need a torrent client software that can download the files from other users. Some of the most popular torrent clients are uTorrent, BitTorrent, and qBittorrent.

    - -

    Once you have a torrent client installed on your computer, you can search for torrent links of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 on various websites. Some of the websites that offer torrent links for audio plug-ins are Rutracker, Get Into PC, and Samplestorrent. However, be careful when downloading torrent files, as they may contain viruses or malware that can harm your computer.

    - -

    After you have found a torrent link of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64, you can open it with your torrent client and start downloading the files. The download speed may vary depending on the number of seeders (users who have the complete file) and leechers (users who are downloading the file) on the network.

    - -

    When the download is complete, you will have a zip file or a folder containing the files of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64. To install it, you will need to extract the files and copy them to your VST or RTAS plug-ins folder. You may also need to activate the plug-in using a serial number or a crack file that is included in the download.

    -

    - -

    How to Use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

    - -

    To use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64, you will need a host application that supports VST or RTAS plug-ins, such as Cubase, Pro Tools, FL Studio, Ableton Live, Logic Pro, Reaper, etc.

    - -

    To load Cytomic - The Glue 1

    -

    What are the Advantages of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

    - -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is not just another compressor plug-in. It has some advantages that make it stand out from the crowd and give it a unique sound and character. Here are some of the advantages of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64:

    - -
      -
    • It is based on the legendary SSL 4000 G Series buss compressor, which is widely regarded as one of the best compressors ever made. It has a smooth and musical sound that can glue your tracks together and add punch and clarity to your mix.
    • -
    • It has a simple and intuitive interface that makes it easy to use and tweak. It has only six knobs and two switches that control all the parameters of the compressor. You can quickly dial in the right settings for any situation and achieve great results.
    • -
    • It has some extra features that enhance its functionality and versatility. It has a Range knob that lets you adjust the amount of compression applied, a Mix knob that lets you blend in some dry signal for parallel compression, and a PeakClip switch that lets you add some subtle distortion for extra warmth and harmonics.
    • -
    • It has a high quality sound that is faithful to the original circuit but also improved for modern use. It uses the same algorithms used in circuit simulators but optimised to run fast and efficiently. It also has a fully scalable user interface that adapts to any screen size and resolution.
    • -
    - -

    How to Get the Best Results from Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

    - -

    Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is a powerful and versatile compressor plug-in that can be used on various types of audio material and genres. However, to get the best results from it, you need to know how to use it properly and creatively. Here are some tips and tricks for using Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64:

    - -
      -
    • Read the manual that comes with the plug-in to learn more about its features and functions.
    • -
    • Use the preset browser to load some of the factory presets or create your own presets and save them for later use.
    • -
    • Use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 on your master buss or subgroups to glue your tracks together and add cohesion and punch to your mix.
    • -
    • Use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 on individual tracks or instruments to control their dynamics and add character and warmth to their sound.
    • -
    • Use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 in parallel with your dry signal to blend in some compression without losing transients or dynamics.
    • -
    • Use Cytomic - The Glue 1 -

      What are the Applications of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

      - -

      Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is a versatile compressor plug-in that can be used for various applications and genres. Whether you are working on rock, pop, hip hop, EDM, or any other style of music, you can use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 to enhance your sound and achieve professional results. Here are some of the applications of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64:

      - -
        -
      • Mastering: You can use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 on your master buss to add cohesion and punch to your final mix. You can also use it to control the dynamics and loudness of your track and make it ready for distribution.
      • -
      • Mixing: You can use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 on your subgroups or individual tracks to balance their levels and glue them together. You can also use it to add character and warmth to your sound and make it more musical.
      • -
      • Sound Design: You can use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 to create interesting and unique sounds by compressing and distorting different sources. You can also use it to create ducking or pumping effects or to make room for other elements in your sound.
      • -
      - -

      What are the Reviews of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64?

      - -

      Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is a highly acclaimed compressor plug-in that has received positive reviews from many users and experts. Here are some of the reviews of Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64:

      - -
        -
      • "The Glue is one of the best compressors I've ever used. It sounds amazing on everything and it's very easy to dial in the right settings. It's a must-have for any producer or engineer." - User review on KVR Audio
      • -
      • "The Glue is a fantastic emulation of the SSL buss compressor that adds some extra features and flexibility. It has a smooth and musical sound that can glue your tracks together and add punch and clarity to your mix." - User review on Gearslutz
      • -
      • "The Glue is a high quality analog modeled compressor plug-in that delivers a realistic and responsive compression with a gorgeous user interface. It has some additional features that make it more versatile and useful than the original circuit." - Expert review on Music Radar
      • -
      - -

      Conclusion

      - -

      Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is an analog modeled compressor plug-in that is based on the classic 80's British big console buss compressor with some additional features. It has a high quality sound that is faithful to the original circuit but also improved for modern use. It has a simple and intuitive interface that makes it easy to use and tweak. It has some extra features that enhance its functionality and versatility.

      - -

      If you want to experience the sound and feel of the legendary SSL buss compressor in your DAW, you can download Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 using torrent links from various websites. However, make sure you have a reliable torrent client software and scan your files for viruses before installing them.

      - -

      We hope this article has helped you learn more about how to download and use Cytomic - The Glue 1 -

      Conclusion

      Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 is an analog modeled compressor plug-in based on the classic '80s British big-console buss compressor, with some additional features. Its sound stays faithful to the original circuit while being refined for modern use, its simple and intuitive interface makes it easy to use and tweak, and its extra features add functionality and versatility.

      If you want to experience the sound and feel of the legendary SSL buss compressor in your DAW, you can download Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64 via torrent links from various websites. Make sure you have a reliable torrent client and scan the files for viruses before installing them.

      We hope this article has helped you learn more about how to download and use Cytomic - The Glue 1.2.1 VST.RTAS WIN.OSX x86 x64. It is a powerful and versatile tool that can enhance your sound and help you achieve professional results. Try it out and see for yourself how it can glue your tracks together and add punch and clarity to your mix.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Evalaze Private Professional Edition Cracked [REPACK].md b/spaces/inplisQlawa/anything-midjourney-v4-1/Evalaze Private Professional Edition Cracked [REPACK].md deleted file mode 100644 index 350f5182a851771e92ffbc776d97fffdea4b3bd3..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Evalaze Private Professional Edition Cracked [REPACK].md +++ /dev/null @@ -1,60 +0,0 @@ -

      Evalaze Private Professional Edition Cracked


      Download File: https://urlin.us/2uEy5U



      -
      -iting-and-more - -.... ://coub.com/stories/3051241-french-larousse-dictionary-apk-crack-better-editing-and-moreQ: - -How to use regex to match similar looking strings in python? - -I am trying to convert a CSV file to sqlite db. This is what my CSV looks like: - -Name,Lastname,Email,Address,PostalCode,Telephone - -"Jill", "Smith", "jsmith@h1.com", "test1234", "zip1234", "555-1234" - -"Josh", "Smith", "jsmith@h2.com", "test1234", "zip1234", "555-1234" - -"Tom", "Smith", "jsmith@h3.com", "test1234", "zip1234", "555-1234" - -"Jack", "Smith", "jsmith@h3.com", "test1234", "zip1234", "555-1234" - -I am trying to build a sqlite db with this table: - -ID | Name | Lastname | Email | Address | PostalCode | Telephone - -However, I have no idea how to extract data from each of these records. - -This is what I have so far, using re module in Python: - -import re - -data = open("C:\\Users\\Downloads\\EC2_Store.csv") - -line = data.readline() - -for line in data: - - list = line.split(",") - - if re.search("[a-z]2", list[1]): - - Name = list[1] - - #print(Name) - - elif re.search("[a-z]2", list[4]): - - Lastname = list[4] - - #print(Lastname) - - elif re.search("[a-z]2", list[3]): - - Email = list[3] - - #print(Email) - - elif re.search(" 4fefd39f24
      -
      -
      -
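      The fragment above tries to pull fields out of each CSV row with hand-rolled regular expressions. If the goal is simply to load such a file into a SQLite table, a minimal sketch using only Python's standard csv and sqlite3 modules does the job without regex; the file name, table name, and column names below are assumptions taken from the fragment.

```python
import csv
import sqlite3

# Column names as they appear in the fragment's CSV header (assumed).
COLUMNS = ["Name", "Lastname", "Email", "Address", "PostalCode", "Telephone"]

conn = sqlite3.connect("ec2_store.db")
conn.execute(
    "CREATE TABLE IF NOT EXISTS contacts ("
    "ID INTEGER PRIMARY KEY AUTOINCREMENT, "
    "Name TEXT, Lastname TEXT, Email TEXT, Address TEXT, PostalCode TEXT, Telephone TEXT)"
)

# skipinitialspace lets the reader cope with the `"Jill", "Smith", ...` style rows.
with open("EC2_Store.csv", newline="", encoding="utf-8") as f:
    reader = csv.DictReader(f, skipinitialspace=True)
    rows = [tuple(row[col].strip() for col in COLUMNS) for row in reader]

conn.executemany(
    "INSERT INTO contacts (Name, Lastname, Email, Address, PostalCode, Telephone) "
    "VALUES (?, ?, ?, ?, ?, ?)",
    rows,
)
conn.commit()
conn.close()
```

      csv.DictReader handles the quoting and per-field whitespace, so no pattern matching is needed to tell names, emails, and postal codes apart.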

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Grupo Corcel Negro Discografia _VERIFIED_.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Grupo Corcel Negro Discografia _VERIFIED_.md deleted file mode 100644 index 62172e2ff69d401e9ed342d4c3e5417e786369dd..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Grupo Corcel Negro Discografia _VERIFIED_.md +++ /dev/null @@ -1,14 +0,0 @@ -

      Grupo Corcel Negro Discografia


      Download File ••• https://urlin.us/2uEwg3



      - -Grupo corcel negro discografia DOWNLOAD: (listen to Corcel Negro for free on Deezer: discography, top songs and playlists. Comment5, free YouTube music download, how to use and activate, how to use, I hope you like it.) -download, -It was a struggle to stay in the company of the others, and he lost his first wife and children. -However, he found support in the community for downloading YouTube to a laptop for free via torrent. -He was born in Spain in 1934. -his second wife, -T - Detected malware in Download Files! -download. -Download. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Andy Lau Full Throttle Movie Download.md b/spaces/inreVtussa/clothingai/Examples/Andy Lau Full Throttle Movie Download.md deleted file mode 100644 index 03661ddd41f3d4bf46ac0e3d832667f7d5be603c..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Andy Lau Full Throttle Movie Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Andy Lau Full Throttle Movie Download


      Download File: https://tiurll.com/2uCkBz



      -
      - 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/ismot/1702t1/models/__init__.py b/spaces/ismot/1702t1/models/__init__.py deleted file mode 100644 index 6ea2bf591294feef8e5c6547a05e7ccd9a5a3697..0000000000000000000000000000000000000000 --- a/spaces/ismot/1702t1/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from models.lgt_net import LGT_Net diff --git a/spaces/itacaiunas/remove-photo-object/Dockerfile b/spaces/itacaiunas/remove-photo-object/Dockerfile deleted file mode 100644 index 995e8e56f44f9160085b7699985c953b89c9caa0..0000000000000000000000000000000000000000 --- a/spaces/itacaiunas/remove-photo-object/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM pytorch/pytorch:latest - -WORKDIR /app - -COPY . . - -RUN pip install -r requirements.txt - -CMD [ "streamlit", "run", "app.py" ] \ No newline at end of file diff --git a/spaces/ivotai/VITS-Umamusume-voice-synthesizer/app.py b/spaces/ivotai/VITS-Umamusume-voice-synthesizer/app.py deleted file mode 100644 index c9bfb000af1af5ec0a745290b95431df58ad7a61..0000000000000000000000000000000000000000 --- a/spaces/ivotai/VITS-Umamusume-voice-synthesizer/app.py +++ /dev/null @@ -1,256 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -import logging - -logging.getLogger('numba').setLevel(logging.WARNING) -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio.processing_utils as gr_processing_utils -import ONNXVITS_infer -import models -from text import text_to_sequence, _clean_text -from text.symbols import symbols -from mel_processing import spectrogram_torch -import psutil -from datetime import datetime - -language_marks = { - "Japanese": "", - "日本語": "[JA]", - "简体中文": "[ZH]", - "English": "[EN]", - "Mix": "", -} - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, language, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - if language is not None: - text = language_marks[language] + text + language_marks[language] - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = 
y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False) - spec_lengths = LongTensor([spec.size(-1)]) - sid_src = LongTensor([original_speaker_id]) - sid_tgt = LongTensor([target_speaker_id]) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -models_tts = [] -models_vc = [] -models_info = [ - { - "title": "Trilingual", - "languages": ['日本語', '简体中文', 'English', 'Mix'], - "description": """ - This model is trained on a mix up of Umamusume, Genshin Impact, Sanoba Witch & VCTK voice data to learn multilanguage. - All characters can speak English, Chinese & Japanese.\n\n - To mix multiple languages in a single sentence, wrap the corresponding part with language tokens - ([JA] for Japanese, [ZH] for Chinese, [EN] for English), as shown in the examples.\n\n - 这个模型在赛马娘,原神,魔女的夜宴以及VCTK数据集上混合训练以学习多种语言。 - 所有角色均可说中日英三语。\n\n - 若需要在同一个句子中混合多种语言,使用相应的语言标记包裹句子。 - (日语用[JA], 中文用[ZH], 英文用[EN]),参考Examples中的示例。 - """, - "model_path": "./pretrained_models/G_trilingual.pth", - "config_path": "./configs/uma_trilingual.json", - "examples": [['你好,训练员先生,很高兴见到你。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', '简体中文', 1, False], - ['To be honest, I have no idea what to say as examples.', '派蒙 Paimon (Genshin Impact)', 'English', - 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '綾地 寧々 Ayachi Nene (Sanoba Witch)', '日本語', 1, False], - ['[JA]こんにちわ。[JA][ZH]你好![ZH][EN]Hello![EN]', '綾地 寧々 Ayachi Nene (Sanoba Witch)', 'Mix', 1, False]], - "onnx_dir": "./ONNX_net/G_trilingual/" - }, - { - "title": "Japanese", - "languages": ["Japanese"], - "description": """ - This model contains 87 characters from Umamusume: Pretty Derby, Japanese only.\n\n - 这个模型包含赛马娘的所有87名角色,只能合成日语。 - """, - "model_path": "./pretrained_models/G_jp.pth", - "config_path": "./configs/uma87.json", - "examples": [['お疲れ様です,トレーナーさん。', '无声铃鹿 Silence Suzuka (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['張り切っていこう!', '北部玄驹 Kitasan Black (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['何でこんなに慣れでんのよ,私のほが先に好きだっだのに。', '草上飞 Grass Wonder (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['授業中に出しだら,学校生活終わるですわ。', '目白麦昆 Mejiro Mcqueen (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['お帰りなさい,お兄様!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False], - ['私の処女をもらっでください!', '米浴 Rice Shower (Umamusume Pretty Derby)', 'Japanese', 1, False]], - "onnx_dir": "./ONNX_net/G_jp/" - }, -] - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - for info in models_info: - name = info['title'] - lang = info['languages'] - examples = info['examples'] - config_path = info['config_path'] - model_path = info['model_path'] - description = info['description'] - 
onnx_dir = info["onnx_dir"] - hps = utils.get_hparams_from_file(config_path) - model = ONNXVITS_infer.SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - ONNX_dir=onnx_dir, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = hps.speakers - speakers = list(hps.speakers.keys()) - models_tts.append((name, description, speakers, lang, examples, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps))) - models_vc.append((name, description, speakers, create_vc_fn(model, hps, speaker_ids))) - app = gr.Blocks() - with app: - gr.Markdown("# English & Chinese & Japanese Anime TTS\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=Plachta.VITS-Umamusume-voice-synthesizer)\n\n" - "Including Japanese TTS & Trilingual TTS, speakers are all anime characters. \n\n包含一个纯日语TTS和一个中日英三语TTS模型,主要为二次元角色。\n\n" - "If you have any suggestions or bug reports, feel free to open discussion in [Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions).\n\n" - "若有bug反馈或建议,请在[Community](https://huggingface.co/spaces/Plachta/VITS-Umamusume-voice-synthesizer/discussions)下开启一个新的Discussion。 \n\n" - ) - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, description, speakers, lang, example, symbols, tts_fn, to_symbol_fn) in enumerate( - models_tts): - with gr.TabItem(name): - gr.Markdown(description) - with gr.Row(): - with gr.Column(): - textbox = gr.TextArea(label="Text", - placeholder="Type your sentence here (Maximum 150 words)", - value="こんにちわ。", elem_id=f"tts-input") - with gr.Accordion(label="Phoneme Input", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[textbox], - samples=[[x] for x in symbols], - elem_id=f"symbol-list") - symbol_list_json = gr.Json(value=symbols, visible=False) - symbol_input.change(to_symbol_fn, - [symbol_input, textbox, temp_text_var], - [textbox, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], textbox, - _js=f""" - (i, symbols, text) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - - text = text_input.value; - - return text; - }}""") - # select character - char_dropdown = gr.Dropdown(choices=speakers, value=speakers[0], label='character') - language_dropdown = gr.Dropdown(choices=lang, value=lang[0], label='language') - duration_slider = gr.Slider(minimum=0.1, maximum=5, value=1, step=0.1, - label='速度 Speed') - with gr.Column(): - text_output = gr.Textbox(label="Message") - audio_output = gr.Audio(label="Output Audio", elem_id="tts-audio") - btn = gr.Button("Generate!") - btn.click(tts_fn, - inputs=[textbox, char_dropdown, language_dropdown, duration_slider, - symbol_input], - 
outputs=[text_output, audio_output]) - gr.Examples( - examples=example, - inputs=[textbox, char_dropdown, language_dropdown, - duration_slider, symbol_input], - outputs=[text_output, audio_output], - fn=tts_fn - ) - app.queue(concurrency_count=3).launch(show_api=False, share=args.share) \ No newline at end of file diff --git a/spaces/j-hartmann/emotion-classification-from-csv/README.md b/spaces/j-hartmann/emotion-classification-from-csv/README.md deleted file mode 100644 index 240dbda2d73fc23bc2f323e0eecedafe1175589d..0000000000000000000000000000000000000000 --- a/spaces/j-hartmann/emotion-classification-from-csv/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Emotion Classification From Csv -emoji: 🏢 -colorFrom: yellow -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/deforum_controlnet.py b/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/deforum_controlnet.py deleted file mode 100644 index a6b72c8d4723a32721ce3c1242d6b8b33a7b21b2..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/deforum_controlnet.py +++ /dev/null @@ -1,462 +0,0 @@ -# This helper script is responsible for ControlNet/Deforum integration -# https://github.com/Mikubill/sd-webui-controlnet — controlnet repo - -import os, sys -import gradio as gr -import scripts -import modules.scripts as scrpts -from PIL import Image -import numpy as np -from modules.processing import process_images -from .rich import console -from rich.table import Table -from rich import box - -has_controlnet = None - -def find_controlnet(): - global has_controlnet - if has_controlnet is not None: - return has_controlnet - - try: - from scripts import controlnet - except Exception as e: - print(f'\033[33mFailed to import controlnet! The exact error is {e}. 
Deforum support for ControlNet will not be activated\033[0m') - has_controlnet = False - return False - has_controlnet = True - print(f"\033[0;32m*Deforum ControlNet support: enabled*\033[0m") - return True - -# The most parts below are plainly copied from controlnet.py -# TODO: come up with a cleaner way - -gradio_compat = True -try: - from distutils.version import LooseVersion - from importlib_metadata import version - if LooseVersion(version("gradio")) < LooseVersion("3.10"): - gradio_compat = False -except ImportError: - pass - -# svgsupports -svgsupport = False -try: - import io - import base64 - from svglib.svglib import svg2rlg - from reportlab.graphics import renderPM - svgsupport = True -except ImportError: - pass - -def ControlnetArgs(): - controlnet_enabled = False - controlnet_scribble_mode = False - controlnet_rgbbgr_mode = False - controlnet_lowvram = False - controlnet_module = "none" - controlnet_model = "None" - controlnet_weight = 1.0 - controlnet_guidance_strength = 1.0 - blendFactorMax = "0:(0.35)" - blendFactorSlope = "0:(0.25)" - tweening_frames_schedule = "0:(20)" - color_correction_factor = "0:(0.075)" - return locals() - -def setup_controlnet_ui_raw(): - # Already under an accordion - from scripts import controlnet - from scripts.controlnet import update_cn_models, cn_models, cn_models_names - - refresh_symbol = '\U0001f504' # 🔄 - switch_values_symbol = '\U000021C5' # ⇅ - model_dropdowns = [] - infotext_fields = [] - # Main part - class ToolButton(gr.Button, gr.components.FormComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(variant="tool", **kwargs) - - def get_block_name(self): - return "button" - - from scripts.processor import canny, midas, midas_normal, leres, hed, mlsd, openpose, pidinet, simple_scribble, fake_scribble, uniformer - - preprocessor = { - "none": lambda x, *args, **kwargs: x, - "canny": canny, - "depth": midas, - "depth_leres": leres, - "hed": hed, - "mlsd": mlsd, - "normal_map": midas_normal, - "openpose": openpose, - # "openpose_hand": openpose_hand, - "pidinet": pidinet, - # "scribble": simple_scribble, - "fake_scribble": fake_scribble, - "segmentation": uniformer, - } - - # Copying the main ControlNet widgets while getting rid of static elements such as the scribble pad - with gr.Row(): - controlnet_enabled = gr.Checkbox(label='Enable', value=False) - controlnet_scribble_mode = gr.Checkbox(label='Scribble Mode (Invert colors)', value=False, visible=False) - controlnet_rgbbgr_mode = gr.Checkbox(label='RGB to BGR', value=False, visible=False) - controlnet_lowvram = gr.Checkbox(label='Low VRAM', value=False, visible=False) - - def refresh_all_models(*inputs): - update_cn_models() - - dd = inputs[0] - selected = dd if dd in cn_models else "None" - return gr.Dropdown.update(value=selected, choices=list(cn_models.keys())) - - with gr.Row(visible=False) as cn_mod_row: - controlnet_module = gr.Dropdown(list(preprocessor.keys()), label=f"Preprocessor", value="none") - controlnet_model = gr.Dropdown(list(cn_models.keys()), label=f"Model", value="None") - refresh_models = ToolButton(value=refresh_symbol) - refresh_models.click(refresh_all_models, controlnet_model, controlnet_model) - # ctrls += (refresh_models, ) - with gr.Row(visible=False) as cn_weight_row: - controlnet_weight = gr.Slider(label=f"Weight", value=1.0, minimum=0.0, maximum=2.0, step=.05) - controlnet_guidance_strength = gr.Slider(label="Guidance strength (T)", value=1.0, minimum=0.0, maximum=1.0, 
interactive=True) - # ctrls += (module, model, weight,) - # model_dropdowns.append(model) - - # advanced options - controlnet_advanced = gr.Column(visible=False) - with controlnet_advanced: - controlnet_processor_res = gr.Slider(label="Annotator resolution", value=64, minimum=64, maximum=2048, interactive=False) - controlnet_threshold_a = gr.Slider(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False) - controlnet_threshold_b = gr.Slider(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False) - - if gradio_compat: - controlnet_module.change(build_sliders, inputs=[controlnet_module], outputs=[controlnet_processor_res, controlnet_threshold_a, controlnet_threshold_b, controlnet_advanced]) - - infotext_fields.extend([ - (controlnet_module, f"ControlNet Preprocessor"), - (controlnet_model, f"ControlNet Model"), - (controlnet_weight, f"ControlNet Weight"), - ]) - - with gr.Row(visible=False) as cn_env_row: - controlnet_resize_mode = gr.Radio(choices=["Envelope (Outer Fit)", "Scale to Fit (Inner Fit)", "Just Resize"], value="Scale to Fit (Inner Fit)", label="Resize Mode") - - # Video input to be fed into ControlNet - #input_video_url = gr.Textbox(source='upload', type='numpy', tool='sketch') # TODO - controlnet_input_video_chosen_file = gr.File(label="ControlNet Video Input", interactive=True, file_count="single", file_types=["video"], elem_id="controlnet_input_video_chosen_file", visible=False) - controlnet_input_video_mask_chosen_file = gr.File(label="ControlNet Video Mask Input", interactive=True, file_count="single", file_types=["video"], elem_id="controlnet_input_video_mask_chosen_file", visible=False) - - cn_hide_output_list = [controlnet_scribble_mode,controlnet_rgbbgr_mode,controlnet_lowvram,cn_mod_row,cn_weight_row,cn_env_row,controlnet_input_video_chosen_file,controlnet_input_video_mask_chosen_file] - for cn_output in cn_hide_output_list: - controlnet_enabled.change(fn=hide_ui_by_cn_status, inputs=controlnet_enabled,outputs=cn_output) - - return locals() - - -def setup_controlnet_ui(): - if not find_controlnet(): - gr.HTML(""" - ControlNet not found. 
Please install it :) - """, elem_id='controlnet_not_found_html_msg') - return {} - - return setup_controlnet_ui_raw() - -def controlnet_component_names(): - if not find_controlnet(): - return [] - - controlnet_args_names = str(r'''controlnet_input_video_chosen_file, controlnet_input_video_mask_chosen_file, -controlnet_enabled, controlnet_scribble_mode, controlnet_rgbbgr_mode, controlnet_lowvram, -controlnet_module, controlnet_model, -controlnet_weight, controlnet_guidance_strength, -controlnet_processor_res, -controlnet_threshold_a, controlnet_threshold_b, controlnet_resize_mode''' - ).replace("\n", "").replace("\r", "").replace(" ", "").split(',') - - return controlnet_args_names - -def is_controlnet_enabled(controlnet_args): - return 'controlnet_enabled' in vars(controlnet_args) and controlnet_args.controlnet_enabled - -def process_txt2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame_idx = 1): - # TODO: use init image and mask here - p.control_net_enabled = False # we don't want to cause concurrence - p.init_images = [] - controlnet_frame_path = os.path.join(args.outdir, 'controlnet_inputframes', f"{frame_idx:05}.jpg") - controlnet_mask_frame_path = os.path.join(args.outdir, 'controlnet_maskframes', f"{frame_idx:05}.jpg") - cn_mask_np = None - cn_image_np = None - - if not os.path.exists(controlnet_frame_path) and not os.path.exists(controlnet_mask_frame_path): - print(f'\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m') - from .deforum_controlnet_hardcode import restore_networks - unet = p.sd_model.model.diffusion_model - restore_networks(unet) - return process_images(p) - - if os.path.exists(controlnet_frame_path): - cn_image_np = Image.open(controlnet_frame_path).convert("RGB") - - if os.path.exists(controlnet_mask_frame_path): - cn_mask_np = Image.open(controlnet_mask_frame_path).convert("RGB") - - cn_args = { - "enabled": True, - "module": controlnet_args.controlnet_module, - "model": controlnet_args.controlnet_model, - "weight": controlnet_args.controlnet_weight, - "input_image": {'image': cn_image_np, 'mask': cn_mask_np}, - "scribble_mode": controlnet_args.controlnet_scribble_mode, - "resize_mode": controlnet_args.controlnet_resize_mode, - "rgbbgr_mode": controlnet_args.controlnet_rgbbgr_mode, - "lowvram": controlnet_args.controlnet_lowvram, - "processor_res": controlnet_args.controlnet_processor_res, - "threshold_a": controlnet_args.controlnet_threshold_a, - "threshold_b": controlnet_args.controlnet_threshold_b, - "guidance_strength": controlnet_args.controlnet_guidance_strength,"guidance_strength": controlnet_args.controlnet_guidance_strength, - } - - from .deforum_controlnet_hardcode import process - p.script_args = ( - cn_args["enabled"], - cn_args["module"], - cn_args["model"], - cn_args["weight"], - cn_args["input_image"], - cn_args["scribble_mode"], - cn_args["resize_mode"], - cn_args["rgbbgr_mode"], - cn_args["lowvram"], - cn_args["processor_res"], - cn_args["threshold_a"], - cn_args["threshold_b"], - cn_args["guidance_strength"], - ) - - table = Table(title="ControlNet params",padding=0, box=box.ROUNDED) - - field_names = [] - field_names += ["module", "model", "weight", "guidance", "scribble", "resize", "rgb->bgr", "proc res", "thr a", "thr b"] - for field_name in field_names: - table.add_column(field_name, justify="center") - - rows = [] - rows += [cn_args["module"], cn_args["model"], cn_args["weight"], cn_args["guidance_strength"], cn_args["scribble_mode"], 
cn_args["resize_mode"], cn_args["rgbbgr_mode"], cn_args["processor_res"], cn_args["threshold_a"], cn_args["threshold_b"]] - rows = [str(x) for x in rows] - - table.add_row(*rows) - - console.print(table) - - processed = process(p, *(p.script_args)) - - if processed is None: # the script just swaps the pipeline, so failing is OK for the first time - processed = process_images(p) - - if processed is None: # now it's definitely not OK - raise Exception("\033[31mFailed to process a frame with ControlNet enabled!\033[0m") - - p.close() - - return processed - -def process_img2img_with_controlnet(p, args, anim_args, loop_args, controlnet_args, root, frame_idx = 0): - p.control_net_enabled = False # we don't want to cause concurrence - controlnet_frame_path = os.path.join(args.outdir, 'controlnet_inputframes', f"{frame_idx:05}.jpg") - controlnet_mask_frame_path = os.path.join(args.outdir, 'controlnet_maskframes', f"{frame_idx:05}.jpg") - - print(f'Reading ControlNet base frame {frame_idx} at {controlnet_frame_path}') - print(f'Reading ControlNet mask frame {frame_idx} at {controlnet_mask_frame_path}') - - cn_mask_np = None - cn_image_np = None - - if not os.path.exists(controlnet_frame_path) and not os.path.exists(controlnet_mask_frame_path): - print(f'\033[33mNeither the base nor the masking frames for ControlNet were found. Using the regular pipeline\033[0m') - return process_images(p) - - if os.path.exists(controlnet_frame_path): - cn_image_np = np.array(Image.open(controlnet_frame_path).convert("RGB")).astype('uint8') - - if os.path.exists(controlnet_mask_frame_path): - cn_mask_np = np.array(Image.open(controlnet_mask_frame_path).convert("RGB")).astype('uint8') - - cn_args = { - "enabled": True, - "module": controlnet_args.controlnet_module, - "model": controlnet_args.controlnet_model, - "weight": controlnet_args.controlnet_weight, - "input_image": {'image': cn_image_np, 'mask': cn_mask_np}, - "scribble_mode": controlnet_args.controlnet_scribble_mode, - "resize_mode": controlnet_args.controlnet_resize_mode, - "rgbbgr_mode": controlnet_args.controlnet_rgbbgr_mode, - "lowvram": controlnet_args.controlnet_lowvram, - "processor_res": controlnet_args.controlnet_processor_res, - "threshold_a": controlnet_args.controlnet_threshold_a, - "threshold_b": controlnet_args.controlnet_threshold_b, - "guidance_strength": controlnet_args.controlnet_guidance_strength, - } - - from .deforum_controlnet_hardcode import process - p.script_args = ( - cn_args["enabled"], - cn_args["module"], - cn_args["model"], - cn_args["weight"], - cn_args["input_image"], - cn_args["scribble_mode"], - cn_args["resize_mode"], - cn_args["rgbbgr_mode"], - cn_args["lowvram"], - cn_args["processor_res"], - cn_args["threshold_a"], - cn_args["threshold_b"], - cn_args["guidance_strength"], - ) - - table = Table(title="ControlNet params",padding=0, box=box.ROUNDED) - - field_names = [] - field_names += ["module", "model", "weight", "guidance", "scribble", "resize", "rgb->bgr", "proc res", "thr a", "thr b"] - for field_name in field_names: - table.add_column(field_name, justify="center") - - rows = [] - rows += [cn_args["module"], cn_args["model"], cn_args["weight"], cn_args["guidance_strength"], cn_args["scribble_mode"], cn_args["resize_mode"], cn_args["rgbbgr_mode"], cn_args["processor_res"], cn_args["threshold_a"], cn_args["threshold_b"]] - rows = [str(x) for x in rows] - - table.add_row(*rows) - - console.print(table) - - processed = process(p, *(p.script_args)) - - if processed is None: # the script just swaps the pipeline, so failing 
is OK for the first time - processed = process_images(p) - - if processed is None: # now it's definitely not OK - raise Exception("\033[31mFailed to process a frame with ControlNet enabled!\033[0m") - - p.close() - - return processed - -import pathlib -from .video_audio_utilities import vid2frames - -def unpack_controlnet_vids(args, anim_args, video_args, parseq_args, loop_args, controlnet_args, animation_prompts, root): - if controlnet_args.controlnet_input_video_chosen_file is not None and len(controlnet_args.controlnet_input_video_chosen_file.name) > 0: - print(f'Unpacking ControlNet base video') - # create a folder for the video input frames to live in - mask_in_frame_path = os.path.join(args.outdir, 'controlnet_inputframes') - os.makedirs(mask_in_frame_path, exist_ok=True) - - # save the video frames from mask video - print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...") - vid2frames(video_path=controlnet_args.controlnet_input_video_chosen_file.name, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame, numeric_files_output=True) - - print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}") - print(f'ControlNet base video unpacked!') - - if controlnet_args.controlnet_input_video_mask_chosen_file is not None and len(controlnet_args.controlnet_input_video_mask_chosen_file.name) > 0: - print(f'Unpacking ControlNet video mask') - # create a folder for the video input frames to live in - mask_in_frame_path = os.path.join(args.outdir, 'controlnet_maskframes') - os.makedirs(mask_in_frame_path, exist_ok=True) - - # save the video frames from mask video - print(f"Exporting Video Frames (1 every {anim_args.extract_nth_frame}) frames to {mask_in_frame_path}...") - vid2frames(video_path=controlnet_args.controlnet_input_video_mask_chosen_file.name, video_in_frame_path=mask_in_frame_path, n=anim_args.extract_nth_frame, overwrite=anim_args.overwrite_extracted_frames, extract_from_frame=anim_args.extract_from_frame, extract_to_frame=anim_args.extract_to_frame, numeric_files_output=True) - - print(f"Loading {anim_args.max_frames} input frames from {mask_in_frame_path} and saving video frames to {args.outdir}") - print(f'ControlNet video mask unpacked!') - -def hide_ui_by_cn_status(choice): - return gr.update(visible=True) if choice else gr.update(visible=False) - -def build_sliders(cn_model): - if cn_model == "canny": - return [ - gr.update(label="Annotator resolution", value=512, minimum=64, maximum=2048, step=1, interactive=True), - gr.update(label="Canny low threshold", minimum=1, maximum=255, value=100, step=1, interactive=True), - gr.update(label="Canny high threshold", minimum=1, maximum=255, value=200, step=1, interactive=True), - gr.update(visible=True) - ] - elif cn_model == "mlsd": #Hough - return [ - gr.update(label="Hough Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True), - gr.update(label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01, interactive=True), - gr.update(label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01, interactive=True), - gr.update(visible=True) - ] - elif cn_model in ["hed", "fake_scribble"]: - return [ - gr.update(label="HED Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True), - 
gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=True) - ] - elif cn_model in ["openpose", "openpose_hand", "segmentation"]: - return [ - gr.update(label="Annotator Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True), - gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=True) - ] - elif cn_model == "depth": - return [ - gr.update(label="Midas Resolution", minimum=64, maximum=2048, value=384, step=1, interactive=True), - gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=True) - ] - elif cn_model == "depth_leres": - return [ - gr.update(label="LeReS Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True), - gr.update(label="Remove Near %", value=0, minimum=0, maximum=100, step=0.1, interactive=True), - gr.update(label="Remove Background %", value=0, minimum=0, maximum=100, step=0.1, interactive=True), - gr.update(visible=True) - ] - elif cn_model == "normal_map": - return [ - gr.update(label="Normal Resolution", minimum=64, maximum=2048, value=512, step=1, interactive=True), - gr.update(label="Normal background threshold", minimum=0.0, maximum=1.0, value=0.4, step=0.01, interactive=True), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=True) - ] - elif cn_model == "none": - return [ - gr.update(label="Normal Resolution", value=64, minimum=64, maximum=2048, interactive=False), - gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=False) - ] - else: - return [ - gr.update(label="Annotator resolution", value=512, minimum=64, maximum=2048, step=1, interactive=True), - gr.update(label="Threshold A", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(label="Threshold B", value=64, minimum=64, maximum=1024, interactive=False), - gr.update(visible=True) - ] - - # def svgPreprocess(inputs): - # if (inputs): - # if (inputs['image'].startswith("data:image/svg+xml;base64,") and svgsupport): - # svg_data = base64.b64decode(inputs['image'].replace('data:image/svg+xml;base64,','')) - # drawing = svg2rlg(io.BytesIO(svg_data)) - # png_data = renderPM.drawToString(drawing, fmt='PNG') - # encoded_string = base64.b64encode(png_data) - # base64_str = str(encoded_string, "utf-8") - # base64_str = "data:image/png;base64,"+ base64_str - # inputs['image'] = base64_str - # return input_image.orgpreprocess(inputs) - # return None \ No newline at end of file diff --git a/spaces/jbilcke-hf/Panoremix/src/components/ui/card.tsx b/spaces/jbilcke-hf/Panoremix/src/components/ui/card.tsx deleted file mode 100644 index 6583ebc1bb942bfb94e00fb4e7c7d685073c7b2a..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/Panoremix/src/components/ui/card.tsx +++ /dev/null @@ -1,79 +0,0 @@ -import * as React from "react" - -import { cn } from "@/lib/utils" - -const Card = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
      -)) -Card.displayName = "Card" - -const CardHeader = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
      -)) -CardHeader.displayName = "CardHeader" - -const CardTitle = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

      -)) -CardTitle.displayName = "CardTitle" - -const CardDescription = React.forwardRef< - HTMLParagraphElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

      -)) -CardDescription.displayName = "CardDescription" - -const CardContent = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -

      -)) -CardContent.displayName = "CardContent" - -const CardFooter = React.forwardRef< - HTMLDivElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( -
      -)) -CardFooter.displayName = "CardFooter" - -export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } diff --git a/spaces/jbilcke-hf/ai-comic-factory/src/lib/fonts.ts b/spaces/jbilcke-hf/ai-comic-factory/src/lib/fonts.ts deleted file mode 100644 index 7498aa46bc21fe19cc1b878ee928f9d55c31f927..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-comic-factory/src/lib/fonts.ts +++ /dev/null @@ -1,119 +0,0 @@ -import { - Indie_Flower, - The_Girl_Next_Door, - -} from "next/font/google" -import localFont from "next/font/local" - -export const indieflower = Indie_Flower({ - subsets: ["latin"], - weight: "400", - variable: "--font-indieflower", -}) - -export const thegirlnextdoor = The_Girl_Next_Door({ - subsets: ["latin"], - weight: "400", - variable: "--font-the-girl-next-door", -}) - -export const komika = localFont({ - src: "../fonts/Komika-Hand/Komika-Hand.woff2", - variable: "--font-komika" -}) - -export const actionman = localFont({ - src: "../fonts/Action-Man/Action-Man.woff2", - variable: "--font-action-man" -}) - -export const karantula = localFont({ - src: "../fonts/Karantula/Karantula.woff2", - variable: "--font-karantula" -}) - -export const manoskope = localFont({ - src: "../fonts/Manoskope/MANOSKOPE-Bold.woff2", - variable: "--font-manoskope" -}) - -export const paeteround = localFont({ - src: "../fonts/Paete-Round/Paete-Round.woff2", - variable: "--font-paete-round" -}) - -export const qarmic = localFont({ - src: "../fonts/Qarmic-Sans/Qarmic-Sans-Abridged.woff2", - variable: "--font-qarmic-sans" -}) - -export const archrival = localFont({ - src: "../fonts/SF-Arch-Rival/SF-Arch-Rival.woff2", - variable: "--font-sf-arch-rival" -}) - -export const cartoonist = localFont({ - src: "../fonts/SF-Cartoonist-Hand/SF-Cartoonist-Hand.woff2", - variable: "--font-sf-cartoonist-hand" -}) - -export const toontime = localFont({ - src: "../fonts/SF-Toontime/SF-Toontime.woff2", - variable: "--font-sf-toontime" -}) - -export const vtc = localFont({ - src: "../fonts/VTC-Letterer-Pro/VTC-Letterer-Pro.woff2", - variable: "--font-vtc-letterer-pro" -}) - - -export const digitalstrip = localFont({ - src: "../fonts/DigitalStripBB/DigitalStripBB_Reg.woff2", - variable: "--font-digital-strip-bb" -}) - -// https://nextjs.org/docs/pages/building-your-application/optimizing/fonts -// If loading a variable font, you don"t need to specify the font weight -export const fonts = { - indieflower, - thegirlnextdoor, - // komika, - actionman, - karantula, - manoskope, - // paeteround, - // qarmic, - // archrival, - // cartoonist, - // toontime, - // vtc, - digitalstrip -} - -// https://nextjs.org/docs/pages/building-your-application/optimizing/fonts -// If loading a variable font, you don"t need to specify the font weight -export const fontList = Object.keys(fonts) - -export type FontName = keyof typeof fonts - -export const defaultFont = "cartoonist" as FontName - -export const classNames = Object.values(fonts).map(font => font.className) - -export const className = classNames.join(" ") - -export type FontClass = - | "font-indieflower" - | "font-thegirlnextdoor" - | "font-komika" - | "font-actionman" - | "font-karantula" - | "font-manoskope" - | "font-paeteround" - | "font-qarmic" - | "font-archrival" - | "font-cartoonist" - | "font-toontime" - | "font-vtc" - | "font-digitalstrip" diff --git a/spaces/jbilcke-hf/observer/src/app/engine/listen.ts b/spaces/jbilcke-hf/observer/src/app/engine/listen.ts deleted file mode 100644 index 
2281619741a5f4c1a16e1742723d66f591a76c30..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/observer/src/app/engine/listen.ts +++ /dev/null @@ -1,46 +0,0 @@ -"use server" - -import { SoundAnalysisRequest, SoundAnalysisResponse } from "@/types" - -const apiUrl = `${process.env.RENDERING_ENGINE_API || ""}` - -export async function listen(sound: string): Promise { - if (!sound?.length) { - console.log(`cannot call the API without a sound, aborting..`) - // throw new Error(`cannot call the API without a sound, aborting..`) - return "" - } - - try { - const request = { - sound - } as SoundAnalysisRequest - - console.log(`calling ${apiUrl}/listen called with: `, { - sound: request.sound.slice(0, 20) - }) - - const res = await fetch(`${apiUrl}/listen`, { - method: "POST", - headers: { - Accept: "application/json", - "Content-Type": "application/json", - // Authorization: `Bearer ${process.env.VC_SECRET_ACCESS_TOKEN}`, - }, - body: JSON.stringify(request), - cache: 'no-store', - // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache) - // next: { revalidate: 1 } - }) - - if (res.status !== 200) { - throw new Error('Failed to fetch data') - } - - const response = (await res.json()) as SoundAnalysisResponse - return response.result - } catch (err) { - console.error(err) - return "" - } -} diff --git a/spaces/jeremyrmanning/multitext-to-video/app.py b/spaces/jeremyrmanning/multitext-to-video/app.py deleted file mode 100644 index 1be2259144ef0d62efb3451e2911b599520ea734..0000000000000000000000000000000000000000 --- a/spaces/jeremyrmanning/multitext-to-video/app.py +++ /dev/null @@ -1,71 +0,0 @@ -import warnings -import gradio as gr -from transformers import pipeline -import io, base64 -from PIL import Image -import numpy as np -import tensorflow as tf -import mediapy -import os -import sys -from huggingface_hub import snapshot_download - -#CREDIT: this demo is based *heavily* on https://huggingface.co/spaces/osanseviero/latent-video - -with warnings.catch_warnings(): - warnings.simplefilter('ignore') - image_gen = gr.Interface.load("spaces/multimodalart/latentdiffusion") - - os.system("git clone https://github.com/google-research/frame-interpolation") - sys.path.append("frame-interpolation") - from eval import interpolator, util - -ffmpeg_path = util.get_ffmpeg_path() -mediapy.set_ffmpeg(ffmpeg_path) - -model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style") -interpolator = interpolator.Interpolator(model, None) - - -def generate_images(text, width=256, height=256, steps=50, num_images=2, - diversity=4): - - image_bytes = image_gen(text, steps, width, height, num_images, diversity) - - # Algo from spaces/Gradio-Blocks/latent_gpt2_story/blob/main/app.py - generated_images = [] - for image in image_bytes[1]: - image_str = image[0] - image_str = image_str.replace("data:image/png;base64,","") - decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8")) - img = Image.open(io.BytesIO(decoded_bytes)) - generated_images.append(img) - - return generated_images - - -def generate_interpolation(text, fps=7, steps=4): - images = [] - frames = [] - for i, t in enumerate(text.split(', ')): - print(f'image {i}: {t.lower().strip()}', end='...') - images.extend(generate_images(t.lower().strip())) - print('done!') - - frames.append(f'frame_{i}.png') - images[-1].save(frames[-1]) - - vid = list(util.interpolate_recursively_from_files(frames, steps, interpolator)) - mediapy.write_video("out.mp4", vid, fps=fps) - return "out.mp4" - -demo = gr.Blocks() - 
-with demo: - text = gr.Textbox(placeholder='human, human brain, brain in a computer, humanoid robot', label='Input a comma-separated list of terms or (brief) descriptions:') - button = gr.Button("Generate Video") - output = gr.Video(label="Generated Video") - - button.click(fn=generate_interpolation, inputs=text, outputs=output) - -demo.launch(debug=True, enable_queue=True) \ No newline at end of file diff --git a/spaces/jknero/rembackkk/README.md b/spaces/jknero/rembackkk/README.md deleted file mode 100644 index 9a13e31e65c0dcc289fa57e12549d76cf1a6ba0e..0000000000000000000000000000000000000000 --- a/spaces/jknero/rembackkk/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Rembackkk -emoji: 📈 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.8 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/jman1991/google-flan-t5-xxl/README.md b/spaces/jman1991/google-flan-t5-xxl/README.md deleted file mode 100644 index f257704b80343d54170e5829d50407b5dc991515..0000000000000000000000000000000000000000 --- a/spaces/jman1991/google-flan-t5-xxl/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Google Flan T5 Xxl -emoji: 🌍 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/payload.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/payload.py deleted file mode 100644 index a2340e2945edcc21de4cf99479670a3361180816..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/payload.py +++ /dev/null @@ -1,465 +0,0 @@ -import asyncio -import enum -import io -import json -import mimetypes -import os -import warnings -from abc import ABC, abstractmethod -from itertools import chain -from typing import ( - IO, - TYPE_CHECKING, - Any, - ByteString, - Dict, - Iterable, - Optional, - TextIO, - Tuple, - Type, - Union, -) - -from multidict import CIMultiDict - -from . 
import hdrs -from .abc import AbstractStreamWriter -from .helpers import ( - PY_36, - content_disposition_header, - guess_filename, - parse_mimetype, - sentinel, -) -from .streams import StreamReader -from .typedefs import Final, JSONEncoder, _CIMultiDict - -__all__ = ( - "PAYLOAD_REGISTRY", - "get_payload", - "payload_type", - "Payload", - "BytesPayload", - "StringPayload", - "IOBasePayload", - "BytesIOPayload", - "BufferedReaderPayload", - "TextIOPayload", - "StringIOPayload", - "JsonPayload", - "AsyncIterablePayload", -) - -TOO_LARGE_BYTES_BODY: Final[int] = 2**20 # 1 MB - -if TYPE_CHECKING: # pragma: no cover - from typing import List - - -class LookupError(Exception): - pass - - -class Order(str, enum.Enum): - normal = "normal" - try_first = "try_first" - try_last = "try_last" - - -def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload": - return PAYLOAD_REGISTRY.get(data, *args, **kwargs) - - -def register_payload( - factory: Type["Payload"], type: Any, *, order: Order = Order.normal -) -> None: - PAYLOAD_REGISTRY.register(factory, type, order=order) - - -class payload_type: - def __init__(self, type: Any, *, order: Order = Order.normal) -> None: - self.type = type - self.order = order - - def __call__(self, factory: Type["Payload"]) -> Type["Payload"]: - register_payload(factory, self.type, order=self.order) - return factory - - -PayloadType = Type["Payload"] -_PayloadRegistryItem = Tuple[PayloadType, Any] - - -class PayloadRegistry: - """Payload registry. - - note: we need zope.interface for more efficient adapter search - """ - - def __init__(self) -> None: - self._first: List[_PayloadRegistryItem] = [] - self._normal: List[_PayloadRegistryItem] = [] - self._last: List[_PayloadRegistryItem] = [] - - def get( - self, - data: Any, - *args: Any, - _CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain, - **kwargs: Any, - ) -> "Payload": - if isinstance(data, Payload): - return data - for factory, type in _CHAIN(self._first, self._normal, self._last): - if isinstance(data, type): - return factory(data, *args, **kwargs) - - raise LookupError() - - def register( - self, factory: PayloadType, type: Any, *, order: Order = Order.normal - ) -> None: - if order is Order.try_first: - self._first.append((factory, type)) - elif order is Order.normal: - self._normal.append((factory, type)) - elif order is Order.try_last: - self._last.append((factory, type)) - else: - raise ValueError(f"Unsupported order {order!r}") - - -class Payload(ABC): - - _default_content_type: str = "application/octet-stream" - _size: Optional[int] = None - - def __init__( - self, - value: Any, - headers: Optional[ - Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]] - ] = None, - content_type: Optional[str] = sentinel, - filename: Optional[str] = None, - encoding: Optional[str] = None, - **kwargs: Any, - ) -> None: - self._encoding = encoding - self._filename = filename - self._headers: _CIMultiDict = CIMultiDict() - self._value = value - if content_type is not sentinel and content_type is not None: - self._headers[hdrs.CONTENT_TYPE] = content_type - elif self._filename is not None: - content_type = mimetypes.guess_type(self._filename)[0] - if content_type is None: - content_type = self._default_content_type - self._headers[hdrs.CONTENT_TYPE] = content_type - else: - self._headers[hdrs.CONTENT_TYPE] = self._default_content_type - self._headers.update(headers or {}) - - @property - def size(self) -> Optional[int]: - """Size of the payload.""" - return self._size - - @property - def filename(self) -> 
Optional[str]: - """Filename of the payload.""" - return self._filename - - @property - def headers(self) -> _CIMultiDict: - """Custom item headers""" - return self._headers - - @property - def _binary_headers(self) -> bytes: - return ( - "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode( - "utf-8" - ) - + b"\r\n" - ) - - @property - def encoding(self) -> Optional[str]: - """Payload encoding""" - return self._encoding - - @property - def content_type(self) -> str: - """Content type""" - return self._headers[hdrs.CONTENT_TYPE] - - def set_content_disposition( - self, - disptype: str, - quote_fields: bool = True, - _charset: str = "utf-8", - **params: Any, - ) -> None: - """Sets ``Content-Disposition`` header.""" - self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( - disptype, quote_fields=quote_fields, _charset=_charset, **params - ) - - @abstractmethod - async def write(self, writer: AbstractStreamWriter) -> None: - """Write payload. - - writer is an AbstractStreamWriter instance: - """ - - -class BytesPayload(Payload): - def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None: - if not isinstance(value, (bytes, bytearray, memoryview)): - raise TypeError(f"value argument must be byte-ish, not {type(value)!r}") - - if "content_type" not in kwargs: - kwargs["content_type"] = "application/octet-stream" - - super().__init__(value, *args, **kwargs) - - if isinstance(value, memoryview): - self._size = value.nbytes - else: - self._size = len(value) - - if self._size > TOO_LARGE_BYTES_BODY: - if PY_36: - kwargs = {"source": self} - else: - kwargs = {} - warnings.warn( - "Sending a large body directly with raw bytes might" - " lock the event loop. You should probably pass an " - "io.BytesIO object instead", - ResourceWarning, - **kwargs, - ) - - async def write(self, writer: AbstractStreamWriter) -> None: - await writer.write(self._value) - - -class StringPayload(BytesPayload): - def __init__( - self, - value: str, - *args: Any, - encoding: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs: Any, - ) -> None: - - if encoding is None: - if content_type is None: - real_encoding = "utf-8" - content_type = "text/plain; charset=utf-8" - else: - mimetype = parse_mimetype(content_type) - real_encoding = mimetype.parameters.get("charset", "utf-8") - else: - if content_type is None: - content_type = "text/plain; charset=%s" % encoding - real_encoding = encoding - - super().__init__( - value.encode(real_encoding), - encoding=real_encoding, - content_type=content_type, - *args, - **kwargs, - ) - - -class StringIOPayload(StringPayload): - def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None: - super().__init__(value.read(), *args, **kwargs) - - -class IOBasePayload(Payload): - _value: IO[Any] - - def __init__( - self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any - ) -> None: - if "filename" not in kwargs: - kwargs["filename"] = guess_filename(value) - - super().__init__(value, *args, **kwargs) - - if self._filename is not None and disposition is not None: - if hdrs.CONTENT_DISPOSITION not in self.headers: - self.set_content_disposition(disposition, filename=self._filename) - - async def write(self, writer: AbstractStreamWriter) -> None: - loop = asyncio.get_event_loop() - try: - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - while chunk: - await writer.write(chunk) - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - finally: - await 
loop.run_in_executor(None, self._value.close) - - -class TextIOPayload(IOBasePayload): - _value: TextIO - - def __init__( - self, - value: TextIO, - *args: Any, - encoding: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs: Any, - ) -> None: - - if encoding is None: - if content_type is None: - encoding = "utf-8" - content_type = "text/plain; charset=utf-8" - else: - mimetype = parse_mimetype(content_type) - encoding = mimetype.parameters.get("charset", "utf-8") - else: - if content_type is None: - content_type = "text/plain; charset=%s" % encoding - - super().__init__( - value, - content_type=content_type, - encoding=encoding, - *args, - **kwargs, - ) - - @property - def size(self) -> Optional[int]: - try: - return os.fstat(self._value.fileno()).st_size - self._value.tell() - except OSError: - return None - - async def write(self, writer: AbstractStreamWriter) -> None: - loop = asyncio.get_event_loop() - try: - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - while chunk: - data = ( - chunk.encode(encoding=self._encoding) - if self._encoding - else chunk.encode() - ) - await writer.write(data) - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - finally: - await loop.run_in_executor(None, self._value.close) - - -class BytesIOPayload(IOBasePayload): - @property - def size(self) -> int: - position = self._value.tell() - end = self._value.seek(0, os.SEEK_END) - self._value.seek(position) - return end - position - - -class BufferedReaderPayload(IOBasePayload): - @property - def size(self) -> Optional[int]: - try: - return os.fstat(self._value.fileno()).st_size - self._value.tell() - except OSError: - # data.fileno() is not supported, e.g. - # io.BufferedReader(io.BytesIO(b'data')) - return None - - -class JsonPayload(BytesPayload): - def __init__( - self, - value: Any, - encoding: str = "utf-8", - content_type: str = "application/json", - dumps: JSONEncoder = json.dumps, - *args: Any, - **kwargs: Any, - ) -> None: - - super().__init__( - dumps(value).encode(encoding), - content_type=content_type, - encoding=encoding, - *args, - **kwargs, - ) - - -if TYPE_CHECKING: # pragma: no cover - from typing import AsyncIterable, AsyncIterator - - _AsyncIterator = AsyncIterator[bytes] - _AsyncIterable = AsyncIterable[bytes] -else: - from collections.abc import AsyncIterable, AsyncIterator - - _AsyncIterator = AsyncIterator - _AsyncIterable = AsyncIterable - - -class AsyncIterablePayload(Payload): - - _iter: Optional[_AsyncIterator] = None - - def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None: - if not isinstance(value, AsyncIterable): - raise TypeError( - "value argument must support " - "collections.abc.AsyncIterable interface, " - "got {!r}".format(type(value)) - ) - - if "content_type" not in kwargs: - kwargs["content_type"] = "application/octet-stream" - - super().__init__(value, *args, **kwargs) - - self._iter = value.__aiter__() - - async def write(self, writer: AbstractStreamWriter) -> None: - if self._iter: - try: - # iter is not None check prevents rare cases - # when the case iterable is used twice - while True: - chunk = await self._iter.__anext__() - await writer.write(chunk) - except StopAsyncIteration: - self._iter = None - - -class StreamReaderPayload(AsyncIterablePayload): - def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None: - super().__init__(value.iter_any(), *args, **kwargs) - - -PAYLOAD_REGISTRY = PayloadRegistry() -PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, 
memoryview)) -PAYLOAD_REGISTRY.register(StringPayload, str) -PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO) -PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase) -PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO) -PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom)) -PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase) -PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader) -# try_last for giving a chance to more specialized async interables like -# multidict.BodyPartReaderPayload override the default -PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/data.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/data.py deleted file mode 100644 index 1e47db52673e30552707ffe22684639ea910280c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/vegalite/v5/data.py +++ /dev/null @@ -1,47 +0,0 @@ -from ..data import ( - MaxRowsError, - curry, - default_data_transformer, - limit_rows, - pipe, - sample, - to_csv, - to_json, - to_values, - DataTransformerRegistry, -) - -from ...utils._vegafusion_data import vegafusion_data_transformer - -from typing import Final - - -# ============================================================================== -# VegaLite 5 data transformers -# ============================================================================== - - -ENTRY_POINT_GROUP: Final = "altair.vegalite.v5.data_transformer" - - -data_transformers = DataTransformerRegistry(entry_point_group=ENTRY_POINT_GROUP) -data_transformers.register("default", default_data_transformer) -data_transformers.register("json", to_json) -data_transformers.register("csv", to_csv) -data_transformers.register("vegafusion", vegafusion_data_transformer) -data_transformers.enable("default") - - -__all__ = ( - "MaxRowsError", - "curry", - "default_data_transformer", - "limit_rows", - "pipe", - "sample", - "to_csv", - "to_json", - "to_values", - "data_transformers", - "vegafusion_data_transformer", -) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/empty/__init__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/empty/__init__.py deleted file mode 100644 index d947503a0d464f5f639d27f207d45659ed03d18a..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/empty/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Empty Index.""" - -from gpt_index.indices.empty.base import GPTEmptyIndex - -__all__ = [ - "GPTEmptyIndex", -] diff --git a/spaces/johnberg/CLIPInverter/models/stylegan2/op/__init__.py b/spaces/johnberg/CLIPInverter/models/stylegan2/op/__init__.py deleted file mode 100644 index d0918d92285955855be89f00096b888ee5597ce3..0000000000000000000000000000000000000000 --- a/spaces/johnberg/CLIPInverter/models/stylegan2/op/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .fused_act import FusedLeakyReLU, fused_leaky_relu -from .upfirdn2d import upfirdn2d diff --git a/spaces/johnhelf/roop/roop/__init__.py b/spaces/johnhelf/roop/roop/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/slider.tsx 
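
The PAYLOAD_REGISTRY calls above register one factory per Python type, with order=Order.try_last reserving AsyncIterablePayload as the fallback. A simplified sketch of that ordered isinstance-dispatch idea (the MiniRegistry below is hypothetical and not the aiohttp class):

from enum import Enum

class Order(Enum):
    try_first = 1
    normal = 2
    try_last = 3

class MiniRegistry:
    def __init__(self) -> None:
        self._buckets = {order: [] for order in Order}

    def register(self, factory, type_, *, order: Order = Order.normal) -> None:
        self._buckets[order].append((factory, type_))

    def get(self, data):
        # First match wins; try_last entries only fire when nothing else does.
        for order in (Order.try_first, Order.normal, Order.try_last):
            for factory, type_ in self._buckets[order]:
                if isinstance(data, type_):
                    return factory(data)
        raise LookupError(f"no payload factory for {type(data)!r}")

registry = MiniRegistry()
registry.register(lambda d: ("bytes", d), (bytes, bytearray, memoryview))
registry.register(lambda d: ("str", d), str)
print(registry.get(b"abc")[0])  # bytes
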
b/spaces/jordonpeter01/ai-comic-factory/src/components/ui/slider.tsx deleted file mode 100644 index 0e35bc7fb000cffa5e29956283ecf7d75453236c..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/slider.tsx +++ /dev/null @@ -1,28 +0,0 @@ -"use client" - -import * as React from "react" -import * as SliderPrimitive from "@radix-ui/react-slider" - -import { cn } from "@/lib/utils" - -const Slider = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - - - -)) -Slider.displayName = SliderPrimitive.Root.displayName - -export { Slider } diff --git a/spaces/julien-c/sveltekit-demo/build/_app/chunks/vendor-92f01141.js b/spaces/julien-c/sveltekit-demo/build/_app/chunks/vendor-92f01141.js deleted file mode 100644 index cfc810888facabe7722d66b15b13ef9636b7266d..0000000000000000000000000000000000000000 --- a/spaces/julien-c/sveltekit-demo/build/_app/chunks/vendor-92f01141.js +++ /dev/null @@ -1,7 +0,0 @@ -function x(){}const tt=t=>t;function yt(t,e){for(const n in e)t[n]=e[n];return t}function et(t){return t()}function nt(){return Object.create(null)}function C(t){t.forEach(et)}function B(t){return typeof t=="function"}function gt(t,e){return t!=t?e==e:t!==e||t&&typeof t=="object"||typeof t=="function"}let D;function Ut(t,e){return D||(D=document.createElement("a")),D.href=e,t===D.href}function bt(t){return Object.keys(t).length===0}function wt(t,...e){if(t==null)return x;const n=t.subscribe(...e);return n.unsubscribe?()=>n.unsubscribe():n}function Vt(t,e,n){t.$$.on_destroy.push(wt(e,n))}function Xt(t,e,n,r){if(t){const i=st(t,e,n,r);return t[0](i)}}function st(t,e,n,r){return t[1]&&r?yt(n.ctx.slice(),t[1](r(e))):n.ctx}function Yt(t,e,n,r){if(t[2]&&r){const i=t[2](r(n));if(e.dirty===void 0)return i;if(typeof i=="object"){const l=[],s=Math.max(e.dirty.length,i.length);for(let c=0;c32){const e=[],n=t.ctx.length/32;for(let r=0;rwindow.performance.now():()=>Date.now(),G=it?t=>requestAnimationFrame(t):x;const A=new Set;function rt(t){A.forEach(e=>{e.c(t)||(A.delete(e),e.f())}),A.size!==0&&G(rt)}function J(t){let e;return A.size===0&&G(rt),{promise:new Promise(n=>{A.add(e={c:t,f:n})}),abort(){A.delete(e)}}}let z=!1;function xt(){z=!0}function $t(){z=!1}function kt(t,e,n,r){for(;t>1);n(i)<=r?t=i+1:e=i}return t}function Et(t){if(t.hydrate_init)return;t.hydrate_init=!0;let e=t.childNodes;if(t.nodeName==="HEAD"){const o=[];for(let a=0;a0&&e[n[i]].claim_order<=a?i+1:kt(1,i,f=>e[n[f]].claim_order,a))-1;r[o]=n[_]+1;const u=_+1;n[u]=o,i=Math.max(u,i)}const l=[],s=[];let c=e.length-1;for(let o=n[i]+1;o!=0;o=r[o-1]){for(l.push(e[o-1]);c>=o;c--)s.push(e[c]);c--}for(;c>=0;c--)s.push(e[c]);l.reverse(),s.sort((o,a)=>o.claim_order-a.claim_order);for(let o=0,a=0;o=l[a].claim_order;)a++;const _=at.removeEventListener(e,n,r)}function oe(t,e,n){n==null?t.removeAttribute(e):t.getAttribute(e)!==n&&t.setAttribute(e,n)}function vt(t){return Array.from(t.childNodes)}function qt(t){t.claim_info===void 0&&(t.claim_info={last_index:0,total_claimed:0})}function lt(t,e,n,r,i=!1){qt(t);const l=(()=>{for(let s=t.claim_info.last_index;s=0;s--){const c=t[s];if(e(c)){const o=n(c);return o===void 0?t.splice(s,1):t[s]=o,i?o===void 0&&t.claim_info.last_index--:t.claim_info.last_index=s,c}}return r()})();return l.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,l}function at(t,e,n,r){return lt(t,i=>i.nodeName===e,i=>{const l=[];for(let s=0;si.removeAttribute(s))},()=>r(e))}function ce(t,e,n){return 
at(t,e,n,ct)}function le(t,e,n){return at(t,e,n,Nt)}function Rt(t,e){return lt(t,n=>n.nodeType===3,n=>{const r=""+e;if(n.data.startsWith(r)){if(n.data.length!==r.length)return n.splitText(r.length)}else n.data=r},()=>K(e),!0)}function ae(t){return Rt(t," ")}function fe(t,e){e=""+e,t.wholeText!==e&&(t.data=e)}function ue(t,e,n,r){t.style.setProperty(e,n,r?"important":"")}function de(t,e,n){t.classList[n?"add":"remove"](e)}function Ot(t,e,n=!1){const r=document.createEvent("CustomEvent");return r.initCustomEvent(t,n,!1,e),r}function _e(t,e=document.body){return Array.from(e.querySelectorAll(t))}const Q=new Set;let T=0;function Bt(t){let e=5381,n=t.length;for(;n--;)e=(e<<5)-e^t.charCodeAt(n);return e>>>0}function U(t,e,n,r,i,l,s,c=0){const o=16.666/r;let a=`{ -`;for(let m=0;m<=1;m+=o){const p=e+(n-e)*l(m);a+=m*100+`%{${s(p,1-p)}} -`}const _=a+`100% {${s(n,1-n)}} -}`,u=`__svelte_${Bt(_)}_${c}`,f=ot(t);Q.add(f);const d=f.__svelte_stylesheet||(f.__svelte_stylesheet=Ct(t).sheet),h=f.__svelte_rules||(f.__svelte_rules={});h[u]||(h[u]=!0,d.insertRule(`@keyframes ${u} ${_}`,d.cssRules.length));const y=t.style.animation||"";return t.style.animation=`${y?`${y}, `:""}${u} ${r}ms linear ${i}ms 1 both`,T+=1,u}function ft(t,e){const n=(t.style.animation||"").split(", "),r=n.filter(e?l=>l.indexOf(e)<0:l=>l.indexOf("__svelte")===-1),i=n.length-r.length;i&&(t.style.animation=r.join(", "),T-=i,T||Dt())}function Dt(){G(()=>{T||(Q.forEach(t=>{const e=t.__svelte_stylesheet;let n=e.cssRules.length;for(;n--;)e.deleteRule(n);t.__svelte_rules={}}),Q.clear())})}function he(t,e,n,r){if(!e)return x;const i=t.getBoundingClientRect();if(e.left===i.left&&e.right===i.right&&e.top===i.top&&e.bottom===i.bottom)return x;const{delay:l=0,duration:s=300,easing:c=tt,start:o=P()+l,end:a=o+s,tick:_=x,css:u}=n(t,{from:e,to:i},r);let f=!0,d=!1,h;function y(){u&&(h=U(t,0,1,s,l,c,u)),l||(d=!0)}function m(){u&&ft(t,h),f=!1}return J(p=>{if(!d&&p>=o&&(d=!0),d&&p>=a&&(_(1,0),m()),!f)return!1;if(d){const w=p-o,b=0+1*c(w/s);_(b,1-b)}return!0}),y(),_(0,1),m}function me(t){const e=getComputedStyle(t);if(e.position!=="absolute"&&e.position!=="fixed"){const{width:n,height:r}=e,i=t.getBoundingClientRect();t.style.position="absolute",t.style.width=n,t.style.height=r,Pt(t,i)}}function Pt(t,e){const n=t.getBoundingClientRect();if(e.left!==n.left||e.top!==n.top){const r=getComputedStyle(t),i=r.transform==="none"?"":r.transform;t.style.transform=`${i} translate(${e.left-n.left}px, ${e.top-n.top}px)`}}let N;function v(t){N=t}function L(){if(!N)throw new Error("Function called outside component initialization");return N}function pe(t){L().$$.on_mount.push(t)}function ye(t){L().$$.after_update.push(t)}function ge(t,e){L().$$.context.set(t,e)}function be(t){return L().$$.context.get(t)}const q=[],ut=[],F=[],dt=[],zt=Promise.resolve();let V=!1;function Tt(){V||(V=!0,zt.then(_t))}function H(t){F.push(t)}const X=new Set;let I=0;function _t(){const t=N;do{for(;I{R=null})),R}function Y(t,e,n){t.dispatchEvent(Ot(`${e?"intro":"outro"}${n}`))}const W=new Set;let E;function we(){E={r:0,c:[],p:E}}function xe(){E.r||C(E.c),E=E.p}function ht(t,e){t&&t.i&&(W.delete(t),t.i(e))}function Ht(t,e,n,r){if(t&&t.o){if(W.has(t))return;W.add(t),E.c.push(()=>{W.delete(t),r&&(n&&t.d(1),r())}),t.o(e)}}const It={duration:0};function $e(t,e,n,r){let i=e(t,n),l=r?0:1,s=null,c=null,o=null;function a(){o&&ft(t,o)}function _(f,d){const h=f.b-l;return d*=Math.abs(h),{a:l,b:f.b,d:h,duration:d,start:f.start,end:f.start+d,group:f.group}}function 
u(f){const{delay:d=0,duration:h=300,easing:y=tt,tick:m=x,css:p}=i||It,w={start:P()+d,b:f};f||(w.group=E,E.r+=1),s||c?c=w:(p&&(a(),o=U(t,l,f,h,d,y,p)),f&&m(0,1),s=_(w,h),H(()=>Y(t,f,"start")),J(b=>{if(c&&b>c.start&&(s=_(c,h),c=null,Y(t,s.b,"start"),p&&(a(),o=U(t,l,s.b,s.duration,0,y,i.css))),s){if(b>=s.end)m(l=s.b,1-l),Y(t,s.b,"end"),c||(s.b?a():--s.group.r||C(s.group.c)),s=null;else if(b>=s.start){const S=b-s.start;l=s.a+s.d*y(S/s.duration),m(l,1-l)}}return!!(s||c)}))}return{run(f){B(i)?Ft().then(()=>{i=i(),u(f)}):u(f)},end(){a(),s=c=null}}}function Wt(t,e){Ht(t,1,1,()=>{e.delete(t.key)})}function ke(t,e){t.f(),Wt(t,e)}function Ee(t,e,n,r,i,l,s,c,o,a,_,u){let f=t.length,d=l.length,h=f;const y={};for(;h--;)y[t[h].key]=h;const m=[],p=new Map,w=new Map;for(h=d;h--;){const g=u(i,l,h),$=n(g);let k=s.get($);k?r&&k.p(g,e):(k=a($,g),k.c()),p.set($,m[h]=k),$ in y&&w.set($,Math.abs(h-y[$]))}const b=new Set,S=new Set;function j(g){ht(g,1),g.m(c,_),s.set(g.key,g),_=g.first,d--}for(;f&&d;){const g=m[d-1],$=t[f-1],k=g.key,O=$.key;g===$?(_=g.first,f--,d--):p.has(O)?!s.has(k)||b.has(k)?j(g):S.has(O)?f--:w.get(k)>w.get(O)?(S.add(k),j(g)):(b.add(O),f--):(o($,s),f--)}for(;f--;){const g=t[f];p.has(g.key)||o(g,s)}for(;d;)j(m[d-1]);return m}function Se(t,e){const n={},r={},i={$$scope:1};let l=t.length;for(;l--;){const s=t[l],c=e[l];if(c){for(const o in s)o in c||(r[o]=1);for(const o in c)i[o]||(n[o]=c[o],i[o]=1);t[l]=c}else for(const o in s)i[o]=1}for(const s in r)s in n||(n[s]=void 0);return n}function Ce(t){return typeof t=="object"&&t!==null?t:{}}function Ae(t){t&&t.c()}function Me(t,e){t&&t.l(e)}function Gt(t,e,n,r){const{fragment:i,on_mount:l,on_destroy:s,after_update:c}=t.$$;i&&i.m(e,n),r||H(()=>{const o=l.map(et).filter(B);s?s.push(...o):C(o),t.$$.on_mount=[]}),c.forEach(H)}function Jt(t,e){const n=t.$$;n.fragment!==null&&(C(n.on_destroy),n.fragment&&n.fragment.d(e),n.on_destroy=n.fragment=null,n.ctx=[])}function Kt(t,e){t.$$.dirty[0]===-1&&(q.push(t),Tt(),t.$$.dirty.fill(0)),t.$$.dirty[e/31|0]|=1<{const h=d.length?d[0]:f;return a.ctx&&i(a.ctx[u],a.ctx[u]=h)&&(!a.skip_bound&&a.bound[u]&&a.bound[u](h),_&&Kt(t,u)),f}):[],a.update(),_=!0,C(a.before_update),a.fragment=r?r(a.ctx):!1,e.target){if(e.hydrate){xt();const u=vt(e.target);a.fragment&&a.fragment.l(u),u.forEach(jt)}else a.fragment&&a.fragment.c();e.intro&&ht(t.$$.fragment),Gt(t,e.target,e.anchor,e.customElement),$t(),_t()}v(o)}class Ne{$destroy(){Jt(this,1),this.$destroy=x}$on(e,n){const r=this.$$.callbacks[e]||(this.$$.callbacks[e]=[]);return r.push(n),()=>{const i=r.indexOf(n);i!==-1&&r.splice(i,1)}}$set(e){this.$$set&&!bt(e)&&(this.$$.skip_bound=!0,this.$$set(e),this.$$.skip_bound=!1)}}const M=[];function Qt(t,e=x){let n;const r=new Set;function i(c){if(gt(t,c)&&(t=c,n)){const o=!M.length;for(const a of r)a[1](),M.push(a,t);if(o){for(let a=0;a{r.delete(a),r.size===0&&(n(),n=null)}}return{set:i,update:l,subscribe:s}}function mt(t){const e=t-1;return e*e*e+1}function pt(t){return Object.prototype.toString.call(t)==="[object Date]"}function Z(t,e,n,r){if(typeof n=="number"||pt(n)){const i=r-n,l=(n-e)/(t.dt||1/60),s=t.opts.stiffness*i,c=t.opts.damping*l,o=(s-c)*t.inv_mass,a=(l+o)*t.dt;return Math.abs(a)Z(t,e[l],n[l],r[l]));if(typeof n=="object"){const i={};for(const l in n)i[l]=Z(t,e[l],n[l],r[l]);return i}else throw new Error(`Cannot spring ${typeof n} values`)}}function ve(t,e={}){const n=Qt(t),{stiffness:r=.15,damping:i=.8,precision:l=.01}=e;let s,c,o,a=t,_=t,u=1,f=0,d=!1;function h(m,p={}){_=m;const 
w=o={};if(t==null||p.hard||y.stiffness>=1&&y.damping>=1)return d=!0,s=P(),a=m,n.set(t=_),Promise.resolve();if(p.soft){const b=p.soft===!0?.5:+p.soft;f=1/(b*60),u=0}return c||(s=P(),d=!1,c=J(b=>{if(d)return d=!1,c=null,!1;u=Math.min(u+f,1);const S={inv_mass:u,opts:y,settled:!0,dt:(b-s)*60/1e3},j=Z(S,a,t,_);return s=b,a=t,n.set(t=j),S.settled&&(c=null),!S.settled})),new Promise(b=>{c.promise.then(()=>{w===o&&b()})})}const y={set:h,update:(m,p)=>h(m(_,t),p),subscribe:n.subscribe,stiffness:r,damping:i,precision:l};return y}function qe(t,{delay:e=0,duration:n=400,easing:r=mt,start:i=0,opacity:l=0}={}){const s=getComputedStyle(t),c=+s.opacity,o=s.transform==="none"?"":s.transform,a=1-i,_=c*(1-l);return{delay:e,duration:n,easing:r,css:(u,f)=>` - transform: ${o} scale(${1-a*f}); - opacity: ${c-_*f} - `}}function Re(t,{from:e,to:n},r={}){const i=getComputedStyle(t),l=i.transform==="none"?"":i.transform,[s,c]=i.transformOrigin.split(" ").map(parseFloat),o=e.left+e.width*s/n.width-(n.left+s),a=e.top+e.height*c/n.height-(n.top+c),{delay:_=0,duration:u=d=>Math.sqrt(d)*120,easing:f=mt}=r;return{delay:_,duration:B(u)?u(Math.sqrt(o*o+a*a)):u,easing:f,css:(d,h)=>{const y=h*o,m=h*a,p=d+h*e.width/n.width,w=d+h*e.height/n.height;return`transform: ${l} translate(${y}px, ${m}px) scale(${p}, ${w});`}}}export{$e as $,Jt as A,yt as B,Qt as C,be as D,Nt as E,le as F,Ut as G,de as H,Mt as I,x as J,Vt as K,Xt as L,Zt as M,te as N,Yt as O,ue as P,re as Q,C as R,Ne as S,ve as T,_e as U,ee as V,B as W,me as X,Pt as Y,he as Z,H as _,vt as a,Ee as a0,Re as a1,qe as a2,ke as a3,oe as b,ce as c,jt as d,ct as e,ne as f,Rt as g,fe as h,je as i,se as j,ie as k,ae as l,we as m,Ht as n,xe as o,ht as p,ge as q,ye as r,gt as s,K as t,pe as u,Ae as v,Me as w,Gt as x,Se as y,Ce as z}; diff --git a/spaces/justest/gpt4free/g4f/.v1/unfinished/bing/__ini__.py b/spaces/justest/gpt4free/g4f/.v1/unfinished/bing/__ini__.py deleted file mode 100644 index 1e4fd149dd2371c54989bf3b6e034fd60e156213..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/.v1/unfinished/bing/__ini__.py +++ /dev/null @@ -1,108 +0,0 @@ -# Import necessary libraries -import asyncio -from json import dumps, loads -from ssl import create_default_context - -import websockets -from browser_cookie3 import edge -from certifi import where -from requests import get - -# Set up SSL context -ssl_context = create_default_context() -ssl_context.load_verify_locations(where()) - - -def format(msg: dict) -> str: - """Format message as JSON string with delimiter.""" - return dumps(msg) + '\x1e' - - -def get_token(): - """Retrieve token from browser cookies.""" - cookies = {c.name: c.value for c in edge(domain_name='bing.com')} - return cookies['_U'] - - -class AsyncCompletion: - async def create( - prompt: str = 'hello world', - optionSets: list = [ - 'deepleo', - 'enable_debug_commands', - 'disable_emoji_spoken_text', - 'enablemm', - 'h3relaxedimg' - ], - token: str = get_token()): - """Create a connection to Bing AI and send the prompt.""" - - # Send create request - create = get('https://edgeservices.bing.com/edgesvc/turing/conversation/create', - headers={ - 'host': 'edgeservices.bing.com', - 'authority': 'edgeservices.bing.com', - 'cookie': f'_U={token}', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', - } - ) - - # Extract conversation data - conversationId = create.json()['conversationId'] - clientId = create.json()['clientId'] - conversationSignature = 
create.json()['conversationSignature'] - - # Connect to WebSocket - wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size=None, ssl=ssl_context, - extra_headers={ - # Add necessary headers - } - ) - - # Send JSON protocol version - await wss.send(format({'protocol': 'json', 'version': 1})) - await wss.recv() - - # Define message structure - struct = { - # Add necessary message structure - } - - # Send message - await wss.send(format(struct)) - - # Process responses - base_string = '' - final = False - while not final: - objects = str(await wss.recv()).split('\x1e') - for obj in objects: - if obj is None or obj == '': - continue - - response = loads(obj) - if response.get('type') == 1 and response['arguments'][0].get('messages', ): - response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get( - 'text') - - yield (response_text.replace(base_string, '')) - base_string = response_text - - elif response.get('type') == 2: - final = True - - await wss.close() - - -async def run(): - """Run the async completion and print the result.""" - async for value in AsyncCompletion.create( - prompt='summarize cinderella with each word beginning with a consecutive letter of the alphabet, a-z', - optionSets=[ - "galileo", - ] - ): - print(value, end='', flush=True) - - -asyncio.run(run()) diff --git a/spaces/kangvcar/RealChar/client/web/src/components/Common/IconButton.js b/spaces/kangvcar/RealChar/client/web/src/components/Common/IconButton.js deleted file mode 100644 index 72d62762f6229262f33a5b064878eb11be9ff3dc..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/client/web/src/components/Common/IconButton.js +++ /dev/null @@ -1,19 +0,0 @@ -/** - * src/components/Common/IconButton.jsx - * A general-purpose Icon Button component - * - * created by Lynchee on 7/19/23 - */ - -import React from 'react'; -import './styles.css'; - -const IconButton = ({ Icon, className, onClick, bgcolor="default"}) => { - return ( -
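
The Bing client above frames every JSON message with the ASCII record separator '\x1e' and parses incoming traffic by splitting on it. A small self-contained illustration of that framing:

import json

RS = "\x1e"  # record separator delimiting JSON documents on the socket

def frame(msg: dict) -> str:
    # Mirrors format() above: one JSON document per record.
    return json.dumps(msg) + RS

def unframe(raw: str) -> list:
    # Mirrors the receive loop above: split on the separator, skip empties.
    return [json.loads(part) for part in raw.split(RS) if part]

wire = frame({"protocol": "json", "version": 1}) + frame({"type": 6})
print(unframe(wire))  # [{'protocol': 'json', 'version': 1}, {'type': 6}]
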
      - -
      - ); -}; - -export default IconButton; diff --git a/spaces/kangvcar/RealChar/realtime_ai_character/audio/speech_to_text/whisper.py b/spaces/kangvcar/RealChar/realtime_ai_character/audio/speech_to_text/whisper.py deleted file mode 100644 index 6161ff622e7b9b7b76b9b402f42843dcacead96c..0000000000000000000000000000000000000000 --- a/spaces/kangvcar/RealChar/realtime_ai_character/audio/speech_to_text/whisper.py +++ /dev/null @@ -1,72 +0,0 @@ -import io -import os -import types -import wave - -import speech_recognition as sr -import whisper -from pydub import AudioSegment - -from realtime_ai_character.audio.speech_to_text.base import SpeechToText -from realtime_ai_character.logger import get_logger -from realtime_ai_character.utils import Singleton - -DEBUG = False -logger = get_logger(__name__) -config = types.SimpleNamespace(**{ - 'model': 'tiny', - 'language': 'en', - 'api_key': os.getenv("OPENAI_API_KEY"), -}) - - -class Whisper(Singleton, SpeechToText): - def __init__(self, use='local'): - super().__init__() - if use == 'local': - logger.info(f"Loading [Local Whisper] model: [{config.model}]...") - whisper.load_model(config.model) - self.recognizer = sr.Recognizer() - self.use = use - if DEBUG: - self.wf = wave.open('output.wav', 'wb') - self.wf.setnchannels(1) # Assuming mono audio - self.wf.setsampwidth(2) # Assuming 16-bit audio - self.wf.setframerate(44100) # Assuming 44100Hz sample rate - - def transcribe(self, audio_bytes, platform, prompt=''): - logger.info("Transcribing audio...") - if platform == 'web': - audio = self._convert_webm_to_wav(audio_bytes) - else: - audio = sr.AudioData(audio_bytes, 44100, 2) - if self.use == 'local': - return self._transcribe(audio, prompt) - elif self.use == 'api': - return self._transcribe_api(audio, prompt) - - def _transcribe(self, audio, prompt=''): - text = self.recognizer.recognize_whisper( - audio, - model=config.model, - language=config.language, - show_dict=True, - initial_prompt=prompt - )['text'] - return text - - def _transcribe_api(self, audio, prompt=''): - text = self.recognizer.recognize_whisper_api( - audio, - api_key=config.api_key, - ) - return text - - def _convert_webm_to_wav(self, webm_data): - webm_audio = AudioSegment.from_file( - io.BytesIO(webm_data), format="webm") - wav_data = io.BytesIO() - webm_audio.export(wav_data, format="wav") - with sr.AudioFile(wav_data) as source: - audio = self.recognizer.record(source) - return audio diff --git a/spaces/keithhon/logo-generator/dalle/utils/sampling.py b/spaces/keithhon/logo-generator/dalle/utils/sampling.py deleted file mode 100644 index d09693f0dd44a1d83e2d9b8ba6d59a6bc0c8e729..0000000000000000000000000000000000000000 --- a/spaces/keithhon/logo-generator/dalle/utils/sampling.py +++ /dev/null @@ -1,152 +0,0 @@ -# ------------------------------------------------------------------------------------ -# Minimal DALL-E -# Copyright (c) 2021 KakaoBrain. All Rights Reserved. 
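
Whisper._convert_webm_to_wav above is what lets browser-recorded audio reach speech_recognition: pydub decodes the WebM blob (which needs ffmpeg available on the PATH) and re-exports it as WAV in memory. A hedged, self-contained sketch of the same conversion, assuming pydub and speech_recognition are installed:

import io

import speech_recognition as sr
from pydub import AudioSegment  # decoding webm requires ffmpeg to be installed

def webm_bytes_to_audiodata(webm_data: bytes) -> sr.AudioData:
    segment = AudioSegment.from_file(io.BytesIO(webm_data), format="webm")
    wav_buf = io.BytesIO()
    segment.export(wav_buf, format="wav")
    wav_buf.seek(0)  # explicit rewind before handing the buffer to AudioFile
    with sr.AudioFile(wav_buf) as source:
        return sr.Recognizer().record(source)
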
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------------------ - -import torch -from typing import Optional -from tqdm import tqdm -from torch.nn import functional as F - - -def cutoff_topk_logits(logits: torch.FloatTensor, k: int) -> torch.FloatTensor: - if k is None: - return logits - else: - v, ix = torch.topk(logits, k) - out = logits.clone() - out[out < v[:, [-1]]] = -float('Inf') - return out - - -def cutoff_topp_probs(probs: torch.FloatTensor, p: float) -> torch.FloatTensor: - if p is None: - return probs - else: - sorted_probs, sorted_indices = torch.sort(probs, dim=-1, descending=True) - cum_probs = torch.cumsum(sorted_probs, dim=-1) - - sorted_idx_remove_cond = cum_probs >= p - - sorted_idx_remove_cond[..., 1:] = sorted_idx_remove_cond[..., :-1].clone() - sorted_idx_remove_cond[..., 0] = 0 - - indices_to_remove = sorted_idx_remove_cond.scatter(-1, sorted_indices, sorted_idx_remove_cond) - probs = probs.masked_fill(indices_to_remove, 0.0) - norm_probs = probs / torch.sum(probs, dim=-1, keepdim=True) - return norm_probs - - -def get_positional_encoding(inputs: torch.LongTensor, mode: str = '1d') -> torch.LongTensor: - device = inputs.device - if mode == '1d': - B, N = inputs.shape - xs_pos = torch.arange(N, device=device).repeat((B, 1)) - elif mode == '2d': - B, H, W = inputs.shape - xs_pos_h = torch.arange(H, device=device).repeat(B, W, 1).transpose(1, 2) - xs_pos_w = torch.arange(W, device=device).repeat(B, H, 1) - xs_pos = (xs_pos_h, xs_pos_w) - else: - raise ValueError('%s positional encoding invalid' % mode) - return xs_pos - - -@torch.no_grad() -def sampling(model: torch.nn.Module, - tokens: torch.LongTensor, - top_k: Optional[float] = None, - top_p: Optional[float] = None, - softmax_temperature: float = 1.0, - is_tqdm: bool = True, - use_fp16: bool = True, - max_seq_len: int = 256) -> torch.LongTensor: - code = None - past = None - - pbar = tqdm(range(max_seq_len), total=max_seq_len) if is_tqdm else range(max_seq_len) - pos_enc_tokens = get_positional_encoding(tokens, mode='1d') - - for cnt, h in enumerate(pbar): - if code is None: - code_ = None - pos_enc_code_ = None - else: - code_ = code.clone().detach() - pos_enc_code_ = get_positional_encoding(code_, mode='1d') - code_ = code_[:, cnt-1].unsqueeze(-1) - pos_enc_code_ = pos_enc_code_[:, cnt-1].unsqueeze(-1) - - logits, present = model.sampling(images=code_, - texts=tokens, - pos_images=pos_enc_code_, - pos_texts=pos_enc_tokens, - use_fp16=use_fp16, - past=past) - logits = logits.to(dtype=torch.float32) - logits = logits / softmax_temperature - - present = torch.stack(present).clone().detach() - if past is None: - past = [present] - else: - past.append(present) - - logits = cutoff_topk_logits(logits, top_k) - probs = F.softmax(logits, dim=-1) - probs = cutoff_topp_probs(probs, top_p) - - idx = torch.multinomial(probs, num_samples=1).clone().detach() - code = idx if code is None else torch.cat([code, idx], axis=1) - - del past - return code - - -@torch.no_grad() -def sampling_igpt(model: torch.nn.Module, - sos: torch.FloatTensor, - top_k: Optional[float] = None, - top_p: Optional[float] = None, - softmax_temperature: float = 1.0, - is_tqdm: bool = True, - use_fp16: bool = True, - max_seq_len: int = 256) -> torch.LongTensor: - code = None - past = None - pbar = tqdm(range(max_seq_len), total=max_seq_len) if is_tqdm else range(max_seq_len) - - for cnt, h in enumerate(pbar): - if code is None: - code_ = None - pos_enc_code_ = 
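
The two cutoff helpers above implement standard top-k and nucleus (top-p) filtering; together with a temperature they form the per-step sampling rule used in sampling() and sampling_igpt(). A short usage sketch, assuming cutoff_topk_logits and cutoff_topp_probs from this module are in scope:

import torch
from torch.nn import functional as F

logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
logits = cutoff_topk_logits(logits, 3)       # logits below the 3rd best become -inf
probs = F.softmax(logits / 0.7, dim=-1)      # softmax_temperature = 0.7
probs = cutoff_topp_probs(probs, 0.9)        # keep the smallest set covering 90% mass
next_id = torch.multinomial(probs, num_samples=1)
print(next_id.shape)  # torch.Size([1, 1])
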
None - else: - code_ = code.clone().detach() - pos_enc_code_ = get_positional_encoding(code_, mode='1d') - code_ = code_[:, cnt-1].unsqueeze(-1) - pos_enc_code_ = pos_enc_code_[:, cnt-1].unsqueeze(-1) - - logits, present = model.sampling(sos=sos, - codes=code_, - pos_codes=pos_enc_code_, - use_fp16=use_fp16, - past=past) - logits = logits.to(dtype=torch.float32) - logits = logits / softmax_temperature - - present = torch.stack(present).clone().detach() - if past is None: - past = [present] - else: - past.append(present) - - logits = cutoff_topk_logits(logits, top_k) - probs = F.softmax(logits, dim=-1) - probs = cutoff_topp_probs(probs, top_p) - - idx = torch.multinomial(probs, num_samples=1).clone().detach() - code = idx if code is None else torch.cat([code, idx], axis=1) - - del past - return code diff --git a/spaces/keras-io/Self-supervised-learning-SimSiam/README.md b/spaces/keras-io/Self-supervised-learning-SimSiam/README.md deleted file mode 100644 index fe36f697199cb1ddafc45f9dbc8462f8dcff692e..0000000000000000000000000000000000000000 --- a/spaces/keras-io/Self-supervised-learning-SimSiam/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SimSiam -emoji: ⚡ -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.0.17 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r18.py b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r18.py deleted file mode 100644 index 7a8db34cd547e8e667103c93585296e47a894e97..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r18.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "cosface" -config.network = "r18" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/glint360k" -config.num_classes = 360232 -config.num_image = 17091657 -config.num_epoch = 20 -config.warmup_epoch = -1 -config.decay_epoch = [8, 12, 15, 18] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kevinwang676/VoiceChangers/src/generate_facerender_batch.py b/spaces/kevinwang676/VoiceChangers/src/generate_facerender_batch.py deleted file mode 100644 index a62b6edffa41529ba828905fb86ca302a01d37cc..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/generate_facerender_batch.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import numpy as np -from PIL import Image -from skimage import io, img_as_float32, transform -import torch -import scipy.io as scio - -def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, - batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, - expression_scale=1.0, still_mode = False, preprocess='crop', size = 256): - - semantic_radius = 13 - video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0] - txt_path = os.path.splitext(coeff_path)[0] - - data={} - - img1 = Image.open(pic_path) - source_image = np.array(img1) - source_image = img_as_float32(source_image) - source_image = transform.resize(source_image, (size, size, 
3)) - source_image = source_image.transpose((2, 0, 1)) - source_image_ts = torch.FloatTensor(source_image).unsqueeze(0) - source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1) - data['source_image'] = source_image_ts - - source_semantics_dict = scio.loadmat(first_coeff_path) - generated_dict = scio.loadmat(coeff_path) - - if 'full' not in preprocess.lower(): - source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70 - generated_3dmm = generated_dict['coeff_3dmm'][:,:70] - - else: - source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70 - generated_3dmm = generated_dict['coeff_3dmm'][:,:70] - - source_semantics_new = transform_semantic_1(source_semantics, semantic_radius) - source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0) - source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1) - data['source_semantics'] = source_semantics_ts - - # target - generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale - - if 'full' in preprocess.lower(): - generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1) - - if still_mode: - generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0) - - with open(txt_path+'.txt', 'w') as f: - for coeff in generated_3dmm: - for i in coeff: - f.write(str(i)[:7] + ' '+'\t') - f.write('\n') - - target_semantics_list = [] - frame_num = generated_3dmm.shape[0] - data['frame_num'] = frame_num - for frame_idx in range(frame_num): - target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius) - target_semantics_list.append(target_semantics) - - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - target_semantics_list.append(target_semantics) - - target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1 - target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1]) - data['target_semantics_list'] = torch.FloatTensor(target_semantics_np) - data['video_name'] = video_name - data['audio_path'] = audio_path - - if input_yaw_list is not None: - yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size) - data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq) - if input_pitch_list is not None: - pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size) - data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq) - if input_roll_list is not None: - roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) - data['roll_c_seq'] = torch.FloatTensor(roll_c_seq) - - return data - -def transform_semantic_1(semantic, semantic_radius): - semantic_list = [semantic for i in range(0, semantic_radius*2+1)] - coeff_3dmm = np.concatenate(semantic_list, 0) - return coeff_3dmm.transpose(1,0) - -def transform_semantic_target(coeff_3dmm, frame_index, semantic_radius): - num_frames = coeff_3dmm.shape[0] - seq = list(range(frame_index- semantic_radius, frame_index + semantic_radius+1)) - index = [ min(max(item, 0), num_frames-1) for item in seq ] - coeff_3dmm_g = coeff_3dmm[index, :] - return coeff_3dmm_g.transpose(1,0) - -def gen_camera_pose(camera_degree_list, frame_num, batch_size): - - new_degree_list = [] - if len(camera_degree_list) == 1: - for _ in range(frame_num): - new_degree_list.append(camera_degree_list[0]) - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - 
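
transform_semantic_target above gathers a window of 2*semantic_radius+1 coefficient frames around each frame, clamping out-of-range indices to the clip boundaries. A worked example of the index arithmetic for radius 2 on a 5-frame clip:

semantic_radius = 2
num_frames = 5
frame_index = 0

seq = range(frame_index - semantic_radius, frame_index + semantic_radius + 1)
index = [min(max(i, 0), num_frames - 1) for i in seq]
print(index)  # [0, 0, 0, 1, 2] -- boundary frames are repeated instead of wrapping
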
new_degree_list.append(new_degree_list[-1]) - new_degree_np = np.array(new_degree_list).reshape(batch_size, -1) - return new_degree_np - - degree_sum = 0. - for i, degree in enumerate(camera_degree_list[1:]): - degree_sum += abs(degree-camera_degree_list[i]) - - degree_per_frame = degree_sum/(frame_num-1) - for i, degree in enumerate(camera_degree_list[1:]): - degree_last = camera_degree_list[i] - degree_step = degree_per_frame * abs(degree-degree_last)/(degree-degree_last) - new_degree_list = new_degree_list + list(np.arange(degree_last, degree, degree_step)) - if len(new_degree_list) > frame_num: - new_degree_list = new_degree_list[:frame_num] - elif len(new_degree_list) < frame_num: - for _ in range(frame_num-len(new_degree_list)): - new_degree_list.append(new_degree_list[-1]) - print(len(new_degree_list)) - print(frame_num) - - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - new_degree_list.append(new_degree_list[-1]) - new_degree_np = np.array(new_degree_list).reshape(batch_size, -1) - return new_degree_np - diff --git a/spaces/kokofixcomputers/chat-ui/src/lib/utils/trimPrefix.ts b/spaces/kokofixcomputers/chat-ui/src/lib/utils/trimPrefix.ts deleted file mode 100644 index d006e66deca639f3f4d208e77a64ba368fab00ee..0000000000000000000000000000000000000000 --- a/spaces/kokofixcomputers/chat-ui/src/lib/utils/trimPrefix.ts +++ /dev/null @@ -1,6 +0,0 @@ -export function trimPrefix(input: string, prefix: string) { - if (input.startsWith(prefix)) { - return input.slice(prefix.length); - } - return input; -} diff --git a/spaces/kristyc/mediapipe-hands/app.py b/spaces/kristyc/mediapipe-hands/app.py deleted file mode 100644 index 0bdafc803e01df0775b9fd85bc69bd9045995eae..0000000000000000000000000000000000000000 --- a/spaces/kristyc/mediapipe-hands/app.py +++ /dev/null @@ -1,147 +0,0 @@ -import gradio as gr -import mediapipe as mp -import numpy as np -import log_utils -from functools import lru_cache -import cv2 -from google.protobuf.json_format import MessageToDict - -logger = log_utils.get_logger() - -mp_hands = mp.solutions.hands -mp_hands_connections = mp.solutions.hands_connections -mp_draw = mp.solutions.drawing_utils - -connections = { - 'HAND_CONNECTIONS': mp_hands_connections.HAND_CONNECTIONS, - 'HAND_PALM_CONNECTIONS': mp_hands_connections.HAND_PALM_CONNECTIONS, - 'HAND_THUMB_CONNECTIONS': mp_hands_connections.HAND_THUMB_CONNECTIONS, - 'HAND_INDEX_FINGER_CONNECTIONS': mp_hands_connections.HAND_INDEX_FINGER_CONNECTIONS, - 'HAND_MIDDLE_FINGER_CONNECTIONS': mp_hands_connections.HAND_MIDDLE_FINGER_CONNECTIONS, - 'HAND_RING_FINGER_CONNECTIONS': mp_hands_connections.HAND_RING_FINGER_CONNECTIONS, - 'HAND_PINKY_FINGER_CONNECTIONS': mp_hands_connections.HAND_PINKY_FINGER_CONNECTIONS, -} - -@lru_cache(maxsize=10) -def get_model(static_image_mode, max_num_hands, model_complexity, min_detection_conf, min_tracking_conf): - return mp_hands.Hands( - static_image_mode=static_image_mode, - max_num_hands=max_num_hands, - model_complexity=model_complexity, - min_detection_confidence=min_detection_conf, - min_tracking_confidence=min_tracking_conf, - ) - -def draw_landmarks(model, img, selected_connections, draw_background, flip_image): - img_to_process = cv2.flip(img, 1) if flip_image else img - results = model.process(img_to_process) - output_img = img_to_process if draw_background else np.zeros_like(img_to_process) - if results.multi_hand_landmarks: - for hand_landmarks in results.multi_hand_landmarks: - mp_draw.draw_landmarks(output_img, hand_landmarks, 
connections[selected_connections]) - if flip_image: - output_img = cv2.flip(output_img, 1) - return output_img, [MessageToDict(h) for _, h in enumerate(results.multi_handedness or [])] - -def process_image( - img, - static_image_mode, - max_num_hands, - model_complexity, - min_detection_conf, - min_tracking_conf, - selected_connections, - draw_background, - flip_image, - ): - logger.info(f"Processing image with connections: {selected_connections}, draw background: {draw_background}") - model = get_model(static_image_mode, max_num_hands, model_complexity, min_detection_conf, min_tracking_conf) - img, multi_handedness = draw_landmarks(model, img, selected_connections, draw_background, flip_image) - left_hand_count = len([h for h in multi_handedness if h['classification'][0]['label'] == 'Left']) - right_hand_count = len(multi_handedness) - left_hand_count - return img, multi_handedness, left_hand_count, right_hand_count - - -demo = gr.Blocks() - -with demo: - gr.Markdown( - """ - # MediaPipe's Hand & Finger Tracking - A demo of hand and finger tracking using [Google's MediaPipe](https://google.github.io/mediapipe/solutions/hands.html). - """) - - with gr.Column(): - gr.Markdown(""" - ## Step 1: Configure the model - """) - with gr.Column(): - static_image_mode = gr.Checkbox(label="Static image mode", value=False) - gr.Textbox(show_label=False,value="If unchecked, the solution treats the input images as a video stream. It will try to detect hands in the first input images, and upon a successful detection further localizes the hand landmarks. In subsequent images, once all max_num_hands hands are detected and the corresponding hand landmarks are localized, it simply tracks those landmarks without invoking another detection until it loses track of any of the hands. This reduces latency and is ideal for processing video frames. If checked, hand detection runs on every input image, ideal for processing a batch of static, possibly unrelated, images.") - - max_num_hands = gr.Slider(label="Max number of hands to detect", value=1, minimum=1, maximum=10, step=1) - - with gr.Column(): - model_complexity = gr.Radio(label="Model complexity", choices=[0,1], value=1) - gr.Textbox(show_label=False, value="Complexity of the hand landmark model: 0 or 1. Landmark accuracy as well as inference latency generally go up with the model complexity.") - - with gr.Column(): - min_detection_conf = gr.Slider(label="Min detection confidence", value=0.5, minimum=0.0, maximum=1.0, step=0.1) - gr.Textbox(show_label=False, value="Minimum confidence value ([0.0, 1.0]) from the hand detection model for the detection to be considered successful.") - - with gr.Column(): - min_tracking_conf = gr.Slider(label="Min tracking confidence", value=0.5, minimum=0.0, maximum=1.0, step=0.1) - gr.Textbox(show_label=False, value="Minimum confidence value ([0.0, 1.0]) from the landmark-tracking model for the hand landmarks to be considered tracked successfully, or otherwise hand detection will be invoked automatically on the next input image. Setting it to a higher value can increase robustness of the solution, at the expense of a higher latency. Ignored if static_image_mode is true, where hand detection simply runs on every image.") - - gr.Markdown(""" - ## Step 2: Set processing parameters - """) - draw_background = gr.Checkbox(value=True, label="Draw background?") - flip_image = gr.Checkbox(value=True, label="Flip image? 
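
process_image above boils down to the standard MediaPipe Hands flow: build a Hands solution, feed it an RGB frame, then draw the returned landmarks. A minimal sketch of that flow outside Gradio ("hand.jpg" is a placeholder path; OpenCV loads BGR, so a conversion is needed before calling process):

import cv2
import mediapipe as mp

mp_hands = mp.solutions.hands
mp_draw = mp.solutions.drawing_utils

img_bgr = cv2.imread("hand.jpg")
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)

with mp_hands.Hands(static_image_mode=True, max_num_hands=2,
                    min_detection_confidence=0.5) as hands:
    results = hands.process(img_rgb)

if results.multi_hand_landmarks:
    for landmarks in results.multi_hand_landmarks:
        mp_draw.draw_landmarks(img_rgb, landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imwrite("hand_annotated.jpg", cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR))
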
(Note that handedness is determined assuming the input image is mirrored, i.e., taken with a front-facing/selfie camera with images flipped horizontally. If it is not the case, please swap the handedness output in the application.)") - connection_keys = list(connections.keys()) - selected_connections = gr.Dropdown( - label="Select connections to draw", - choices=connection_keys, - value=connection_keys[0], - ) - - gr.Markdown(""" - ## Step 3: Select an image - """) - with gr.Tabs(): - with gr.TabItem(label="Upload an image"): - uploaded_image = gr.Image(type="numpy", label="Input image") - example_image = gr.Examples(examples=[['examples/example-01.jpg', 1, 0.4], ['examples/example-02.jpg', 2, 0.5], ['examples/example-03.jpg', 1, 0.5]], inputs=[uploaded_image, max_num_hands, min_detection_conf]) - submit_uploaded_image = gr.Button(value="Process Image") - with gr.TabItem(label="Take a picture"): - camera_picture = gr.Image(source="webcam", type="numpy", label="Input image") - submit_camera_picture = gr.Button(value="Process Image") - - gr.Markdown(""" - ## Step 4: View results - """) - with gr.Column(): - with gr.Row(): - with gr.Column(): - left_hands = gr.Number(label="Left hands detected") - with gr.Column(): - right_hands = gr.Number(label="Right hands detected") - multi_handedness = gr.JSON(label="Raw handedness results") - processed_image = gr.Image(label="Processed image") - - gr.Markdown('visitor badge') - setting_inputs = [ - static_image_mode, - max_num_hands, - model_complexity, - min_detection_conf, - min_tracking_conf, - selected_connections, - draw_background, - flip_image, - ] - outputs = [processed_image, multi_handedness, left_hands, right_hands] - submit_uploaded_image.click(fn=process_image, inputs=[uploaded_image, *setting_inputs], outputs=outputs) - submit_camera_picture.click(fn=process_image, inputs=[camera_picture, *setting_inputs], outputs=outputs) - -demo.launch() diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/varLib/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/varLib/__init__.py deleted file mode 100644 index 86fa8d70408aed32fc0d690c3357445ffcf63880..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/varLib/__init__.py +++ /dev/null @@ -1,1334 +0,0 @@ -""" -Module for dealing with 'gvar'-style font variations, also known as run-time -interpolation. - -The ideas here are very similar to MutatorMath. There is even code to read -MutatorMath .designspace files in the varLib.designspace module. - -For now, if you run this file on a designspace file, it tries to find -ttf-interpolatable files for the masters and build a variable-font from -them. Such ttf-interpolatable and designspace files can be generated from -a Glyphs source, eg., using noto-source as an example: - - $ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs - -Then you can make a variable-font this way: - - $ fonttools varLib master_ufo/NotoSansArabic.designspace - -API *will* change in near future. 
-""" -from typing import List -from fontTools.misc.vector import Vector -from fontTools.misc.roundTools import noRound, otRound -from fontTools.misc.textTools import Tag, tostr -from fontTools.ttLib import TTFont, newTable -from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance -from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates -from fontTools.ttLib.tables.ttProgram import Program -from fontTools.ttLib.tables.TupleVariation import TupleVariation -from fontTools.ttLib.tables import otTables as ot -from fontTools.ttLib.tables.otBase import OTTableWriter -from fontTools.varLib import builder, models, varStore -from fontTools.varLib.merger import VariationMerger, COLRVariationMerger -from fontTools.varLib.mvar import MVAR_ENTRIES -from fontTools.varLib.iup import iup_delta_optimize -from fontTools.varLib.featureVars import addFeatureVariations -from fontTools.designspaceLib import DesignSpaceDocument, InstanceDescriptor -from fontTools.designspaceLib.split import splitInterpolable, splitVariableFonts -from fontTools.varLib.stat import buildVFStatTable -from fontTools.colorLib.builder import buildColrV1 -from fontTools.colorLib.unbuilder import unbuildColrV1 -from functools import partial -from collections import OrderedDict, namedtuple -import os.path -import logging -from copy import deepcopy -from pprint import pformat -from re import fullmatch -from .errors import VarLibError, VarLibValidationError - -log = logging.getLogger("fontTools.varLib") - -# This is a lib key for the designspace document. The value should be -# an OpenType feature tag, to be used as the FeatureVariations feature. -# If present, the DesignSpace flag is ignored. -FEAVAR_FEATURETAG_LIB_KEY = "com.github.fonttools.varLib.featureVarsFeatureTag" - -# -# Creation routines -# - - -def _add_fvar(font, axes, instances: List[InstanceDescriptor]): - """ - Add 'fvar' table to font. - - axes is an ordered dictionary of DesignspaceAxis objects. - - instances is list of dictionary objects with 'location', 'stylename', - and possibly 'postscriptfontname' entries. - """ - - assert axes - assert isinstance(axes, OrderedDict) - - log.info("Generating fvar") - - fvar = newTable("fvar") - nameTable = font["name"] - - for a in axes.values(): - axis = Axis() - axis.axisTag = Tag(a.tag) - # TODO Skip axes that have no variation. - axis.minValue, axis.defaultValue, axis.maxValue = ( - a.minimum, - a.default, - a.maximum, - ) - axis.axisNameID = nameTable.addMultilingualName( - a.labelNames, font, minNameID=256 - ) - axis.flags = int(a.hidden) - fvar.axes.append(axis) - - for instance in instances: - # Filter out discrete axis locations - coordinates = { - name: value for name, value in instance.location.items() if name in axes - } - - if "en" not in instance.localisedStyleName: - if not instance.styleName: - raise VarLibValidationError( - f"Instance at location '{coordinates}' must have a default English " - "style name ('stylename' attribute on the instance element or a " - "stylename element with an 'xml:lang=\"en\"' attribute)." 
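
The module docstring above builds the variable font through the fonttools varLib command line; roughly the same thing can be done through the Python API via fontTools.varLib.build (referenced further down in this module), assuming the designspace's source filenames already resolve to ttf-interpolatable masters (otherwise a master_finder callable must be passed). The paths are the placeholders from the docstring:

from fontTools.varLib import build

vf, model, master_ttfs = build("master_ufo/NotoSansArabic.designspace")
vf.save("NotoSansArabic-VF.ttf")
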
- ) - localisedStyleName = dict(instance.localisedStyleName) - localisedStyleName["en"] = tostr(instance.styleName) - else: - localisedStyleName = instance.localisedStyleName - - psname = instance.postScriptFontName - - inst = NamedInstance() - inst.subfamilyNameID = nameTable.addMultilingualName(localisedStyleName) - if psname is not None: - psname = tostr(psname) - inst.postscriptNameID = nameTable.addName(psname) - inst.coordinates = { - axes[k].tag: axes[k].map_backward(v) for k, v in coordinates.items() - } - # inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()} - fvar.instances.append(inst) - - assert "fvar" not in font - font["fvar"] = fvar - - return fvar - - -def _add_avar(font, axes): - """ - Add 'avar' table to font. - - axes is an ordered dictionary of AxisDescriptor objects. - """ - - assert axes - assert isinstance(axes, OrderedDict) - - log.info("Generating avar") - - avar = newTable("avar") - - interesting = False - for axis in axes.values(): - # Currently, some rasterizers require that the default value maps - # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment - # maps, even when the default normalization mapping for the axis - # was not modified. - # https://github.com/googlei18n/fontmake/issues/295 - # https://github.com/fonttools/fonttools/issues/1011 - # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed - curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0} - if not axis.map: - continue - - items = sorted(axis.map) - keys = [item[0] for item in items] - vals = [item[1] for item in items] - - # Current avar requirements. We don't have to enforce - # these on the designer and can deduce some ourselves, - # but for now just enforce them. - if axis.minimum != min(keys): - raise VarLibValidationError( - f"Axis '{axis.name}': there must be a mapping for the axis minimum " - f"value {axis.minimum} and it must be the lowest input mapping value." - ) - if axis.maximum != max(keys): - raise VarLibValidationError( - f"Axis '{axis.name}': there must be a mapping for the axis maximum " - f"value {axis.maximum} and it must be the highest input mapping value." - ) - if axis.default not in keys: - raise VarLibValidationError( - f"Axis '{axis.name}': there must be a mapping for the axis default " - f"value {axis.default}." - ) - # No duplicate input values (output values can be >= their preceeding value). - if len(set(keys)) != len(keys): - raise VarLibValidationError( - f"Axis '{axis.name}': All axis mapping input='...' values must be " - "unique, but we found duplicates." - ) - # Ascending values - if sorted(vals) != vals: - raise VarLibValidationError( - f"Axis '{axis.name}': mapping output values must be in ascending order." 
- ) - - keys_triple = (axis.minimum, axis.default, axis.maximum) - vals_triple = tuple(axis.map_forward(v) for v in keys_triple) - - keys = [models.normalizeValue(v, keys_triple) for v in keys] - vals = [models.normalizeValue(v, vals_triple) for v in vals] - - if all(k == v for k, v in zip(keys, vals)): - continue - interesting = True - - curve.update(zip(keys, vals)) - - assert 0.0 in curve and curve[0.0] == 0.0 - assert -1.0 not in curve or curve[-1.0] == -1.0 - assert +1.0 not in curve or curve[+1.0] == +1.0 - # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}) - - assert "avar" not in font - if not interesting: - log.info("No need for avar") - avar = None - else: - font["avar"] = avar - - return avar - - -def _add_stat(font): - # Note: this function only gets called by old code that calls `build()` - # directly. Newer code that wants to benefit from STAT data from the - # designspace should call `build_many()` - - if "STAT" in font: - return - - from ..otlLib.builder import buildStatTable - - fvarTable = font["fvar"] - axes = [dict(tag=a.axisTag, name=a.axisNameID) for a in fvarTable.axes] - buildStatTable(font, axes) - - -_MasterData = namedtuple("_MasterData", ["glyf", "hMetrics", "vMetrics"]) - - -def _add_gvar(font, masterModel, master_ttfs, tolerance=0.5, optimize=True): - if tolerance < 0: - raise ValueError("`tolerance` must be a positive number.") - - log.info("Generating gvar") - assert "gvar" not in font - gvar = font["gvar"] = newTable("gvar") - glyf = font["glyf"] - defaultMasterIndex = masterModel.reverseMapping[0] - - master_datas = [ - _MasterData( - m["glyf"], m["hmtx"].metrics, getattr(m.get("vmtx"), "metrics", None) - ) - for m in master_ttfs - ] - - for glyph in font.getGlyphOrder(): - log.debug("building gvar for glyph '%s'", glyph) - isComposite = glyf[glyph].isComposite() - - allData = [ - m.glyf._getCoordinatesAndControls(glyph, m.hMetrics, m.vMetrics) - for m in master_datas - ] - - if allData[defaultMasterIndex][1].numberOfContours != 0: - # If the default master is not empty, interpret empty non-default masters - # as missing glyphs from a sparse master - allData = [ - d if d is not None and d[1].numberOfContours != 0 else None - for d in allData - ] - - model, allData = masterModel.getSubModel(allData) - - allCoords = [d[0] for d in allData] - allControls = [d[1] for d in allData] - control = allControls[0] - if not models.allEqual(allControls): - log.warning("glyph %s has incompatible masters; skipping" % glyph) - continue - del allControls - - # Update gvar - gvar.variations[glyph] = [] - deltas = model.getDeltas( - allCoords, round=partial(GlyphCoordinates.__round__, round=round) - ) - supports = model.supports - assert len(deltas) == len(supports) - - # Prepare for IUP optimization - origCoords = deltas[0] - endPts = control.endPts - - for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])): - if all(v == 0 for v in delta.array) and not isComposite: - continue - var = TupleVariation(support, delta) - if optimize: - delta_opt = iup_delta_optimize( - delta, origCoords, endPts, tolerance=tolerance - ) - - if None in delta_opt: - """In composite glyphs, there should be one 0 entry - to make sure the gvar entry is written to the font. - - This is to work around an issue with macOS 10.14 and can be - removed once the behaviour of macOS is changed. - - https://github.com/fonttools/fonttools/issues/1381 - """ - if all(d is None for d in delta_opt): - delta_opt = [(0, 0)] + [None] * (len(delta_opt) - 1) - # Use "optimized" version only if smaller... 
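
The avar mapping above is expressed in normalized coordinates, produced by models.normalizeValue against each axis's (minimum, default, maximum) triple. A worked example for a weight axis spanning 100-900 with default 400:

from fontTools.varLib.models import normalizeValue

triple = (100, 400, 900)                     # (minimum, default, maximum)
assert normalizeValue(400, triple) == 0.0    # the default maps to 0
assert normalizeValue(900, triple) == 1.0    # the maximum maps to +1
assert normalizeValue(250, triple) == -0.5   # halfway between minimum and default
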
- var_opt = TupleVariation(support, delta_opt) - - axis_tags = sorted( - support.keys() - ) # Shouldn't matter that this is different from fvar...? - tupleData, auxData = var.compile(axis_tags) - unoptimized_len = len(tupleData) + len(auxData) - tupleData, auxData = var_opt.compile(axis_tags) - optimized_len = len(tupleData) + len(auxData) - - if optimized_len < unoptimized_len: - var = var_opt - - gvar.variations[glyph].append(var) - - -def _remove_TTHinting(font): - for tag in ("cvar", "cvt ", "fpgm", "prep"): - if tag in font: - del font[tag] - maxp = font["maxp"] - for attr in ( - "maxTwilightPoints", - "maxStorage", - "maxFunctionDefs", - "maxInstructionDefs", - "maxStackElements", - "maxSizeOfInstructions", - ): - setattr(maxp, attr, 0) - maxp.maxZones = 1 - font["glyf"].removeHinting() - # TODO: Modify gasp table to deactivate gridfitting for all ranges? - - -def _merge_TTHinting(font, masterModel, master_ttfs): - - log.info("Merging TT hinting") - assert "cvar" not in font - - # Check that the existing hinting is compatible - - # fpgm and prep table - - for tag in ("fpgm", "prep"): - all_pgms = [m[tag].program for m in master_ttfs if tag in m] - if not all_pgms: - continue - font_pgm = getattr(font.get(tag), "program", None) - if any(pgm != font_pgm for pgm in all_pgms): - log.warning( - "Masters have incompatible %s tables, hinting is discarded." % tag - ) - _remove_TTHinting(font) - return - - # glyf table - - font_glyf = font["glyf"] - master_glyfs = [m["glyf"] for m in master_ttfs] - for name, glyph in font_glyf.glyphs.items(): - all_pgms = [getattr(glyf.get(name), "program", None) for glyf in master_glyfs] - if not any(all_pgms): - continue - glyph.expand(font_glyf) - font_pgm = getattr(glyph, "program", None) - if any(pgm != font_pgm for pgm in all_pgms if pgm): - log.warning( - "Masters have incompatible glyph programs in glyph '%s', hinting is discarded." - % name - ) - # TODO Only drop hinting from this glyph. - _remove_TTHinting(font) - return - - # cvt table - - all_cvs = [Vector(m["cvt "].values) if "cvt " in m else None for m in master_ttfs] - - nonNone_cvs = models.nonNone(all_cvs) - if not nonNone_cvs: - # There is no cvt table to make a cvar table from, we're done here. - return - - if not models.allEqual(len(c) for c in nonNone_cvs): - log.warning("Masters have incompatible cvt tables, hinting is discarded.") - _remove_TTHinting(font) - return - - variations = [] - deltas, supports = masterModel.getDeltasAndSupports( - all_cvs, round=round - ) # builtin round calls into Vector.__round__, which uses builtin round as we like - for i, (delta, support) in enumerate(zip(deltas[1:], supports[1:])): - if all(v == 0 for v in delta): - continue - var = TupleVariation(support, delta) - variations.append(var) - - # We can build the cvar table now. 
- if variations: - cvar = font["cvar"] = newTable("cvar") - cvar.version = 1 - cvar.variations = variations - - -_MetricsFields = namedtuple( - "_MetricsFields", - ["tableTag", "metricsTag", "sb1", "sb2", "advMapping", "vOrigMapping"], -) - -HVAR_FIELDS = _MetricsFields( - tableTag="HVAR", - metricsTag="hmtx", - sb1="LsbMap", - sb2="RsbMap", - advMapping="AdvWidthMap", - vOrigMapping=None, -) - -VVAR_FIELDS = _MetricsFields( - tableTag="VVAR", - metricsTag="vmtx", - sb1="TsbMap", - sb2="BsbMap", - advMapping="AdvHeightMap", - vOrigMapping="VOrgMap", -) - - -def _add_HVAR(font, masterModel, master_ttfs, axisTags): - _add_VHVAR(font, masterModel, master_ttfs, axisTags, HVAR_FIELDS) - - -def _add_VVAR(font, masterModel, master_ttfs, axisTags): - _add_VHVAR(font, masterModel, master_ttfs, axisTags, VVAR_FIELDS) - - -def _add_VHVAR(font, masterModel, master_ttfs, axisTags, tableFields): - - tableTag = tableFields.tableTag - assert tableTag not in font - log.info("Generating " + tableTag) - VHVAR = newTable(tableTag) - tableClass = getattr(ot, tableTag) - vhvar = VHVAR.table = tableClass() - vhvar.Version = 0x00010000 - - glyphOrder = font.getGlyphOrder() - - # Build list of source font advance widths for each glyph - metricsTag = tableFields.metricsTag - advMetricses = [m[metricsTag].metrics for m in master_ttfs] - - # Build list of source font vertical origin coords for each glyph - if tableTag == "VVAR" and "VORG" in master_ttfs[0]: - vOrigMetricses = [m["VORG"].VOriginRecords for m in master_ttfs] - defaultYOrigs = [m["VORG"].defaultVertOriginY for m in master_ttfs] - vOrigMetricses = list(zip(vOrigMetricses, defaultYOrigs)) - else: - vOrigMetricses = None - - metricsStore, advanceMapping, vOrigMapping = _get_advance_metrics( - font, - masterModel, - master_ttfs, - axisTags, - glyphOrder, - advMetricses, - vOrigMetricses, - ) - - vhvar.VarStore = metricsStore - if advanceMapping is None: - setattr(vhvar, tableFields.advMapping, None) - else: - setattr(vhvar, tableFields.advMapping, advanceMapping) - if vOrigMapping is not None: - setattr(vhvar, tableFields.vOrigMapping, vOrigMapping) - setattr(vhvar, tableFields.sb1, None) - setattr(vhvar, tableFields.sb2, None) - - font[tableTag] = VHVAR - return - - -def _get_advance_metrics( - font, - masterModel, - master_ttfs, - axisTags, - glyphOrder, - advMetricses, - vOrigMetricses=None, -): - - vhAdvanceDeltasAndSupports = {} - vOrigDeltasAndSupports = {} - for glyph in glyphOrder: - vhAdvances = [ - metrics[glyph][0] if glyph in metrics else None for metrics in advMetricses - ] - vhAdvanceDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports( - vhAdvances, round=round - ) - - singleModel = models.allEqual(id(v[1]) for v in vhAdvanceDeltasAndSupports.values()) - - if vOrigMetricses: - singleModel = False - for glyph in glyphOrder: - # We need to supply a vOrigs tuple with non-None default values - # for each glyph. vOrigMetricses contains values only for those - # glyphs which have a non-default vOrig. 
- vOrigs = [ - metrics[glyph] if glyph in metrics else defaultVOrig - for metrics, defaultVOrig in vOrigMetricses - ] - vOrigDeltasAndSupports[glyph] = masterModel.getDeltasAndSupports( - vOrigs, round=round - ) - - directStore = None - if singleModel: - # Build direct mapping - supports = next(iter(vhAdvanceDeltasAndSupports.values()))[1][1:] - varTupleList = builder.buildVarRegionList(supports, axisTags) - varTupleIndexes = list(range(len(supports))) - varData = builder.buildVarData(varTupleIndexes, [], optimize=False) - for glyphName in glyphOrder: - varData.addItem(vhAdvanceDeltasAndSupports[glyphName][0], round=noRound) - varData.optimize() - directStore = builder.buildVarStore(varTupleList, [varData]) - - # Build optimized indirect mapping - storeBuilder = varStore.OnlineVarStoreBuilder(axisTags) - advMapping = {} - for glyphName in glyphOrder: - deltas, supports = vhAdvanceDeltasAndSupports[glyphName] - storeBuilder.setSupports(supports) - advMapping[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound) - - if vOrigMetricses: - vOrigMap = {} - for glyphName in glyphOrder: - deltas, supports = vOrigDeltasAndSupports[glyphName] - storeBuilder.setSupports(supports) - vOrigMap[glyphName] = storeBuilder.storeDeltas(deltas, round=noRound) - - indirectStore = storeBuilder.finish() - mapping2 = indirectStore.optimize(use_NO_VARIATION_INDEX=False) - advMapping = [mapping2[advMapping[g]] for g in glyphOrder] - advanceMapping = builder.buildVarIdxMap(advMapping, glyphOrder) - - if vOrigMetricses: - vOrigMap = [mapping2[vOrigMap[g]] for g in glyphOrder] - - useDirect = False - vOrigMapping = None - if directStore: - # Compile both, see which is more compact - - writer = OTTableWriter() - directStore.compile(writer, font) - directSize = len(writer.getAllData()) - - writer = OTTableWriter() - indirectStore.compile(writer, font) - advanceMapping.compile(writer, font) - indirectSize = len(writer.getAllData()) - - useDirect = directSize < indirectSize - - if useDirect: - metricsStore = directStore - advanceMapping = None - else: - metricsStore = indirectStore - if vOrigMetricses: - vOrigMapping = builder.buildVarIdxMap(vOrigMap, glyphOrder) - - return metricsStore, advanceMapping, vOrigMapping - - -def _add_MVAR(font, masterModel, master_ttfs, axisTags): - - log.info("Generating MVAR") - - store_builder = varStore.OnlineVarStoreBuilder(axisTags) - - records = [] - lastTableTag = None - fontTable = None - tables = None - # HACK: we need to special-case post.underlineThickness and .underlinePosition - # and unilaterally/arbitrarily define a sentinel value to distinguish the case - # when a post table is present in a given master simply because that's where - # the glyph names in TrueType must be stored, but the underline values are not - # meant to be used for building MVAR's deltas. The value of -0x8000 (-36768) - # the minimum FWord (int16) value, was chosen for its unlikelyhood to appear - # in real-world underline position/thickness values. - specialTags = {"unds": -0x8000, "undo": -0x8000} - - for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]): - # For each tag, fetch the associated table from all fonts (or not when we are - # still looking at a tag from the same tables) and set up the variation model - # for them. 
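As a quick sanity check once a variable font has been built, the records that the MVAR loop below emits can be inspected from the finished binary. A small sketch, assuming fontTools is installed and "MyVF.ttf" is a placeholder for a font produced by this module:

from fontTools.ttLib import TTFont

vf = TTFont("MyVF.ttf")  # placeholder path
if "MVAR" in vf:
    mvar = vf["MVAR"].table
    for rec in mvar.ValueRecord:
        # Each record ties a metrics value tag (e.g. 'unds') to an index
        # into the ItemVariationStore that holds its deltas.
        print(rec.ValueTag, rec.VarIdx)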
- if tableTag != lastTableTag: - tables = fontTable = None - if tableTag in font: - fontTable = font[tableTag] - tables = [] - for master in master_ttfs: - if tableTag not in master or ( - tag in specialTags - and getattr(master[tableTag], itemName) == specialTags[tag] - ): - tables.append(None) - else: - tables.append(master[tableTag]) - model, tables = masterModel.getSubModel(tables) - store_builder.setModel(model) - lastTableTag = tableTag - - if tables is None: # Tag not applicable to the master font. - continue - - # TODO support gasp entries - - master_values = [getattr(table, itemName) for table in tables] - if models.allEqual(master_values): - base, varIdx = master_values[0], None - else: - base, varIdx = store_builder.storeMasters(master_values) - setattr(fontTable, itemName, base) - - if varIdx is None: - continue - log.info(" %s: %s.%s %s", tag, tableTag, itemName, master_values) - rec = ot.MetricsValueRecord() - rec.ValueTag = tag - rec.VarIdx = varIdx - records.append(rec) - - assert "MVAR" not in font - if records: - store = store_builder.finish() - # Optimize - mapping = store.optimize() - for rec in records: - rec.VarIdx = mapping[rec.VarIdx] - - MVAR = font["MVAR"] = newTable("MVAR") - mvar = MVAR.table = ot.MVAR() - mvar.Version = 0x00010000 - mvar.Reserved = 0 - mvar.VarStore = store - # XXX these should not be hard-coded but computed automatically - mvar.ValueRecordSize = 8 - mvar.ValueRecordCount = len(records) - mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag) - - -def _add_BASE(font, masterModel, master_ttfs, axisTags): - - log.info("Generating BASE") - - merger = VariationMerger(masterModel, axisTags, font) - merger.mergeTables(font, master_ttfs, ["BASE"]) - store = merger.store_builder.finish() - - if not store: - return - base = font["BASE"].table - assert base.Version == 0x00010000 - base.Version = 0x00010001 - base.VarStore = store - - -def _merge_OTL(font, model, master_fonts, axisTags): - - log.info("Merging OpenType Layout tables") - merger = VariationMerger(model, axisTags, font) - - merger.mergeTables(font, master_fonts, ["GSUB", "GDEF", "GPOS"]) - store = merger.store_builder.finish() - if not store: - return - try: - GDEF = font["GDEF"].table - assert GDEF.Version <= 0x00010002 - except KeyError: - font["GDEF"] = newTable("GDEF") - GDEFTable = font["GDEF"] = newTable("GDEF") - GDEF = GDEFTable.table = ot.GDEF() - GDEF.GlyphClassDef = None - GDEF.AttachList = None - GDEF.LigCaretList = None - GDEF.MarkAttachClassDef = None - GDEF.MarkGlyphSetsDef = None - - GDEF.Version = 0x00010003 - GDEF.VarStore = store - - # Optimize - varidx_map = store.optimize() - GDEF.remap_device_varidxes(varidx_map) - if "GPOS" in font: - font["GPOS"].table.remap_device_varidxes(varidx_map) - - -def _add_GSUB_feature_variations(font, axes, internal_axis_supports, rules, featureTag): - def normalize(name, value): - return models.normalizeLocation({name: value}, internal_axis_supports)[name] - - log.info("Generating GSUB FeatureVariations") - - axis_tags = {name: axis.tag for name, axis in axes.items()} - - conditional_subs = [] - for rule in rules: - - region = [] - for conditions in rule.conditionSets: - space = {} - for condition in conditions: - axis_name = condition["name"] - if condition["minimum"] is not None: - minimum = normalize(axis_name, condition["minimum"]) - else: - minimum = -1.0 - if condition["maximum"] is not None: - maximum = normalize(axis_name, condition["maximum"]) - else: - maximum = 1.0 - tag = axis_tags[axis_name] - space[tag] = (minimum, 
maximum) - region.append(space) - - subs = {k: v for k, v in rule.subs} - - conditional_subs.append((region, subs)) - - addFeatureVariations(font, conditional_subs, featureTag) - - -_DesignSpaceData = namedtuple( - "_DesignSpaceData", - [ - "axes", - "internal_axis_supports", - "base_idx", - "normalized_master_locs", - "masters", - "instances", - "rules", - "rulesProcessingLast", - "lib", - ], -) - - -def _add_CFF2(varFont, model, master_fonts): - from .cff import merge_region_fonts - - glyphOrder = varFont.getGlyphOrder() - if "CFF2" not in varFont: - from .cff import convertCFFtoCFF2 - - convertCFFtoCFF2(varFont) - ordered_fonts_list = model.reorderMasters(master_fonts, model.reverseMapping) - # re-ordering the master list simplifies building the CFF2 data item lists. - merge_region_fonts(varFont, model, ordered_fonts_list, glyphOrder) - - -def _add_COLR(font, model, master_fonts, axisTags, colr_layer_reuse=True): - merger = COLRVariationMerger( - model, axisTags, font, allowLayerReuse=colr_layer_reuse - ) - merger.mergeTables(font, master_fonts) - store = merger.store_builder.finish() - - colr = font["COLR"].table - if store: - mapping = store.optimize() - colr.VarStore = store - # don't add DeltaSetIndexMap for identity mapping - colr.VarIndexMap = None - varIdxes = [mapping[v] for v in merger.varIdxes] - if any(i != varIdxes[i] for i in range(len(varIdxes))): - colr.VarIndexMap = builder.buildDeltaSetIndexMap(varIdxes) - - -def load_designspace(designspace): - # TODO: remove this and always assume 'designspace' is a DesignSpaceDocument, - # never a file path, as that's already handled by caller - if hasattr(designspace, "sources"): # Assume a DesignspaceDocument - ds = designspace - else: # Assume a file path - ds = DesignSpaceDocument.fromfile(designspace) - - masters = ds.sources - if not masters: - raise VarLibValidationError("Designspace must have at least one source.") - instances = ds.instances - - # TODO: Use fontTools.designspaceLib.tagForAxisName instead. - standard_axis_map = OrderedDict( - [ - ("weight", ("wght", {"en": "Weight"})), - ("width", ("wdth", {"en": "Width"})), - ("slant", ("slnt", {"en": "Slant"})), - ("optical", ("opsz", {"en": "Optical Size"})), - ("italic", ("ital", {"en": "Italic"})), - ] - ) - - # Setup axes - if not ds.axes: - raise VarLibValidationError(f"Designspace must have at least one axis.") - - axes = OrderedDict() - for axis_index, axis in enumerate(ds.axes): - axis_name = axis.name - if not axis_name: - if not axis.tag: - raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.") - axis_name = axis.name = axis.tag - - if axis_name in standard_axis_map: - if axis.tag is None: - axis.tag = standard_axis_map[axis_name][0] - if not axis.labelNames: - axis.labelNames.update(standard_axis_map[axis_name][1]) - else: - if not axis.tag: - raise VarLibValidationError(f"Axis at index {axis_index} needs a tag.") - if not axis.labelNames: - axis.labelNames["en"] = tostr(axis_name) - - axes[axis_name] = axis - log.info("Axes:\n%s", pformat([axis.asdict() for axis in axes.values()])) - - # Check all master and instance locations are valid and fill in defaults - for obj in masters + instances: - obj_name = obj.name or obj.styleName or "" - loc = obj.getFullDesignLocation(ds) - obj.designLocation = loc - if loc is None: - raise VarLibValidationError( - f"Source or instance '{obj_name}' has no location." 
- ) - for axis_name in loc.keys(): - if axis_name not in axes: - raise VarLibValidationError( - f"Location axis '{axis_name}' unknown for '{obj_name}'." - ) - for axis_name, axis in axes.items(): - v = axis.map_backward(loc[axis_name]) - if not (axis.minimum <= v <= axis.maximum): - raise VarLibValidationError( - f"Source or instance '{obj_name}' has out-of-range location " - f"for axis '{axis_name}': is mapped to {v} but must be in " - f"mapped range [{axis.minimum}..{axis.maximum}] (NOTE: all " - "values are in user-space)." - ) - - # Normalize master locations - - internal_master_locs = [o.getFullDesignLocation(ds) for o in masters] - log.info("Internal master locations:\n%s", pformat(internal_master_locs)) - - # TODO This mapping should ideally be moved closer to logic in _add_fvar/avar - internal_axis_supports = {} - for axis in axes.values(): - triple = (axis.minimum, axis.default, axis.maximum) - internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple] - log.info("Internal axis supports:\n%s", pformat(internal_axis_supports)) - - normalized_master_locs = [ - models.normalizeLocation(m, internal_axis_supports) - for m in internal_master_locs - ] - log.info("Normalized master locations:\n%s", pformat(normalized_master_locs)) - - # Find base master - base_idx = None - for i, m in enumerate(normalized_master_locs): - if all(v == 0 for v in m.values()): - if base_idx is not None: - raise VarLibValidationError( - "More than one base master found in Designspace." - ) - base_idx = i - if base_idx is None: - raise VarLibValidationError( - "Base master not found; no master at default location?" - ) - log.info("Index of base master: %s", base_idx) - - return _DesignSpaceData( - axes, - internal_axis_supports, - base_idx, - normalized_master_locs, - masters, - instances, - ds.rules, - ds.rulesProcessingLast, - ds.lib, - ) - - -# https://docs.microsoft.com/en-us/typography/opentype/spec/os2#uswidthclass -WDTH_VALUE_TO_OS2_WIDTH_CLASS = { - 50: 1, - 62.5: 2, - 75: 3, - 87.5: 4, - 100: 5, - 112.5: 6, - 125: 7, - 150: 8, - 200: 9, -} - - -def set_default_weight_width_slant(font, location): - if "OS/2" in font: - if "wght" in location: - weight_class = otRound(max(1, min(location["wght"], 1000))) - if font["OS/2"].usWeightClass != weight_class: - log.info("Setting OS/2.usWeightClass = %s", weight_class) - font["OS/2"].usWeightClass = weight_class - - if "wdth" in location: - # map 'wdth' axis (50..200) to OS/2.usWidthClass (1..9), rounding to closest - widthValue = min(max(location["wdth"], 50), 200) - widthClass = otRound( - models.piecewiseLinearMap(widthValue, WDTH_VALUE_TO_OS2_WIDTH_CLASS) - ) - if font["OS/2"].usWidthClass != widthClass: - log.info("Setting OS/2.usWidthClass = %s", widthClass) - font["OS/2"].usWidthClass = widthClass - - if "slnt" in location and "post" in font: - italicAngle = max(-90, min(location["slnt"], 90)) - if font["post"].italicAngle != italicAngle: - log.info("Setting post.italicAngle = %s", italicAngle) - font["post"].italicAngle = italicAngle - - -def build_many( - designspace: DesignSpaceDocument, - master_finder=lambda s: s, - exclude=[], - optimize=True, - skip_vf=lambda vf_name: False, - colr_layer_reuse=True, -): - """ - Build variable fonts from a designspace file, version 5 which can define - several VFs, or version 4 which has implicitly one VF covering the whole doc. - - If master_finder is set, it should be a callable that takes master - filename as found in designspace file and map it to master font - binary as to be opened (eg. 
.ttf or .otf). - - skip_vf can be used to skip building some of the variable fonts defined in - the input designspace. It's a predicate that takes as argument the name - of the variable font and returns `bool`. - - Always returns a Dict[str, TTFont] keyed by VariableFontDescriptor.name - """ - res = {} - # varLib.build (used further below) by default only builds an incomplete 'STAT' - # with an empty AxisValueArray--unless the VF inherited 'STAT' from its base master. - # Designspace version 5 can also be used to define 'STAT' labels or customize - # axes ordering, etc. To avoid overwriting a pre-existing 'STAT' or redoing the - # same work twice, here we check if designspace contains any 'STAT' info before - # proceeding to call buildVFStatTable for each VF. - # https://github.com/fonttools/fonttools/pull/3024 - # https://github.com/fonttools/fonttools/issues/3045 - doBuildStatFromDSv5 = ( - "STAT" not in exclude - and designspace.formatTuple >= (5, 0) - and ( - any(a.axisLabels or a.axisOrdering is not None for a in designspace.axes) - or designspace.locationLabels - ) - ) - for _location, subDoc in splitInterpolable(designspace): - for name, vfDoc in splitVariableFonts(subDoc): - if skip_vf(name): - log.debug(f"Skipping variable TTF font: {name}") - continue - vf = build( - vfDoc, - master_finder, - exclude=exclude, - optimize=optimize, - colr_layer_reuse=colr_layer_reuse, - )[0] - if doBuildStatFromDSv5: - buildVFStatTable(vf, designspace, name) - res[name] = vf - return res - - -def build( - designspace, - master_finder=lambda s: s, - exclude=[], - optimize=True, - colr_layer_reuse=True, -): - """ - Build variation font from a designspace file. - - If master_finder is set, it should be a callable that takes master - filename as found in designspace file and map it to master font - binary as to be opened (eg. .ttf or .otf). - """ - if hasattr(designspace, "sources"): # Assume a DesignspaceDocument - pass - else: # Assume a file path - designspace = DesignSpaceDocument.fromfile(designspace) - - ds = load_designspace(designspace) - log.info("Building variable font") - - log.info("Loading master fonts") - master_fonts = load_masters(designspace, master_finder) - - # TODO: 'master_ttfs' is unused except for return value, remove later - master_ttfs = [] - for master in master_fonts: - try: - master_ttfs.append(master.reader.file.name) - except AttributeError: - master_ttfs.append(None) # in-memory fonts have no path - - # Copy the base master to work from it - vf = deepcopy(master_fonts[ds.base_idx]) - - if "DSIG" in vf: - del vf["DSIG"] - - # TODO append masters as named-instances as well; needs .designspace change. - fvar = _add_fvar(vf, ds.axes, ds.instances) - if "STAT" not in exclude: - _add_stat(vf) - if "avar" not in exclude: - _add_avar(vf, ds.axes) - - # Map from axis names to axis tags... - normalized_master_locs = [ - {ds.axes[k].tag: v for k, v in loc.items()} for loc in ds.normalized_master_locs - ] - # From here on, we use fvar axes only - axisTags = [axis.axisTag for axis in fvar.axes] - - # Assume single-model for now. 
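For context, a rough sketch of how the build()/build_many() entry points documented above are typically driven; the designspace path, the master-file mapping and the excluded table are all placeholders:

from fontTools.designspaceLib import DesignSpaceDocument
from fontTools import varLib

# Map a source filename from the designspace to the actual binary master.
def find_master(path):
    return path.replace(".ufo", ".ttf")  # placeholder mapping

ds = DesignSpaceDocument.fromfile("MyFamily.designspace")  # placeholder path

# build() returns (variable_font, model, master_ttf_paths); usually only the
# font itself is needed. exclude/optimize mirror the keyword arguments above.
vf, _model, _masters = varLib.build(ds, find_master, exclude=["MVAR"], optimize=True)
vf.save("MyFamily-VF.ttf")

# For a Designspace v5 document that defines several variable fonts,
# build_many() returns a dict keyed by variable-font name instead:
# vfs = varLib.build_many(ds, find_master)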
- model = models.VariationModel(normalized_master_locs, axisOrder=axisTags) - assert 0 == model.mapping[ds.base_idx] - - log.info("Building variations tables") - if "BASE" not in exclude and "BASE" in vf: - _add_BASE(vf, model, master_fonts, axisTags) - if "MVAR" not in exclude: - _add_MVAR(vf, model, master_fonts, axisTags) - if "HVAR" not in exclude: - _add_HVAR(vf, model, master_fonts, axisTags) - if "VVAR" not in exclude and "vmtx" in vf: - _add_VVAR(vf, model, master_fonts, axisTags) - if "GDEF" not in exclude or "GPOS" not in exclude: - _merge_OTL(vf, model, master_fonts, axisTags) - if "gvar" not in exclude and "glyf" in vf: - _add_gvar(vf, model, master_fonts, optimize=optimize) - if "cvar" not in exclude and "glyf" in vf: - _merge_TTHinting(vf, model, master_fonts) - if "GSUB" not in exclude and ds.rules: - featureTag = ds.lib.get( - FEAVAR_FEATURETAG_LIB_KEY, "rclt" if ds.rulesProcessingLast else "rvrn" - ) - _add_GSUB_feature_variations( - vf, ds.axes, ds.internal_axis_supports, ds.rules, featureTag - ) - if "CFF2" not in exclude and ("CFF " in vf or "CFF2" in vf): - _add_CFF2(vf, model, master_fonts) - if "post" in vf: - # set 'post' to format 2 to keep the glyph names dropped from CFF2 - post = vf["post"] - if post.formatType != 2.0: - post.formatType = 2.0 - post.extraNames = [] - post.mapping = {} - if "COLR" not in exclude and "COLR" in vf and vf["COLR"].version > 0: - _add_COLR(vf, model, master_fonts, axisTags, colr_layer_reuse) - - set_default_weight_width_slant( - vf, location={axis.axisTag: axis.defaultValue for axis in vf["fvar"].axes} - ) - - for tag in exclude: - if tag in vf: - del vf[tag] - - # TODO: Only return vf for 4.0+, the rest is unused. - return vf, model, master_ttfs - - -def _open_font(path, master_finder=lambda s: s): - # load TTFont masters from given 'path': this can be either a .TTX or an - # OpenType binary font; or if neither of these, try use the 'master_finder' - # callable to resolve the path to a valid .TTX or OpenType font binary. - from fontTools.ttx import guessFileType - - master_path = os.path.normpath(path) - tp = guessFileType(master_path) - if tp is None: - # not an OpenType binary/ttx, fall back to the master finder. - master_path = master_finder(master_path) - tp = guessFileType(master_path) - if tp in ("TTX", "OTX"): - font = TTFont() - font.importXML(master_path) - elif tp in ("TTF", "OTF", "WOFF", "WOFF2"): - font = TTFont(master_path) - else: - raise VarLibValidationError("Invalid master path: %r" % master_path) - return font - - -def load_masters(designspace, master_finder=lambda s: s): - """Ensure that all SourceDescriptor.font attributes have an appropriate TTFont - object loaded, or else open TTFont objects from the SourceDescriptor.path - attributes. - - The paths can point to either an OpenType font, a TTX file, or a UFO. In the - latter case, use the provided master_finder callable to map from UFO paths to - the respective master font binaries (e.g. .ttf, .otf or .ttx). - - Return list of master TTFont objects in the same order they are listed in the - DesignSpaceDocument. - """ - for master in designspace.sources: - # If a SourceDescriptor has a layer name, demand that the compiled TTFont - # be supplied by the caller. This spares us from modifying MasterFinder. - if master.layerName and master.font is None: - raise VarLibValidationError( - f"Designspace source '{master.name or ''}' specified a " - "layer name but lacks the required TTFont object in the 'font' " - "attribute." 
- ) - - return designspace.loadSourceFonts(_open_font, master_finder=master_finder) - - -class MasterFinder(object): - def __init__(self, template): - self.template = template - - def __call__(self, src_path): - fullname = os.path.abspath(src_path) - dirname, basename = os.path.split(fullname) - stem, ext = os.path.splitext(basename) - path = self.template.format( - fullname=fullname, - dirname=dirname, - basename=basename, - stem=stem, - ext=ext, - ) - return os.path.normpath(path) - - -def main(args=None): - """Build variable fonts from a designspace file and masters""" - from argparse import ArgumentParser - from fontTools import configLogger - - parser = ArgumentParser(prog="varLib", description=main.__doc__) - parser.add_argument("designspace") - output_group = parser.add_mutually_exclusive_group() - output_group.add_argument( - "-o", metavar="OUTPUTFILE", dest="outfile", default=None, help="output file" - ) - output_group.add_argument( - "-d", - "--output-dir", - metavar="OUTPUTDIR", - default=None, - help="output dir (default: same as input designspace file)", - ) - parser.add_argument( - "-x", - metavar="TAG", - dest="exclude", - action="append", - default=[], - help="exclude table", - ) - parser.add_argument( - "--disable-iup", - dest="optimize", - action="store_false", - help="do not perform IUP optimization", - ) - parser.add_argument( - "--no-colr-layer-reuse", - dest="colr_layer_reuse", - action="store_false", - help="do not rebuild variable COLR table to optimize COLR layer reuse", - ) - parser.add_argument( - "--master-finder", - default="master_ttf_interpolatable/{stem}.ttf", - help=( - "templated string used for finding binary font " - "files given the source file names defined in the " - "designspace document. The following special strings " - "are defined: {fullname} is the absolute source file " - "name; {basename} is the file name without its " - "directory; {stem} is the basename without the file " - "extension; {ext} is the source file extension; " - "{dirname} is the directory of the absolute file " - 'name. The default value is "%(default)s".' - ), - ) - parser.add_argument( - "--variable-fonts", - default=".*", - metavar="VF_NAME", - help=( - "Filter the list of variable fonts produced from the input " - "Designspace v5 file. By default all listed variable fonts are " - "generated. To generate a specific variable font (or variable fonts) " - 'that match a given "name" attribute, you can pass as argument ' - "the full name or a regular expression. E.g.: --variable-fonts " - '"MyFontVF_WeightOnly"; or --variable-fonts "MyFontVFItalic_.*".' - ), - ) - logging_group = parser.add_mutually_exclusive_group(required=False) - logging_group.add_argument( - "-v", "--verbose", action="store_true", help="Run more verbosely." - ) - logging_group.add_argument( - "-q", "--quiet", action="store_true", help="Turn verbosity off." - ) - options = parser.parse_args(args) - - configLogger( - level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO") - ) - - designspace_filename = options.designspace - designspace = DesignSpaceDocument.fromfile(designspace_filename) - - vf_descriptors = designspace.getVariableFonts() - if not vf_descriptors: - parser.error(f"No variable fonts in given designspace {designspace.path!r}") - - vfs_to_build = [] - for vf in vf_descriptors: - # Skip variable fonts that do not match the user's inclusion regex if given. 
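The --master-finder template described in the help text above is expanded by the MasterFinder class defined earlier in this module; a quick illustration with a placeholder source path:

from fontTools.varLib import MasterFinder

finder = MasterFinder("master_ttf_interpolatable/{stem}.ttf")

# 'sources/MyFamily-Bold.ufo' stands in for a source path from the designspace.
print(finder("sources/MyFamily-Bold.ufo"))
# -> 'master_ttf_interpolatable/MyFamily-Bold.ttf'
# Note the result stays relative to the current directory unless the template
# uses {dirname} to anchor it next to the source files.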
- if not fullmatch(options.variable_fonts, vf.name): - continue - vfs_to_build.append(vf) - - if not vfs_to_build: - parser.error(f"No variable fonts matching {options.variable_fonts!r}") - - if options.outfile is not None and len(vfs_to_build) > 1: - parser.error( - "can't specify -o because there are multiple VFs to build; " - "use --output-dir, or select a single VF with --variable-fonts" - ) - - output_dir = options.output_dir - if output_dir is None: - output_dir = os.path.dirname(designspace_filename) - - vf_name_to_output_path = {} - if len(vfs_to_build) == 1 and options.outfile is not None: - vf_name_to_output_path[vfs_to_build[0].name] = options.outfile - else: - for vf in vfs_to_build: - filename = vf.filename if vf.filename is not None else vf.name + ".{ext}" - vf_name_to_output_path[vf.name] = os.path.join(output_dir, filename) - - finder = MasterFinder(options.master_finder) - - vfs = build_many( - designspace, - finder, - exclude=options.exclude, - optimize=options.optimize, - colr_layer_reuse=options.colr_layer_reuse, - ) - - for vf_name, vf in vfs.items(): - ext = "otf" if vf.sfntVersion == "OTTO" else "ttf" - output_path = vf_name_to_output_path[vf_name].format(ext=ext) - output_dir = os.path.dirname(output_path) - if output_dir: - os.makedirs(output_dir, exist_ok=True) - log.info("Saving variation font %s", output_path) - vf.save(output_path) - - -if __name__ == "__main__": - import sys - - if len(sys.argv) > 1: - sys.exit(main()) - import doctest - - sys.exit(doctest.testmod().failed) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/caching.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/caching.py deleted file mode 100644 index 828a670618ba015fd571ae04c0d86d585733aebc..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fsspec/caching.py +++ /dev/null @@ -1,805 +0,0 @@ -import collections -import functools -import io -import logging -import math -import os -import threading -import warnings -from concurrent.futures import ThreadPoolExecutor - -logger = logging.getLogger("fsspec") - - -class BaseCache(object): - """Pass-though cache: doesn't keep anything, calls every time - - Acts as base class for other cachers - - Parameters - ---------- - blocksize: int - How far to read ahead in numbers of bytes - fetcher: func - Function of the form f(start, end) which gets bytes from remote as - specified - size: int - How big this file is - """ - - name = "none" - - def __init__(self, blocksize, fetcher, size): - self.blocksize = blocksize - self.fetcher = fetcher - self.size = size - - def _fetch(self, start, stop): - if start is None: - start = 0 - if stop is None: - stop = self.size - if start >= self.size or start >= stop: - return b"" - return self.fetcher(start, stop) - - -class MMapCache(BaseCache): - """memory-mapped sparse file cache - - Opens temporary file, which is filled blocks-wise when data is requested. - Ensure there is enough disc space in the temporary location. 
- - This cache method might only work on posix - """ - - name = "mmap" - - def __init__(self, blocksize, fetcher, size, location=None, blocks=None): - super().__init__(blocksize, fetcher, size) - self.blocks = set() if blocks is None else blocks - self.location = location - self.cache = self._makefile() - - def _makefile(self): - import mmap - import tempfile - - if self.size == 0: - return bytearray() - - # posix version - if self.location is None or not os.path.exists(self.location): - if self.location is None: - fd = tempfile.TemporaryFile() - self.blocks = set() - else: - fd = io.open(self.location, "wb+") - fd.seek(self.size - 1) - fd.write(b"1") - fd.flush() - else: - fd = io.open(self.location, "rb+") - - return mmap.mmap(fd.fileno(), self.size) - - def _fetch(self, start, end): - logger.debug(f"MMap cache fetching {start}-{end}") - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - start_block = start // self.blocksize - end_block = end // self.blocksize - need = [i for i in range(start_block, end_block + 1) if i not in self.blocks] - while need: - # TODO: not a for loop so we can consolidate blocks later to - # make fewer fetch calls; this could be parallel - i = need.pop(0) - sstart = i * self.blocksize - send = min(sstart + self.blocksize, self.size) - logger.debug(f"MMap get block #{i} ({sstart}-{send}") - self.cache[sstart:send] = self.fetcher(sstart, send) - self.blocks.add(i) - - return self.cache[start:end] - - def __getstate__(self): - state = self.__dict__.copy() - # Remove the unpicklable entries. - del state["cache"] - return state - - def __setstate__(self, state): - # Restore instance attributes - self.__dict__.update(state) - self.cache = self._makefile() - - -class ReadAheadCache(BaseCache): - """Cache which reads only when we get beyond a block of data - - This is a much simpler version of BytesCache, and does not attempt to - fill holes in the cache or keep fragments alive. It is best suited to - many small reads in a sequential order (e.g., reading lines from a file). - """ - - name = "readahead" - - def __init__(self, blocksize, fetcher, size): - super().__init__(blocksize, fetcher, size) - self.cache = b"" - self.start = 0 - self.end = 0 - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None or end > self.size: - end = self.size - if start >= self.size or start >= end: - return b"" - l = end - start - if start >= self.start and end <= self.end: - # cache hit - return self.cache[start - self.start : end - self.start] - elif self.start <= start < self.end: - # partial hit - part = self.cache[start - self.start :] - l -= len(part) - start = self.end - else: - # miss - part = b"" - end = min(self.size, end + self.blocksize) - self.cache = self.fetcher(start, end) # new block replaces old - self.start = start - self.end = self.start + len(self.cache) - return part + self.cache[:l] - - -class FirstChunkCache(BaseCache): - """Caches the first block of a file only - - This may be useful for file types where the metadata is stored in the header, - but is randomly accessed. 
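To make the blocksize/fetcher/size contract concrete, here is a small self-contained sketch that serves reads from an in-memory bytes object; in real use the fetcher is typically a bound method issuing HTTP or S3 range requests. It assumes fsspec is installed so the class can be imported from fsspec.caching:

from fsspec.caching import ReadAheadCache

data = bytes(range(256)) * 64  # stand-in for a 16 KiB remote file

def fetcher(start, end):
    # In fsspec this would perform a remote range request.
    return data[start:end]

cache = ReadAheadCache(blocksize=4096, fetcher=fetcher, size=len(data))

assert cache._fetch(0, 10) == data[:10]     # miss: fetches one block ahead
assert cache._fetch(10, 20) == data[10:20]  # served from the cached block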
- """ - - name = "first" - - def __init__(self, blocksize, fetcher, size): - super().__init__(blocksize, fetcher, size) - self.cache = None - - def _fetch(self, start, end): - start = start or 0 - end = end or self.size - if start < self.blocksize: - if self.cache is None: - if end > self.blocksize: - data = self.fetcher(0, end) - self.cache = data[: self.blocksize] - return data[start:] - self.cache = self.fetcher(0, self.blocksize) - part = self.cache[start:end] - if end > self.blocksize: - part += self.fetcher(self.blocksize, end) - return part - else: - return self.fetcher(start, end) - - -class BlockCache(BaseCache): - """ - Cache holding memory as a set of blocks. - - Requests are only ever made ``blocksize`` at a time, and are - stored in an LRU cache. The least recently accessed block is - discarded when more than ``maxblocks`` are stored. - - Parameters - ---------- - blocksize : int - The number of bytes to store in each block. - Requests are only ever made for ``blocksize``, so this - should balance the overhead of making a request against - the granularity of the blocks. - fetcher : Callable - size : int - The total size of the file being cached. - maxblocks : int - The maximum number of blocks to cache for. The maximum memory - use for this cache is then ``blocksize * maxblocks``. - """ - - name = "blockcache" - - def __init__(self, blocksize, fetcher, size, maxblocks=32): - super().__init__(blocksize, fetcher, size) - self.nblocks = math.ceil(size / blocksize) - self.maxblocks = maxblocks - self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block) - - def __repr__(self): - return "".format( - self.blocksize, self.size, self.nblocks - ) - - def cache_info(self): - """ - The statistics on the block cache. - - Returns - ------- - NamedTuple - Returned directly from the LRU Cache used internally. - """ - return self._fetch_block_cached.cache_info() - - def __getstate__(self): - state = self.__dict__ - del state["_fetch_block_cached"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - self._fetch_block_cached = functools.lru_cache(state["maxblocks"])( - self._fetch_block - ) - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - - # byte position -> block numbers - start_block_number = start // self.blocksize - end_block_number = end // self.blocksize - - # these are cached, so safe to do multiple calls for the same start and end. - for block_number in range(start_block_number, end_block_number + 1): - self._fetch_block_cached(block_number) - - return self._read_cache( - start, - end, - start_block_number=start_block_number, - end_block_number=end_block_number, - ) - - def _fetch_block(self, block_number): - """ - Fetch the block of data for `block_number`. - """ - if block_number > self.nblocks: - raise ValueError( - "'block_number={}' is greater than the number of blocks ({})".format( - block_number, self.nblocks - ) - ) - - start = block_number * self.blocksize - end = start + self.blocksize - logger.info("BlockCache fetching block %d", block_number) - block_contents = super()._fetch(start, end) - return block_contents - - def _read_cache(self, start, end, start_block_number, end_block_number): - """ - Read from our block cache. - - Parameters - ---------- - start, end : int - The start and end byte positions. - start_block_number, end_block_number : int - The start and end block numbers. 
- """ - start_pos = start % self.blocksize - end_pos = end % self.blocksize - - if start_block_number == end_block_number: - block = self._fetch_block_cached(start_block_number) - return block[start_pos:end_pos] - - else: - # read from the initial - out = [] - out.append(self._fetch_block_cached(start_block_number)[start_pos:]) - - # intermediate blocks - # Note: it'd be nice to combine these into one big request. However - # that doesn't play nicely with our LRU cache. - for block_number in range(start_block_number + 1, end_block_number): - out.append(self._fetch_block_cached(block_number)) - - # final block - out.append(self._fetch_block_cached(end_block_number)[:end_pos]) - - return b"".join(out) - - -class BytesCache(BaseCache): - """Cache which holds data in a in-memory bytes object - - Implements read-ahead by the block size, for semi-random reads progressing - through the file. - - Parameters - ---------- - trim: bool - As we read more data, whether to discard the start of the buffer when - we are more than a blocksize ahead of it. - """ - - name = "bytes" - - def __init__(self, blocksize, fetcher, size, trim=True): - super().__init__(blocksize, fetcher, size) - self.cache = b"" - self.start = None - self.end = None - self.trim = trim - - def _fetch(self, start, end): - # TODO: only set start/end after fetch, in case it fails? - # is this where retry logic might go? - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - if ( - self.start is not None - and start >= self.start - and self.end is not None - and end < self.end - ): - # cache hit: we have all the required data - offset = start - self.start - return self.cache[offset : offset + end - start] - - if self.blocksize: - bend = min(self.size, end + self.blocksize) - else: - bend = end - - if bend == start or start > self.size: - return b"" - - if (self.start is None or start < self.start) and ( - self.end is None or end > self.end - ): - # First read, or extending both before and after - self.cache = self.fetcher(start, bend) - self.start = start - elif start < self.start: - if self.end - end > self.blocksize: - self.cache = self.fetcher(start, bend) - self.start = start - else: - new = self.fetcher(start, self.start) - self.start = start - self.cache = new + self.cache - elif bend > self.end: - if self.end > self.size: - pass - elif end - self.end > self.blocksize: - self.cache = self.fetcher(start, bend) - self.start = start - else: - new = self.fetcher(self.end, bend) - self.cache = self.cache + new - - self.end = self.start + len(self.cache) - offset = start - self.start - out = self.cache[offset : offset + end - start] - if self.trim: - num = (self.end - self.start) // (self.blocksize + 1) - if num > 1: - self.start += self.blocksize * num - self.cache = self.cache[self.blocksize * num :] - return out - - def __len__(self): - return len(self.cache) - - -class AllBytes(BaseCache): - """Cache entire contents of the file""" - - name = "all" - - def __init__(self, blocksize=None, fetcher=None, size=None, data=None): - super().__init__(blocksize, fetcher, size) - if data is None: - data = self.fetcher(0, self.size) - self.data = data - - def _fetch(self, start, end): - return self.data[start:end] - - -class KnownPartsOfAFile(BaseCache): - """ - Cache holding known file parts. 
- - Parameters - ---------- - blocksize: int - How far to read ahead in numbers of bytes - fetcher: func - Function of the form f(start, end) which gets bytes from remote as - specified - size: int - How big this file is - data: dict - A dictionary mapping explicit `(start, stop)` file-offset tuples - with known bytes. - strict: bool, default True - Whether to fetch reads that go beyond a known byte-range boundary. - If `False`, any read that ends outside a known part will be zero - padded. Note that zero padding will not be used for reads that - begin outside a known byte-range. - """ - - name = "parts" - - def __init__(self, blocksize, fetcher, size, data={}, strict=True, **_): - super(KnownPartsOfAFile, self).__init__(blocksize, fetcher, size) - self.strict = strict - - # simple consolidation of contiguous blocks - if data: - old_offsets = sorted(list(data.keys())) - offsets = [old_offsets[0]] - blocks = [data.pop(old_offsets[0])] - for start, stop in old_offsets[1:]: - start0, stop0 = offsets[-1] - if start == stop0: - offsets[-1] = (start0, stop) - blocks[-1] += data.pop((start, stop)) - else: - offsets.append((start, stop)) - blocks.append(data.pop((start, stop))) - - self.data = dict(zip(offsets, blocks)) - else: - self.data = data - - def _fetch(self, start, stop): - out = b"" - for (loc0, loc1), data in self.data.items(): - # If self.strict=False, use zero-padded data - # for reads beyond the end of a "known" buffer - if loc0 <= start < loc1: - off = start - loc0 - out = data[off : off + stop - start] - if not self.strict or loc0 <= stop <= loc1: - # The request is within a known range, or - # it begins within a known range, and we - # are allowed to pad reads beyond the - # buffer with zero - out += b"\x00" * (stop - start - len(out)) - return out - else: - # The request ends outside a known range, - # and we are being "strict" about reads - # beyond the buffer - start = loc1 - break - - # We only get here if there is a request outside the - # known parts of the file. In an ideal world, this - # should never happen - if self.fetcher is None: - # We cannot fetch the data, so raise an error - raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ") - # We can fetch the data, but should warn the user - # that this may be slow - warnings.warn( - f"Read is outside the known file parts: {(start, stop)}. " - f"IO/caching performance may be poor!" 
- ) - logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}") - return out + super()._fetch(start, stop) - - -class UpdatableLRU: - """ - Custom implementation of LRU cache that allows updating keys - - Used by BackgroudBlockCache - """ - - CacheInfo = collections.namedtuple( - "CacheInfo", ["hits", "misses", "maxsize", "currsize"] - ) - - def __init__(self, func, max_size=128): - self._cache = collections.OrderedDict() - self._func = func - self._max_size = max_size - self._hits = 0 - self._misses = 0 - self._lock = threading.Lock() - - def __call__(self, *args): - with self._lock: - if args in self._cache: - self._cache.move_to_end(args) - self._hits += 1 - return self._cache[args] - - result = self._func(*args) - - with self._lock: - self._cache[args] = result - self._misses += 1 - if len(self._cache) > self._max_size: - self._cache.popitem(last=False) - - return result - - def is_key_cached(self, *args): - with self._lock: - return args in self._cache - - def add_key(self, result, *args): - with self._lock: - self._cache[args] = result - if len(self._cache) > self._max_size: - self._cache.popitem(last=False) - - def cache_info(self): - with self._lock: - return self.CacheInfo( - maxsize=self._max_size, - currsize=len(self._cache), - hits=self._hits, - misses=self._misses, - ) - - -class BackgroundBlockCache(BaseCache): - """ - Cache holding memory as a set of blocks with pre-loading of - the next block in the background. - - Requests are only ever made ``blocksize`` at a time, and are - stored in an LRU cache. The least recently accessed block is - discarded when more than ``maxblocks`` are stored. If the - next block is not in cache, it is loaded in a separate thread - in non-blocking way. - - Parameters - ---------- - blocksize : int - The number of bytes to store in each block. - Requests are only ever made for ``blocksize``, so this - should balance the overhead of making a request against - the granularity of the blocks. - fetcher : Callable - size : int - The total size of the file being cached. - maxblocks : int - The maximum number of blocks to cache for. The maximum memory - use for this cache is then ``blocksize * maxblocks``. - """ - - name = "background" - - def __init__(self, blocksize, fetcher, size, maxblocks=32): - super().__init__(blocksize, fetcher, size) - self.nblocks = math.ceil(size / blocksize) - self.maxblocks = maxblocks - self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks) - - self._thread_executor = ThreadPoolExecutor(max_workers=1) - self._fetch_future_block_number = None - self._fetch_future = None - self._fetch_future_lock = threading.Lock() - - def __repr__(self): - return "".format( - self.blocksize, self.size, self.nblocks - ) - - def cache_info(self): - """ - The statistics on the block cache. - - Returns - ------- - NamedTuple - Returned directly from the LRU Cache used internally. 
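UpdatableLRU behaves much like functools.lru_cache, with the addition of add_key()/is_key_cached() so that a background thread can push results in without going through the wrapped function. A small illustration with a toy function; the import path assumes the fsspec version this module belongs to:

from fsspec.caching import UpdatableLRU

calls = []

def square(x):
    calls.append(x)
    return x * x

cache = UpdatableLRU(square, max_size=2)

assert cache(3) == 9           # miss: calls square()
assert cache(3) == 9           # hit: served from the cache
cache.add_key(100, 10)         # pre-populate 10 -> 100 without calling square()
assert cache.is_key_cached(10)
assert cache(10) == 100 and calls == [3]
print(cache.cache_info())      # CacheInfo(hits=2, misses=1, maxsize=2, currsize=2)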
- """ - return self._fetch_block_cached.cache_info() - - def __getstate__(self): - state = self.__dict__ - del state["_fetch_block_cached"] - del state["_thread_executor"] - del state["_fetch_future_block_number"] - del state["_fetch_future"] - del state["_fetch_future_lock"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"]) - self._thread_executor = ThreadPoolExecutor(max_workers=1) - self._fetch_future_block_number = None - self._fetch_future = None - self._fetch_future_lock = threading.Lock() - - def _fetch(self, start, end): - if start is None: - start = 0 - if end is None: - end = self.size - if start >= self.size or start >= end: - return b"" - - # byte position -> block numbers - start_block_number = start // self.blocksize - end_block_number = end // self.blocksize - - fetch_future_block_number = None - fetch_future = None - with self._fetch_future_lock: - # Background thread is running. Check we we can or must join it. - if self._fetch_future is not None: - if self._fetch_future.done(): - logger.info("BlockCache joined background fetch without waiting.") - self._fetch_block_cached.add_key( - self._fetch_future.result(), self._fetch_future_block_number - ) - # Cleanup the fetch variables. Done with fetching the block. - self._fetch_future_block_number = None - self._fetch_future = None - else: - # Must join if we need the block for the current fetch - must_join = bool( - start_block_number - <= self._fetch_future_block_number - <= end_block_number - ) - if must_join: - # Copy to the local variables to release lock - # before waiting for result - fetch_future_block_number = self._fetch_future_block_number - fetch_future = self._fetch_future - - # Cleanup the fetch variables. Have a local copy. - self._fetch_future_block_number = None - self._fetch_future = None - - # Need to wait for the future for the current read - if fetch_future is not None: - logger.info("BlockCache waiting for background fetch.") - # Wait until result and put it in cache - self._fetch_block_cached.add_key( - fetch_future.result(), fetch_future_block_number - ) - - # these are cached, so safe to do multiple calls for the same start and end. - for block_number in range(start_block_number, end_block_number + 1): - self._fetch_block_cached(block_number) - - # fetch next block in the background if nothing is running in the background, - # the block is within file and it is not already cached - end_block_plus_1 = end_block_number + 1 - with self._fetch_future_lock: - if ( - self._fetch_future is None - and end_block_plus_1 <= self.nblocks - and not self._fetch_block_cached.is_key_cached(end_block_plus_1) - ): - self._fetch_future_block_number = end_block_plus_1 - self._fetch_future = self._thread_executor.submit( - self._fetch_block, end_block_plus_1, "async" - ) - - return self._read_cache( - start, - end, - start_block_number=start_block_number, - end_block_number=end_block_number, - ) - - def _fetch_block(self, block_number, log_info="sync"): - """ - Fetch the block of data for `block_number`. 
- """ - if block_number > self.nblocks: - raise ValueError( - "'block_number={}' is greater than the number of blocks ({})".format( - block_number, self.nblocks - ) - ) - - start = block_number * self.blocksize - end = start + self.blocksize - logger.info("BlockCache fetching block (%s) %d", log_info, block_number) - block_contents = super()._fetch(start, end) - return block_contents - - def _read_cache(self, start, end, start_block_number, end_block_number): - """ - Read from our block cache. - - Parameters - ---------- - start, end : int - The start and end byte positions. - start_block_number, end_block_number : int - The start and end block numbers. - """ - start_pos = start % self.blocksize - end_pos = end % self.blocksize - - if start_block_number == end_block_number: - block = self._fetch_block_cached(start_block_number) - return block[start_pos:end_pos] - - else: - # read from the initial - out = [] - out.append(self._fetch_block_cached(start_block_number)[start_pos:]) - - # intermediate blocks - # Note: it'd be nice to combine these into one big request. However - # that doesn't play nicely with our LRU cache. - for block_number in range(start_block_number + 1, end_block_number): - out.append(self._fetch_block_cached(block_number)) - - # final block - out.append(self._fetch_block_cached(end_block_number)[:end_pos]) - - return b"".join(out) - - -caches = { - # one custom case - None: BaseCache, -} - - -def register_cache(cls, clobber=False): - """'Register' cache implementation. - - Parameters - ---------- - clobber: bool, optional - If set to True (default is False) - allow to overwrite existing - entry. - - Raises - ------ - ValueError - """ - name = cls.name - if not clobber and name in caches: - raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}") - caches[name] = cls - - -for c in ( - BaseCache, - MMapCache, - BytesCache, - ReadAheadCache, - BlockCache, - FirstChunkCache, - AllBytes, - KnownPartsOfAFile, - BackgroundBlockCache, -): - register_cache(c) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/tests/data02/two/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/importlib_resources/tests/data02/two/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/legoandmars/glide-inpainting/glide_text2im/clip/utils.py b/spaces/legoandmars/glide-inpainting/glide_text2im/clip/utils.py deleted file mode 100644 index 8fc5b059dad76877f4442da36a8d6327302fe341..0000000000000000000000000000000000000000 --- a/spaces/legoandmars/glide-inpainting/glide_text2im/clip/utils.py +++ /dev/null @@ -1,97 +0,0 @@ -import math -from typing import Callable, Optional - -import attr -import torch -import torch.nn as nn -import torch.nn.functional as F - -FilterFn = Callable[[torch.Tensor], torch.Tensor] - - -class ZeroKeyBiasGrad(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - return x - - @staticmethod - def backward(ctx, output_grad): - output_grad = output_grad.clone() - output_grad.chunk(3)[1].zero_() - return output_grad - - -def zero_key_bias_grad(x: torch.Tensor) -> torch.Tensor: - return ZeroKeyBiasGrad.apply(x) - - -@attr.s(eq=False, repr=False) -class LayerNorm(nn.Module): - n_state: int = attr.ib() - eps: float = attr.ib(default=1e-6) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - self.g = 
nn.Parameter(torch.ones((self.n_state,), dtype=torch.float32, device=self.device)) - self.b = nn.Parameter(torch.zeros((self.n_state,), dtype=torch.float32, device=self.device)) - self.g.weight_decay_level = "disable" # type: ignore - self.b.weight_decay_level = "disable" # type: ignore - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return F.layer_norm( - x.type(torch.float32), torch.Size((self.n_state,)), self.g, self.b, self.eps - ) - - -@attr.s(eq=False, repr=False) -class Affine(nn.Module): - n_in: int = attr.ib() - n_out: int = attr.ib() - use_bias: bool = attr.ib(default=True) - use_admnet_init: bool = attr.ib(default=False) - std: Optional[float] = attr.ib(default=None) - extra_init_scale: Optional[float] = attr.ib(default=None) - bias_filter_fn: FilterFn = attr.ib(default=lambda x: x) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - if not self.use_admnet_init: - self.std = self.std if self.std is not None else math.sqrt(2 / (self.n_in + self.n_out)) - self.std = ( - self.std if self.extra_init_scale is None else self.std * self.extra_init_scale - ) - - w = torch.empty((self.n_out, self.n_in), dtype=torch.float32, device=self.device) - self.w = nn.Parameter(w) - - if self.use_bias: - self.b = nn.Parameter( - torch.zeros((self.n_out,), dtype=torch.float32, device=self.device) - ) - self.b.weight_decay_level = "disable" # type: ignore - else: - if self.extra_init_scale is not None: - raise ValueError("extra_init_scale incompatible with admnet init") - - w = torch.empty((self.n_out, self.n_in), dtype=torch.float32, device=self.device) - - if self.use_bias: - b = torch.empty((self.n_out,), dtype=torch.float32, device=self.device) - - self.w = nn.Parameter(w) - - if self.use_bias: - self.b = nn.Parameter(b) - self.b.weight_decay_level = "disable" # type: ignore - - def forward(self, x: torch.Tensor) -> torch.Tensor: - w = self.w if self.w.dtype == x.dtype else self.w.to(x.dtype) - b = ( - self.bias_filter_fn(self.b if self.b.dtype == x.dtype else self.b.to(x.dtype)) - if self.use_bias - else None - ) - return F.linear(x, w, b) diff --git a/spaces/lelafav502/fallpt-chat/README.md b/spaces/lelafav502/fallpt-chat/README.md deleted file mode 100644 index d39f59899da79a4f1cd9e479729052fbc982f086..0000000000000000000000000000000000000000 --- a/spaces/lelafav502/fallpt-chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fallpt Chat -emoji: 💻 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf.sh b/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf.sh deleted file mode 100644 index c34efddcecca4c20759984162a82faeca614ecb7..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/repositories/exllama/sh/test_benchmark_perf.sh +++ /dev/null @@ -1,18 +0,0 @@ - -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/_test_models/iambestfeed_open_llama_3b_4bit_128g -cs -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d 
/mnt/str/models/llama-7b-4bit-128g -cs -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-13b-4bit-128g -cs -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-30b-4bit-128g -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-30b-4bit-128g-act -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/llama-30b-4bit-32g-act-ts -l 1550 -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/koala-13B-4bit-128g-act -echo "-------------------------------------------------------------------------------------------------------------" -python test_benchmark_inference.py -p -d /mnt/str/models/wizardlm-30b-uncensored-4bit-act-order -echo "-------------------------------------------------------------------------------------------------------------" diff --git a/spaces/lewispons/GrammarGuru/src/models/train_recommender.py b/spaces/lewispons/GrammarGuru/src/models/train_recommender.py deleted file mode 100644 index d86317d85321567bf1d382c9f75a45597c886c42..0000000000000000000000000000000000000000 --- a/spaces/lewispons/GrammarGuru/src/models/train_recommender.py +++ /dev/null @@ -1,102 +0,0 @@ -import pandas as pd -from gensim.similarities import SparseMatrixSimilarity -import argparse -import logging -import time - -from utils.utilities import read_yaml_config, validate_and_create_subfolders -from utils.mlutilities import * - -import logging -import sys - -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] %(message)s", - handlers=[ - logging.FileHandler("debug.log"), - logging.StreamHandler(sys.stdout) - ] -) - - -model_configurations = read_yaml_config("/Users/luis.morales/Desktop/arxiv-paper-recommender/src/models/configs.yaml") - - -if __name__ == "__main__": - """ - Example: - python3 ./src/models/train_recommender.py --modelsize Medium - - """ - # Define and parse command-line arguments - parser = argparse.ArgumentParser(description='ArXiv Paper Recommender CLI') - parser.add_argument('--modelsize',choices=["Large", "SubLarge", "Medium", "Small"], default=None, type=str, help='Model Size') - - args = parser.parse_args() - model_size = args.modelsize - start = time.time() - - - if model_size is None: - raise Exception("The `modelsize` flag was no passed to the CLI.") - - - model_config = model_configurations["GensimConfig"][model_size] - model_name = model_configurations["GensimConfig"][model_size]["ModelName"] - dataset_fraq_split = model_configurations["GensimConfig"][model_size]["DataSetFracSplit"] - random_seed = model_configurations["GensimConfig"][model_size]["RandomSeedSplit"] - logging.info(f"Started training of {model_name} Model.") - - - validate_and_create_subfolders( - model_name=model_name - ) - logging.info(f"Model Folder `{model_name}` was created successfully.") - - - if dataset_fraq_split is None: - df = pd.read_parquet("/Users/luis.morales/Desktop/arxiv-paper-recommender/data/processed/reduced_arxiv_papers.parquet.gzip") - 
logging.info(f"The full text Corpus was readed.") - - else : - df = pd.read_parquet("/Users/luis.morales/Desktop/arxiv-paper-recommender/data/processed/reduced_arxiv_papers.parquet.gzip") \ - .sample(frac=dataset_fraq_split, random_state=random_seed) \ - .reset_index(drop=True) - logging.info(f"A random split of {dataset_fraq_split}% was applied on the Text Corpus ") - logging.info(f"Dimensions of the dataset: {df.shape}") - - df.to_parquet(f"/Users/luis.morales/Desktop/arxiv-paper-recommender/models/data/{model_name}.parquet.gzip", compression='gzip') - logging.info(f"The Dataset used for this training was successfully saved in: `/Users/luis.morales/Desktop/arxiv-paper-recommender/models/data/{model_name}.parquet.gzip`.") - - - - corpus = df['cleaned_abstracts'].to_list() - tokenized_corpus = gensim_tokenizer(corpus) - logging.info(f"Dictionary Learned on the {model_name} corpus dataset.") - - - dictionary = get_gensim_dictionary(tokenized_docs=tokenized_corpus, dict_name=model_name, save_dict=True) - logging.info("Dictionary Saved Locally.") - - - BoW_corpus = [dictionary.doc2bow(doc, allow_update=True) for doc in tokenized_corpus] - tfidf_model = TfidfModel(BoW_corpus) - logging.info(f"TD-IDF {model_name} Model was successfully trained.") - - - tfidf_model.save(f"/Users/luis.morales/Desktop/arxiv-paper-recommender/models/tfidf/{model_name}.model") - logging.info(f"Model: {model_name} was successfully saved.") - - - index = SparseMatrixSimilarity(tfidf_model[BoW_corpus], num_features=len(dictionary)) - logging.info(f"The Similarities Sparse Matrix was successfully created.") - index.save(f"/Users/luis.morales/Desktop/arxiv-paper-recommender/models/similarities_matrix/{model_name}") - logging.info(f"The Similarities Matrix was successfully saved for the model: {model_name}.") - - end = time.time() - total_time = end - start - logging.info(f"Full Training of {model_size} model took {total_time} secs.") - logging.info(f"The {model_name} Model was successfully trained! 
yei :)") - - \ No newline at end of file diff --git a/spaces/lfoppiano/grobid-quantities/Dockerfile b/spaces/lfoppiano/grobid-quantities/Dockerfile deleted file mode 100644 index 6ab95742fd04c7f50fc345b6312d75142ecfd6cf..0000000000000000000000000000000000000000 --- a/spaces/lfoppiano/grobid-quantities/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM lfoppiano/grobid-quantities:0.7.3 -USER root -WORKDIR /opt/grobid - -RUN mkdir -m 777 -p /opt/grobid/grobid-home/tmp -RUN chmod -R uog+rw /data/db - -COPY --chown=lfoppiano config-dl.yml /opt/grobid/grobid-quantities/resources/config/config.yml -CMD ["./grobid-quantities/bin/grobid-quantities", "server", "grobid-quantities/resources/config/config.yml"] \ No newline at end of file diff --git a/spaces/lixq/bingo61/src/components/settings.tsx b/spaces/lixq/bingo61/src/components/settings.tsx deleted file mode 100644 index e18aa5b484852bb5d047442a06e7143b6893cb0d..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/src/components/settings.tsx +++ /dev/null @@ -1,141 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, randomIP, encodeHeadersToCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - setLoc('')} modal> - - - 设置你的用户信息 - - 请使用 Edge 浏览器 - - 打开并登录 Bing - - ,然后再打开 - Challenge 接口 - 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 -
      - 图文示例: - 如何获取 BING_HEADER - - -
      - -
      - setCurlValue(e.target.value)} - /> - - - - - - -
      - ) - } else if (loc === 'voice') { - return ( - setLoc('')} modal> - - - 语音设置 - - 目前仅支持 PC 端 Edge 及 Chrome 浏览器 - - - -
      - 启用语音回答 - setEnableTTS(checked)} - > - - -
      - - - - -
      -
      - ) - } - return null -} diff --git a/spaces/luckwill/chiakicc/text/cantonese.py b/spaces/luckwill/chiakicc/text/cantonese.py deleted file mode 100644 index 32eae72ef7eb43d493da6d6f75dd46176d0e8808..0000000000000000000000000000000000000000 --- a/spaces/luckwill/chiakicc/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('chinese_dialect_lexicons/jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/luost26/DiffAb/diffab/modules/diffusion/dpm_full.py b/spaces/luost26/DiffAb/diffab/modules/diffusion/dpm_full.py deleted file mode 100644 index 49fe30db80a76deaf7d0a011dbd8116cf4e27b0e..0000000000000000000000000000000000000000 --- a/spaces/luost26/DiffAb/diffab/modules/diffusion/dpm_full.py +++ /dev/null @@ -1,319 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import functools -from tqdm.auto import tqdm - -from diffab.modules.common.geometry import apply_rotation_to_vector, quaternion_1ijk_to_rotation_matrix -from diffab.modules.common.so3 import so3vec_to_rotation, rotation_to_so3vec, random_uniform_so3 -from diffab.modules.encoders.ga import GAEncoder -from .transition import RotationTransition, PositionTransition, AminoacidCategoricalTransition - - -def rotation_matrix_cosine_loss(R_pred, R_true): - """ - Args: - R_pred: (*, 3, 3). - R_true: (*, 3, 3). - Returns: - Per-matrix losses, (*, ). 
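        (Each entry is the sum, over the three matrix columns, of 1 - cosine similarity
        between the corresponding columns of R_pred and R_true, as computed with
        F.cosine_embedding_loss below.)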
- """ - size = list(R_pred.shape[:-2]) - ncol = R_pred.numel() // 3 - - RT_pred = R_pred.transpose(-2, -1).reshape(ncol, 3) # (ncol, 3) - RT_true = R_true.transpose(-2, -1).reshape(ncol, 3) # (ncol, 3) - - ones = torch.ones([ncol, ], dtype=torch.long, device=R_pred.device) - loss = F.cosine_embedding_loss(RT_pred, RT_true, ones, reduction='none') # (ncol*3, ) - loss = loss.reshape(size + [3]).sum(dim=-1) # (*, ) - return loss - - -class EpsilonNet(nn.Module): - - def __init__(self, res_feat_dim, pair_feat_dim, num_layers, encoder_opt={}): - super().__init__() - self.current_sequence_embedding = nn.Embedding(25, res_feat_dim) # 22 is padding - self.res_feat_mixer = nn.Sequential( - nn.Linear(res_feat_dim * 2, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, res_feat_dim), - ) - self.encoder = GAEncoder(res_feat_dim, pair_feat_dim, num_layers, **encoder_opt) - - self.eps_crd_net = nn.Sequential( - nn.Linear(res_feat_dim+3, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, 3) - ) - - self.eps_rot_net = nn.Sequential( - nn.Linear(res_feat_dim+3, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, 3) - ) - - self.eps_seq_net = nn.Sequential( - nn.Linear(res_feat_dim+3, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, res_feat_dim), nn.ReLU(), - nn.Linear(res_feat_dim, 20), nn.Softmax(dim=-1) - ) - - def forward(self, v_t, p_t, s_t, res_feat, pair_feat, beta, mask_generate, mask_res): - """ - Args: - v_t: (N, L, 3). - p_t: (N, L, 3). - s_t: (N, L). - res_feat: (N, L, res_dim). - pair_feat: (N, L, L, pair_dim). - beta: (N,). - mask_generate: (N, L). - mask_res: (N, L). - Returns: - v_next: UPDATED (not epsilon) SO3-vector of orietnations, (N, L, 3). - eps_pos: (N, L, 3). - """ - N, L = mask_res.size() - R = so3vec_to_rotation(v_t) # (N, L, 3, 3) - - # s_t = s_t.clamp(min=0, max=19) # TODO: clamping is good but ugly. - res_feat = self.res_feat_mixer(torch.cat([res_feat, self.current_sequence_embedding(s_t)], dim=-1)) # [Important] Incorporate sequence at the current step. 
- res_feat = self.encoder(R, p_t, res_feat, pair_feat, mask_res) - - t_embed = torch.stack([beta, torch.sin(beta), torch.cos(beta)], dim=-1)[:, None, :].expand(N, L, 3) - in_feat = torch.cat([res_feat, t_embed], dim=-1) - - # Position changes - eps_crd = self.eps_crd_net(in_feat) # (N, L, 3) - eps_pos = apply_rotation_to_vector(R, eps_crd) # (N, L, 3) - eps_pos = torch.where(mask_generate[:, :, None].expand_as(eps_pos), eps_pos, torch.zeros_like(eps_pos)) - - # New orientation - eps_rot = self.eps_rot_net(in_feat) # (N, L, 3) - U = quaternion_1ijk_to_rotation_matrix(eps_rot) # (N, L, 3, 3) - R_next = R @ U - v_next = rotation_to_so3vec(R_next) # (N, L, 3) - v_next = torch.where(mask_generate[:, :, None].expand_as(v_next), v_next, v_t) - - # New sequence categorical distributions - c_denoised = self.eps_seq_net(in_feat) # Already softmax-ed, (N, L, 20) - - return v_next, R_next, eps_pos, c_denoised - - -class FullDPM(nn.Module): - - def __init__( - self, - res_feat_dim, - pair_feat_dim, - num_steps, - eps_net_opt={}, - trans_rot_opt={}, - trans_pos_opt={}, - trans_seq_opt={}, - position_mean=[0.0, 0.0, 0.0], - position_scale=[10.0], - ): - super().__init__() - self.eps_net = EpsilonNet(res_feat_dim, pair_feat_dim, **eps_net_opt) - self.num_steps = num_steps - self.trans_rot = RotationTransition(num_steps, **trans_rot_opt) - self.trans_pos = PositionTransition(num_steps, **trans_pos_opt) - self.trans_seq = AminoacidCategoricalTransition(num_steps, **trans_seq_opt) - - self.register_buffer('position_mean', torch.FloatTensor(position_mean).view(1, 1, -1)) - self.register_buffer('position_scale', torch.FloatTensor(position_scale).view(1, 1, -1)) - self.register_buffer('_dummy', torch.empty([0, ])) - - def _normalize_position(self, p): - p_norm = (p - self.position_mean) / self.position_scale - return p_norm - - def _unnormalize_position(self, p_norm): - p = p_norm * self.position_scale + self.position_mean - return p - - def forward(self, v_0, p_0, s_0, res_feat, pair_feat, mask_generate, mask_res, denoise_structure, denoise_sequence, t=None): - N, L = res_feat.shape[:2] - if t == None: - t = torch.randint(0, self.num_steps, (N,), dtype=torch.long, device=self._dummy.device) - p_0 = self._normalize_position(p_0) - - if denoise_structure: - # Add noise to rotation - R_0 = so3vec_to_rotation(v_0) - v_noisy, _ = self.trans_rot.add_noise(v_0, mask_generate, t) - # Add noise to positions - p_noisy, eps_p = self.trans_pos.add_noise(p_0, mask_generate, t) - else: - R_0 = so3vec_to_rotation(v_0) - v_noisy = v_0.clone() - p_noisy = p_0.clone() - eps_p = torch.zeros_like(p_noisy) - - if denoise_sequence: - # Add noise to sequence - _, s_noisy = self.trans_seq.add_noise(s_0, mask_generate, t) - else: - s_noisy = s_0.clone() - - beta = self.trans_pos.var_sched.betas[t] - v_pred, R_pred, eps_p_pred, c_denoised = self.eps_net( - v_noisy, p_noisy, s_noisy, res_feat, pair_feat, beta, mask_generate, mask_res - ) # (N, L, 3), (N, L, 3, 3), (N, L, 3), (N, L, 20), (N, L) - - loss_dict = {} - - # Rotation loss - loss_rot = rotation_matrix_cosine_loss(R_pred, R_0) # (N, L) - loss_rot = (loss_rot * mask_generate).sum() / (mask_generate.sum().float() + 1e-8) - loss_dict['rot'] = loss_rot - - # Position loss - loss_pos = F.mse_loss(eps_p_pred, eps_p, reduction='none').sum(dim=-1) # (N, L) - loss_pos = (loss_pos * mask_generate).sum() / (mask_generate.sum().float() + 1e-8) - loss_dict['pos'] = loss_pos - - # Sequence categorical loss - post_true = self.trans_seq.posterior(s_noisy, s_0, t) - log_post_pred = 
torch.log(self.trans_seq.posterior(s_noisy, c_denoised, t) + 1e-8) - kldiv = F.kl_div( - input=log_post_pred, - target=post_true, - reduction='none', - log_target=False - ).sum(dim=-1) # (N, L) - loss_seq = (kldiv * mask_generate).sum() / (mask_generate.sum().float() + 1e-8) - loss_dict['seq'] = loss_seq - - return loss_dict - - @torch.no_grad() - def sample( - self, - v, p, s, - res_feat, pair_feat, - mask_generate, mask_res, - sample_structure=True, sample_sequence=True, - pbar=False, - ): - """ - Args: - v: Orientations of contextual residues, (N, L, 3). - p: Positions of contextual residues, (N, L, 3). - s: Sequence of contextual residues, (N, L). - """ - N, L = v.shape[:2] - p = self._normalize_position(p) - - # Set the orientation and position of residues to be predicted to random values - if sample_structure: - v_rand = random_uniform_so3([N, L], device=self._dummy.device) - p_rand = torch.randn_like(p) - v_init = torch.where(mask_generate[:, :, None].expand_as(v), v_rand, v) - p_init = torch.where(mask_generate[:, :, None].expand_as(p), p_rand, p) - else: - v_init, p_init = v, p - - if sample_sequence: - s_rand = torch.randint_like(s, low=0, high=19) - s_init = torch.where(mask_generate, s_rand, s) - else: - s_init = s - - traj = {self.num_steps: (v_init, self._unnormalize_position(p_init), s_init)} - if pbar: - pbar = functools.partial(tqdm, total=self.num_steps, desc='Sampling') - else: - pbar = lambda x: x - for t in pbar(range(self.num_steps, 0, -1)): - v_t, p_t, s_t = traj[t] - p_t = self._normalize_position(p_t) - - beta = self.trans_pos.var_sched.betas[t].expand([N, ]) - t_tensor = torch.full([N, ], fill_value=t, dtype=torch.long, device=self._dummy.device) - - v_next, R_next, eps_p, c_denoised = self.eps_net( - v_t, p_t, s_t, res_feat, pair_feat, beta, mask_generate, mask_res - ) # (N, L, 3), (N, L, 3, 3), (N, L, 3) - - v_next = self.trans_rot.denoise(v_t, v_next, mask_generate, t_tensor) - p_next = self.trans_pos.denoise(p_t, eps_p, mask_generate, t_tensor) - _, s_next = self.trans_seq.denoise(s_t, c_denoised, mask_generate, t_tensor) - - if not sample_structure: - v_next, p_next = v_t, p_t - if not sample_sequence: - s_next = s_t - - traj[t-1] = (v_next, self._unnormalize_position(p_next), s_next) - traj[t] = tuple(x.cpu() for x in traj[t]) # Move previous states to cpu memory. - - return traj - - @torch.no_grad() - def optimize( - self, - v, p, s, - opt_step: int, - res_feat, pair_feat, - mask_generate, mask_res, - sample_structure=True, sample_sequence=True, - pbar=False, - ): - """ - Description: - First adds noise to the given structure, then denoises it. 
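        The amount of perturbation is controlled by `opt_step`: noise at diffusion
        step `opt_step` is added to the regions selected by `mask_generate`, and the
        reverse (denoising) loop then runs from `opt_step` down to 1.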
- """ - N, L = v.shape[:2] - p = self._normalize_position(p) - t = torch.full([N, ], fill_value=opt_step, dtype=torch.long, device=self._dummy.device) - - # Set the orientation and position of residues to be predicted to random values - if sample_structure: - # Add noise to rotation - v_noisy, _ = self.trans_rot.add_noise(v, mask_generate, t) - # Add noise to positions - p_noisy, _ = self.trans_pos.add_noise(p, mask_generate, t) - v_init = torch.where(mask_generate[:, :, None].expand_as(v), v_noisy, v) - p_init = torch.where(mask_generate[:, :, None].expand_as(p), p_noisy, p) - else: - v_init, p_init = v, p - - if sample_sequence: - _, s_noisy = self.trans_seq.add_noise(s, mask_generate, t) - s_init = torch.where(mask_generate, s_noisy, s) - else: - s_init = s - - traj = {opt_step: (v_init, self._unnormalize_position(p_init), s_init)} - if pbar: - pbar = functools.partial(tqdm, total=opt_step, desc='Optimizing') - else: - pbar = lambda x: x - for t in pbar(range(opt_step, 0, -1)): - v_t, p_t, s_t = traj[t] - p_t = self._normalize_position(p_t) - - beta = self.trans_pos.var_sched.betas[t].expand([N, ]) - t_tensor = torch.full([N, ], fill_value=t, dtype=torch.long, device=self._dummy.device) - - v_next, R_next, eps_p, c_denoised = self.eps_net( - v_t, p_t, s_t, res_feat, pair_feat, beta, mask_generate, mask_res - ) # (N, L, 3), (N, L, 3, 3), (N, L, 3) - - v_next = self.trans_rot.denoise(v_t, v_next, mask_generate, t_tensor) - p_next = self.trans_pos.denoise(p_t, eps_p, mask_generate, t_tensor) - _, s_next = self.trans_seq.denoise(s_t, c_denoised, mask_generate, t_tensor) - - if not sample_structure: - v_next, p_next = v_t, p_t - if not sample_sequence: - s_next = s_t - - traj[t-1] = (v_next, self._unnormalize_position(p_next), s_next) - traj[t] = tuple(x.cpu() for x in traj[t]) # Move previous states to cpu memory. - - return traj diff --git a/spaces/lxe/simple-llm-finetuner/README.md b/spaces/lxe/simple-llm-finetuner/README.md deleted file mode 100644 index d97c321349a6f0ade0c7e9c574d17dcf35ea4e46..0000000000000000000000000000000000000000 --- a/spaces/lxe/simple-llm-finetuner/README.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: Simple LLM Finetuner -emoji: 🦙 -colorFrom: yellow -colorTo: orange -sdk: gradio -app_file: app.py -pinned: false ---- - -# 🦙 Simple LLM Finetuner - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb) -[![Open In Spaces](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue.svg)](https://huggingface.co/spaces/lxe/simple-llama-finetuner) -[![](https://img.shields.io/badge/no-bugs-brightgreen.svg)](https://github.com/lxe/no-bugs) -[![](https://img.shields.io/badge/coverage-%F0%9F%92%AF-green.svg)](https://github.com/lxe/onehundred/tree/master) - -Simple LLM Finetuner is a beginner-friendly interface designed to facilitate fine-tuning various language models using [LoRA](https://arxiv.org/abs/2106.09685) method via the [PEFT library](https://github.com/huggingface/peft) on commodity NVIDIA GPUs. With small dataset and sample lengths of 256, you can even run this on a regular Colab Tesla T4 instance. - -With this intuitive UI, you can easily manage your dataset, customize parameters, train, and evaluate the model's inference capabilities. 
- -## Acknowledgements - - - https://github.com/zphang/minimal-llama/ - - https://github.com/tloen/alpaca-lora - - https://github.com/huggingface/peft - -## Features - -- Simply paste datasets in the UI, separated by double blank lines -- Adjustable parameters for fine-tuning and inference -- Beginner-friendly UI with explanations for each parameter - -## Getting Started - -### Prerequisites - -- Linux or WSL -- Modern NVIDIA GPU with >= 16 GB of VRAM (but it might be possible to run with less for smaller sample lengths) - -### Usage - -I recommend using a virtual environment to install the required packages. Conda preferred. - -``` -conda create -n simple-llm-finetuner python=3.10 -conda activate simple-llm-finetuner -conda install -y cuda -c nvidia/label/cuda-11.7.0 -conda install -y pytorch=2 pytorch-cuda=11.7 -c pytorch -``` - -On WSL, you might need to install CUDA manually by following [these steps](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=WSL-Ubuntu&target_version=2.0&target_type=deb_local), then running the following before you launch: - -``` -export LD_LIBRARY_PATH=/usr/lib/wsl/lib -``` - -Clone the repository and install the required packages. - -``` -git clone https://github.com/lxe/simple-llm-finetuner.git -cd simple-llm-finetuner -pip install -r requirements.txt -``` - -Launch it - -``` -python main.py -``` - -Open http://127.0.0.1:7860/ in your browser. Prepare your training data by separating each sample with 2 blank lines. Paste the whole training dataset into the textbox. Specify the new LoRA adapter name in the "New PEFT Adapter Name" textbox, then click train. You might need to adjust the max sequence length and batch size to fit your GPU memory. The model will be saved in the `lora/` directory. - -After training is done, navigate to "Inference" tab, select your LoRA, and play with it. - -Have fun! - -## YouTube Walkthough - -https://www.youtube.com/watch?v=yM1wanDkNz8 - -## License - -MIT License diff --git a/spaces/lyf/faster-whisper-webui/README.md b/spaces/lyf/faster-whisper-webui/README.md deleted file mode 100644 index ea2c81a62fc4216c3d5ac1c110e8abdedad0cafc..0000000000000000000000000000000000000000 --- a/spaces/lyf/faster-whisper-webui/README.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Faster Whisper Webui -emoji: 🚀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: aadnk/faster-whisper-webui ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# Running Locally - -To run this program locally, first install Python 3.9+ and Git. Then install Pytorch 10.1+ and all the other dependencies: -``` -pip install -r requirements.txt -``` - -You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf). 
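Before launching, it can be worth confirming that the PyTorch build you installed actually sees your GPU; a minimal sanity check (not part of this project, purely illustrative):

```
# Quick check that a CUDA-enabled PyTorch build is installed correctly.
import torch

print("PyTorch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device:", torch.cuda.get_device_name(0))
```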
- -Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled: -``` -python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` - -You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments: -``` -python cli.py \ -[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \ -[--vad_merge_window VAD_MERGE_WINDOW] \ -[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \ -[--vad_padding VAD_PADDING] \ -[--vad_prompt_window VAD_PROMPT_WINDOW] -[--vad_cpu_cores NUMBER_OF_CORES] -[--vad_parallel_devices COMMA_DELIMITED_DEVICES] -[--auto_parallel BOOLEAN] -``` -In addition, you may also use URL's in addition to file paths as input. -``` -python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information. -If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file. - -### Multiple Files - -You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. -Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. -When more than one file is processed, the UI will also generate a "All_Output" zip file containing all the text output files. - -## Whisper Implementation - -You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) as a drop-in replacement for the -default Whisper which achieves up to a 4x speedup and 2x reduction in memory usage. - -You can install the requirements for a specific Whisper implementation in `requirements-fastWhisper.txt` -or `requirements-whisper.txt`: -``` -pip install -r requirements-fastWhisper.txt -``` -And then run the App or the CLI with the `--whisper_implementation fast-whisper` flag: -``` -python app.py --whisper_implementation fast-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` -You can also select the whisper implementation in `config.json5`: -```json5 -{ - "whisper_implementation": "fast-whisper" -} -``` -### GPU Acceleration - -In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install it in a virtual environment like Anaconda. - -## Google Colab - -You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models. - -See the [colab documentation](docs/colab.md) for more information. - -## Parallel Execution - -You can also run both the Web-UI or the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of -device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently: -``` -python cli.py --model large --vad silero-vad --language Japanese \ ---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Note that this requires a VAD to function properly, otherwise only the first GPU will be used. 
Though you could use `period-vad` to avoid taking the hit -of running Silero-Vad, at a slight cost to accuracy. - -This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also -set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory. -The default value is 30 minutes. - -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 -``` - -To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option: -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4 -``` - -You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time. - -### Auto Parallel - -You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of -cores (up to 8): -``` -python app.py --input_audio_max_duration -1 --auto_parallel True -``` - -# Docker - -To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU. -Then either use the GitLab hosted container below, or check out this repository and build an image: -``` -sudo docker build -t whisper-webui:1 . -``` - -You can then start the WebUI with GPU support like so: -``` -sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1 -``` - -Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only: -``` -sudo docker run -d -p 7860:7860 whisper-webui:1 -``` - -# GitLab Docker Registry - -This Docker container is also hosted on GitLab: - -``` -sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest -``` - -## Custom Arguments - -You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user): -``` -sudo docker run -d --gpus all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \ -app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \ ---default_vad silero-vad --default_model_name large -``` - -You can also call `cli.py` the same way: -``` -sudo docker run --gpus all \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---mount type=bind,source=${PWD},target=/app/data \ -registry.gitlab.com/aadnk/whisper-webui:latest \ -cli.py --model large --auto_parallel True --vad silero-vad \ ---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4 -``` - -## Caching - -Note that the models themselves are currently not included in the Docker images, and will be downloaded on the demand. -To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally) -prepopulate the directory with the different Whisper models. 
-``` -sudo docker run -d --gpus=all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ -registry.gitlab.com/aadnk/whisper-webui:latest -``` \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_call_policies.py b/spaces/ma-xu/LIVE/pybind11/tests/test_call_policies.py deleted file mode 100644 index ec005c132f9c172fda1570073ada46342e38a2ea..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_call_policies.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - -import env # noqa: F401 - -from pybind11_tests import call_policies as m -from pybind11_tests import ConstructorStats - - -@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False) -def test_keep_alive_argument(capture): - n_inst = ConstructorStats.detail_reg_inst() - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.addChild(m.Child()) - assert ConstructorStats.detail_reg_inst() == n_inst + 1 - assert capture == """ - Allocating child. - Releasing child. - """ - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == "Releasing parent." - - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.addChildKeepAlive(m.Child()) - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - assert capture == "Allocating child." - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. - """ - - -def test_keep_alive_return_value(capture): - n_inst = ConstructorStats.detail_reg_inst() - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.returnChild() - assert ConstructorStats.detail_reg_inst() == n_inst + 1 - assert capture == """ - Allocating child. - Releasing child. - """ - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == "Releasing parent." - - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.returnChildKeepAlive() - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - assert capture == "Allocating child." - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. - """ - - -# https://foss.heptapod.net/pypy/pypy/-/issues/2447 -@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented") -def test_alive_gc(capture): - n_inst = ConstructorStats.detail_reg_inst() - p = m.ParentGC() - p.addChildKeepAlive(m.Child()) - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - lst = [p] - lst.append(lst) # creates a circular reference - with capture: - del p, lst - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. - """ - - -def test_alive_gc_derived(capture): - class Derived(m.Parent): - pass - - n_inst = ConstructorStats.detail_reg_inst() - p = Derived() - p.addChildKeepAlive(m.Child()) - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - lst = [p] - lst.append(lst) # creates a circular reference - with capture: - del p, lst - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. 
- """ - - -def test_alive_gc_multi_derived(capture): - class Derived(m.Parent, m.Child): - def __init__(self): - m.Parent.__init__(self) - m.Child.__init__(self) - - n_inst = ConstructorStats.detail_reg_inst() - p = Derived() - p.addChildKeepAlive(m.Child()) - # +3 rather than +2 because Derived corresponds to two registered instances - assert ConstructorStats.detail_reg_inst() == n_inst + 3 - lst = [p] - lst.append(lst) # creates a circular reference - with capture: - del p, lst - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. - Releasing child. - """ - - -def test_return_none(capture): - n_inst = ConstructorStats.detail_reg_inst() - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.returnNullChildKeepAliveChild() - assert ConstructorStats.detail_reg_inst() == n_inst + 1 - assert capture == "" - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == "Releasing parent." - - with capture: - p = m.Parent() - assert capture == "Allocating parent." - with capture: - p.returnNullChildKeepAliveParent() - assert ConstructorStats.detail_reg_inst() == n_inst + 1 - assert capture == "" - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == "Releasing parent." - - -def test_keep_alive_constructor(capture): - n_inst = ConstructorStats.detail_reg_inst() - - with capture: - p = m.Parent(m.Child()) - assert ConstructorStats.detail_reg_inst() == n_inst + 2 - assert capture == """ - Allocating child. - Allocating parent. - """ - with capture: - del p - assert ConstructorStats.detail_reg_inst() == n_inst - assert capture == """ - Releasing parent. - Releasing child. - """ - - -def test_call_guard(): - assert m.unguarded_call() == "unguarded" - assert m.guarded_call() == "guarded" - - assert m.multiple_guards_correct_order() == "guarded & guarded" - assert m.multiple_guards_wrong_order() == "unguarded & guarded" - - if hasattr(m, "with_gil"): - assert m.with_gil() == "GIL held" - assert m.without_gil() == "GIL released" diff --git a/spaces/ma-xu/LIVE/pydiffvg/save_svg.py b/spaces/ma-xu/LIVE/pydiffvg/save_svg.py deleted file mode 100644 index 7f5641a63849cfec25fa2f560d50e92dc78576c3..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pydiffvg/save_svg.py +++ /dev/null @@ -1,167 +0,0 @@ -import torch -import pydiffvg -import xml.etree.ElementTree as etree -from xml.dom import minidom -def prettify(elem): - """Return a pretty-printed XML string for the Element. 
- """ - rough_string = etree.tostring(elem, 'utf-8') - reparsed = minidom.parseString(rough_string) - return reparsed.toprettyxml(indent=" ") -def save_svg(filename, width, height, shapes, shape_groups, use_gamma = False, background=None): - root = etree.Element('svg') - root.set('version', '1.1') - root.set('xmlns', 'http://www.w3.org/2000/svg') - root.set('width', str(width)) - root.set('height', str(height)) - if background is not None: - print(f"setting background to {background}") - root.set('style', str(background)) - defs = etree.SubElement(root, 'defs') - g = etree.SubElement(root, 'g') - if use_gamma: - f = etree.SubElement(defs, 'filter') - f.set('id', 'gamma') - f.set('x', '0') - f.set('y', '0') - f.set('width', '100%') - f.set('height', '100%') - gamma = etree.SubElement(f, 'feComponentTransfer') - gamma.set('color-interpolation-filters', 'sRGB') - feFuncR = etree.SubElement(gamma, 'feFuncR') - feFuncR.set('type', 'gamma') - feFuncR.set('amplitude', str(1)) - feFuncR.set('exponent', str(1/2.2)) - feFuncG = etree.SubElement(gamma, 'feFuncG') - feFuncG.set('type', 'gamma') - feFuncG.set('amplitude', str(1)) - feFuncG.set('exponent', str(1/2.2)) - feFuncB = etree.SubElement(gamma, 'feFuncB') - feFuncB.set('type', 'gamma') - feFuncB.set('amplitude', str(1)) - feFuncB.set('exponent', str(1/2.2)) - feFuncA = etree.SubElement(gamma, 'feFuncA') - feFuncA.set('type', 'gamma') - feFuncA.set('amplitude', str(1)) - feFuncA.set('exponent', str(1/2.2)) - g.set('style', 'filter:url(#gamma)') - # Store color - for i, shape_group in enumerate(shape_groups): - def add_color(shape_color, name): - if isinstance(shape_color, pydiffvg.LinearGradient): - lg = shape_color - color = etree.SubElement(defs, 'linearGradient') - color.set('id', name) - color.set('x1', str(lg.begin[0].item()/width)) - color.set('y1', str(lg.begin[1].item()/height)) - color.set('x2', str(lg.end[0].item()/width)) - color.set('y2', str(lg.end[1].item()/height)) - offsets = lg.offsets.data.cpu().numpy() - stop_colors = lg.stop_colors.data.cpu().numpy() - for j in range(offsets.shape[0]): - stop = etree.SubElement(color, 'stop') - stop.set('offset', str(offsets[j])) - c = lg.stop_colors[j, :] - stop.set('stop-color', 'rgb({}, {}, {})'.format(\ - int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) - stop.set('stop-opacity', '{}'.format(c[3])) - if isinstance(shape_color, pydiffvg.RadialGradient): - lg = shape_color - color = etree.SubElement(defs, 'radialGradient') - color.set('id', name) - color.set('cx', str(lg.center[0].item()/width)) - color.set('cy', str(lg.center[1].item()/height)) - # this only support width=height - color.set('r', str(lg.radius[0].item()/width)) - offsets = lg.offsets.data.cpu().numpy() - stop_colors = lg.stop_colors.data.cpu().numpy() - for j in range(offsets.shape[0]): - stop = etree.SubElement(color, 'stop') - stop.set('offset', str(offsets[j])) - c = lg.stop_colors[j, :] - stop.set('stop-color', 'rgb({}, {}, {})'.format(\ - int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) - stop.set('stop-opacity', '{}'.format(c[3])) - if shape_group.fill_color is not None: - add_color(shape_group.fill_color, 'shape_{}_fill'.format(i)) - if shape_group.stroke_color is not None: - add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i)) - for i, shape_group in enumerate(shape_groups): - shape = shapes[shape_group.shape_ids[0]] - if isinstance(shape, pydiffvg.Circle): - shape_node = etree.SubElement(g, 'circle') - shape_node.set('r', str(shape.radius.item())) - shape_node.set('cx', 
str(shape.center[0].item())) - shape_node.set('cy', str(shape.center[1].item())) - elif isinstance(shape, pydiffvg.Polygon): - shape_node = etree.SubElement(g, 'polygon') - points = shape.points.data.cpu().numpy() - path_str = '' - for j in range(0, shape.points.shape[0]): - path_str += '{} {}'.format(points[j, 0], points[j, 1]) - if j != shape.points.shape[0] - 1: - path_str += ' ' - shape_node.set('points', path_str) - elif isinstance(shape, pydiffvg.Path): - shape_node = etree.SubElement(g, 'path') - num_segments = shape.num_control_points.shape[0] - num_control_points = shape.num_control_points.data.cpu().numpy() - points = shape.points.data.cpu().numpy() - num_points = shape.points.shape[0] - path_str = 'M {} {}'.format(points[0, 0], points[0, 1]) - point_id = 1 - for j in range(0, num_segments): - if num_control_points[j] == 0: - p = point_id % num_points - path_str += ' L {} {}'.format(\ - points[p, 0], points[p, 1]) - point_id += 1 - elif num_control_points[j] == 1: - p1 = (point_id + 1) % num_points - path_str += ' Q {} {} {} {}'.format(\ - points[point_id, 0], points[point_id, 1], - points[p1, 0], points[p1, 1]) - point_id += 2 - elif num_control_points[j] == 2: - p2 = (point_id + 2) % num_points - path_str += ' C {} {} {} {} {} {}'.format(\ - points[point_id, 0], points[point_id, 1], - points[point_id + 1, 0], points[point_id + 1, 1], - points[p2, 0], points[p2, 1]) - point_id += 3 - shape_node.set('d', path_str) - elif isinstance(shape, pydiffvg.Rect): - shape_node = etree.SubElement(g, 'rect') - shape_node.set('x', str(shape.p_min[0].item())) - shape_node.set('y', str(shape.p_min[1].item())) - shape_node.set('width', str(shape.p_max[0].item() - shape.p_min[0].item())) - shape_node.set('height', str(shape.p_max[1].item() - shape.p_min[1].item())) - else: - assert(False) - shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item())) - if shape_group.fill_color is not None: - if isinstance(shape_group.fill_color, pydiffvg.LinearGradient): - shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) - elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): - shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) - else: - c = shape_group.fill_color.data.cpu().numpy() - shape_node.set('fill', 'rgb({}, {}, {})'.format(\ - int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) - shape_node.set('opacity', str(c[3])) - else: - shape_node.set('fill', 'none') - if shape_group.stroke_color is not None: - if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): - shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) - elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): - shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) - else: - c = shape_group.stroke_color.data.cpu().numpy() - shape_node.set('stroke', 'rgb({}, {}, {})'.format(\ - int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) - shape_node.set('stroke-opacity', str(c[3])) - shape_node.set('stroke-linecap', 'round') - shape_node.set('stroke-linejoin', 'round') - with open(filename, "w") as f: - f.write(prettify(root)) diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/set_operations.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/set_operations.h deleted file mode 100644 index 38ba1011d581b3187f3b6ac847070192d6f292d7..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cuda/detail/set_operations.h +++ /dev/null @@ -1,1998 +0,0 @@ -/****************************************************************************** - * 
Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ -#pragma once - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace cuda_cub { - -namespace __set_operations { - - template - THRUST_DEVICE_FUNCTION void - binary_search_iteration(It data, - Size &begin, - Size &end, - T key, - int shift, - Comp comp) - { - - IntT scale = (1 << shift) - 1; - Size mid = (begin + scale * end) >> shift; - - T key2 = data[mid]; - bool pred = UpperBound ? 
!comp(key, key2) : comp(key2, key); - if (pred) - begin = mid + 1; - else - end = mid; - } - - template - THRUST_DEVICE_FUNCTION Size - binary_search(It data, Size count, T key, Comp comp) - { - Size begin = 0; - Size end = count; - while (begin < end) - binary_search_iteration(data, - begin, - end, - key, - 1, - comp); - return begin; - } - - template - THRUST_DEVICE_FUNCTION Size - biased_binary_search(It data, Size count, T key, IntT levels, Comp comp) - { - Size begin = 0; - Size end = count; - - if (levels >= 4 && begin < end) - binary_search_iteration(data, begin, end, key, 9, comp); - if (levels >= 3 && begin < end) - binary_search_iteration(data, begin, end, key, 7, comp); - if (levels >= 2 && begin < end) - binary_search_iteration(data, begin, end, key, 5, comp); - if (levels >= 1 && begin < end) - binary_search_iteration(data, begin, end, key, 4, comp); - - while (begin < end) - binary_search_iteration(data, begin, end, key, 1, comp); - return begin; - } - - template - THRUST_DEVICE_FUNCTION Size - merge_path(It1 a, Size aCount, It2 b, Size bCount, Size diag, Comp comp) - { - typedef typename thrust::iterator_traits::value_type T; - - Size begin = thrust::max(0, diag - bCount); - Size end = thrust::min(diag, aCount); - - while (begin < end) - { - Size mid = (begin + end) >> 1; - T aKey = a[mid]; - T bKey = b[diag - 1 - mid]; - bool pred = UpperBound ? comp(aKey, bKey) : !comp(bKey, aKey); - if (pred) - begin = mid + 1; - else - end = mid; - } - return begin; - } - - template - THRUST_DEVICE_FUNCTION pair - balanced_path(It1 keys1, - It2 keys2, - Size num_keys1, - Size num_keys2, - Size diag, - Size2 levels, - CompareOp compare_op) - { - typedef typename iterator_traits::value_type T; - - Size index1 = merge_path(keys1, - num_keys1, - keys2, - num_keys2, - diag, - compare_op); - Size index2 = diag - index1; - - bool star = false; - if (index2 < num_keys2) - { - T x = keys2[index2]; - - // Search for the beginning of the duplicate run in both A and B. - Size start1 = biased_binary_search(keys1, - index1, - x, - levels, - compare_op); - Size start2 = biased_binary_search(keys2, - index2, - x, - levels, - compare_op); - - // The distance between x's merge path and its lower_bound is its rank. - // We add up the a and b ranks and evenly distribute them to - // get a stairstep path. - Size run1 = index1 - start1; - Size run2 = index2 - start2; - Size total_run = run1 + run2; - - // Attempt to advance b and regress a. 
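- // A duplicate run that straddles the diagonal is split as evenly as possible
- // between A and B; the round_up/"star" flag below records when B must take one
- // extra element so the returned partition stays balanced.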
- Size advance2 = max(total_run >> 1, total_run - run1); - Size end2 = min(num_keys2, start2 + advance2 + 1); - - Size run_end2 = index2 + binary_search(keys2 + index2, - end2 - index2, - x, - compare_op); - run2 = run_end2 - start2; - - advance2 = min(advance2, run2); - Size advance1 = total_run - advance2; - - bool round_up = (advance1 == advance2 + 1) && (advance2 < run2); - if (round_up) star = true; - - index1 = start1 + advance1; - } - return thrust::make_pair(index1, (diag - index1) + star); - } // func balanced_path - - template - struct PtxPolicy - { - enum - { - BLOCK_THREADS = _BLOCK_THREADS, - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, - ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD - 1 - }; - - static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; - static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; - static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; - }; // PtxPolicy - - template - struct Tuning; - - namespace mpl = thrust::detail::mpl::math; - - template - struct Tuning - { - enum - { - MAX_INPUT_BYTES = mpl::max::value, - COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(Value), - NOMINAL_4B_ITEMS_PER_THREAD = 7, - ITEMS_PER_THREAD = mpl::min< - int, - NOMINAL_4B_ITEMS_PER_THREAD, - mpl::max< - int, - 1, - ((NOMINAL_4B_ITEMS_PER_THREAD * 4) + - COMBINED_INPUT_BYTES - 1) / - COMBINED_INPUT_BYTES>::value>::value, - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // tuning sm30 - - template - struct Tuning - { - enum - { - MAX_INPUT_BYTES = mpl::max::value, - COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(U), - NOMINAL_4B_ITEMS_PER_THREAD = 15, - ITEMS_PER_THREAD = mpl::min< - int, - NOMINAL_4B_ITEMS_PER_THREAD, - mpl::max< - int, - 1, - ((NOMINAL_4B_ITEMS_PER_THREAD * 4) + - COMBINED_INPUT_BYTES - 1) / - COMBINED_INPUT_BYTES>::value>::value, - }; - - typedef PtxPolicy<256, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // tuning sm52 - - template - struct Tuning - { - enum - { - MAX_INPUT_BYTES = mpl::max::value, - COMBINED_INPUT_BYTES = sizeof(T), // + sizeof(U), - NOMINAL_4B_ITEMS_PER_THREAD = 19, - ITEMS_PER_THREAD = mpl::min< - int, - NOMINAL_4B_ITEMS_PER_THREAD, - mpl::max< - int, - 1, - ((NOMINAL_4B_ITEMS_PER_THREAD * 4) + - COMBINED_INPUT_BYTES - 1) / - COMBINED_INPUT_BYTES>::value>::value, - }; - - typedef PtxPolicy<512, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // tuning sm60 - - template - struct SetOpAgent - { - typedef typename iterator_traits::value_type key1_type; - typedef typename iterator_traits::value_type key2_type; - typedef typename iterator_traits::value_type value1_type; - typedef typename iterator_traits::value_type value2_type; - - typedef key1_type key_type; - typedef value1_type value_type; - - typedef cub::ScanTileState ScanTileState; - - template - struct PtxPlan : Tuning::type - { - typedef Tuning tuning; - - typedef typename core::LoadIterator::type KeysLoadIt1; - typedef typename core::LoadIterator::type KeysLoadIt2; - typedef typename core::LoadIterator::type ValuesLoadIt1; - typedef typename core::LoadIterator::type ValuesLoadIt2; - - typedef typename core::BlockLoad::type BlockLoadKeys1; - typedef typename core::BlockLoad::type BlockLoadKeys2; - typedef typename core::BlockLoad::type BlockLoadValues1; - typedef typename core::BlockLoad::type 
BlockLoadValues2; - - typedef cub::TilePrefixCallbackOp - TilePrefixCallback; - - typedef cub::BlockScan - BlockScan; - - // gather required temporary storage in a union - // - union TempStorage - { - struct - { - typename BlockScan::TempStorage scan; - typename TilePrefixCallback::TempStorage prefix; - }; - - struct - { - core::uninitialized_array - offset; - union - { - typename BlockLoadKeys1::TempStorage load_keys1; - typename BlockLoadKeys2::TempStorage load_keys2; - typename BlockLoadValues1::TempStorage load_values1; - typename BlockLoadValues2::TempStorage load_values2; - - // Allocate extra shmem than truely neccessary - // This will permit to avoid range checks in - // serial set operations, e.g. serial_set_difference - core::uninitialized_array< - key_type, - PtxPlan::ITEMS_PER_TILE + PtxPlan::BLOCK_THREADS> - keys_shared; - - core::uninitialized_array< - value_type, - PtxPlan::ITEMS_PER_TILE + PtxPlan::BLOCK_THREADS> - values_shared; - }; - }; - }; // union TempStorage - }; // struct PtxPlan - - typedef typename core::specialize_plan_msvc10_war::type::type ptx_plan; - - typedef typename ptx_plan::KeysLoadIt1 KeysLoadIt1; - typedef typename ptx_plan::KeysLoadIt2 KeysLoadIt2; - typedef typename ptx_plan::ValuesLoadIt1 ValuesLoadIt1; - typedef typename ptx_plan::ValuesLoadIt2 ValuesLoadIt2; - - typedef typename ptx_plan::BlockLoadKeys1 BlockLoadKeys1; - typedef typename ptx_plan::BlockLoadKeys2 BlockLoadKeys2; - typedef typename ptx_plan::BlockLoadValues1 BlockLoadValues1; - typedef typename ptx_plan::BlockLoadValues2 BlockLoadValues2; - - typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback; - typedef typename ptx_plan::BlockScan BlockScan; - - typedef typename ptx_plan::TempStorage TempStorage; - - enum - { - ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD, - BLOCK_THREADS = ptx_plan::BLOCK_THREADS, - }; - - struct impl - { - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - TempStorage & storage; - ScanTileState &tile_state; - KeysLoadIt1 keys1_in; - KeysLoadIt2 keys2_in; - ValuesLoadIt1 values1_in; - ValuesLoadIt2 values2_in; - Size keys1_count; - Size keys2_count; - KeysOutputIt keys_out; - ValuesOutputIt values_out; - CompareOp compare_op; - SetOp set_op; - pair *partitions; - std::size_t *output_count; - - //--------------------------------------------------------------------- - // Utility functions - //--------------------------------------------------------------------- - - template - THRUST_DEVICE_FUNCTION void - gmem_to_reg(T (&output)[ITEMS_PER_THREAD], - It1 input1, - It2 input2, - int count1, - int count2) - { - if (IS_FULL_TILE) - { -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD - 1; ++ITEM) - { - int idx = BLOCK_THREADS * ITEM + threadIdx.x; - output[ITEM] = (idx < count1) - ? static_cast(input1[idx]) - : static_cast(input2[idx - count1]); - } - - // last ITEM might be a conditional load even for full tiles - // please check first before attempting to load. - int ITEM = ITEMS_PER_THREAD - 1; - int idx = BLOCK_THREADS * ITEM + threadIdx.x; - if (idx < count1 + count2) - output[ITEM] = (idx < count1) - ? static_cast(input1[idx]) - : static_cast(input2[idx - count1]); - } - else - { -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int idx = BLOCK_THREADS * ITEM + threadIdx.x; - if (idx < count1 + count2) - { - output[ITEM] = (idx < count1) - ? 
static_cast(input1[idx]) - : static_cast(input2[idx - count1]); - } - } - } - } - - template - THRUST_DEVICE_FUNCTION void - reg_to_shared(It output, - T (&input)[ITEMS_PER_THREAD]) - { -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int idx = BLOCK_THREADS * ITEM + threadIdx.x; - output[idx] = input[ITEM]; - } - } - - template - void THRUST_DEVICE_FUNCTION - scatter(OutputIt output, - T (&input)[ITEMS_PER_THREAD], - SharedIt shared, - int active_mask, - Size thread_output_prefix, - Size tile_output_prefix, - int tile_output_count) - { - using core::sync_threadblock; - - - - int local_scatter_idx = thread_output_prefix - tile_output_prefix; -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (active_mask & (1 << ITEM)) - { - shared[local_scatter_idx++] = input[ITEM]; - } - } - sync_threadblock(); - - for (int item = threadIdx.x; - item < tile_output_count; - item += BLOCK_THREADS) - { - output[tile_output_prefix + item] = shared[item]; - } - } - - int THRUST_DEVICE_FUNCTION - serial_set_op(key_type *keys, - int keys1_beg, - int keys2_beg, - int keys1_count, - int keys2_count, - key_type (&output)[ITEMS_PER_THREAD], - int (&indices)[ITEMS_PER_THREAD], - CompareOp compare_op, - SetOp set_op) - { - int active_mask = set_op(keys, - keys1_beg, - keys2_beg, - keys1_count, - keys2_count, - output, - indices, - compare_op); - - return active_mask; - } - - //--------------------------------------------------------------------- - // Tile operations - //--------------------------------------------------------------------- - - template - void THRUST_DEVICE_FUNCTION - consume_tile(Size tile_idx) - { - using core::sync_threadblock; - using core::uninitialized_array; - - pair partition_beg = partitions[tile_idx + 0]; - pair partition_end = partitions[tile_idx + 1]; - - Size keys1_beg = partition_beg.first; - Size keys1_end = partition_end.first; - Size keys2_beg = partition_beg.second; - Size keys2_end = partition_end.second; - - // number of keys per tile - // - int num_keys1 = static_cast(keys1_end - keys1_beg); - int num_keys2 = static_cast(keys2_end - keys2_beg); - - - // load keys into shared memory for further processing - key_type keys_loc[ITEMS_PER_THREAD]; - - gmem_to_reg(keys_loc, - keys1_in + keys1_beg, - keys2_in + keys2_beg, - num_keys1, - num_keys2); - - reg_to_shared(&storage.keys_shared[0], keys_loc); - - sync_threadblock(); - - int diag_loc = min(ITEMS_PER_THREAD * threadIdx.x, - num_keys1 + num_keys2); - - pair partition_loc = - balanced_path(&storage.keys_shared[0], - &storage.keys_shared[num_keys1], - num_keys1, - num_keys2, - diag_loc, - 4, - compare_op); - - int keys1_beg_loc = partition_loc.first; - int keys2_beg_loc = partition_loc.second; - - // compute difference between next and current thread - // to obtain number of elements per thread - int value = threadIdx.x == 0 - ? (num_keys1 << 16) | num_keys2 - : (partition_loc.first << 16) | partition_loc.second; - - int dst = threadIdx.x == 0 ? 
BLOCK_THREADS - 1 : threadIdx.x - 1; - storage.offset[dst] = value; - - core::sync_threadblock(); - - pair partition1_loc = thrust::make_pair( - storage.offset[threadIdx.x] >> 16, - storage.offset[threadIdx.x] & 0xFFFF); - - int keys1_end_loc = partition1_loc.first; - int keys2_end_loc = partition1_loc.second; - - int num_keys1_loc = keys1_end_loc - keys1_beg_loc; - int num_keys2_loc = keys2_end_loc - keys2_beg_loc; - - // perform serial set operation - // - int indices[ITEMS_PER_THREAD]; - - int active_mask = serial_set_op(&storage.keys_shared[0], - keys1_beg_loc, - keys2_beg_loc + num_keys1, - num_keys1_loc, - num_keys2_loc, - keys_loc, - indices, - compare_op, - set_op); - sync_threadblock(); -#if 0 - if (ITEMS_PER_THREAD*threadIdx.x >= num_keys1 + num_keys2) - active_mask = 0; -#endif - - // look-back scan over thread_output_count - // to compute global thread_output_base and tile_otput_count; - Size tile_output_count = 0; - Size thread_output_prefix = 0; - Size tile_output_prefix = 0; - Size thread_output_count = static_cast(__popc(active_mask)); - - if (tile_idx == 0) // first tile - { - BlockScan(storage.scan) - .ExclusiveSum(thread_output_count, - thread_output_prefix, - tile_output_count); - if (threadIdx.x == 0) - { - // Update tile status if this is not the last tile - if (!IS_LAST_TILE) - { - tile_state.SetInclusive(0, tile_output_count); - } - } - } - else - { - TilePrefixCallback prefix_cb(tile_state, - storage.prefix, - cub::Sum(), - tile_idx); - - BlockScan(storage.scan) - .ExclusiveSum(thread_output_count, - thread_output_prefix, - prefix_cb); - tile_output_count = prefix_cb.GetBlockAggregate(); - tile_output_prefix = prefix_cb.GetExclusivePrefix(); - } - - sync_threadblock(); - - // scatter results - // - scatter(keys_out, - keys_loc, - &storage.keys_shared[0], - active_mask, - thread_output_prefix, - tile_output_prefix, - tile_output_count); - - if (HAS_VALUES::value) - { - value_type values_loc[ITEMS_PER_THREAD]; - gmem_to_reg(values_loc, - values1_in + keys1_beg, - values2_in + keys2_beg, - num_keys1, - num_keys2); - - sync_threadblock(); - - reg_to_shared(&storage.values_shared[0], values_loc); - - sync_threadblock(); - - // gather items from shared mem - // -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - if (active_mask & (1 << ITEM)) - { - values_loc[ITEM] = storage.values_shared[indices[ITEM]]; - } - } - - sync_threadblock(); - - scatter(values_out, - values_loc, - &storage.values_shared[0], - active_mask, - thread_output_prefix, - tile_output_prefix, - tile_output_count); - } - - if (IS_LAST_TILE && threadIdx.x == 0) - { - *output_count = tile_output_prefix + tile_output_count; - } - } - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - THRUST_DEVICE_FUNCTION - impl(TempStorage & storage_, - ScanTileState &tile_state_, - KeysIt1 keys1_, - KeysIt2 keys2_, - ValuesIt1 values1_, - ValuesIt2 values2_, - Size keys1_count_, - Size keys2_count_, - KeysOutputIt keys_out_, - ValuesOutputIt values_out_, - CompareOp compare_op_, - SetOp set_op_, - pair *partitions_, - std::size_t * output_count_) - : storage(storage_), - tile_state(tile_state_), - keys1_in(core::make_load_iterator(ptx_plan(), keys1_)), - keys2_in(core::make_load_iterator(ptx_plan(), keys2_)), - values1_in(core::make_load_iterator(ptx_plan(), values1_)), - values2_in(core::make_load_iterator(ptx_plan(), values2_)), - keys1_count(keys1_count_), - 
keys2_count(keys2_count_), - keys_out(keys_out_), - values_out(values_out_), - compare_op(compare_op_), - set_op(set_op_), - partitions(partitions_), - output_count(output_count_) - { - int tile_idx = blockIdx.x; - int num_tiles = gridDim.x; - - if (tile_idx < num_tiles-1) - { - consume_tile(tile_idx); - } - else - { - consume_tile(tile_idx); - } - } - }; // struct impl - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(KeysIt1 keys1, - KeysIt2 keys2, - ValuesIt1 values1, - ValuesIt2 values2, - Size keys1_count, - Size keys2_count, - KeysOutputIt keys_output, - ValuesOutputIt values_output, - CompareOp compare_op, - SetOp set_op, - pair *partitions, - std::size_t * output_count, - ScanTileState tile_state, - char * shmem) - { - TempStorage &storage = *reinterpret_cast(shmem); - - impl(storage, - tile_state, - keys1, - keys2, - values1, - values2, - keys1_count, - keys2_count, - keys_output, - values_output, - compare_op, - set_op, - partitions, - output_count); - } - }; // struct SetOpAgent - - template - struct PartitionAgent - { - template - struct PtxPlan : PtxPolicy<256> {}; - - typedef core::specialize_plan ptx_plan; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(KeysIt1 keys1, - KeysIt2 keys2, - Size keys1_count, - Size keys2_count, - Size num_partitions, - pair *partitions, - CompareOp compare_op, - int items_per_tile, - char * /*shmem*/) - { - Size partition_idx = blockDim.x * blockIdx.x + threadIdx.x; - if (partition_idx < num_partitions) - { - Size partition_at = min(partition_idx * items_per_tile, - keys1_count + keys2_count); - pair diag = balanced_path(keys1, - keys2, - keys1_count, - keys2_count, - partition_at, - 4ll, - compare_op); - partitions[partition_idx] = diag; - } - } - }; // struct PartitionAgent - - template - struct InitAgent - { - template - struct PtxPlan : PtxPolicy<128> {}; - - typedef core::specialize_plan ptx_plan; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(ScanTileState tile_state, - Size num_tiles, - char * /*shmem*/) - { - tile_state.InitializeStatus(num_tiles); - } - }; // struct InitAgent - - //--------------------------------------------------------------------- - // Serial set operations - //--------------------------------------------------------------------- - - // serial_set_intersection - // ----------------------- - // emit A if A and B are in range and equal. - struct serial_set_intersection - { - // max_input_size <= 32 - template - int THRUST_DEVICE_FUNCTION - operator()(T * keys, - int keys1_beg, - int keys2_beg, - int keys1_count, - int keys2_count, - T (&output)[ITEMS_PER_THREAD], - int (&indices)[ITEMS_PER_THREAD], - CompareOp compare_op) - { - int active_mask = 0; - - int aBegin = keys1_beg; - int bBegin = keys2_beg; - int aEnd = keys1_beg + keys1_count; - int bEnd = keys2_beg + keys2_count; - - T aKey = keys[aBegin]; - T bKey = keys[bBegin]; - -#pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - { - bool pA = compare_op(aKey, bKey); - bool pB = compare_op(bKey, aKey); - - // The outputs must come from A by definition of set interection. 
- output[i] = aKey; - indices[i] = aBegin; - - if ((aBegin < aEnd) && (bBegin < bEnd) && pA == pB) - active_mask |= 1 << i; - - if (!pB) {aKey = keys[++aBegin]; } - if (!pA) {bKey = keys[++bBegin]; } - } - return active_mask; - } - }; // struct serial_set_intersection - - // serial_set_symmetric_difference - // --------------------- - // emit A if A < B and emit B if B < A. - struct serial_set_symmetric_difference - { - // max_input_size <= 32 - template - int THRUST_DEVICE_FUNCTION - operator()(T * keys, - int keys1_beg, - int keys2_beg, - int keys1_count, - int keys2_count, - T (&output)[ITEMS_PER_THREAD], - int (&indices)[ITEMS_PER_THREAD], - CompareOp compare_op) - { - int active_mask = 0; - - int aBegin = keys1_beg; - int bBegin = keys2_beg; - int aEnd = keys1_beg + keys1_count; - int bEnd = keys2_beg + keys2_count; - int end = aEnd + bEnd; - - T aKey = keys[aBegin]; - T bKey = keys[bBegin]; - - -#pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - { - bool pB = aBegin >= aEnd; - bool pA = !pB && bBegin >= bEnd; - - if (!pA && !pB) - { - pA = compare_op(aKey, bKey); - pB = !pA && compare_op(bKey, aKey); - } - - // The outputs must come from A by definition of set difference. - output[i] = pA ? aKey : bKey; - indices[i] = pA ? aBegin : bBegin; - - if (aBegin + bBegin < end && pA != pB) - active_mask |= 1 << i; - - if (!pB) {aKey = keys[++aBegin]; } - if (!pA) {bKey = keys[++bBegin]; } - - } - return active_mask; - } - }; // struct set_symmetric_difference - - // serial_set_difference - // --------------------- - // emit A if A < B - struct serial_set_difference - { - // max_input_size <= 32 - template - int THRUST_DEVICE_FUNCTION - operator()(T * keys, - int keys1_beg, - int keys2_beg, - int keys1_count, - int keys2_count, - T (&output)[ITEMS_PER_THREAD], - int (&indices)[ITEMS_PER_THREAD], - CompareOp compare_op) - { - int active_mask = 0; - - int aBegin = keys1_beg; - int bBegin = keys2_beg; - int aEnd = keys1_beg + keys1_count; - int bEnd = keys2_beg + keys2_count; - int end = aEnd + bEnd; - - T aKey = keys[aBegin]; - T bKey = keys[bBegin]; - -#pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - { - bool pB = aBegin >= aEnd; - bool pA = !pB && bBegin >= bEnd; - - if (!pA && !pB) - { - pA = compare_op(aKey, bKey); - pB = !pA && compare_op(bKey, aKey); - } - - // The outputs must come from A by definition of set difference. - output[i] = aKey; - indices[i] = aBegin; - - if (aBegin + bBegin < end && pA) - active_mask |= 1 << i; - - if (!pB) { aKey = keys[++aBegin]; } - if (!pA) { bKey = keys[++bBegin]; } - } - return active_mask; - } - }; // struct set_difference - - // serial_set_union - // ---------------- - // emit A if A <= B else emit B - struct serial_set_union - { - // max_input_size <= 32 - template - int THRUST_DEVICE_FUNCTION - operator()(T * keys, - int keys1_beg, - int keys2_beg, - int keys1_count, - int keys2_count, - T (&output)[ITEMS_PER_THREAD], - int (&indices)[ITEMS_PER_THREAD], - CompareOp compare_op) - { - int active_mask = 0; - - int aBegin = keys1_beg; - int bBegin = keys2_beg; - int aEnd = keys1_beg + keys1_count; - int bEnd = keys2_beg + keys2_count; - int end = aEnd + bEnd; - - T aKey = keys[aBegin]; - T bKey = keys[bBegin]; - -#pragma unroll - for (int i = 0; i < ITEMS_PER_THREAD; ++i) - { - bool pB = aBegin >= aEnd; - bool pA = !pB && bBegin >= bEnd; - - if (!pA && !pB) - { - pA = compare_op(aKey, bKey); - pB = !pA && compare_op(bKey, aKey); - } - - // Output A in case of a tie, so check if b < a. - output[i] = pB ? 
bKey : aKey; - indices[i] = pB ? bBegin : aBegin; - - if (aBegin + bBegin < end) - active_mask |= 1 << i; - - if (!pB) { aKey = keys[++aBegin]; } - if (!pA) { bKey = keys[++bBegin]; } - - } - return active_mask; - } - }; // struct set_union - - template - cudaError_t THRUST_RUNTIME_FUNCTION - doit_step(void * d_temp_storage, - size_t & temp_storage_size, - KeysIt1 keys1, - KeysIt2 keys2, - ValuesIt1 values1, - ValuesIt2 values2, - Size num_keys1, - Size num_keys2, - KeysOutputIt keys_output, - ValuesOutputIt values_output, - std::size_t * output_count, - CompareOp compare_op, - SetOp set_op, - cudaStream_t stream, - bool debug_sync) - { - Size keys_total = num_keys1 + num_keys2; - if (keys_total == 0) - return cudaErrorNotSupported; - - cudaError_t status = cudaSuccess; - - using core::AgentPlan; - using core::AgentLauncher; - - typedef AgentLauncher< - SetOpAgent > - set_op_agent; - - typedef AgentLauncher > - partition_agent; - - typedef typename set_op_agent::ScanTileState ScanTileState; - typedef AgentLauncher > init_agent; - - - AgentPlan set_op_plan = set_op_agent::get_plan(stream); - AgentPlan init_plan = init_agent::get_plan(); - AgentPlan partition_plan = partition_agent::get_plan(); - - int tile_size = set_op_plan.items_per_tile; - Size num_tiles = (keys_total + tile_size - 1) / tile_size; - - size_t tile_agent_storage; - status = ScanTileState::AllocationSize(num_tiles, tile_agent_storage); - CUDA_CUB_RET_IF_FAIL(status); - - size_t vshmem_storage = core::vshmem_size(set_op_plan.shared_memory_size, - num_tiles); - size_t partition_agent_storage = (num_tiles + 1) * sizeof(Size) * 2; - - void *allocations[3] = {NULL, NULL, NULL}; - size_t allocation_sizes[3] = {tile_agent_storage, - partition_agent_storage, - vshmem_storage}; - - status = core::alias_storage(d_temp_storage, - temp_storage_size, - allocations, - allocation_sizes); - CUDA_CUB_RET_IF_FAIL(status); - - if (d_temp_storage == NULL) - { - return status; - } - - ScanTileState tile_state; - status = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]); - CUDA_CUB_RET_IF_FAIL(status); - - pair *partitions = (pair *)allocations[1]; - char *vshmem_ptr = vshmem_storage > 0 ? 
(char *)allocations[2] : NULL; - - init_agent ia(init_plan, num_tiles, stream, "set_op::init_agent", debug_sync); - ia.launch(tile_state, num_tiles); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - partition_agent pa(partition_plan, num_tiles+1, stream, "set_op::partition agent", debug_sync); - pa.launch(keys1, - keys2, - num_keys1, - num_keys2, - num_tiles+1, - partitions, - compare_op, - tile_size); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - set_op_agent sa(set_op_plan, keys_total, stream, vshmem_ptr, "set_op::set_op_agent", debug_sync); - sa.launch(keys1, - keys2, - values1, - values2, - num_keys1, - num_keys2, - keys_output, - values_output, - compare_op, - set_op, - partitions, - output_count, - tile_state); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - return status; - } - - template - THRUST_RUNTIME_FUNCTION - pair - set_operations(execution_policy& policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ValuesIt1 values1_first, - ValuesIt2 values2_first, - KeysOutputIt keys_output, - ValuesOutputIt values_output, - CompareOp compare_op, - SetOp set_op) - { - typedef typename iterator_traits::difference_type size_type; - - size_type num_keys1 = static_cast(thrust::distance(keys1_first, keys1_last)); - size_type num_keys2 = static_cast(thrust::distance(keys2_first, keys2_last)); - - if (num_keys1 + num_keys2 == 0) - return thrust::make_pair(keys_output, values_output); - - size_t temp_storage_bytes = 0; - cudaStream_t stream = cuda_cub::stream(policy); - bool debug_sync = THRUST_DEBUG_SYNC_FLAG; - - cudaError_t status; - THRUST_DOUBLE_INDEX_TYPE_DISPATCH(status, doit_step, - num_keys1, num_keys2, (NULL, - temp_storage_bytes, - keys1_first, - keys2_first, - values1_first, - values2_first, - num_keys1_fixed, - num_keys2_fixed, - keys_output, - values_output, - reinterpret_cast(NULL), - compare_op, - set_op, - stream, - debug_sync)); - cuda_cub::throw_on_error(status, "set_operations failed on 1st step"); - - size_t allocation_sizes[2] = {sizeof(std::size_t), temp_storage_bytes}; - void * allocations[2] = {NULL, NULL}; - - size_t storage_size = 0; - - status = core::alias_storage(NULL, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "set_operations failed on 1st alias_storage"); - - // Allocate temporary storage. 
- thrust::detail::temporary_array - tmp(policy, storage_size); - void *ptr = static_cast(tmp.data().get()); - - status = core::alias_storage(ptr, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "set_operations failed on 2nd alias_storage"); - - std::size_t* d_output_count - = thrust::detail::aligned_reinterpret_cast(allocations[0]); - - THRUST_DOUBLE_INDEX_TYPE_DISPATCH(status, doit_step, - num_keys1, num_keys2, (allocations[1], - temp_storage_bytes, - keys1_first, - keys2_first, - values1_first, - values2_first, - num_keys1_fixed, - num_keys2_fixed, - keys_output, - values_output, - d_output_count, - compare_op, - set_op, - stream, - debug_sync)); - cuda_cub::throw_on_error(status, "set_operations failed on 2nd step"); - - status = cuda_cub::synchronize(policy); - cuda_cub::throw_on_error(status, "set_operations failed to synchronize"); - - std::size_t output_count = cuda_cub::get_value(policy, d_output_count); - - return thrust::make_pair(keys_output + output_count, values_output + output_count); - } -} // namespace __set_operations - -//------------------------- -// Thrust API entry points -//------------------------- - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -set_difference(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result, - CompareOp compare) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - typename thrust::iterator_value::type *null_ = NULL; - // - ret = __set_operations::set_operations( - policy, - items1_first, - items1_last, - items2_first, - items2_last, - null_, - null_, - result, - null_, - compare, - __set_operations::serial_set_difference()) - .first; - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_difference(cvt_to_seq(derived_cast(policy)), - items1_first, - items1_last, - items2_first, - items2_last, - result, - compare); -#endif - } - return ret; -} - -template -OutputIt __host__ __device__ -set_difference(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_difference(policy, - items1_first, - items1_last, - items2_first, - items2_last, - result, - less()); -} - -/*****************************/ - - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -set_intersection(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result, - CompareOp compare) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - typename thrust::iterator_value::type *null_ = NULL; - // - ret = __set_operations::set_operations( - policy, - items1_first, - items1_last, - items2_first, - items2_last, - null_, - null_, - result, - null_, - compare, - __set_operations::serial_set_intersection()) - .first; - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_intersection(cvt_to_seq(derived_cast(policy)), - items1_first, - items1_last, - items2_first, - items2_last, - result, - compare); -#endif - } - return ret; -} - -template -OutputIt __host__ __device__ -set_intersection(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_intersection(policy, - 
items1_first, - items1_last, - items2_first, - items2_last, - result, - less()); -} - - -/*****************************/ - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -set_symmetric_difference(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result, - CompareOp compare) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - typename thrust::iterator_value::type *null_ = NULL; - // - ret = __set_operations::set_operations( - policy, - items1_first, - items1_last, - items2_first, - items2_last, - null_, - null_, - result, - null_, - compare, - __set_operations::serial_set_symmetric_difference()) - .first; - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_symmetric_difference(cvt_to_seq(derived_cast(policy)), - items1_first, - items1_last, - items2_first, - items2_last, - result, - compare); -#endif - } - return ret; -} - - -template -OutputIt __host__ __device__ -set_symmetric_difference(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_symmetric_difference(policy, - items1_first, - items1_last, - items2_first, - items2_last, - result, - less()); -} - -/*****************************/ - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -set_union(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result, - CompareOp compare) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - typename thrust::iterator_value::type *null_ = NULL; - // - ret = __set_operations::set_operations( - policy, - items1_first, - items1_last, - items2_first, - items2_last, - null_, - null_, - result, - null_, - compare, - __set_operations::serial_set_union()) - .first; - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_union(cvt_to_seq(derived_cast(policy)), - items1_first, - items1_last, - items2_first, - items2_last, - result, - compare); -#endif - } - return ret; -} - - -template -OutputIt __host__ __device__ -set_union(execution_policy &policy, - ItemsIt1 items1_first, - ItemsIt1 items1_last, - ItemsIt2 items2_first, - ItemsIt2 items2_last, - OutputIt result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_union(policy, - items1_first, - items1_last, - items2_first, - items2_last, - result, - less()); -} - - -/*****************************/ -/*****************************/ -/***** *_by_key *****/ -/*****************************/ -/*****************************/ - -/*****************************/ - -__thrust_exec_check_disable__ -template -pair __host__ __device__ -set_difference_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result, - CompareOp compare_op) -{ - pair ret = thrust::make_pair(keys_result, items_result); - if (__THRUST_HAS_CUDART__) - { - ret = __set_operations::set_operations( - policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op, - __set_operations::serial_set_difference()); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = 
thrust::set_difference_by_key(cvt_to_seq(derived_cast(policy)), - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op); -#endif - } - return ret; -} - -template -pair __host__ __device__ -set_difference_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_difference_by_key(policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - less()); -} - -/*****************************/ - -__thrust_exec_check_disable__ -template -pair __host__ __device__ -set_intersection_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result, - CompareOp compare_op) -{ - pair ret = thrust::make_pair(keys_result, items_result); - if (__THRUST_HAS_CUDART__) - { - ret = __set_operations::set_operations( - policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items1_first, - keys_result, - items_result, - compare_op, - __set_operations::serial_set_intersection()); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_intersection_by_key(cvt_to_seq(derived_cast(policy)), - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - keys_result, - items_result, - compare_op); -#endif - } - return ret; -} - -template -pair __host__ __device__ -set_intersection_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_intersection_by_key(policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - keys_result, - items_result, - less()); -} - -/*****************************/ - -__thrust_exec_check_disable__ -template -pair __host__ __device__ -set_symmetric_difference_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result, - CompareOp compare_op) -{ - pair ret = thrust::make_pair(keys_result, items_result); - if (__THRUST_HAS_CUDART__) - { - ret = __set_operations::set_operations( - policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op, - __set_operations::serial_set_symmetric_difference()); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_symmetric_difference_by_key(cvt_to_seq(derived_cast(policy)), - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op); -#endif - } - return ret; -} - -template -pair __host__ __device__ -set_symmetric_difference_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result) -{ - typedef typename 
thrust::iterator_value::type value_type; - return cuda_cub::set_symmetric_difference_by_key(policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - less()); -} - -/*****************************/ - -__thrust_exec_check_disable__ -template -pair __host__ __device__ -set_union_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result, - CompareOp compare_op) -{ - pair ret = thrust::make_pair(keys_result, items_result); - if (__THRUST_HAS_CUDART__) - { - ret = __set_operations::set_operations( - policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op, - __set_operations::serial_set_union()); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::set_union_by_key(cvt_to_seq(derived_cast(policy)), - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - compare_op); -#endif - } - return ret; -} - -template -pair __host__ __device__ -set_union_by_key(execution_policy &policy, - KeysIt1 keys1_first, - KeysIt1 keys1_last, - KeysIt2 keys2_first, - KeysIt2 keys2_last, - ItemsIt1 items1_first, - ItemsIt2 items2_first, - KeysOutputIt keys_result, - ItemsOutputIt items_result) -{ - typedef typename thrust::iterator_value::type value_type; - return cuda_cub::set_union_by_key(policy, - keys1_first, - keys1_last, - keys2_first, - keys2_last, - items1_first, - items2_first, - keys_result, - items_result, - less()); -} - -} // namespace cuda_cub -} // end namespace thrust -#endif diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/malloc_and_free.h b/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/malloc_and_free.h deleted file mode 100644 index 7c545250e396611dd3190a3cd95e3302ab345efb..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/detail/sequential/malloc_and_free.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include // for malloc & free -#include - -namespace thrust -{ -namespace system -{ -namespace detail -{ -namespace sequential -{ - - -template -inline __host__ __device__ -void *malloc(execution_policy &, std::size_t n) -{ - return std::malloc(n); -} // end mallc() - - -template -inline __host__ __device__ -void free(sequential::execution_policy &, Pointer ptr) -{ - std::free(thrust::raw_pointer_cast(ptr)); -} // end mallc() - - -} // end sequential -} // end detail -} // end system -} // end thrust - diff --git a/spaces/marioboy/neil-breen/encoder_preprocess.py b/spaces/marioboy/neil-breen/encoder_preprocess.py deleted file mode 100644 index 11502013c8d75d4652fb0ffdcdc49d55e8fb8bc9..0000000000000000000000000000000000000000 --- a/spaces/marioboy/neil-breen/encoder_preprocess.py +++ /dev/null @@ -1,70 +0,0 @@ -from encoder.preprocess import preprocess_librispeech, preprocess_voxceleb1, preprocess_voxceleb2 -from utils.argutils import print_args -from pathlib import Path -import argparse - -if __name__ == "__main__": - class MyFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): - pass - - parser = argparse.ArgumentParser( - description="Preprocesses audio files from datasets, encodes them as mel spectrograms and " - "writes them to the disk. This will allow you to train the encoder. The " - "datasets required are at least one of VoxCeleb1, VoxCeleb2 and LibriSpeech. " - "Ideally, you should have all three. You should extract them as they are " - "after having downloaded them and put them in a same directory, e.g.:\n" - "-[datasets_root]\n" - " -LibriSpeech\n" - " -train-other-500\n" - " -VoxCeleb1\n" - " -wav\n" - " -vox1_meta.csv\n" - " -VoxCeleb2\n" - " -dev", - formatter_class=MyFormatter - ) - parser.add_argument("datasets_root", type=Path, help=\ - "Path to the directory containing your LibriSpeech/TTS and VoxCeleb datasets.") - parser.add_argument("-o", "--out_dir", type=Path, default=argparse.SUPPRESS, help=\ - "Path to the output directory that will contain the mel spectrograms. If left out, " - "defaults to /SV2TTS/encoder/") - parser.add_argument("-d", "--datasets", type=str, - default="librispeech_other,voxceleb1,voxceleb2", help=\ - "Comma-separated list of the name of the datasets you want to preprocess. Only the train " - "set of these datasets will be used. Possible names: librispeech_other, voxceleb1, " - "voxceleb2.") - parser.add_argument("-s", "--skip_existing", action="store_true", help=\ - "Whether to skip existing output files with the same name. Useful if this script was " - "interrupted.") - parser.add_argument("--no_trim", action="store_true", help=\ - "Preprocess audio without trimming silences (not recommended).") - args = parser.parse_args() - - # Verify webrtcvad is available - if not args.no_trim: - try: - import webrtcvad - except: - raise ModuleNotFoundError("Package 'webrtcvad' not found. This package enables " - "noise removal and is recommended. Please install and try again. 
If installation fails, " - "use --no_trim to disable this error message.") - del args.no_trim - - # Process the arguments - args.datasets = args.datasets.split(",") - if not hasattr(args, "out_dir"): - args.out_dir = args.datasets_root.joinpath("SV2TTS", "encoder") - assert args.datasets_root.exists() - args.out_dir.mkdir(exist_ok=True, parents=True) - - # Preprocess the datasets - print_args(args, parser) - preprocess_func = { - "librispeech_other": preprocess_librispeech, - "voxceleb1": preprocess_voxceleb1, - "voxceleb2": preprocess_voxceleb2, - } - args = vars(args) - for dataset in args.pop("datasets"): - print("Preprocessing %s" % dataset) - preprocess_func[dataset](**args) diff --git a/spaces/masoodkhanpatel/twitter-trends-qatar/app.py b/spaces/masoodkhanpatel/twitter-trends-qatar/app.py deleted file mode 100644 index 1373bb82843743138438868b3d2f3e9d0a4f9133..0000000000000000000000000000000000000000 --- a/spaces/masoodkhanpatel/twitter-trends-qatar/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import pandas as pd -import gradio as gr - - -URL = "https://docs.google.com/spreadsheets/d/1pJPFxqbWeASWi4KZAPf7Go7b46y_7sHePmf-vPRD67I/edit?usp=sharing" -csv_url = URL.replace('/edit?usp=', '/export?format=csv&usp=') - -def get_data(): - df = pd.read_csv(csv_url) - df['Tweet Volume'] = df['Tweet Volume'].str[:-1] - df['Tweet Volume'] = df['Tweet Volume'].transform( lambda x: x[-2:] if 'Under' in x else x) - df['Trending Topic / Hashtag'] = df['Trending Topic / Hashtag'].transform( lambda x: x.split()[0]) - df["Tweet Volume"] = pd.to_numeric(df["Tweet Volume"]) - df = df.sort_values(by=['Tweet Volume'], ascending=False) - return df[["Trending Topic / Hashtag", "Tweet Volume"]][:15] - -with gr.Blocks() as demo: - gr.Markdown("# 📈 Twitter Trends - Qatar using Real-Time Line and Scatter Plot") - gr.Markdown("Following are the current top twitter trending topics in Qatar, Trends last updated every 30 minutes !") - with gr.Row(): - gr.LinePlot(get_data, x="Trending Topic / Hashtag", y="Tweet Volume", tooltip=["Trending Topic / Hashtag","Tweet Volume"] , every=5, overlay_point=True, width=500, height=500, title='Real-Time Line Plot') - gr.ScatterPlot(get_data, y="Tweet Volume", x="Trending Topic / Hashtag", tooltip=["Trending Topic / Hashtag","Tweet Volume"] , every=5, width=500, height=500, title='Real-Time Scatter Plot') - with gr.Row(): - gr.DataFrame(get_data, every=5) -demo.queue().launch() # Run the demo with queuing enabled \ No newline at end of file diff --git a/spaces/matthoffner/open-codetree/additional.d.ts b/spaces/matthoffner/open-codetree/additional.d.ts deleted file mode 100644 index 8aaad0456d413f0c7ea25f5d5cf6cca5c6694fc2..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/open-codetree/additional.d.ts +++ /dev/null @@ -1,21 +0,0 @@ -import "iron-session"; -import { User } from "./graphql/generated/graphql"; -import { OauthInput, OauthProvider } from "./store/features/authSlice"; - -declare global { - interface Window { - withOauth: (input: OauthInput, provider: OauthProvider) => void; - } -} - -declare module "iron-session" { - interface IronSessionData { - user?: { - message?: string; - token?: string | null; - status: boolean; - data: User; - isLoggedIn?: boolean; - }; - } -} diff --git a/spaces/mayordp/DeepFakeAI/tests/test_utilities.py b/spaces/mayordp/DeepFakeAI/tests/test_utilities.py deleted file mode 100644 index e503e74378796c8bf9c4d9d2f6bc077c4e593b39..0000000000000000000000000000000000000000 --- a/spaces/mayordp/DeepFakeAI/tests/test_utilities.py +++ /dev/null 
@@ -1,107 +0,0 @@ -import glob -import subprocess -import pytest - -import DeepFakeAI.globals -from DeepFakeAI.utilities import conditional_download, detect_fps, extract_frames, create_temp, get_temp_directory_path, clear_temp - - -@pytest.fixture(scope = 'module', autouse = True) -def before_all() -> None: - DeepFakeAI.globals.temp_frame_quality = 100 - DeepFakeAI.globals.trim_frame_start = None - DeepFakeAI.globals.trim_frame_end = None - DeepFakeAI.globals.temp_frame_format = 'png' - conditional_download('.assets/examples', - [ - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4' - ]) - subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=25', '.assets/examples/target-240p-25fps.mp4' ]) - subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=30', '.assets/examples/target-240p-30fps.mp4' ]) - subprocess.run([ 'ffmpeg', '-i', '.assets/examples/target-240p.mp4', '-vf', 'fps=60', '.assets/examples/target-240p-60fps.mp4' ]) - - -@pytest.fixture(scope = 'function', autouse = True) -def before_each() -> None: - DeepFakeAI.globals.trim_frame_start = None - DeepFakeAI.globals.trim_frame_end = None - DeepFakeAI.globals.temp_frame_quality = 90 - DeepFakeAI.globals.temp_frame_format = 'jpg' - - -def test_detect_fps() -> None: - assert detect_fps('.assets/examples/target-240p-25fps.mp4') == 25.0 - assert detect_fps('.assets/examples/target-240p-30fps.mp4') == 30.0 - assert detect_fps('.assets/examples/target-240p-60fps.mp4') == 60.0 - - -def test_extract_frames() -> None: - target_paths =\ - [ - '.assets/examples/target-240p-25fps.mp4', - '.assets/examples/target-240p-30fps.mp4', - '.assets/examples/target-240p-60fps.mp4' - ] - for target_path in target_paths: - temp_directory_path = get_temp_directory_path(target_path) - create_temp(target_path) - - assert extract_frames(target_path, 30.0) is True - assert len(glob.glob1(temp_directory_path, '*.jpg')) == 324 - - clear_temp(target_path) - - -def test_extract_frames_with_trim_start() -> None: - DeepFakeAI.globals.trim_frame_start = 224 - data_provider =\ - [ - ('.assets/examples/target-240p-25fps.mp4', 55), - ('.assets/examples/target-240p-30fps.mp4', 100), - ('.assets/examples/target-240p-60fps.mp4', 212) - ] - for target_path, frame_total in data_provider: - temp_directory_path = get_temp_directory_path(target_path) - create_temp(target_path) - - assert extract_frames(target_path, 30.0) is True - assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total - - clear_temp(target_path) - - -def test_extract_frames_with_trim_start_and_trim_end() -> None: - DeepFakeAI.globals.trim_frame_start = 124 - DeepFakeAI.globals.trim_frame_end = 224 - data_provider =\ - [ - ('.assets/examples/target-240p-25fps.mp4', 120), - ('.assets/examples/target-240p-30fps.mp4', 100), - ('.assets/examples/target-240p-60fps.mp4', 50) - ] - for target_path, frame_total in data_provider: - temp_directory_path = get_temp_directory_path(target_path) - create_temp(target_path) - - assert extract_frames(target_path, 30.0) is True - assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total - - clear_temp(target_path) - - -def test_extract_frames_with_trim_end() -> None: - DeepFakeAI.globals.trim_frame_end = 100 - data_provider =\ - [ - ('.assets/examples/target-240p-25fps.mp4', 120), - ('.assets/examples/target-240p-30fps.mp4', 100), - ('.assets/examples/target-240p-60fps.mp4', 50) - ] - for target_path, frame_total in data_provider: - temp_directory_path = 
get_temp_directory_path(target_path) - create_temp(target_path) - - assert extract_frames(target_path, 30.0) is True - assert len(glob.glob1(temp_directory_path, '*.jpg')) == frame_total - - clear_temp(target_path) diff --git a/spaces/menghanxia/ReversibleHalftoning/inference.py b/spaces/menghanxia/ReversibleHalftoning/inference.py deleted file mode 100644 index 62449547e8fdcd0c9878f9e1234016a95ca27d02..0000000000000000000000000000000000000000 --- a/spaces/menghanxia/ReversibleHalftoning/inference.py +++ /dev/null @@ -1,90 +0,0 @@ -import numpy as np -import cv2 -import os, argparse, json -from os.path import join -from glob import glob - -import torch -import torch.nn.functional as F - -from model.model import ResHalf -from model.model import Quantize -from model.loss import l1_loss -from utils import util -from utils.dct import DCT_Lowfrequency -from utils.filters_tensor import bgr2gray -from collections import OrderedDict - -class Inferencer: - def __init__(self, checkpoint_path, model, use_cuda=True, multi_gpu=True): - self.checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu')) - self.use_cuda = use_cuda - self.model = model.eval() - if multi_gpu: - self.model = torch.nn.DataParallel(self.model) - state_dict = self.checkpoint['state_dict'] - else: - ## remove keyword "module" in the state_dict - state_dict = OrderedDict() - for k, v in self.checkpoint['state_dict'].items(): - name = k[7:] - state_dict[name] = v - if self.use_cuda: - self.model = self.model.cuda() - self.model.load_state_dict(state_dict) - - def __call__(self, input_img, decoding_only=False): - with torch.no_grad(): - scale = 8 - _, _, H, W = input_img.shape - if H % scale != 0 or W % scale != 0: - input_img = F.pad(input_img, [0, scale - W % scale, 0, scale - H % scale], mode='reflect') - if self.use_cuda: - input_img = input_img.cuda() - if decoding_only: - resColor = self.model(input_img, decoding_only) - if H % scale != 0 or W % scale != 0: - resColor = resColor[:, :, :H, :W] - return resColor - else: - resHalftone, resColor = self.model(input_img, decoding_only) - resHalftone = Quantize.apply((resHalftone + 1.0) * 0.5) * 2.0 - 1. - if H % scale != 0 or W % scale != 0: - resHalftone = resHalftone[:, :, :H, :W] - resColor = resColor[:, :, :H, :W] - return resHalftone, resColor - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='invHalf') - parser.add_argument('--model', default=None, type=str, - help='model weight file path') - parser.add_argument('--decoding', action='store_true', default=False, help='restoration from halftone input') - parser.add_argument('--data_dir', default=None, type=str, - help='where to load input data (RGB images)') - parser.add_argument('--save_dir', default=None, type=str, - help='where to save the result') - args = parser.parse_args() - - invhalfer = Inferencer( - checkpoint_path=args.model, - model=ResHalf(train=False) - ) - save_dir = os.path.join(args.save_dir) - util.ensure_dir(save_dir) - test_imgs = glob(join(args.data_dir, '*.*g')) - print('------loaded %d images.' % len(test_imgs) ) - for img in test_imgs: - print('[*] processing %s ...' % img) - if args.decoding: - input_img = cv2.imread(img, flags=cv2.IMREAD_GRAYSCALE) / 127.5 - 1. - c = invhalfer(util.img2tensor(input_img), decoding_only=True) - c = util.tensor2img(c / 2. + 0.5) * 255. - cv2.imwrite(join(save_dir, 'restored_' + img.split('/')[-1].split('.')[0] + '.png'), c) - else: - input_img = cv2.imread(img, flags=cv2.IMREAD_COLOR) / 127.5 - 1. 
- h, c = invhalfer(util.img2tensor(input_img), decoding_only=False) - h = util.tensor2img(h / 2. + 0.5) * 255. - c = util.tensor2img(c / 2. + 0.5) * 255. - cv2.imwrite(join(save_dir, 'halftone_' + img.split('/')[-1].split('.')[0] + '.png'), h) - cv2.imwrite(join(save_dir, 'restored_' + img.split('/')[-1].split('.')[0] + '.png'), c) diff --git a/spaces/meraGPT/meraKB/stats.py b/spaces/meraGPT/meraKB/stats.py deleted file mode 100644 index d35f4b1d147b9c8e664d10b69f16c7530c7d8af1..0000000000000000000000000000000000000000 --- a/spaces/meraGPT/meraKB/stats.py +++ /dev/null @@ -1,31 +0,0 @@ -from datetime import datetime, timedelta - -# -- Create a table called "stats" -# create table -# stats ( -# -- A column called "time" with data type "timestamp" -# time timestamp, -# -- A column called "details" with data type "text" -# chat boolean, -# embedding boolean, -# details text, -# metadata jsonb, -# -- An "integer" primary key column called "id" that is generated always as identity -# id integer primary key generated always as identity -# ); - - -def get_usage_today(supabase): - # Returns the number of rows in the stats table for the last 24 hours - response = supabase.table("stats").select("id", count="exact").gte("time", datetime.now() - timedelta(hours=24)).execute() - return response.count - -def add_usage(supabase, type, details, metadata): - # Adds a row to the stats table - supabase.table("stats").insert({ - "time": datetime.now().isoformat(), - "chat": type == "chat", - "embedding": type == "embedding", - "details": details, - "metadata": metadata - }).execute() diff --git a/spaces/merve/fill-in-the-blank/source/third_party/weepeople.css b/spaces/merve/fill-in-the-blank/source/third_party/weepeople.css deleted file mode 100644 index 33ed7472967ade6cddc630b1a2ad62597c1cd2b2..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/third_party/weepeople.css +++ /dev/null @@ -1,14 +0,0 @@ -/* https://github.com/propublica/weepeople This work is licensed under the Creative Commons Attribution-NonCommercial-NoDerivs 3.0 United States License */ - -@font-face { - font-family: 'WeePeople'; - src: 
url(data:application/font-woff2;charset=utf-8;base64,…) /* base64-encoded WeePeople WOFF2 font data (truncated) */
ke8/UpXG2cnMMUXnOC5dhhyTx+93vjnP8j+5UsBdOajo3z+td3Emgh1I4wgXCDMiRTd4ajw6yqFTPD86s1S8aUvvmE15m+qIAfTMkYQUofBgTuShGZoVRWSGUTEwUHScceRJfn2Qd8FJT4C1z+INs4p7Y0mpOe6E+Ol8B7RIdVAS5NrGppgmUoMKrmRiEykjFJMR6TdkrSOyXolbhGbocg5kVnF62VZ8YoQXfnNTtTyqujxiGwAbpw8F3L/vDVJuZe3O+C/qltLWpza6euRN/7ad70dkSHNK/DTcaHbqoo2h5pFJbh0jUxqCoxA+3AZ7fWMe2ivZww9sEPTIGA7/G0Sg8AiUozgICKE1ZC4MBkX0LCwmelUVDCe3BKDuDksesBMdqJq2o0Crj88D9f1o5IS7hEUkgmSgTSGcZ6ABSPHdTL2s7jfl5WpPIIq1iiD7bqXAAiDb91k/FKSICSU8hzFEKIHw5gDAEh25A/5klwzIilqQAhwEZMUUXlRCkL1LMu0azgamFmAWwPGcvdyf2zeyWDW7PjbmorEElxl1gtWHXS525o/+cg199rA2OC13wTyWOQmcXTFn59PPNflyTsLxvsQyaIcSPkDfKQ24eB6yT9q/AZSAosn2Bvwrga+yFz8LJGdnODPR6oCp9eaYOpURB1xwMO9vSg+bBg6NX/BYMxqFKwWgSIGjUIathuZijUEZayTidzIDOW6I9shKm1EI9QCfwfhNE4JwMOJOD0RGAEDgKkD9U8uJTvJiIC7ZtD+77HiaIIRE8k6YnpuoXmkA0PPXEKI/StEGDISGcqIEBA0sqx3IlAtRIDn9b17EYkAxBDQSY4wpqUhFgnsxByRqcbFvAN+CKGCiI44lgWiB1JC9VjcXZd0kGeIM8bgnzq+urznoLeDHIwg6IKyBSdFrGbQOMQ5iDl5OuVvSSMjPuC95EAHA2k/8oYBmk4HponsMk+Pf6cd929dt6xZHPD+et1Ii2xHmIe1Q0qB0wlWnvaAHjNWu5yUzBVoH5WNXcFXkoUdrPOVBXhf3V1q3tofHT7w+z881dmHlWh40vNgGqc4t3Gze0vYeZt3nrHbg0EQk5UwMH9a9dcrQozppY8ETSV5c5x8E1aVuDYfG1CWhda1ebhx+/Jjrsius8ufoepfn/P+7tf2uUs/UOqDTAvKRtKK8KvBRczab3TbqY4FFRcRZ35q/FvFlbr4AEFUkr8FA66zSOLoGwVHK+ufXKjC+XMheDHk7P9/xdBj21SO+z3MEV/afJveRCnSMaKQQZBGHuDXN/vSKALSjCCU1TEDEIYhR1GEIBYGEHKRCAF1+hOb2hduCbFtfc0erMEAmv+rpw/8hBTW2QKGLnHcngmW8jmDd/nHH7hxu4HW3gPsRaVisYGjRoL6iimWq/LuqLGsZXMNQ2iJcZXI7qbdDifoMRuD1sAHnFjvF39ud3z5vVaZvkTrEniyKwYT3Q0SZ+GPWtKuc9vrhQH51dbfbhStrU9qZtS6+NHgmpLuxOGvF+bkVE0onoVcSMKD+HbUewkPlBbtnGzljk3aw42tRrNkUKLxAZ31KK5XwOtXn25oDI6CVfv+JXWMyTOiIT7NRdID0jYFdyJl8qr+gQDrI75gAMmHHqpqa+wZn1gwel5Nw6taVF9nMklcMXbU1Z4LezoWysozIBTri3KQQQQQMEAGGSIAAZ4QRJAMCIAMIl2XoShiiADSGQIa9KIIo7+kCiOsmBz3LoFHbQW0CYUxyYLSE0TgQp9p8vjTqde/Pb6qBy3tX+r0+YPK/yHYuMAfJH1wMEgfwGTsXhDmOZy/RSJi/sJB14DnKRKKaLY5nBAzl8whNoNbwmGOykIexvNuRP5baht2nWnNHj4ZusnBqIobAiNy9YK6wwsqI2dawNwuIFslXV18UKCr8wylOGOkLn+2CLfO+9sbT7gd1/dixC4jjCEeGeV6xr2QcXwA2wYKd8k9pJDwldbSNrK4Rn7svgqkIZRFcejA9ZFM+w1Mi7CEWk3UAAx10h0S5+RsbhhrVfqsZBv2+aQf1Nrv3J+KZCBCHElRAGIQqlmYxldRiTHZIpusuIvZOZJlRoAQJ0Yyu0GA6gHvPtC48MML+zq++7ax9GVy32/2JDprr3rx29/8pUqW2sEmcUxLx9qV4RECMckDsIvK9U1JeYvgziheqWHAV+Jz1fz7w+tTIrd7LB21QwFyxH7919mH3hugWpdSv+bYeY76Zn66fSuYqloZV/ntcmvluifC8KL8t2u/GTjI/bbcdNUbAYgIiJouCZeZgQIvg2AdQTrhs9nsdY/KJs6n3xAuXDWquszFJaNZQnYXdr//vNbX6KuuxcCMVr1wY8u8Jr41EQNaS4u2xiZGPBcIeZxzQoI5zXOiIUGLTH/b9I1+rahQ//UezRMNrHeq36d/czzdvvm9ldX6xzc1HQA3E9Omx+KPb4NIFgwLvgUpqkbqLfkP7P7njmm0/TllQWPr0ezV+rblP/p0vVYk2JbJ09is4NhpEGv5ZJk5G3HJuJDQe8GNqaxRvI6pgCIRJvAwDRnWIKBrfDdxOCPxbqFWvx3oeYQKG5rCtC+Sy2BRZvEdvmqsix7VhVW+xeE2bkKLo91dhbg5sNhm2XsO/cOO59zIlX1ld20hebd/p6Bwd5Nh7Fvotde74YHabcb6gNi+H3XSoD1wf+3crRHmHzn/UYj5X5SFS3PDYngg4xxa38clNpeMLjkefresJBzMOtF1O7mNq7CsvpW4Pvk33PCWgxVYFT5rMlYXQ9iK8FJ6t6ERKYFkxdj48txQtabV/aYTfPJ5tst8kRdUvMDh0w6Jb73wi6/ePP3fnQtMVl7hhuul2mG4pjmnx2I13rYkDY7I8cQPs/NL3lD5BLX85mykh4OgevwfFf+9BHHN8eMPsr3rfbilzB4R6ATfdSF9ZUyEQDwuemQK4kuECp5vtXYLVnOhudVi+w4ek1vfG2xbVK5ueapZ2Dj5V24q2zSneFF1WRN22SoM5uVCvU5gs6nwu7CIRQXMLBWZpeaFpIx01dplhSFOK3mAtLbPWRR45SYkcEKmtxWveDv7QZvcQ9aUq3JHxDrS0X40X5gWanEzpVnK51NHdaw0DoFBQkYrZtMKJ/Cb87i5r9YunVepjIQMMiEUbYFaCTJAcGFCaFpCuS5cUAKt3lKLwdp2Q+ZUyfY75u+sbTm4fbMw6ap6e0pRNvXYjOlYApr5cv0qWjasEhIZSSDsouq46ClQjW3sBtiRFDeNJl/f/53nO+xUZwRoH+7wjsWG/UC3iXEoBxHu8EomxnHcKQtzGe2e745Nnn3cmGl03AG5OymHwpA12eLyLWYIuylIFZR2vga82e2iVoE4omHVPsMGgmuFy47atm3S3XYlLc0vnmyqfLKjUqqZOXJMwsXYNa2945/+4cqn7VK2Tl5zCF6S48gOcZwhH5Sno/wGVuDaoe3Xp2ERQh+UjsT5RKYSN3JYdshKKAQdnOhAEHY97+fGYO3Joc3hCohCHH/mta3fK4VsDX32ypmVSi0DogdqKVlHfhR40Gswcd7kUsjNYnW/
x49MpMy4zOobKPYWJMT4sAxKB2/j8rj8jYt/8XXnTTUuVYV5FAv1qhlDEQOICNQw3ArtwAcplUSiaRRxsWtZZMkEQroBluMmg1LPeJMy1aG+MG8+yjxbr6LbOfwLbkmhcd3Aams8p6h/GsN9jOHj97VXrMHDRA2BCcWuyhOPeGnLBLD7t031fKC/sK9sgb7TNyBFRc8/vf8vem3JTTzJuaez+3NkIpXLiGPIe6sm9h4Q2X/Zrs3X7h7+uf5zL3CHisd/l9dzwSM2lB0PzCnhlrN6T7zUW+AQ1K5XTnpP6rYs6AXEHkP9DXkFj3CUmaqmv0VwzTzC7eR0g6jD7xpr4noWLd/c8SoMYiHbc/3Xv29JfbOnHUFCS9uhuv7uxSFTykKLBGwsGfEPMeDRCMIBzwNz7QnV7JPX37RpQ+Azp1vXPqGNyskGWGhViu+yoyD13SXDYrhDwXlImYlpAHiF906cb7lZWwWqZ1ZHP2lkdpogQWg+d1+4eBltwU0Y6TBa862ORULZzWTVcq4SBwS5Zo3cV5MtRzYTO2snq+0eVHPLmdutBl/aGcJnJ99Lib1iRECXMDNGcMbL30Fnst6v/rr0TGafR0ICB6Cr+lbOsevgiMyRLMchsXfrjow1A5ORTDMRUUbAD6BTxZcapp0MApwZzXn+MfePwPvV+qe97++fhidu2EmokSgEASkdvHv3JI7VQfl0TQrcVXur17GCRBZypdBgcBkMXE5ozphLON2AoC+tF2BEh7eAjye7ApA+R4p2yA3yaAD5laxmS9Xba5StZ4v7kUGuaDvEhg+O/gymFy6Zzf6WddTEOI/InF8HHZtVLqDJOwYjVDHqBD70CdNa0D0qtK3mhipLtztQpqzD1XimlCr19XyBsPDqu80nu1LPrFkbv5L8UvtObYRmT0UhhCiVIVTHcP+e4jiKmPiSuQBjLuPYNv3KAEP0Ry86qodSn4R+dBZSZiQMaJG525iXQRishS0pJfA3fVE9hJVqTIAMRRGEDBHmo4ZrWQCKP3SodPfjAnPA+CC41iE+u0Lbn17epmz7yQnHOZ5GdBun6t7doHlCahIuix+lxtqyj2q/uNgkrEOkmsHZq4JBOFPDxHr8gFYxNP3ddM2FsxGWJbk4UiALWEbigrx4C8s1yVkRYSkID/0p/UYbWmSusG/w+64Utr8aMUMZ2QkMCm4O+mMLF7rMHUW7iU1iKzt8hJrdaLk832DKIP/BeFwqRsblvCMkxPo4uRl4s0b8MMzO4deejKuBq53w9zoj6nkTHIhZcVGD2W8NEgfkZ40U2+yqQti4n4m4DavzV77fFpmSLKH16QV5+GFi2astD5b+7jf/H3Kunu+ak8RPxthbj1Zf1qw6E1QOwHF6fu1RqXCU7LoGkQNRVQ8QnFxtzBTeOac7mVKhH9lReGNHktu4U9csvh/Psy4wSobc1UvkOEdTiECIdRPfFYsg4NVjhAEqcjxW8GUI+pTcpj74E/j51evfe3LPdZPRZ/jsyk+uXjr7cmj36n/K31AyBcU7mnvNHEkjIY4c2KjiDtjPeuwoLmmCIBFkhD3A5n9a2IdLi+XbD3fGHioOudl0YI9/h/+Wu0Byen+ud1fgU5PyiWyqjy4xblciiZIbpDtW4oWynX+MGqpIvv1Y1brzTcK1VyaHIQt40wRmFTWM8qcvuAMrS+cUbXFX8a2Htuk+urSD4580yYalkaA1u3LAdficBF2OgeYH5q3UN9ntbODw9I7KebXZdIUfwySP1w1wQYch/GrMh7rtVnOKzUftnmzX10P+aMucToiKZemxjO329EwZ/BwMd5AwEhnCPZPoBo/8zhVLxsjHkmw+UHrWGJnCH47BGqiO7HttPd+ix+hoCehV4+UB/aTjBP4Kiy+dkNjqrEcL8v9SCIEoiSGCQETAnE7hEUlWLS++IQUDb2UCBZ9+Y0TnxzSUgkzge4CYZKMCNy5MQwERJI/iCKMRLxQNKIMS1PWbqRO2iLflvmdZx5qXgJpFD4poM0luBBhghJAI0SJ4YBzZjT36bz1BxMIiCWIHbzQSoxEoQFvpmRZhs4gsCfutKgGhDyZwTpsWzH3+Gl4o+8EO/Z+PbR3NbdKUt4+scnDQ1Njmfzm4NIriskIk/dlI9G+9GVrfA988c4iYdtQNOF13sc1SMUIO6mv6g2AvRmQbV8tyQiYDRT9Wiurk+wQu5enoTCAYVzHqt0Wftw34Ng6oqquqYt5vbu3Jug3L8OKo0dROjDt44Sgq3OEuujVRYI1RuGoRVyPnVBZbh3CIaukKw4yvUFnvcBjRjrK5A78dfln/tK++TG2LBR9GDTcq5kKOtnCRRDkH65AqpSUkWWC559h6Z+auSqEqhE092eO6NCYf4FBLnSqLImqW6XahelKig1aPI3qVOay1H4Ma38Uj8YP6Pf7AE/vqHui1jkXaTIIVOZYeeSiXXo8EM+e2k2JsrcvVbnYiByFlSM4GOmUi93I3qM0tg1cqCw+iroPZOu1CoV689F3hcraU8jwXrYQNQ4HHWzvLJrwKJXWw90qvyT2gNgwjJPUANA9aEpb4URbp1xQUh/4n4RqrOmMx9hrNT5uiWW2JX64t9OPYizvZ1tta55KbjNZ5ux+Kp6mCZVbjHFD8uAahSp77qIdvbp8fkawlWOYdqDit15D1AkdgGCJrnhapx9IZPYZhKXXlYDnMoTACObsNoaNcbXXcJv7nrS7TpV1T0jqNVeAAFoxcABlPHXT/yXvxLKImrA94owubwyZz5OcMbdN+rdSc8cgcCCWJbYFPhZ0QreBcD718qZnfv9dc3CCbE1MtjN8PybSeh5EVvPV2bNIoThve+20fhBBJY+JvX/Brdl6oIIfZ7+K5lRsnnVSWmnkICfN6kxubw363IveQPTDsY0DcQ1Q0+Q6OAIBg8l2EY3qPyLKdutczQo4nHxSZCCD09oA5fzsTdxA9PPI30Fqz8vbNYU3DcK1dRRGguuKaGKt10+9zy1A1qRPMT0qkBJeE4hTZkZEWFCdwor7YKFLpTNinqiNKtUPEVlhaNpOzByO3ftB6A/AnDc2Uz8gSCa5VA5jnfGhIVh3GglsQad0sDX2dOBNxO2B1DIcUKsn1uhOXojwogg6pknEDjnVH4JDIBfsBwwwKnAN5nZyrK+HQIWvKA7ygwzm0LsIMcMJqXHfrMm5j9f5TyQElMIlHPdGetXUNlPzeGSceyN0k8EvNxmPU/o6Bv62s9SK3FCYsVxMGZ4/mh0FGyec10krY5GgMVtbqMQEzFHfACudQBXHG0cliicLXveJGndObgcYl34p+YS8Qbx2T7qpHx5WR/35ZGUtLETyDzTXhX4dzuiF7O60mWhoZBAFGIAGbvMln8UFPVlX+sLE5mvVumdy57mt99NlIUmI+5ru1JzWx5ztCljUbPJFKjxSAdBLCE0ZTzwgRKu15lciqBc1I9DKxBq6EWkp7Xx29bVzsuR7GFnTvIO90qEDTFSQgarbGkE6MECJMsK0eAYQUrAKRZglhAGIIhmUbjjNmLTQO22nMZI43PtllFU8GXvCfmfx
S/jBYHUu5gQjFtJEaPNgbiYRv84iXwz4KD5HQGtmlMoHBId9AWddO/w+PIKCRTSB6mRZk7DHssVvFxHKjpxfoGWOvR/T5xJiqBFdzEwt9t7S4d1Wv5Hi/MO/mJxfUOxHvoAIXlrUQ4VsxPef7+R72vSC/BOWYjo3XVpUaDUPBkZlFVs1mbif8jOucPYQQEoOY2omez20orUbEPVx0z1draXfHVlS8WkFVXF7g3vYfXB5++uDzDySv9JS5VIwJi0EJExOfny0qhUNS/8a88Mi0YEhYPK1lh2qCADnk2rsStnM3BuZ31CwyGF+OEWYrCxQJlFBrw0mDAAcgmu5RIlG595neU/biJYgQEkIH7+uMwdvucBj+4Em/j5cvs43Z3jjxZzE9lVn68snm6nqITjaTv2aX37J+rpTYOn/FwpM/tY760qkWHzPDcsTrLfhNo+p7QJIYijFPrFd8RWTTub17vb8sWmzNmj+hM3V9NW/GLSwhL19h2kRMVsoi/A7MfYbXLPfVCl5XakVdW8COIeymShXhvHz8TS0IiAvU6c8tIvi6WqBPHrocu7bfYJxWFCYjjqjiaIAJUOTcMpaxP/2OkRzm3Cf7q60l2F6Ztf6QUIlr7N0MvG14G/SkULn2DiGU5f4ef2Hb2uGvLzqJQnF/77MsMBZ72GtvXfvak+cnB4xcO9lUPLp1NV+/FGYhCmprMFw2ta1TMJfxPFlNStJZobwNLsuQgmPvvrz/zLFrP79/2FT2fy9vvn54bMh8i0TlFu8oCJd5p8Y8PgwlKBnc9MrLBW0Q9yPUJZUEoVxupCTV6TLdRs11ikHuivSJuJKXu4AW23RtMB5BDkSFSE/uhodUaJXJV4h7E2b/My/fIR+nGnSaC6okSJX0TrBe39Gy/6bjPa2f7j6dxyXDf7TYz1xT2HIwsfxzs9/NjfuzLfrLHpNv8OI1zihEKH72Lw7AWjEgstxSwK1yj9jLeOgJg17zful1RH4SmRi8c61e/HH7IC9YBPyQJPq8q3IxAQkAe5Ax8jVHw1CEl5GOBZZLgeakgDoGus5qhmkjHVgn3f/CEf+3LKBO3xtt8O6K6c2RU5n/Xrwndoq+hHbzwG6KP+UVJYyDocWL99e/aO6Oox2a+88FyaJ5ubwNRi7A9nqiGx9efued7qD+/yWvvmzOjwWcmTPrnnq0cF3rOyM11gc9/F55fKRfeoF0xh4YT2MNcifD4p4JrOzCgXwBZqm6/5XmUT/kUfXK3P37cqNQmqid1/lk6OCRKsAcd33VeeTuI5nRvQwkN9SUjb+/2LjSYKiTHhCJ2hsZRS1vPBjs+OiNponEj+d2Fvbnqu5r2zz1Hjfn/D9Pret5vXHrF4N5oqf1Ub7s6MiHO6/OtKU9Wcvid05t32PH2Jbfmm/wZsLhggSYt2q90GGnVQa63Go81ZK2FdCqXhtXWFC6YPz0/zg77z9KZ3gaVpS2vyuGuweuKZFL9y0AxafHhh/mNz9cKBhm7GfYMgnFuTDDAK5B/MG9HpSDruAvVu6UTgVL2mdg4Tn0Tneo4cbba0SyjcwYCUflEuoBAAYQkansaYN5MD9EOKPMpVKyBDnbowIYU4/N4nip1OsWCgK7+zbXJ+57+tT2igLPHxqHaO1re8OCCsW2YjmUh0157tMnvzInuen04S7kb5cxz9sMsjH9j8lzbUYVqZLVWRtuJbbnG+bJZtxcGjfcPF4CIevf9ONpQ8rOJ9zViX2/e0D6cvSrzmMvyNuOR74vhHtfHF1cgmOCydrMyTGn03ryGStXOGhZZ2nPXxI3ztip1anqATQYia332B+v/ddHKBT3B6jq7sGx5CaDjeiAB+TcXP1ipRV7dilEJQ6CkOhU5Zw7jh+3n3AI6r3UkClJ7IAT002mrvsfvwe+v/r7Cy+czak5lPPs9ugmZbU8XFYpeIytYMHwJyBwvmPvf7crYh4N6TtFTm2DaNQkTil0mk2kEc95IIexKIG7wxGTerJZUiCGvVokRkTS+Fj64L/ffGjv1T6oT+sbDajUgWzdWPbVKga7LwY6VWPeRij8CuWtLuVaBEcGe5jMFUO/H4kQKcPKCLh6ZkP8wqPvfTYFS7ac2BCz+PBiB0P4PjmvIbDswM0uNAcyqEPVrahI0oFs4tQ6x50Yv0bwRdxEe/XGAcYvmMQr3BapeIw1BirRLqv3+s/yDOqES8aYElIvSbpsVxuhajVOyKbY3BEo8Uy0oimvBHsBjjKgdaJbkOaFjMIlIaIaOS8D2RpGyjBhGZg7pY1RljtzWzjP27E4Ou/AYq429H9DKldUhC5hToIzCDp5aix4odcobKau/tLQ7uZ2QtDF/Y/3K5lXh8rm1nwxDfSNwuSe6NCm7z9QO6er7uWWlkOzPxPbalF785KX91189LmqZx/ZOROyLwxxU0O24ErGLeIrO3ek0y3Lmz2Q27fsp0XCr6BRRy6Jn2dorbK03OPhcrx3aNU42fp0oVRYUvLN0mKbKoeHU6/vqeysiZJ55ubof/tWAJwXH755y9ojh88r5LGufxz56vITv5VWGC81Kv9eVG9o34KgFA1PerRtoKf//AsH1+xabTjgOInR86Ftn3iTnsmhLAtPy7Ij6+3ALNRagFiwuPPHv1m1fZ9RcWLuPoweAft/5rmYXU6Mt1rJQaRcWxYqJfo4O/zmx2VfVPCgbvz1rxn3jzWdBbG2n8Pg76hQu2NywfzNN297z1wQYQNWkhQ8ecvkdsTlleGPBisf1bZoLdgTjT0/mDFE5tSflbr8L91QZOKXTM+7Zbmv6HJv3ZCRe6htAIH//uEd7IxbnDBJW1u/w27Wlzc3t0i0ynHVvtXVNBho8NXs+9EfXZotk1+U0Gg+lQYFXCZoOxrk1cyVgC0tLA2tOvu2HQdEcOD5z8senGAchMJfC2KZNdlI+OzjDF5Osgav9m+djfe0YktiuJZMUuc8klDfKfXd/2+19xebUsG/Zp+O1sfXjWsUHXLZNx9a+rECI543ol4H3gOEYPKUAvPoutvjFN6U5qxD/nvkjUTNwU1IJxARNqHBcQQ8byKJxKKtH5WzC0i67J9Ob2L6PkAo0ymK/F0DBjb9ubGM7hMjYd27pVfkbtIGq78B/vM9eed8Z0/mV63bELCarFb17uzeCNUkcbPOQ5a5cuLNoa2dQyKGhTyv2NTFX0TzhQ6E1/P5r1fs6uioKXA4JxyYi4YX8reSe3+97N4Nanmqh4O82Zh1y/PvPldT+CwabX/hTMPJwc5v7I9ApC9nn1aW3xxYcrBsydkZy56IO77uyYRWvqGlAHVMttD24JajVBhd5qzoueLMc3U/YRpo7Hhc3Sf8DBfSfefFsZ4eP5QO/9Jd12uoeKf4wJJINkKRUos6x5OtsSsi0wgXMNOcEDHQrCwE3lfNyPeV7C17n8jMPZhZQ3/oylQWG1oNQ5/qRqcYcWBtnK3ef6OHyHi77hvXp1XuYrM/vo0J9Y/fX5VU7e0BEL5JemzFyUOWqP49G7Y4oicLvnTZ2HxS+BxE3I2Xu59wO+cWFuwMBZdHDThdRRw1kkFwU4fg77
DeIKOlwCg5eryDGx+uY65KqOdhMcMkmTYXOHBy+vKgsMQsj7gbnciQIokYRnpgXofP5afVpYKcrMQGDojv97Wcjt3rXRbv6/9c3ue8gOjH2np2wd5DWNxoYmDbxAHXbt5c8CS8d06UwT4p5jRunjR7CDUPBn/ltfZkf3VeLysV5qs9wnKO71XmlSfO8oehXxP4ooH+kgW0CpkkuVihEHAGBGvx9OVKhyETkDwOI1zisf19/vHbeicSndh89qVMuAXCTcbLevNywl2+4qEWhtgck8TCqL2UBqBqQxSV4BPytIXYQNmK3K0fnH47tZMbnKDZNuPJmx8POiwTDmECCudljiBP9/NFZltcKo2Qxq/h9CXfN6N7d1tHpAwSRXgKXq3L4TU4ngUNYFRBgWnIAW4mnuUCT6SIpkHq6XPE9RlKCXAAtMGha8u76jrl5s6gud4A01xH4YBN5gMojmfUACstW8M2F+GRRYny+3iuAEHMI+eqC+R9u/+g16sy2YabYMBj+ey2jy8syqwoJVRQC8royRPZ8ZTgInYT3wVNvSJCAcShoCpTBKv44HafRYalQT/rsbUX0fuJVJxy5CE8sjQoz/yueEf0VzDpQzJw2KYfm3Y6//fhvafe1m4Ei4Gu4uwjQHOgWD/UbZqmAwlm9btltxpV7ZqdHO0nyB8ZBwo8GufgqPjQGliIyAKCYJtNhC3IeZi53COEiXGuBoCIKvffLpPxrz2fvD3++54rEZjk/ZfxqUMORq7EUYQSXpAkPTP9ao4cR1a28SZ6SdDFzCPEIGwmJj2bO4vf7xGXjP5EoTDtAw4Xd4l7by089P0V+uvfNnLNOMc4J8lbjIqqVHvWXuyViy+Df732/2vY77VDg4+G9Yktfla3LDD/85m2Nb1//2AzyKB3I50RZbKLpReXFJei0DWwdDHq9rkw3BvBiznFg0ynXvnr8p+2/eEt9ruQaeKsgqUUYNGk1pbeks3pTGXxqZii6CkvBN5r4oQ2CiQAoM4A6vFEdNASHs1GTrHpWwM+Cfyy/UXmWzXgq0qf2y4dmH5JilBxjdBX63/fzuVe0+vflU9YcJ5rGbV6hvbnDxsEk/7ejgwvQXqDw2xCRCQov3Xy/tHkT7M9hkULuPkuTIj0G1O3cf4cvlONmRepokwZMhbgZYP53XffrZS1Ftzn9cGeZbZtO3JrPw6ewySchz9TwR5f3nY7ZLD4vl7Mz3M0pI9/3O5b2GPznfUJwDu1Y/Qg6Dlh/0rmUm0XY6EewPa3qv6K/F/86PGN4luX/kK65wP5Zryb4L0IX46/V2rvJUX5vsSXph6SXIGFPSiDl0HQqod/mWOKBjM65YU9o12mmfFpqPJon7Fh26V3n35CoYja1dTPi/pusGULi9TxtXNDToHt/WX9ov8gMcLo8SVg0eBF7ttywRxFalbjYh54M981VSXiPU6afHrKmC0CQm9kh189YiTv5A0MwYwjPvPF6IrWrsF9DP3Z26PkAymEh4TLCz2bPJcW2DdlXtQfXAZhZyJgWpG3xLcVrz5bd8/EoX+3hro0EPgyter1Lnpe2zudJqDUsDMQ94Hx8whwjs+zSwRwOtWeeub+isYfeYD5OJbJXq/PvPiWoMGO6/exT1jERjZ47No30GBSHSQfdtcXc+vLTLuSkXq0QMj7LLf7apnRBZ0qXBUnkUZbS4WrqAySkqVElUrxgzgxx6XBrV7jHfES37XvFty0rtQh/punuI6gVPR8y6te/k5rsr2jLgpRXIfVb2yKcMIURhOaQYJOQpFEEm1tBDOPJ0LQVeTxiQcjAkogGKQh4q4VaIsd51NTiCvhP27c8fTN0V+h3X9S/LfBXV/rFKUBDi1Wgu4a8CY/rd1wtuPXILS5/+rzgQgzElkyMFlHDAGmIahBVk0IAxTaZK07aJ0U/ff5uJvHLv/Lsgr2BGcCrd8f/OWgZI2Xm6UrE1CFyANuVmMKgFaj0GOQalDAD4lQxLF3ISxm1naL2WJHJnOeYOCGLQJqLUdvFAhPG4StG430XY70+qAqIu4itK7lQiejP+bMML+81o9VBIs4N5f+GbTkwhHO34HVXcNxR89C0wqcXFxfAElSRowAPoQaSjCcmnGZ/8gNRcN9c6RPd+1d9CFwyz1ZRgCEkIsQgCFAXEyWQxoClBB4AMJJEoWQIF3zQx6fRxRFYIktxgjjBCLjanJoVM8wU/DB6L5NH7CwAH6ps4f0R+h8ve61Uw+M7jqxp24OJB7sVLljnMXY51vpjDyM17vm1th63+l3GWfQUkczrUWCM1Oxf9CzrLXS6rvLNT3nSL3VfaMqGXqFFo7nk7wwTg1yxAhnwQ+feD7cIY0c1Oo2BGp+MSuxx44q7Yt6C391eyaiAS3laU56/KiFymyzJJ/v2YnSuh3KUM96BE9/lrV49V6GQvuzFvNb2unpnz540zvpUyZURYPdkTccdXKNBIuzulBrLyuuV5xx6Zc5vu5p8PDwcxAktQDurKheirYutkr8K6dBtKIXS4yGtL3wl0ycZ6tce2nQ1lo/o/SpjTMhw4SZLhCcIw4FQmlLs9tE0K2ceeKHL98jjoql30Fw6UDPt66DDi+nt/R7eyp9xzQBBPctRETOFIHVv/B/J5lcQV7jJgM1Rw865KvxT1n7XAk3V3b4vWLBbWfRq5nK032FXXrJFvWkdOXLJ0VxtGbPndJ/Z1pxV/Lb59dsb2kYfPjcqtJUjfZgJHXz+p8zs2uJzlx41Rd/3cvRBxdxHx4ucun/t9NgHBO2Rt9yFpPl1o1pdCKSGVv8QR37zaaclzmMW2KxFpF8sHCZrDNmMAir0Cdc5adXPPumJzfeMXAksTDyi894ZnCZfnujIDnqhG8/xa/PjX6Y7a3cevDGoXb9m7Ee04FlRfS2Zk5IsON/8axo3vi4wG++eiG+W3hepQu+sd7eNd9f3DzwBUVnb1uy1/dGc/LMpGnHSzUhOUPWQxj0HN0xVMl/tHQZdzo1j+OU1hqQOj0W/Kawq6/ytPZw/a0G8Vv5vStIYKT65cauL3ZWKz/u+SFuW8IlzXlLLYcT/qkpephIliWeJRGgnTj+fdvtld2KYej2h801tMVieSNugfDKhrfzbwGvTAXluRECbhtcaBZw2yUIquaneAL+GL1Euf/7TIwQuIQvGVzwFE/hf34WnCEI6I0fvfKzNUttDV+B9gBw8f+ElwKzu7j9VHCE//dbEgLc/X/ORA+RgvL/WV1fe/1BGgAA7QCQQ98U1bwtzrv7tKx20pRSm5wz591pn5/pEPHQdqfziuCEUbboW9HvaXD/Atadn1pkzFxvuN30l/XzxLeWvLUb8tjdB/ZisyE+JsfAGr8L6Y0Aeqn3r5Pwg7gCAGYnvwgAnAAAWeC2d/EpDJC+vVqBPYzO3KixWSV8m813AQabAQBQCOyLkIS8ABGrMGK1GpHQbEHOVrcjpUVEgZKok6ratAE0qcku/B6w52l8C4TzNr4NrPnG8H0glvDITzqQV04QFRZRy2OhTTaYl202Zp3Vxo0asEKlhaBtg43WCYwe+UqbJ47aJKJSGI59p
kR23kD3dgEf61WgBVxteRj92k251Xtv8MTSsbDXs59jM3DVhqlx0vgWVKysVQcm1o+ujatjaqKSuuJYdEsxva2mOq5uNZbuBXlUTK47cqeWpqSBnpbR1STk2R7mhuW8La6aUi035uqrWNPlyo1k1hsnVm7YtLIKlscmqOROUd+8Y4VnbmwdHj1s3zdWX11rUkyNe3J74PIVE6M0ladyXGvbtHH1oibXFnJH7+RmSmjTIrquNTki+O/qCfz9dgALYoD5A8FQcUlpWXlFZVXcvy6qV8dq4rV19YmGOenWtvaOzrnz5nd1L1iY6enty/YPDA4NjyxaHAQJ5CCFPBSgARqhCZqhBVqhDYqQQYmuXLdt46qINT7Kb75hdTicDIuaaDgMEYiCDtUQgxqIQy3UsZOiaBs75qR/C4DDGz+NAwAAAA==) format('woff2'), - url(data:application/font-woff;charset=utf-8;base64,d09GRgABAAAAAHo4AA8AAAAA4KwAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAABGRlRNAAABWAAAABwAAAAcgxtSpEdERUYAAAF0AAAAHAAAAB4AJwBNT1MvMgAAAZAAAABJAAAAYHJYlnpjbWFwAAAB3AAAAKMAAAF6K26sXGN2dCAAAAKAAAAABAAAAAQARAURZ2FzcAAAAoQAAAAIAAAACAAAABBnbHlmAAACjAAAc9cAANc4BKegHmhlYWQAAHZkAAAAMgAAADYR5QgpaGhlYQAAdpgAAAAdAAAAJAuBBZ1obXR4AAB2uAAAAKwAAAEcKkRAzmxvY2EAAHdkAAAAewAAAJDMYwHYbWF4cAAAd+AAAAAfAAAAIACfAnZuYW1lAAB4AAAAAXAAAALsHaNuI3Bvc3QAAHlwAAAAvQAAATbMg4Xgd2ViZgAAejAAAAAGAAAABto4WnAAAAABAAAAANXulPUAAAAA1pYy9wAAAADWloq3eNpjYGRgYOABYjEgZmJgBEI3IGYB8xgABqAAdXjaY2Bh8WWcwMDKwMJqzHKWgYFhFoRmOsuQxpQG5AOl4ICRAQmEeof7MRxgUFD9w5b2D6iS9RfDMpgaxi9Me4CUAgMjAGYvDc8AAAB42mNgYGBmgGAZBkYGECgB8hjBfBaGCCAtxCAAFGFiUGCIYqhiWKDApaCvEK/65/9/oJwCgyNDIlCMASb2//H/w//3/p/xwPKB6P1nt7ygZqIBRjYGuAQjE5BgQlcAcRJewMLKxs7BycXNw8vHLyAoJCwiKiYuISklLSMLkZeTV1BUUlZRVVPX0NTS1tHV0zcwNDI2MTUzZ6AusCBLFwCa8x6LAABEBREAAQAB//8AD3jaVL1prGXrdh1UX/+tvtlr7ebsfbp96uxd7am6p7333VtVr/N18+xnJ35+RGmMsRzHcQgoOA3BpEGRpUQ0EokUIoTAilAQDkGyFQkhAhFCCVJEEixBkBNEkFB+EBEU8QOJCN5ljPmtfd7Lrbp1ztln79V+c8wx5xxzrif6ydefPNE/437iiXkSnlz9qnry5rNfC/bJP7r+Ve/+589+zWh8++RXDV92fPnXglf/72e/pvj6TXfeXZ5351/XZ995qv7cd37O/cQ/+Ytft3/zyZMnRo1f/FXz4+6vqF9U/4n6O0+eqHnw81Ar+auDT38Wg59+GodTNR/86Oen6joMfOFELeb4Db8sThy+2+/2Vxp/73b3d/e7h/s9/r/d3eFffoOfb/nileJv7/Bnz33UClsdFtz9iG/8sRqwS24ZO65N8DtuBn/eq/3t/cNn6o3avVcP2AZev7t9uN3v7m75ljv58b26u5c9v1c7/ubhFoewl4PBBrb7Wx6kkn/eq3eK73vAod1xg7KT6Z+79+qau72WV+6xG3zyM3wQp7nlFrnx++vFzfXdlh+Yj8PN9YJXcPdGbUePazLgsqhFujzqZi5feKYLXMK5u5erhYPGFeKFuMC1HWqNT57g7G+u72/mOMUrg41f3t/xkHHib/Q9vgy+UdgB/sqNkMt3c403mh8zRimtjLan3jini8zmtvPWKeWdLoscr2mljTbGl8ppVyvdjtocK28sPmfK67PGNpXzThnHLRU219pabFIp55yxUSv8zirecK+qRrtgHN8RjLKmKqLXNtja9meucNlswEa1w0eVitpqi9/iP5NZ7bEh6/iaUkFHj8NyGX7AAVp71Bypl3nX4Qel8SuV29Ka3BVqoVShG7xLB12Ouow3OlgT8CalrI54O7ZsrOGu1LPV9/lM+1w7i7NwJvD61Nod2SaY1kYfrapL7LQJ9jt/ora5x5FGbMGXVVtrGx0uU9/7WW0Kk+PDhtdEqXedznhZNK4nLg12i0vKy65zb7UpmtxZ+6QsVBvsad1+5k07upW1gyv92m8W1bu3xduj+bHHpYi5ttjs+vnFZxtnYRE4yKa19bEKNjj81PZ1b+rcWo+zm2lbhrzNBlu27mQWzkO2s0U0tYlj7Zcme6GHJlPV2fDluH1xqmOMtXoRFld+Y47rqGPm/SwP1RG3jKuPPcyOZittW4WLiNuKY8E9wn0uVLMpscCO6tJXuPgGt9Z4ZVcWe89MdO1z9Y0wxMK1M9PZplC+afMb9b5obmwZM5XhrHBuuc1wTTKnF2pjTYM/OsdVs1h6R7hUefDW9AY3AguWVxD3pML60q/XJ1p/K9riyPGq42LFoJ3hBS94n3Ff7aiXPix1qU9r8yu1Gzemxw5yX6jofe5qG3HTtZ+pZxfxFB/2GZajN6b6fGiwHVfXWNG4BVijuYkhC3uzLjehLMo8qHBevQNUPlHqG1/8T+bPuZ9Xv4EftjDxxQAk2Y4w8cU1DF9g6gLASbs8QOcohinfwN5PVA3bDx6WT5i4uZ7fzIFHhMH7G0HSByDJ7lYsnQj5cLu4BnYBiPAaAOqdkq+32GXa6DAnyACi+WF8maedETcVvxcYIZp77koDHW4AwMC66zvC113a8P07fXslCJdQdnebsO7hfgS4ELNk2+IECDeLOQF2zwO93d8R3O/SQb5XhCiAmezjVjD2DrslLONE74iq+Ag+wb/Y5e2JlhPAeUy7wfUZPiieKU4Svx2SB8Imrh9uAM83hOIJwN/gOg43PNQdruc1z4cX8547lgMjQO/ly5W+uFIXdDd0EPQiuK47gLgcpf69u2WOpQ+UAXp4opHvVO/dcejyPFNLGH7msY6N87iWnoAUfY5lqWFMAC6sWAAS/sWSdgpYplr+owpLlCJE4GX5hfdERwXowJKs8ElBEGyGmFVGPYd1zIuhaFSYY5nazJYKMBOJDtbDNINJ6KaN17B/Y1xYcCt49cSZWXZ2ii1rAD2sx2msdQ8Y9wBwIK8yRN4c7w34AijBd7AOfrx2eW5dFQGvdBSO8GcAOA7YgY0DErXJcPhKy9lr/vE8Hs/DwZkqswmm82Zmy1LZjy28jIFFAUdwGiqGnfv5ZfUWJ6Fmed7EOFZKV7pSpe5MqPsuvnXz1csqX56cxOMcCNAAeOGcXO4WT7dNLHhx8Wlcvd4qwEm
lWlfB6Da4xnUIcsi8kLXCEecz27giOhyyacJyb7Mu25/OF4vXxZdsP5tt9tH0MbwtXgUXa226aNvKLmI4y5s5jrbGavAxK+G87Mro9TqGUdkr1x+HJuJy6tLOAIldr02Pj2Mvvsx/PNtsTr663pZO10tXPSsXY+t1UPBzIarRl7rAgdPZWeN9yHTehys198dl6cxKtzt8LGt+RxzdcVFmXh2ZprQ/4+x7A1fIm4e14GwXrMpXz16cVcWm64PTMbQ8T1xmU2GNajoPeGGHq6RyM/evjf9y+BJvUo1b62Oh88wR6JUNurWncbysuriyema8s6MzGdbUGk5DxVLRJYfzvuwFA3/HF3/efO5+Wf0ifrjdJcZ1TwuFEZH6EZMCeMSCBj3Cogkg+93FlqZNeiNYBKoyJFIksDUkfBEArDXsH+8ZyQflzcQ0wbADCglfNAIL+AR+Jzzo8PM8/XsvHFMYoPyT0IloQE7I74XrvVcfgJNeaK/HwY7puHBEI48C4IKfZBuPf67c3S7RQVAxoba4DAKtaWdkkFcglbcCr2SN91f6jbq9Jp3jkcj+06ZUIsvAIVyai90tibPsS6gnNi4AS8QCmumfsuRAtGL44M65Jjy9tCRxeD2z1nFRAcCsd7FvVkpHOGqVw9ZhoAF+DhhCUHLJfMlVYICWTAbfRcACOSK+BWwUoGcqUyRhWFawrJCZE37A3I7zcw1md24rbBJ7B+5hJQEYKiJUARJgbeuADNhHjHC8LgpA4Ni+r33RfVyV+NjYAzUHw3dpIT5W/vInHBMoIM6GnIqsEYgayRdbvo2OP/fhhVYr7FoHWO/a93q0qi2UOzUleB7eFkg6SD0UbUBzU+7ndIULhe0aksTKWWFq5dMWVAiQ2WgCS1YWMC/VAXmBJrk/rto32TMbKxvmMRxZkFOvswieA/MFOtmRjMzvgTsLb4506GyXt7bwunLNxXh7VNnZiB3lZ+X52ek6bG2ZW1CY3AXQoU2sDTATjM0ORRjDZTgxfoFtESMqvZrXX7XwCDYa63M7DufwSGWJT/Y+r5vmo3f1wmQlyUsGUlG2+VKPAVf1JIDFgM/k3Ar8WUlGbOhocDML2y3deIY7i58ysOnrqvyt521+utJz9QxvxkJp78vlsqejK4HgQIRSZX01m1eAr9GoqnRd6w34uOa1JMCTWxd6iDM7+g3sF47GC2968utf/OfmN/kn6t/FDzCGhTh6+nUEQnNEONf314yFDjGSmOIo7Gme6Iu/SGETTJ2AEIgLggkS9yw2mrxkSG+lUZHGiNG/EzoiRgRCIETlzYGbjdwLg0s9TrGtYEyiTtwyY0Na9V0y9tuEJAxiBT2EhjDEvSXruCKhgNlP1ALMbf8g/Ao/7ME89oJ+Wxzexe6FoslzLxcCBFOoSh72QSU8wCvTJTnFJbsmSSNxeqOE0wiI6EOcig/tr3l03Bq418V2v9sipsW1eKMfhJsBgHmA97IFHOX+4Vo+rZ/MhpPVrFy1WAxkxQj2Clt5+Fx7Fo6ty4kU4OhYE7jLwAZbFhuszYK3XYNa2BaLKpI14fYTHrCJatFvc7glXcKbeBWTWYNnZKpHkIY1F+FhyI4YT9Jx1zlCQYBQl/lanz+vadmAuh6kggxFgU94ghaDMjg60H+VAtNcS0jaBi/hHbZtIhlA3vjIMA3hmRcMIs60M4aHJFE82LL1p+c25qV70c9caZ+2TYFQkCwCpwnXB4aXKYSava4Q9cV86EG9Qmm5uVLQAeAhZy1IR/dMwoV9RPe3ZyXIoytftC/PEXrPeAFmGY5Gt7rLi/tg1+3HatPcLytT2Sqz4EOnXh/b1VksqrmZfevt27BQ32x3mV8/+/FhBEYSCnXIs8o+O2tjGd80s5Pq6OHLxZHu7UUMTQPwgmeoyhf6pPp0jNWzt31x3GbmyLarvDgZjF6u/Los/QBvgfBosGbwy8XRmc+LUJp8zcBsGRGf2s5mleuq/rQAw8jOsvwownLqmT8+yfeZfl0UJRCzqbplMLVd48RiHqzn3a4Z1zleFJX+8TatLCwxRPi8d1kbliuT1VpF4PoQjyJvFL1V0IyjEYp1g13nYZzpsYVH6RhjYzvgrAjGgdo2lsJ5NTHKIjBFSMqrj8157syTd8aEP3/3i//B9O7Pql/GDymJdH8zh2NnDmvKXpFXNGq8SdB0oq7lK3Mr4wJUZJ6YCP8ZhYXME3iRMgj1IIMBIIGyHHIyQlvmsnFx8IjmiAspByWYxO+FQO23CQWY2wEkYUfHSnJszIqFwd+c6IkEAfa8REmJDNUIEu0b9ZkiDlz4tO37tIO9pNyIFkxvSayGP/iyl9Byf5sgi5/k8SU0k0gNRwmYkLNt1H5CyylZxi0zlrtNn0Bspz4VVHyYh4sAggMsOsASgYmItt1LqLefknT3tym2vdgiULvh/iQ3qP7PKtox9BrusS7GejAB5hiCLYIzIQcBMA4YFM3ZxQo2X9o5bv9GwWZhdZoMBXc8B/w4Lm1CkaRkZK0gRNHHeZ2vyzFrVTl6YEak/4VN8X9mB+BxEddhOfpZy2iH+SgtyTCCiMCRI8Mq8VtBwtVxLD3pVKbJvZwvTeaMNSGln8BAQspq6WQIiAOtqoRcYXOAqkxQzZqYwM0GgiVMxDbGLIOt7IsSIULW0nKwGUnFRfkunL81tsDlsb7NEXCZuuyOuosY4efhkXGZrESHxFjPSI9nGdxP2i5zYC3Yua0H2+Tzy6wEVOLsc9B87ciXqmW9d/P4dHOZn6h+3Q4/7D8qbIlt4Eh1Hd3qSHehXvzMGAdQjcyG4yzrYr5aLWI2MG2GcOujMtsCCuKoGlyVzjUtAj5r8iwUw8z0s3XeahdaUxYxbmbWfvqbfYmb5Nevi1fOtOAtlaHnyDvX3jb38Be48942bcAvF1eLwgBUChNC9J298LzNCAsrYgfYIb6Up9nKfVwPvoAfCBFnrEzh7aYqztzqWTZvX1uPaDUzTVHD/dtj28bWxPNnS4eP2wpbcMAP8KispZcZ2zK4kKte4SaDYWe4fbggvJPwTj+qfV3OPCh8v173EciYqTw3PX+PN2Od6UyCZu86H7i0wJvLwHjqd3/xd8zPuJ9Xv1Vy7xLuMD1M/jGXOIhmyCSSpOCZ0RmmHI/EKWQujIEIRn6/vTKIM5KB3x14gnpIqSNJ9/BXQnYAdn5ggJUyOgQsgo58I7gnv5kiJ5ANCXkm1nMr2xGwIKwQwBKwXZErfeAuJJnOXSfmQcDb305wI6miKUN/Ax4yB7+5T0ERozNBjofrD5rM8IO6/1TN76dsOYJKVgbmY5gfq7C7EEoF4kYQF8R+uCdhSkTQ/KSfwzDhl+mPcNUjzYLhDO8H7BAw4+GOxRphKpmqonre+czRqEsabTTApJTqsMSECCIiWQYx6ryEzylhNdgmVg12kjFL/axYXDYvz8va5Oap0VVOu6VzmtEl6ZTDAEcJMHsuDtMyl+xVf86NNjX2kvu1WxmBkWh6xlTg/yVsvcutGV2PY4T5KFIpsHl8LJLkRBAEpk0yX9qeGXX+5VdfCjvRpsBfnCRWNu
AqNzBq83+cDfEyNmflc51ff7MN+ywE4NY8HOnhlR1nIXsVByz6zOVvN+9zRC2ES8Bw9DX42liPmSNbDD5bOmw/2qbKC70p8vcxf+ZWPmA3w61berfPgQdSlnB+PSyJQKGpXQOLuik+NzXAQZu2PEVY1PnZGoeR66WBhWrGu3GM/tpn2ZFBEKZAxHQOIpbh1Jt2aNc+Lor6bfjhnmkj/fpFMB94t3nvGpjaos5H/8wNMGLfZvlCFXlk6GIRS4Hk2tJc1DNnF7PaysVSUnCIumzoPzJTxDbqrIWD8IiMW107OCUXWQXBXXC5H47PJT/yZ774u+aX3E+rn5xiHS7NRl3TtF9KxWYUw9Ujk79csIvrh+vFNb9LdpLcIem55GgfbiTpcM9caAqPEhP47h8puPGDu4udT7legYjFAK4ypoDJH3LDTL0ATE5TLmV/Ze4ed3kw61SjExfPzOmVxuv+o0QD8Plh2tg4lyw3OE4CCkYu5BuSNpaksuR+hSi807e7hEqHuhs/cai0Sa55ogwTwvBX79Qe+8IxCTu5u5aMMfFBfx2wvIlrW8DVlxFmRv8tRoxAtYcDjozQWWViNsPj9o6tphGy+ITFb1khgpXndLylwT3VfpaNwGhvj9u+gY3B7rGxgpSbmJFz684BGjTWB+lFoLMdVic2COUQJoGVHZREJ/gcjI2pGIJFGchsjQKRYcRj5EsMz4qqYKAPdqzpymFXRCVgD1iEMlws5BlyBFiPHoaOn9pytIO1Kx5VyLxhKQO0GRtCQOJ+GP7f61lct9sIC3K1tcdMouiyVjkCLWY85Jjq3pZ1McdRel484KQCkIFWgVBE0zDsKrQ9HUEoFmftPdDneJxnLNIY+vjy+ctl9rPMLDSf4lK21xtbLrNP1q9clbdWn/a/9X5rWZ2pynicl50pwHbgD1vVgriYCPf+rPqmdZXJ8wJkICz1t+t41AOqtDlz93fdondHAL6i7MLnzeur3qkT1YUq6DDMNsvKx338ieI+qm5tz63pl3H47PjE56U6mhk44MGxohb31Z3ceJCCJU4Pjnz5DWeL1jZFGcjyKgSPHXDkeEFEx2X3HtdB0TE0MWtK1sh/P2x6dL9P/Zr6B+ofwa6lsiLhQSL/4rZ3t6k2s99JgnMuRXM/UfVUnBnEAJMp4rf7lB8gYd/Bg7JgcggPUuEkiHtOKVD5WHJswzyZdwouHgs/UiKnoxfnL+xcDEdq5JJXZIVmJ8USMvNDiADfLrHIlCfZTQUXVnlYfQGoIM654LthvruH2/spH3L3XuOML7i3tKF7Bhq3BzNmAYcwdnc7ZTnN3e3FDoGFJDdBU97wcu2k5H57sPub6/T9dUoN398QKBfzBKNjSu7ypOH+eVn228BLgXfdLOY388O7JO6qtbk/BB44rDe6NosTky4m3iWnuBgHuZMPb4HDd3tQi8U9UwUsgpPaEleOwNSZBqMRMpsOK3VSBWG6XevK2lD5slWvmzkMK5P8okW0UggqYC219PRGqjuwa2yxMK7Skizx2B5xyiHOkTJ7gG3V2cusimC8KelRMhtBVGHhR/xZiktgs5n1LKxECX3J6osmIIpmWl8AB16TWBiYysBvI/h0TAlXj60BHuvO3XNbPBLL6he37BgQKSech55Xw1YNMaSE3TOpxx9YotesrUdGTogxxBEG5nAMybjEK7QoXZ5U+drOVNUwP1I8Y3DimG8mo2J5SDJJlAGATjmmj6Ru5mCu4A5/QePwzSZ+u/w5NdvEZs06Ey88t+89D8++iouQX3lQ8MbheAzikzruP/Ivq1CESp/vV3Of55f9KpYgG6zbPH1/su6D9VleX7y8jG9NVuVa522Iy2x43q5cwXNGvOMRXIXh22uLcNH36UYAAk9eAICyuDoeEEoGphwMwyNcQOd9ONPtoq63c/KHuclZh7Fd2XzS/qiZH4ftsfanVpLsgbltc9Yc7d1JNc+5EyU5Wd4ChfWzcvapUb3tX/6sck2lLjI/3Sv4r7zX+3U+xt7rJaIjyZPFYNRAjUcWgNhS0Na801aiY6xW3FOmrnJeeywVrifea9EzpLBRM4tVgfCAGmUdCWSpalzmzDu3wM3NdQimUrG1HwX9/QUcj/rOP565syt7vkcEZGtcVUWJBTYv5HPmvrRm9eAMfssvlVDv5n0Lh1dGK5zpxRe/Yf4N9x+qf4gfJCETjjUTLm8UA51R4C7xe+LmiWK96MTgtYdrQUV1qE/DY/It1wdaFLZTqhmREbZGzASgeW73jUK0tjuA027iUItrJoau7B2xD3ACgCcMnzLzPFGvxxLUvHYESeB8AhXEIok+TSWdhwO0S0bmSk/AvhMqhQORFPYQDnki0rJRHMR08LfiBe4fUhH7QYpXKaK6P6RrHv8Hzt5eabx4t01FrymjnLLddFVSV/fJeTTCrJLfkEqcvqGaCwdwsdviMgUvKL2fBFMPey8KBQR8N1LtEm6aSmfp37t7yQNJFgsgn3JNn9LFkMTu05v1u2b0sZaFCCwC7cLS88fV0eg8FnkKqVgYZRGeeBCkbqWWWC6+LWdNAJkITK/YwNib+hRmDwuqZwKifGWOwcsLx+9It8i1EEQQLWC5rpRsCLMiJi8ETzUDQcv6rAqhZWQXZoDtY5ARqVxHMUOAhGsyEcMg6Ccxil3B9CarNKAzPNaS6RUEgIMqnl4OEj0iRlGmbYyUoExKYxMTB2zKs/ZrmPTxqRTnsRVSPRx7YQSgaYgGUJOT/4V6kI3QnAnueaQVRZwN6BKCMTLIlNTW4OKRp+yTACrn4aqZz90LfRMQCZP8wUNoAWAmk0Ai3an782VWL7OLWM9ZdlMk1WuGVkaSam25smenTf/DNquDS7BfSPmQ28opF9jCccQSNwIhEu6Xj6EG+4brsXRhuHoNKHUewYFB/9amPXaAsTwWVuoIscrUxpiqDyFr7EzuHeAZYewqe/ai+0mfndSXoOlNi0CwdDls8U2Nm2DK4/msUp2btcsKQfFwZNY1GOPzquqFD5cA39jVxQtmzw1ili7E93ZcFlnGIPmkH+u1PlGD7rx74ffaPjdDLEFUcd2auN4/u65PwXpNEdSr2aUvcSH65/2safNZW3uG9YWny+u06gByelYNw3mG5QBohdM3oYRPKWfGZiByS5FAeUqbLmsW6Jmt1klKQj8YY2ZMW1VrEOZzeNfWLyzTRjq4vLRdv5p18Tx80tUgKitfg3rYHhcRAYlleg0hz5IOtMI9ck+eaPUvffFfmB9xf0z9svpLQFaYOWtdPnHQIWW852F7EYaJQgqp9Qe5ENAAaCGZaEk6PeaJEgqmNNRiIovDVJZnSJkI4l3CJqmhTSpQpnzku1TEur2aquoTg552/MiUydGmQ0v5LYoIDvuRAHJKFk25aEae9yl/TdHRdxHy9n5/qMwD2HhgH9SFVPSS0IrvuvBJq3Q4MH5z+0HNT9UtVVrXpyJbuk96qfeGqJawj6D+hjnupOMiTk/QD7h8uE8FTfEO8E7DzfVUP0gM14Ij31+EFBHcm5uaNNBXfcwypqQRzAmOaUlDszCVMe3Ds
NBMAGE9cUMskckiWK4DG4Gbn6k4W+5jRgMbGdl4BEeSg6Ra0XqR+YFAFQiZDPWcnbGFsNSRXLBwOXHaSPGdWIEISuigyyJX9/2sHW9XjFofupz8mCRXseyONcygVdRDAGBBtaTmxLHpvJD8GvFxBkah57kZbK0rzwSVZ2L+0g6d611R1sRJha8AkUBCjojOlQJ0PEGbsug4IoVD2nR340wys0HYNaAzI6OBozl2FmFj851fx8aXIJ8kbParal4eVd1yo87Ws+gR9xo/nC1rkwRRBQi5x4F2vsqCWRm7wrXC2c0tVmuBq9HB7jOdZ51pSnMLRrdsLILZwsyK3G9MGzfZql6WOhZY1HFj+uNc+3IcbFKPgkmXbhkWIPW+aqvWd6aaO9zvZ93QOJ85f2lLW9Xure5w+K6qdLG56SP1EHA1iujhddVVw5hnuu0Qkbs2L7NltixLRLB0AMDdzMRV+7rpO5Z1g8FvYj43nRuP6mObV7gRsQ956E6LMu4obMBZzo7aQre4E2bmciuyNHBdIx5vtPBU9Rjd3i3B+XJfxMBkCg8Gl/w0tus2ANNx50abU7HhVQ2i7pq8Hmc/AJzLjuqKNZmI+0QnVnEZx/Y7v+7KzBbForXVJbHr3/vir5ifBXb9AfVLT55czqd6vEjK97vt7uFA3KSKtUuJJkkx6QtBskOWill0L5xRhJhJFTnJC2p94e9u3zuJZm93h/CbRXy8cKXvUqXqjZKgmqmyK/1BCRV6EGm7mDor8Qha7Z3EsEmASITxd/uLu4e7R/nRQtLYxLMpUhX6BVzYexEZkfcBHoaL6ySRGIdDiu9RJsVP3CQgRhTBvD9PK8lMJ6mkpCWo0R9rPf8uaAPDUsg7Js3UxGUF8YewxYdNmLAUGLnd1cr8ZMTi3cxHgg2CHtCIxhVBdIpOhag6daQ1lX5cIBTVWglDKb+W8MZn/bE1xVHzBq8gmlb1ORaJcyGQ2zFolUVK/Z/LVEZROQGMggIGdlK8MqIS2Efqc5SPvtW+NqxgRSas4DGxRN3xkSttLYfhaitBB0ibp848FrRnZXIHepfeo2xvRY85U7ZJMiHWgbxIIYV77dsbyoQQYoluwTlhcFmWzbABEjLKHGwutNJTGJ+cuNa/zc9JKGG8ODESsKFYuELrrC6fGkSPxp635iv58ryfxSMmFXJQJn1jvxzDZo5o67NN3vnCYGFaMj/bN3l81hxf1NHg5SNdBmCB81VlVoB90ZP6c48Qu7DxtH++WbQ7WxKSCxj67DgrrstvHdsAHu2iHRHOBlt/3PezIStbswDB8q61phnjh44RrIN30brGHuqyDT11ii1T4Vm+I7YtSlyHwHWgQxNPbd7N+irrpBbPtIZiOpzQRO04vAapLQix2paAAj3gHgAkfIj9jMuhi66w5WftsxBK0qAK5wgHkIEesrDn8jgeNa+Xq7xcME5lxTUGh3h08WL28Rx3rvZlBWdg8tr87h9CaM9sf5mVOFyJKd998ff0P3H/gfqD1CneI1A5VYG29HYM14vr4WKQUj0oUQIFSRrdiAD7/pBon+jQiZ7EQpPFEjcG4UbphVPl04amGAuUKVnaJEiai1xpYMRa60ZdHLKBF9tUpbsy3xPKUaUkGurHvyRL7yZ2pA8p/Hkq1E3i7pQjZMcMAkufOmtEcX1QAwB/bveH4DRRovdqd5AiTOKj/cU29djciprpbhumwv19Sh0KiyPu4WLiMl2nRJ+eZJbEX/VfbbvuuF7pOcmIkAmXZH2xg8XA+lcIyIYm9LrVKvOVZXKCqlbP8jpICKyLGW1YSiCLCHCICVM80y6OhCgydnFBdH1eYjRCBP2SbbQv8vNTyl69pLVMMWPzBBNZVmRqtH9iDgJQiZcYf+mexwD0kZIfuIbsMFdxXJndZtYZd7QSgYFxrc+kkkfGZYWEGOmXQYSALTcUh2uR6imAFNNpVvV5lZdACssoEyfADorcZF2V1esCpyP0Dg7eRDPAer55tINVAr7yRSsRrhJZuSkWFmEJs26Sp8uw6zwHRWtHEI1jb2PDq4GtYPdaagG5ZfOO8Z06fVkddcugu23D4KPzsfVjnPWILr52Wjb908pnQL9xnp2GfW4ajziYzLLKdWzKcG6WIV9VS71kjGJMXaza+uIu+1IG8GIjBmjiJvpFHqpmvnBr15ZsC8GtNxHBWBwRPFnXdGU09Sa/furHzum1b9WyzaqqOMX1yN3saN7VwRQgqm44jXYWinxpqlD5ilV+p6IrfVczQT/u3gxvQouVUMzOroJqQhztRs/mICEErAqrIyt38ypbtCdFl2tqFKhULQIBLoPxNnFDjPhXv/hvzF90/4o6xQ8XjEL8lqxgkgON8n9KQIf0I+zthm6YeiC4X9IKGuOxktDlEJosUp+FRCFT5PFBTZ42peuZrVF30iQnxT1pjZinhMpktmA679R1kgnjhYfvJQ1hFFxh68jNNTMsMEl2fn23dw2oYhM7wg4u/HfBZJIW8/+Ha3aXYCv8+eaDAkakJj0RV3r9eYP7kenavJ77ggsEvrJ2iGBtASYcSe+kmwo2HEjuuehSE4MzOZePNu28OHrTH1XgjqxlsdS1KAosQUp8Gn3Vn5+wwgYXRuUabrWDxWs3xNYyA8/cjUI4A4/govdB+8EO0kzBejs1N0wlF7rkzp2tY2NEGajZdcHQAM7d9XCANh+Nx8rKFKAJkfIqKxwphJyBidRClyQKmfvq8922tX9PgyUrVVQ2K7ImdDr+5otPXn0yK85XbuhnKi9iLDNQWIpajK7VEkQoxM2K5UgcTKG6aPvi+xe/q+byuGl9cTTk2XO4cV8X9eJu/DA89/Pydv75Sdc3+lWzOaHz0mzI0AF2d+R/cL74dv2c2Smju9JdvZoVn+jwzB4fdWV7ed32i+cMhcokL8pmLQVSUmQwpCMUKWa61zjSJHFg3Q7MBey8VK4iW3ODcnVJ6C3pXxmfUZugV1TPUVJu90ffZ9TXT7qn1NFp9We++Nv20v3b6pfUf8xM7QJeMFzTBqYvL9V2HMaNqhVcCQN0StL2UqoSN0kCC+eYEpGjNPwEKjrkl4vvbWQ6VLXvGLpPPktytXe3SVI7NZXKCmeOEz5QjFJYffKRbCgQlzxxXknobuibp3L5/WOO9k6idXpJMnkzuco7UclhdwNT0VcHdv+Yjd3ycKQb6z5x/wfZ1qGxQLO2P9X/afo4wXC7346NuhtE5TdQX0fScar2A3vDEueQr7xaFxIrXE9G/ZgdoVz6XgWqfnd329RwJkWxURR3lAk/3Mzxj20HBF/SCumogGNEXJH7uo3P29L5n0bozXsNYwBjLo2LLf0a1kouBW3q50zQi3xn3NnT0/6eRoH17UsfK6lVSxICXo52hP3MsFrZgBkYuFsWgjJqzfi+DGQ9MKMRy4fq0kegQR4LKc8LG5eSTi5dTvC9LJhHrlks48BWUvxpcSyekQVoL8JVvc4NuGrRLtQ6VqbCgdmTq03nWl0Fpk+5XYqA2a/lck+lMtDLwcL8gkIeWKmEIla9Xo2uqILtQ5apnrKcIxDj6jvf
KL1NvAHHBTh7Oh8GX85ixPbgVv6ET7gjIpA6MN1sJbb3Bib2e2aDeWkKU1Ko0hyt+5f7by2Nyd5mq+VmaStcAhMj1TSigw7uyC+btdbrtsd1iWxHsGVfLPX5eb1VvgfJoJAxA09v7UDVgo77IW8i7lYwc+tHh0V22q/LijCcS6Ka1xPIlhe4UPCXCMvbFy6uLIKoronuJCLE+KwMA4KZUNl5YwqfWXebr31TmnMgo58rrCGsg6JsPjcvwkfdRVsUXB+gbfD1xy/Uy5hfZbo09SsXL+NM7pJeLtxTxFsLjaNuwLXsUMzB+rq8JYfoPAAS8ZzakKYwxrL+qlgN9sKHI90WJBpDrMHocFS2V5QjsB/lZwFnbcA10zMdjrq5dWPLkAPRxkI6sJLe93/94q+YP+3+uPrv8UMqJU+cm+IxycJNUfWQsObwZ3zsZ+efm0PY7feEEv76QspIUnzes11btLr71Lgd/MMtxa1sP7jdT5LfVH7hnuaTcPfQsJ2OSjBrkheLMIhlIQCL3qdEx93uIoERcWbqXn9IqQ4hD9skENQTs6hFkjxOlTAi3zyJdvapMP+ZYpFIuuWnRiOgnehq7ve7QwO9NDXdscSzP0wBEOGxbOb2jeA3kB9nTa0zX97t7x4S8u4udklsSExN4rz99wgHp1L/u+/RCEsbw/3U+yAFpH1qWOAp3ydKgxA55K42bWYEagzrRFpS5oj+Wh9XBejEqOebBiyx9GqO6LxWeqGrplhKeiKXIIKSHwkM2DwuFfMgVXkl2qBqDK2E8qkqykJTzZ5JkfFEZj0kgsDCZUV9tUKUrUn3GawwAcZeRTAOvjFmtW0amoIkb8E7yswmBRKjkTw5adMAt5q3vW1n0pPFtyZxgZGMKWlO5r0pwAvguBHZ40BKqtTY0aTTUTFNnIoIHiAbpJlKDgQvsc6DzZQN4Bb4NdN6rmLMG+deR5au3Mz1CKDtcTuvnQfaWsqo8U3Qtu/cL2p2WxTzsHJPKz2yyrVmsdvyJPFWoF8X2/bq1r0qfMDpZctq80P1V6MqpOXbe4b2ZWOPSwROzQj6hoMqLiute26lXYU44JjgQXIEL6dh7nDGdrWvXzY1whBYRMhaa066YVbbszjGxfykKn6LDdfu6tJvXrg5zgHYNSzds67NgPkuD50ZyuwMpx5cvXDBqptYX0Rg4h5+4qE5OyZqqKwyr2P1amsRoTRmmPWqzeqP2nWsHdNPfexCLNpuLPSiXJ9xdIGliMPHLNrn1y5ch7YVomWfOTVma1C4+WpYMKFTj/r85MXTm9K8bsrbl8zOzhaqanMQQw2XAgpdq67zLKR7xob9MS5TI64GER7eUdsZ1oPHMfLFjunibvt6v+59712/jvqF818aVgaUeKHBmTXWu45hGWNT+xZ4iwVvisLaUBeIc37gi79t/qj7GfVfC2sDylB0e5N6qyY1EqvRU3WcusFFuB7DDeW1O4pi8K7rQ/Jy0uEk7CSOGSqT1UuVykTD/FTdz0/1nDJdYiejqv1dDazaSfdS+vNODV5Uw6kuzk4CkQhe7LaSPr3YHXSLCYMF4mo1VZ/ksAc/FYMOcdc0IeQgLxRxccrNCtRc7CYOKTM/7qbu0NS8JeNKBLiSbHH/MNJPPCongfzv9AXIFvZ9vWDHP8guLtT2UcOVGkrfqaRn3G3FDSQ+Nima0zgRPRXO2fMhkslUPaKruNg/3NzupW/0g7q92D3cTerre/0aKIDFwqJNIRkPttGkLmy2IlPlTAkwK7M6ygwFq2/U0i9YaNcI4gkphWgvFAIKkc9o3QyGKTlOzXDSGWqqLG9Su0IhmYlUeTJpcAfxiEQsVY9zCgdB8hCbO9MDAWGIvsVXVp966TJwda3dBndp01DFDHRafHj1tHRnR2B4XvTU4H2inwEY0YHjUCr2FJoU1hDbAhCh96LNZLKTXUB5ZFXUSUGMEQuiURJKZl6w0YabjSRiLkx5WG8L7szz9azSMzukCSQwFDYGENgnIbiSMhSTo511P8WMNtVgpeiUSmqftapYiVLUPwWqbmDUTNOQwC4k5dmp1i/a5drM89cn6njB9MaRQCvbJNfRZDn4o2YHCrNWMwBG4TJRIyP0oyZKcapKYG+9YnQs9JDNu73vtJ2FnMk10zPJxKa3Ps7szLetnc/zV6/GO19i3ZaLobj67NS0lT3JsrxcnOcxXyM6zcqVX6dxKm1dHtc5gJz3jGvG69UGvM0tcM1drjKiS30c2nL1JTesfDZWiMDVs03BbIrfAObpgooX+Vi12byuq0hNNiJ4nAPWU7Bl630Dnq2LxpS+0i+1esb7zA4tHCMpvo/rszN37Mpo69bMM52ZUhfsqKVoqlzBXzhHRqlzaUGBX8b2camBlMzhfPTFf2d+zv1b6leot2YO5PZ+XMzvFkxrLIbrhzRmAyZ7ww6Cu4dBgCl1aR3rYR8Wo2RN2ao+TNUWbmCaabG4uX/gkI7F9jM1RX43bFadCsX3k9goNWxMc5P8Y11bXmWtfSqPH8glyV54bFnX/nuE3LdXJu1md7s7zOI4FLjvUhQ8tbAehjRNpPImJawGfxjVQXwiT709pGmF5n1vH5eMwLibEsIIdbe7lPmVLjHZ/RslvR+PGJawiyRwLx3uOyq7774nrbwXGejNB3UtJXimsNjdAUBnq9ctaBAzmDrNnDAuKQ1FKAkGJZoUSQTjP6dBHnTjzjbPs+1KOhlgmcyiRqrfjAPecK1kVMuJII6dn2HjOh/3AevTNIj7pB4FtOhoNV6nxkREvKYCR+oRxGYcbLSosqxpXo2CCaYgsjB0ghN1LkmJkv1JaatIE36k2JVSLkBQF6j+sDEPpWrLi6ZxvmZ/kDRtqspIZVWnD1H5ByZbSGtYxv78CmdhX3phf6VZtf25/4iRK9VJTvJsotFjgtxZaUARQanwQWENuFx/wk5dtpLTwk45KQj7jSxfiQTQyXAkxUw5rI6x9sKusNMGhOQi+0H7CQVYir1PSsYzUTpjOJspAGNE/ydSFUTQdpbDXp/HpyE7FlF9rMrBgjQGnR2542zZRepPEaTWWfjGanHnh2/ub/rm+Q9dkLCd+GLTrhdlNcsvjUz1cSMYT5nPy/3586Xe5I3wb6C1r56NT4u7bK6H9VEo8+Iqiy3nG5gKnLFbHi/O3G0Y6xyBLHBr7Cs4yHnRjixVwrXkdm7sEKptsy82IQyAfNParJoN7fOrAU4HDqvVl6sceOr39bvTly2IWVUo9Tb+M7MxNy12hCBhEVsVbcteUyODqFgFZG6SjFmUZN4md+ymUSxYHNSO/0df/DXzy+5PPvlrT/6hKoBRJylgAymqpz6sEx0kkJpLbwUVjBTqgCCINJuia1KJVJ2myTHaTLPSmOBKRSzJuKVWSlj1laad8nsaurSUi9XbhykxfYi6rm9TkBqY3nqvT5SgyjGYyEFYDnpHaHKLK+kM5RFMyDOVk9OEoRN9+djZ+oaglEBukNkZ3Kf5M1w3Mx295GURojRlVrM2lHeiMjEBZEFCLMT1far+EhAA+7X3RQrgDKM63IDT+aKpxgrGylo
P9SRZDddjOxm6sNSdo5H6tQkvbHbml70bpChDI2TbE8jys8+fM6fg6MitqLLAPI43G2v33Xf+HXYnwj4iewB1KtvU1kpGOSvUWwv37EKhaa1gH9LUxHE6dKGIkv5T83G3WCIYUUImKrz31DUv2stv/sziNz2s4y/kW5ddlnaZI7xR7bpb5c4XAKQsmNs3xacP/3z3Q9kL3fzu8+6D/+Zy9eNVvqgR1ncvXbfdrXYz3/mjV2Z7stuWzXG/q5bvvi9/t365YvfqUaca386qz+tXofjRcs2uN3ar0Gbsyj7/lv4txdnrrT+53nfNrij7u1lcEoAJkHGsmp/+6JeArDPbttly6T4aZSAQ29uiHSObQgLYQh6/v4GHbyn0YJOWrcpyNl+ZRZ5VuBsgT4s0M+tfw/r/pvtj6o/QP0sp8/76get0vx39NmzvLqT5eYvlDn69l6zxFsuZoUcA4d7tL8JW0ruHIudUmT1Vw8V2sqVDF+P3iBtkChaTrEnpIFqy78YTAwW4YkRDas8YRHPmp5k0mvOtmAe5TenhqaX6g3QW8kMpWc10i3Rx3VxPE2d21IowrrljS8fUS5WaMhKX39++P0ynuN+9lAFf93d75nInKdvtoUybhlZohhe7g7TljU790tuUz2HTiAQ29Ln3ODyZ/vdwnThCmpG1078gzQKJ3QaZTyWSUyv+IpUfyfLFqflG9IYGVB6eUdFm2GVXGx1KVwFAwac9ZWG6Jm8Uel2oUFADxMwBowmRk9nU9yy1VJJxNmIejXPg7CpJWAGclu3S+UinBz+XsYfA+Nz3xp+a8bI9Bk2ts4+0lGV9ACUWRQgtcuq+4FQrXYg/FlUY/ELQkm5mjojd0rIv+r1I7WxlOfeITdYytIoyJeVKqkPICyQLKcjNgxGxr/uWDJlgZSjN3VHJ4Ypuk7xARtRIEczpDJex0VVz7FvTEW6KmY0bwyYS39lsZtr9WXYxBlcZ1bQO7k1nu/DUSqAVHGACQUd1/kn+rLtx8aNZ897F+/J5bdscKNPhA7FAKFIb12RVswAoDvGjyBFcx76cjW3+sj3dLm5VgVtT90/Hk+cxvChyRApAF1/a0JRxgchHMqm21DN8C660M82yaeB+T118UxfB1fvheFszMowqGtyE+EYayPxocX/JcIzLAN96Pndj7Y7sYM1VFkovvfQu8XvNyCpy/IwtakvzZBiQslKM5PrxiX7yg1/8L+b/cf+6eq2+9OTJ5YlK6cRaTa1KKUV5FWQ+ZvAfDamMI9afCDUQoFGSMPXsvpoyibvDFDsxm4eko6ctbMPEm5mYxJftMTGEdvpOpRmcnKDy4bFMupDYQQbhUZiVKrdp9gpI8vuYDDn1NxPZbul0EwMGbEgCYkIbL6M/U9pZCwb5CbXSsCv/3SYufi/tmfJRM09ul8HE3PyDInNlddy9bDgeqVTVWXv01R7LPAhP8/NOIkb6tYG5ScRhq/uzuoYDzIIIu09OfVFLY0aAI1NP6wjLsF0cFmsLbG/Lxj/vTl8VJSm1L8pYyoCVwGSerxHFh8JVsSjzEYYgelJdb2b+qSSqQhkd1ehPZYZjaPRgitSuQxlHG9uhPD152qwM43xd+XaxGT+uw619v3qawcTKwhZRkmCp6JLpImtlCBlHL2T35afh+O+J8r071/f/Y7jM1uv8xXYEe94cZctzW6y7zcsgQnmwDVd8dgfrL9QVp630fyiue3fOju6ji9JTaAvuCcIKFjdnAmEGali7oqjnPvzxsyL7A+3vWo7L4vOrdx8Q6A7lKq58HmWODStfmfB5RRE2y11BsfblQoAPdrHHheKo0nrmGst+QsQpTT1s2s/LOakCrgsuqrO1W3L6KQ64Kv1p+dQUQznEi1l3DeYMm3VxPouX9nl3Frql+4XjePVCm1mWtS1vOYCyGvpPNktTVK5tfLWL3+9a/S+YNmt+CvehcubPsm7bfvG3zG9zf0idqZfTlIHF43C1C1JHLvr72xs4k7vbFLTu0sSA22msmUgZZOYtdQVbP2mO0jiSUYqPSew01Uw8a7usEdySXnJkmgwHeFQ1PRpiqgVPK/8wSngnQeteKq5iVQ+HUQNSb30vis9bCUUvHvtd6FcP2TZWOGScpcwVWJxYVlr9NDxK9jaNmxJn7xb371QKz823QsWkbsEwJ9c1ZTCiaWM7IC3K5krqB8HC90Qv4B+Z7JDRE3SCBUMC9t9lueKoENhXy1aRo7I/eR4yVUnQGVkeBYyqmTaFdSBglwW8rClLmOlAPxqwfRPmR8OJIRYrsOM85DC3VoVKS6GUuSEez3DjLgJnYdiZsc2lOqnn31wzXjZZVTZfrvI2pvQdnaQXNSD+u7gcOzPHKVWA6u980tkmB2D8xvm3+y9Z01jfKZx+aizWLNd2pOm+YKqrNzD1zGbKFmPI3rZv2RTCfkHwZLJxVvW+Hstf0Fez5kear/iTpn56bPb74eTEl/OQL2zxyepKf8hmLXsy9CI7Kta+e7bTR2ER/E1WZFr/9vnHsXftLPbFR+cfxT47qi9dOUekp1Vf5m/DPDOL4JbudfXiI5e/eKXbF7E7qYdvF/nlSpoL34YP+ekct9FeDrm7MPnqS+pFPa+KwDAQl/CoPWpmPvq6AvLhloNMZAVnNALuQr/o92FjIzCxOprHja8yzv9UooX4ddjU73SfqpfqLaM6LcNykrT/0JMmTQq3yU6SHjmJHThdkFaW5pvht+Zi/091jkmfBA0w5cX3nIWYxv0sxo0KqSuDMqCpZeKFkX6z9weJj3w0zeOYUusilXivLg7T0Chl2PvvDga4WAwX47Uk7LejTwrmaVQHh13LqMPHqdg34lrvHguG0gjxAL8c2I43yPkyRUUt5PhoZgrMfT+Yz01b4ts8L3QWZD4zxxO6wpWgfyyMaWGdacEivgra1d5R6FdyEOmZYmJw5q20LhraITOzMqjHk43KqBv23npznOlI3ZCDwfiaLFCq7ZTXsSmKs6pp0SbJAqVDC3SqYkeFprjfirgwU8WClFFUwKoJPr+IOSzIreLHMWr4Ewa1dH/Gvur2O64dOSiZZOU0+8KWwR1n5eY7/xlg4v+ur7ZMlOjL/vZk+HoZ+iws4smCTXDpQDj45jQ7CZ/Xp1nz+kN5XzcZKLW/yMJxJSxY9wDYszqrXNnavD3Jqsv4pcF9pWw+Vad6FhEa1tuzqnMv7Y9kYXbZUZJl7XxZH/uZDECbr/1lQLxaVH72A33zKovHVbbMnn9l8W2Pq0VTiAiVTd4YOJ45wsf1N55mZ5vGbRhkwsfmzIotYosrAoL3XJ2ev6iPvr/pXf5yXb3OhlUx13lbuAFhQgMGWupyWQ1Lo16Ooy5xxzjxWana50E3iMKFxPvs/f/3v3vbp77QPwgb+2H3R9SfYuy4SLFXGCZ9zHYPI3i4Yfv3lLWdT4V2rMCLqTFpymIckrKpbhRSPrbWUzQnEaJ/7NOR8Tvi32RExXv9gaHUfZpkwQ6iadT8lEJlviPlfg89UgO7/9MY5mkEgIwXlR8OI3UO3UlCVm/TrNL7aeqnzHu/u1KpCv6A8H
DqYno02VEkwJzzd7e7YBS9SULEU4XdXI/smQ0v1faNepyJz8al3cNj9el2MudpxD2Y6oPI/6/vP0ydn7xcknCaxgjd69c3C6ntyPhMtrHrvNJzswnP8rWUhHNyNvw6U52MfzOuYpenpHdwdwG5ATyIszxzx3Z0mCnsMWa899FUs1g9lD22BCdZSgzHD1lOjMtE7F+buZUjkBxq6oUEbLB4LLIhk1fPji/Ot0Ftynwh/UGcXMpZGoUcAoK+lu1JFAazRMFwDxGnlhkynH7BA5E0nryd+2RfC90v2Db7mWQ0gLwjOl1y5pdlzpTsQi2o24e3bAI/pHJpRGU2mzATCoMIzv2ivvDrUdmmBf4EdgWCBeA/vcpVYbb5Vb5t4qfabViX0m0T7ZvqWr8JX6uLnC1VhUIszhSan7ddDJvsNmYhN80cVnac9R+aNjs1oUWQaDOzwkUdI9xa6OGQi1z3liJOE6l4sHEWzvrjFci7XerW6LH4LIayb2xDJSDuUmQ7+lKrbX3y1M10NuQeJATIHILLVlQqhSMflgRT6dQQFZd3Oi99v32r/FsCsjOlLf+5+mKZ9cdly3bPInvm1u51h9fyLqz8qIpQ4DpKHkIKa0GU3dIeUnpd9bi7ma5Aal1BLzB+Mn9TggHluIcrby98c1K8jjkO2Z+yKIrbuKwpGfVLdop7G3K9sOGJVd/64r81f979s+pj9TvVv6j+NFDl6jBH00wtOtcP9+PwcL24T8JYhm338yQd/G4zzvxxGKCXGRR+vjjUpKeazpB6x6VZnUmoJP1N0JUmnPvhVIYkp0r6IKw3qYEfU0AiEWZi55AwOny9w0Lj5kLS+3ADstP5QfMociOJMhkaH7rR/6lU1F0SLiaaPb2UuqqkKD8dwGGU2CVblGQQhzySgjOLiBnTNLGrJMAUEGIKDxeDs1BwHNeL6xNzTX3xtVxPyWCn8jyOWP00mJfdrGY/FjmWXJsWVNk0WHrr6DOYPrw33WjOxhBOzoxxc7TDCjUdoqDQ9L4AgJj8kuobK4DgqiN4Oqn6MqvLpEPMOJFdHH7JUvMijhW7Ies3jRvbbFRplGjImbqixZs0sJxrukg94iGLwbCeU7PbRRJk7CLXRjJMSyBMN4s1LJ6if8s2G+k4MHnBbnZHtp0535mMw04pmAyDFXfPQQyB3emrPO+M+j5sNJKpcIJIafqcRTGQ/iXYams2PpNuJgCe/mPsOv8WLh/njYB+aA841cxtRybLomkReP5hdzFyhM9tZUU0kMlURBbcwYoIPxLbp67NWjd+06x72+uujCsO6QmIbktd57+z/9LZs3VfR478Ypqt4QQKG+Z6cVzUL7y/Lt5QObrYOH/sh6yFkeeBfUijzwbFESQmIDKYcYyZJrPQvm7q+LUqtJnJ89JxHj07MLDddSgBBrbuZq1/la1B0X3jhko1zQhKhMXGLq4M2IvTdW+96Tppg4WPUK7NTX7aB1+6hSqr/nz1URiwpvSgLsJZVn1SPnPPR98Yn81C8ROLHTCl1Pl+PArnJpyG7BSsJeZJ0arlvyKceT3vFSd1IJoCDVy5zsU/xTCH9bDuD8sUUjqEHnF+X8Vlp1K7yaI4IYc5/eKPmtH9fnWBH94r9g+cKiaT5qIdlOG+U8b5u4XhIZDOeOaz4cXxWpLhTXMmOBj0IJRmdSZNLj+RqV+NSlWlx4gY7OSlAlMgAZBpeqKpu51SVmLbnAm8ffPY4UOq85AGfj7Wig9JNeqkr++kzH6qUm2dObsg0/mSfCZp+tgrfcEpQ49J7TRR4krfpWYheSDQB3V9xwQdFS6n5azOQDll3ci4mjyLIkbOB3eeD8CAIJlb6eDx5XPGmTPJafmo6uhmlPRSrRdkBI1p6agjE9najCsASHjTjfN56xt9KQ+y6ZZnu8VSm+1sObe6s0XNIohWLTX4fuYoqhI+48va5aMr92HgABmsTrhTySEzd5tltIkiiLrP67is5prjaF1tqlxIjB3ZtisPy0FM3lppdZ4mg8IiS9+538uO7Gia1zn79DrjwilbeDRH4HT16rx/dm+3nX+h8/woy39/9mXNsvVTZ+vB+CKobFvvVDh2bWkr2/OZP/PZ19Zh4VZH2d5kwwl8IGJqxAFd02K7z/nQHhXnczCYTHtX4Ipw5T/VX573P+9eXL1Yf6zLLG8WxcKt1+GEmWZF2fDLcgmLMCBtzPCKPIi9AiYDq2Pc7PqYLw2vYNsuxty8iC0byWVOc5GLrcj0DucHWwGdOSqCoCSPwdBDyDa+o4b3L3/xN8zfdX9Q/XWpG323yz+1zT8+KQT/T08CGA72wJG913eToPVmIR4Vy1U856mac/avUPSFdCOEFBLU6vE5KUm4q2V6905Ks5It82lAwhjSZIJapcFfUkaaOP7jeHLp9E1NesMho7WXglAa9Ds9l+CxaXk3TSSnP/ePubHvfeZBeuTB1WPtNs3wur+ZMt4SmtxOArbkzdMgXpO6hw5kRQS5W38hrQvYO0KIh93FlcG2LnhgdNOcSDiJ46Y+ZIQF05jxNLfzvbqZhnVJbvz+Lg1ekGYKpuTeqS2ORP+bOiv45CaYbTlqTlt1fFZFnfdJypOxjc/Cw7hw6demKagggP3JDFsOvZxx1geIK4vrohehcEFPAAHLa1zlunnHIQOmhOu3k2hMOlvBI5242A7g7VmAIhPn5zhrRWZRKW8Xue1nG6caokXhpVsR9gfu3Yn6TeRyLA65UIvSn3NeOJZcwYcgsG3OsPdxOzi2HFWF5XAlSRQE0FpSWY6p5jNbCo5JkcdEifrFUUWBHdU//GN9FsvSHvFZW5x1bAUyZOCf7tQATqLmA2d81aou66rI6ll2VZ4gxKnMKrS6pR4Gny6d9DlK7w5TcSbCzbqF932dUc2CoCpT4DhwW8Hlp7MluyRA9r20bgSmwxRYwsKXZvayjRWFDW1EeBE0QhNQH7ZrhMCni+A6yJiKMsYZfLJtZ571sqCPGrcIs6qqQwv/iOhFMjO46Yv4mVN3vutW5epUk8A9y+y4qDZA8Coreu2yyJnw+RjjMttdyoiIeQ7e7+vwangGkMj1Z4P28wJkqLbtcXV3GkYQ/LefBnsW6ItxwC1IjT+Ow6J35iTWY/upXbl6mLlFsc5myyyv8rq3cOngHJQUU1VIIvPqdd/0rhEHAmZ0aW/10W9uXtUNO1o4y6o7f97/vsqaE9txSLw0WTHhU1OEDqzXMrdZHpvldKPrGFatCVi5Ja6Czvt4nX9i1zhKJRjJexNNziTTAnyMc7p6mysD3PvHX/yX5h+4v8BZ5f1wUOdKRYhANEUhJylyJ/6J+DbJ/5NENgyHRAZw6GEy1bvb9OAlceRJnv8gSl7NKQNDgof5oWf5MNrpPtWe0pP7OCZwd3gAH3mDzNtF4ESOkbz+B/Ve3+4uAj8l6c2GYYY0FPJ7VhdYrktYNIi6uFEHVW7YAohSR6R0O9zcX0tPI89UcGYn88OlFCZDf1Owc3iQxDTHQCYohPkN0OlxhoI0QWzD4yO59lt6BHIgCt52F9KJraf64Y7V9Ot7zg4jMRJwl
kPeX9wyaBn+Lw7clDpyYWRUfWYo6QQrXEW/NHXNOjR5v86DPBGOIi/OMt9Uca70zIzSNVDInHBNObmC/1Wg+k3wfUb7csVlSP2UsAqZ8UcL5WwUeHfWsbG8/djOzY0dRSRb+O7ILWe4tgwF2IcJ0Ko4nlg6AThyOpPpDPOsCnxeCZ/TpO2Qbylw0hx+FzitpeUI/UMa0omYhW0Dx/7yuG6F4sgYdB0CswP4EJ+CYGWQodVNPOLk1HXZh4otFwWzr+qgIsZbOD204TVLAyLOc9kPl79U39kWYf9yHG2R2dU690v2F0cQMZCuuuuZ9z31RTG8sMV6dvqLw0dl3pgimh8bFn8pZF++rMJVW81bTt9eY/ejVWA353rz7GG9e/biql2Ubz7/Uj5chLfPXXhbzn6qD9+O8UVR9SrH6XBywgxgF4vl5uobq93X69F+NdrXz1xYHlFq96OL+ms/oj9Wvo5H7uv1C/YVm+P+bAXWpWtVZDZ1X8qg98pXvciUjxUilQLEpvCwdb9x7iQ8hQfKz+HLXl0+HWIa1ahz6nnDEVskjGgLSsdBpjNftQEMJ9frdQ7iJcNcY3oEGIfYRj3N/WKbie5N6XMZHWT1l36wOJdHfyVpGvMpqtKLgSN3ADZBngEW4WuLgurCKM/pQMQX4BaTduebX/wt/Tfcter53IRDB+YbtZhm+cJ2t2kCMAGE0zXTIwPAAUCQtgfJ/piap0VoM2cKhOTrVA3y8IVUAlkk1epIKZxP4h5GMoJForu5v5MhxSwHpubl2w9qwrn0HBgZUDXNM5EMcKo5PHCWQrLi9KCm3eNDQZOg7TD8gCHLB5Xw7PBemWz6sE/fah6I/iOKg6+J/bMq2rMiLuC2huNsTzWMNNWmZ5QhRmgpc1AVohje7T7KiEXbMGfAJ8xJYpC3qeC8YMrY4PDL1sQ0+c01+XzOEXjy6B6Sa5YrvMzaYQqBYld5loY/9dXTmnpQ+J611KY5x9FIAZ16V1YBqRPnLADELTrIA1RkyeBI4O96yzI+wuqf5SMQYzm6RczKurSfVCTqpig/Wbi+aILrQys9QpTtUKSrPSL6gAW0yZ9lqxO961dGLxdP23eRAtcXbsU8hjzf8ww8K3MXKqNmgLMX9s31+99iuyWOqa3BTMx8A5OoTf4DNv8sZK/1Sbluu/K4nge1OvYLO4ydrzjS1Lg6e3tx4uoFOEFZFvGo6IfCFSXBfWWqE1eC1dz6vjxmGwynIhhTqUrVdbY9ucrO+Cw/yRdRyfb0yNu2018eFi/jpb7wz+szhi+xyVZ8Pu6Tv//Fb5jf476m/mX176u/yur542Mx6BXMnA/ETU/Inebyp+bmQaYDD9O43uupzPb4+A5h9NsrfSvl7e+Z/jEJtaWNZRri+45dyGk53qYU4PDd+cIp+J/7x0k/HESyS/P2D9MHGLanOgA+PT21ceq5ud9x5setPNTj9lAx5G/DdsogpF0e6h2Hyf23nDtwc39o3bu5lloCh4I8dvpNjzW7kyefyMC31MUsHYopdhJ8OFU3J/ZahiJJ7+Jj++U1S/NkPjuphcgTSfwuVWbAFVLMtMX2Lu9TTHWR5ngmVe314TBwfc1PZ3xyjDHrbCi+FaxE6qsiuiBxBJtnJO9HYTWHsvJRHjrjqAO23pn5SSw9R3LDFRdORpvK8BG2zrOeTlk1aHvmpf+34FNslZfpvRxpw7ADNoywGOyPpve8GJwku71M9jW1jzIqWJ5iQitPk0q0KEptejQbE402lSgzKrzb6pubj+nkDdVrJs+zvjHrvJBODnCAogPSqKVppDOMyTUZkY4T1kVVfiVb3Qx8qi1chtQ7alVVNnJEdt0wxwKQaOB6tgE7zvrvjIQ5rWdKCiqRz01jFJDJWGPq3qL6m5IQqXXHooqecyi+t7n7VHJjuAy/Ui4zPhSucTk4gA5l1uhF+9nS4jNZ6cs5blCdD6v7JQDOleDdnrp3ePxuyHZNs9J15vPZvnm64nyoggLcWQPm4mufs0iMmEYP3hbl0h+vtWiMSzizKlbdR3zCLUKWM/1p7c+q4mlRXpY33jTOPm/u7Lyt+mw5X7vzogHhiJdhsLYL2RJ3JfBhJu7Z7CvPjtOzd0PEwrALV0c6VlMXHOfMUcIA8pA/hYO1iB4HAKU+9pmLTVsB8U5dXxld4+bIU1bKRsOXM4wiO5JZ0LnUVaT0qxvOFM05qiJ/1pP3lKt3lsKQnkOzomVrVg46wSmdLjVoqbxxs0Xtiv5jlsqwcr8S5FlU0swFBkXnL5JFmWQt5IEJclI3+IRuw6l/01MB06yH3/7FXzf/m/ttetBfJ+pNOp1DU94o3tUfnikwvT7IjLH0XKQxiRSGRJ7nafJa+v5YXR8aXgQj0lM/pmeq3YGQ7+VBt1Oa8vCocD6IYBqpwpcnKY8IfyUxKvtLcjsZpsvEC+IEyfRMT2LaifJox2fFPlxPUc7tBcur6dEjUw/id59PcnfQ/kqVlAj2OC3pcbzk3fTchi01ijsvHdkCTi/VpMr4nt6ZqRlxcQ3Uu2em5VhdsCn9UVE/dVwLjeInLB+Pci9PiXivDuh6u99OD45LT1dQOIrbq8OcORzmTYLahYzYGFP6aDHcXB9r+IJjvd3fbo/pFqZO8bvADYXD9Di8sB8RxO1TUYtZ3qkCBsb1QEn2rciuZGLmdhJgq5QeS8+0kBwVnIX5+yy4tOfuNuRLLsEwaYJFP1spzo1RhaxgyZmTt+gjwotPomQGyuZpXjsO8GFqwwR51Crl+hnHkbdWckU2yXBl/IK0u1zoc89Cspdx1vJoBqnDcPKcjH0Byi6OQhefrl44P1xGNoKAZVjgJraS8WF4QBc+XWUONseONm6i5gdl1LdNbUzcsDwrijWqivVqzmXDsVa2mR8zBMz5+Ml5e3phR83haYNtOLDXc0QOcYlZF1dyGjAiKMpW4jKG1obSSnujyeGSsk16OpOdEfGjUrlPaluJBmTSsDP1GPKw9bXtVOHllBlgWhEtHx7XK+UjSQ/JJbZ2PdZtPivNUTzVJ8YOX+PbOGWCOSjLyb+kkhlbDUW6a/xsXJqKI7qi+bMRF0yVw7nfx4wdpkZvJEz1+XLb+qwMG38O15PH6oz9BQizWM8DZj/b2E2RFZ3v5Sk4pXOxA5iW7bDY40ZhCbpt1oXQ2x5XwrMxxuYzH/PjD1SimnwJzC+XQBr2aLCXaslnRdA9mjf6fhmrUiMkD8z3sdEymj7zq7pcDKprCyzBE3/E3qtFU1SUKzs/9ktZcJv82JdZ/P/b+7Yfy7Lzrlp73de+nr33udW5VNU5XefUpburuy6nemb6Mh6Px2N7PBPG41HiKHZigp2LYhGcECELghQbFFkRIEcGv/CAEE/hNQIJ4RcShYcQIjkiCFCEAggkECBekC1P8/2+tU/18AfwgkrT3dOXU1Wn9t7ru/4u+kDoA2/3s1BYO/N2lA0m5oHI8lHmPVAF4MwWaF0rV9pyTM2cqvG0Fn73HiRSWwgWJvHaw1VcM3uFcoW+WwzOTF/WqlKZ5bUkJQpJH9OxT+nFbNmcc7S2izwH
Ew3PgmIGKSMRITdGNTNLPKfY+pU2LZNC2gyofpQgeDpa9v2x4HT1vPXO9CRgwlnBJtC4YTDXQdkB4zHDlAAJ9aQSdXtRhtQagIDLinJXqYf1Sq2sHkyRU0KPVQccrwKTXe0mIGQ5duYrZprqB8wEUUOLLz7/V/JKf0t8VvwEY3k4NK95gsPqNRjOrM7Ein3Tt4ZUDJmJmy2KSSdiySJ9sajmDRdr/UaAC4c9DMejCye7EBdJVxszxrqTEe4qZcy8ltG2pssCW6/Pyy6uXnfRdWu3+SwG881Vx36iDgCN5sBCFIQqdEvJZ9DnfdhgaLcQGozE1tHcZlsXb/2tNsyhZIT7stPow9cfbgYcko3t1DktpIGQt6K4/Y1p8lacpBCABZViyD1rdPDB5zVbsTHGREWu2J64xpeRp3Fbriy2Y4oiEfMUMXiwGDN5zU0mxRSfYsGWUIHK0Aw6wuV4kUObhx5dKjpgnYMJD5gWgX0w2QuT3TNYdaOCoYXgoH2Y9mfZauqzVE/NktUuFNPXE5gqMWcdAxXmXwQKGbLPT2Ue/Y1h6QcdIGzjTEBQx35e4A2GkIiSmkJh6bjn4WwBzF8eGJJHv1bGesBTqb5ahTlqQSrhqbjWUAFMmSDC2oRUho2cH8tyHzNKatqvmetuM3xHDjWyxbgLvI3oPoHaGyeLv20zq6k7t0WVQT1ZRaoITrHdNesDVd6hnvTIuEN39+hBlb4Ea20FsT4KqPfaWiZja0vZ9loZMn+afm74uMrLpspH2r4aZlSGJ7I3VHao29zsp5dSUXP8YJWZ0h/DwDepzK5whUr7njLUQE7HBgRagx6CgvCmOhEn9fBg98HjI+enxcNBtgsgAqWheVbOCnPP21lIMScqM7MHOKzymAdBZAAEN0UpqMSFc/IYg6rRxIUnvf4d1X9YXlTrpuppXVfYc6QJEwd0SaEuG+eDnqrOFm0fwwIqxxNw/R1nPpkwJzYKJiEhq8+/n79s93f7v+fyOlN981WdlqZ+6O4qM6U8pcbuIDSDMDT2Dt1wqyv6mm40AsAioGZV4pPPf09+R/+q+KT4jPia+K0bRz0bcXOYGg/bLVommn53Gjnbxr1lFU3Wv+szNbOMQyoMlxZnnefm/WiBt0JMYFg611zryMyMLsJt52cuwc+eR5R8xP/0je16VC6SnokYy24ASPATZuR9t6LvFNOfim19TBVksoo7/G5NR5Hk6vpyeRWdj7fwwrihi4IYXSP/1Aw4vkRf0xd66xebbnfacdkBqtTX9xOqnofR1muzvASmN+4XTMdSpdb8nF4cl4OHGzg/LLZlKRr0q9UpPthSbdxZhQ0RIS+iMaj8Fp3hAqPw4+wOJdXgOt6sjSoVgA9iYQUofY7tOBO68AumORQNKH06u4FYlIDQjLRUASQ2T1M6/u7YDRkFBE8W64AmZpaVpGJB00EdF7kPfrr3Cr2iyEeFt6bnox4QPZrO8qHG8BsQe/Y4p7dD7TKDf1hgnf6lUTAApYh07iyLnYpKiJkw+7oeNoDzxGzad5izag/Zh/t+l8rQMsEUjN6YLbImEWtp+hRHUolUqz54g3XNBXVemC+gLqViNMkFMF9hqGShge9JWXntZ0EvkApze5HBnxNqHBoOfzpAMbFOp77x8p/k1LNSZZi83G4yjBsodxtIwoFM1zPwJ0DJGtTETNrhVbErB/dX5ZkyoK/QyywXC7nMof9x6PZClo5e8bodyqGiRrYx49p+LOtXg/pReFMtvzh+2aZVgoG2TJXo9Xd3fT6lu7CrgjYut6JX6xnde0qm00ROMrrQZjiRYVYy4QKWmgmLiyR1In1mVufFmkv/QlJF3c6C+ejLITsyWdBBFYmuZOlUIRojqXQD2cJ9pi2PTFIqPZDmKDH3shmeq/GsqJaJ+RguJhyDKQal9WbXnIt5TpEeyxKrC52ZO4An1EL5FOAkI/+n4x2Her342ewuUgKcAzHeoXtpfXa39WPnXQ756uSXogpmoHthWAw74awmx6PiZOFHVAzLs+ywyjlm7fzu838q/7b+szt/tPOfRU+ccMxCERS1w7pJ14OIqk/mengDU+Ao1bVl9He8F1tEaQgWuOmAAcwgvezKEXG1iuggjg9gu7FSKMctiO8sbsQWtwxxBhCsbZxHUpBaPZWY9W0GW5nC9QA/PyRTPheHl+vtyo9XhPTyhmXGEK3MVsUHYRGf4DKWR7qJ4CO82TMImbF244Cp46ptoNtPNcD+hEoDkydwzMtkpIQb41WWo41KykTvu4mEmsJRanepTrjcb+7Nshqk/XCdpYnVMxjHCl/bQSb0hrk9dKo9+4Oy8oHPNdYrrXWpdxaO5VqnDVX9oq5PqsNxeIkqCszSVGMLgx4WUseUPG2i87S5KvI7VhWUnIxbTrRzRa9N94Nd5a37YEJhJD2QflC4bKSpyTC/7+hzzeregpmllj5zDzAFUFIli4dRvPo2VgeY1NGTHkymsrQvf215b7Fn2PK6SCsMo9wwdQwzUtYVxSDdf+eRnDbZfrF4q/3zgW0P5YHXWZZREAltktOFq4e5zMrczDJb9HuuvdM+fr9+pnf/fnvYNsjOdIQVYkBO0cRw2KPTdeigDVEPfRaqkZ9Woh7NTZiWU54tJdU6U22RjnwIwDwBYKCsBoby1WF2kIZyCE80o6UpzHE/vxjsjX9sl4J49iibTQczR0Xj+8X8hz+fNraV5Ruz3Xd7vSJ763j+CXv9YPfPFWmf3lJh9i8rs1dMqlY8p/6zf8/sUxDX2l61/pm728JqfUeIv/P8e/Lv6V8QX2EOwVbWOxbxy6Ijww3ZSeoFvNd09no3K2SqwZfwQ1k/BHgGQJ4NKKPA3Reik1J5GhFuUSCw7eTGtxv8tpDUwixgSIBfI8rnhik0ZfEsthjoYHZMv9v6Ry3KrvIYtnvd76Lj5hqQnIvr9TW7h+Iw3/htd/Or1VXUJk2u19G/k9uijutgIhx3cYMsXFKlRG9wvbhacFOx6TjrzKtjK8EOVRThQOednW9HZ48jruTXzaQ3Smt0BDYRrY58OeDk6ITVTTl0yS7GyCHXlEGpXUR1jVgqIgUdFCuwuEXUG0nhCe/3RSubxA+oVEyzAJVJeihV7lvXF5WWcpelIkKVU7PvMYChjzzo+8DK3hY0b/DT6HH09rjXo46mcIENkPxeel6uK+0ziIvnrjWZZY5BiSFJEbLaUMJVrKIKzyk6XXKiqoIJ4pCWgj9KNwwCVkQyOZTXaxjZs3AFVK2Vs9Q0TdIgCpubSYBwFLADFDUGDQtiQABSf5FizsCfOb8fnascd0mptNjfJ8HCOfPQBleJ+b3kSfno3v2xrdSJ3qcD3gSIuygxFLtJ7jTYfPQma1NOPaXG+asUE9/L3OywvqJCoz48PH1aHDwpT9jw2PrGTdNs065eMyOdFpC7UdZWvkk1lNqTognB4JqX8/xTbf6RFIj7JKmHFwejj1LuzPpZP1VLUw9CUQKcmPVgkxvalcs+MmhW3itb6GfjX8iPps9c2B/YM099uDym63sweOpPjqQbaTOkYoSuo/Qlrin1ehO
6+mKvD+BXha4mgyf30jlzYN8N2V0xHFVvN0WVtCETNkARB3cibQM9J3u+lxrxIA0UB37l+T+UC/1Z8UX4dyb9OKqOnG4T0SRFZ3bZRghtv0P38ZqPzyp7Awy4vu93A292KEq4l+h37fiQnbvjFDyK/MZgg0yYMJ8Ikv/ry3iIrrZmbjjwvHx7JqLEHifHNkYgHnwMPkQv2hqNMBiH8TdXETWDviC54uXffbG1iWNETTQXYARwFOjcNhUXSPcf0nDadjRYiF9GTHDk3G4hmCwvzlPn1fVWm2Jlks+N910i2xEEj6geZjMILaOtGyU3XlUY2d0f7as89dkAuzek3UqwICbUoCZ09JKo3Ca77QaPLs1BFqjGPy0KJ0dUCMM0O2F1fCUh3BtVOzt7bV6wBZEDb8UmSQ70Quha0pmS98sasUlEqHbCUm6aZ6aONWJ4rqvZWZcHFZKpszzqdYE+iQvQqKOwwDvCAjgZnhLQi+vMsEKVkY0H3Mun9M5zI6+oINVvqvunbXVqRu2soioen1e3UFbXEFfXoBfhmwcOEX0Oaw3pni5nFJ4c5Giw/jnwEJF7RzWt1x6Y6NBPf1H4tUx309f9sFY9FrCIrksgVh44u++Gmcz6ggJIRX2AKlwt5UKluemtj8ayYPAt1f5McQLVKUC54lApCnV0+cpRz+4v5CCbU8RKSrOXnBULKyberENDcYKxUErxiKoI1006SDNMfUSBAULCVmiw4GX0D3vbKROfBwuLNpaRB8tK2VEvrX3RZMNCezcLfV1V0AqVbBjhLMWWNvUDuztMR4AvKJmawh7Wd06mr1jtdhLx2ef/WD7T7+/84c5/Y98gZNUzBoHccF06U5/trgWy5JSeMfXB2Y3nqCPhXEZNpRer8U5MaevveLWdDW577mfCxqQZCYk3W38GJkdBpWRbftzszZarWCqbjozYoZt5D4SFuBzcaORGMRi8uUEEJMtrN24HLk9RIOeazlXZA7p65idptsxS5+RQCozpKK1pav2wAz4eFtQbAZkRCuoTbYSpUkNZseQSHZfcY2WcY3qGozSga+89lbmTfuWrbPKsHlJCrSAijwylnGycWJnJ1FtnqZPehYm1KwBTMaYXsWxSQ9R0IY+L0joRU3va+2t0B6kfDD7JbINs6POiKfF4Bpm1vihCrdRJ1fv4+M1eWevTLN8P4V7l/owz/d3q+lHvJ2wZMo03W5xpsV+5Ner5QOnsjbPXH+zr1h4E3+rxYdnshXBa+X2jetRrL8tgz9/6SDOHEWHqUmenWTqgLrgYZqv2LMhBSPb6d/PTMBpyr40nWCztas+nZdgduks6+Y56jFpgJAZDIuodZG9kCnluP/nweDz5enp1rMyCvpVe8u9Favp60ivUmB554VLMvCZHPju8ZzEj/4vP/6X8B/pzFP+uxOfixvVG4k+cimHkp9rO7YkJ5AYW7DwNL+Tivr7avFgAdqH8Kr44QlVmXJdy7Vno1c1ruc5EHnkSiR6sNPbCDevqmh1gLhfbY9Npo1yeiavNjZjY+ZZNij6Pu8eo3rCdaMdWMXZ9Fy9srSLBbjDsd3quehOR6WsWQUTr22f1FPz2uhQL86JG55ETlpZLs1r07Ypn8p0HDlAwEYqykd+uqJ04p4ZByNJQCKUTQJ16gXBHqabFDt9IEJOgIojIh7VXAjUeZpZAkuuxUWaCXr4qc5gu6SyhYgZg7p5MMpR5CJvjHO4qbK4OkBTlFwVB6AIYaGS44+y+TPFJgV+wmGjTccp00kq5r9AOKfbYYsP5XMrTKllRxqATmbWoxGQvGVNJ6XFYjcwhLUj5iXVEP/gWJQGYToUE0ogRKgIaDCrIHCm0qYoGVDg6k+JjicJalL5axTqyhWetPywYx3JAf/k7rqfXaXhqLNRexlldPVmNTpYZJYlHhTr3+z/1JpRh5CN/lvF0Cip/tT8vIMive2fVY7c3yvb67Z66DCHoI3skxXw2kyOXj2Azu/JYkwKQuK7ckR2HypvH7fVhBfZrcq+aLpAwvZMZ/cjpbowzsI417xWwE1gX97O+BbzBJcUA+qsiFAl1fLNETjFuoQuUpV7X1mc6M0/SdpkZ1Vwsh+em30wcXT0bgjXULiiRQRUAABsAf0Rqv+PoDhdBV6qnITOg2LsHkD9Mw2E9D7oBFf1fpRvM4x2MqgJ4DacmumZbJQ7ksZ5Eb+Pv0Yu/yB4zm5t2kFc8qCdRuw3nolvtxHaz3VpZsEUw81S2u58p1a4LGBd3djMoT9toQjXgMrU713F60xFKr9jSt+v5uu4RtKtn2/6uy4pobYGuUM9EBGR04+nlIvab8Se1v1eXdrmKYHJr2BaYh82RY76J7NKOVxtdjxfMTWdCzTMI8L8wqhkkn6yDhJuCtREqHIxXUdQcyppZmBmeo9GxKsGjELlkuBSdBuCYqczfs5B6xGsGdYvCjO6ozkJ7YnRFDz22k4WEwxv9CM7TYUph1U2nV490r9+MwMpMGkMZcDfNlE4T70o5p2xnMfFoMP2QnlUzI5WDvnyGWhUwfvQqRR39dlOIZIG/SgEgYA9fM/0LyqBwhYE9uhOpluwOn0q9qPtL/W5KT3ibV7NqVU7L0pQgwLPhXkyao+BPTiq5mDvbL6kVlVPvEzl3002x5/2wcF9G6GLjKgotQEhq4fKwufvpvdm63JW1g4eCDxO7i7ecTPZmI7N3nO3tVvkyobOwn13kr7bFxbhWa2dtbl3d84Uc+PSdvR8tx3N6ps0hL/d18k01CXf8vZkfItZpDx8Iw1BW6tSYTUFJUBmu6Hj6LZPCzYq69G/4k1SlHrse2I9TTdRIpEDl+ub0QLvd2qVFU9Ur6wzvjD/6/N/IpX5r53+LnjiLlVyUHFltbgQKkCq2HMWuokviNqcb48S9Jx57oIci6bmzVHthq8AOkmAmdhDJ7VADhyfyKT8k4N39tx2xIL/x5KjTE2JWJzeRN+UeJqLIuxYDTd6DbJ12X5jdmIvBNI5TO8YCfwJ5zd9pVDkUtm9Z3+jifNNs0H6tGXjFxNE+ABJ7AuXhpXST9e5YqRx1G5XciaAChQqOjB5sZBAIbrP1MN8ielpslKY1cuNSDnZwacReJoPUH7oo1oLF0lS04EbhlNJjEdp0SeHYFLXHMSsam/AKUdUUS/3C216dskI025g5l0HQtpQPzaDEAfRQzNKyDYIJFQYND887uS+jTs5lAUAg+TwEg6VrFnJquITrZS2WsNlvR+VC6BGJzmQ7OFHfM2o3b5QvqcmRqljtVp/W5T2RDIo75bPsU2Uxc/ZqGT72QDlLSSazj3z/pfKZkpUI0l2ce+FDL1BhxSMl2Zhe4V4b3XnJ7WXN3ezMJx/V0wGEeTPfhr69Sl9qPlIqqgAx6sJZeDjqD4s7fT+krrSh+4P0EtKaei8bkryokoMCwjDSjJC4QMCk8FNK1ARZ+un+u3QibKuz2ePgJpDJsNXo/ZN9W0nqyGD/S8eO6o/RnfvDd43cd0FQwf8WNvZZoDoTSXKZMhXpgz+xRZ30gFHsSZfaQQLuM53ZphnluoxeKf/8+e/KH+
jfFH8YNVZkBNRvDVI6S5RlZ1LPNOVznkGWLFq/tDymGFhW7efZJ2o5Vsi0C/6TbRdRFfZy2Y/inAZ/urFuxVYVGW+4xRia/s3MBO1Z/wZxYSLsuVNZgG8incGo13d92c0pGPC/XoGHBHD/JWMHV9ZsV7xFlychugRAiY3wRXRasafjbURMXEhWm8HF5oVWytME2Y6/xuXT7fo1Tl1YTmHNuJHry6iiQqluabocbBcGPGx848s1hY9FR02F5MSAidmUPiEleHUZoc9iu2CBDuL1i0r7aXIO+QVeQHdyvvwj+e3dpyGHYrLph+DpnhcJiHVSZz3Fgh+VGGWyojauwF4RsiUZFMZAuocGgYP1odEQZWEJBPQzgpX/KFeCR4R5wiTZw2xWAF1kwWKAkp2IJ9amkAfxnUc90E6hSFdRrwhHWLPFuUniKpaFB0CKoMRSV6loeNgLWBslX6dKdlkVo5SyYFWkgbK+MNha0iuZUapY9JvSJDuhcNgABA1SBlAupVhkOzVmz2W54lkPKNV0kuYz+2yVKdG3dzIZKCoC4qeMq/ELfOrp4jno71Um7Y0U5OiDmJtkZZ8mIrpZZ6KwwudW5VnQYGMkrFyO6QawDVP91YNDM2numeMvGQpfrhhSpv4y1Ku0EuV8L3vVybTWpXO1SukbgP9clfSTEVU0VIyLHKsWA6Klrug6lzxuhsUqBjLoEyBfRnVqUL1gZ1p8yv1opayhOFD1D+tJNknMnrx7L8sXzswhsyNHx2t9abVn5F+pqsT08uloPMzpomrr+sWpoYOuGhf0OnubIrfgB4EOSzNMxL5bpapoqUDWn3lzzzS69pL1SKuhqk59xqM1JYblKB95XxqhU1mIAsayJojGwasE7C+0C/TVM9MYu7R6NmTDXrpPucMT4FIKhVR7Jx8vq9eT0aQXJpjcjxiqb9TwGL6/IVXTBjdVB4drBLNQEGOiCRnQcspzwRZUK0WZUEUGI1IXzLlZTpPdKZg5Ai7BKp3LqjJ2QwlL+Zbqjq88/3fyu/rtnf9Od/gQvKfOaalTiokKg6icub9ko6ZOYy1W3FiSXJ9HQAjzp3new+ykrbFdx//gkl3Mk/7DBxxPeQMLNMbNP3/Y/Jnb9i4OLldLHkJvcbftoEhY//sJB+V1G/sCNoAy8eNM5yWAqh/Mkg6JtrraPAGs+vqwo3Ju2eidswB9aT1gf7uko152s+zhQHy/orTVeDNpFQBKdBP0fKTXpiroqCaF6iuIJKEYBnSWXepxNFmQQYRZtS50mdPdBqUYmqWgQQEIxpKKcSSILpaPsWAYGtsAK0BB66MgKQ7saeA+jHwQXO1dXivT8+rQ5HCUD4CS9Ip942dpWxwBeMrYVpeu9VNjDlaaPZfqEs5oSsShpJjthuRukqIAtw+dc5QnAn/1gt77B/+WIoSElQgevUJfwKcaIhG/I9Mka4K/ewCMVhJ6Jp/v21PV1ww+Domui4QOqwdG1aNwLsHLUElhdI8ibpPoyuYhSSlLh2hcSTHEwvcC5Rg1QFqz4j96VYRPxTxypnTSo10f9Qw96HfcfqLGTn0sTxvresnPf/b4e4Nf0ipdqItCTkLPngx+3MMRLrENtWEOKmw9JXMKWXVSDZrdWWVm7HkwTvc+XXyyb/InpqKLmoTip47fKUv9BhUUqpbiQcja7NjeU7pHZ5GuwoTDrV+06i2XH1IHdSlkRY2NR43xW89/X/60frbzX1BjUAa0jHi0vINgnDsvOnm7wi3qOesQnG9LkMEWf94f9CM+lLUM1/Sqy+g5seHSox8VF1fx/xHFFR9WtL5Y2jLkSVGxsnVJXa1fyJyciUhvjNj+uC+9mRFjWzS4YMmleA7wRtZR+ZA3OdvmgOo9L0wYVDA1SPlp9wAQJWWejpL87j7UrsdAOlIbl7WFHIpatmWG1pYymGUqUckmxxSlAF10mJb2oBmaFRnjjAYllYMDi/O0oDrX7PPePUCbO+1lTU2Hp8/1fZ7s4imzGYVVdJRUONLfGSYgl0Or3/ZJj0rChDV0MiOdzDRl9UzR3Tx0YoEufCTXQe1aX+mjRn0tQbEOvQ+bwlsR3Z+e5xdVIUSoe+88AQGRQvQyfU8/mLZ3V5/R9UD5JqnlAaP9WCQM2QioBTVKzEgehjzUiaYg3djhR88/W74/3/vpLH3kKSS3aZ7jHC9KkXs6eAU1wD3jHqbiUf7LVe/X/GuH/ff3DyA0oM006Fk6evld56r+YjX4MZ00VSLbnIrf4CKRqqLzUylMaU5cstJq6E78y7vhgQ13B29Dokd8/fmfyAv9KSER+02krW/6D2JlOtj+tovGsf86x7qu0/fsb8vmIlZvU1SWw/kN573PhghcvF0yc4OLyYjtSy6u2Ua8QwfEYek5lnSrx1ydsnvU5QtsDoMBBbjsYL1zgYy5KBS6opVwJPptOj1QptBvLq5XNw4wXQ37NCKn6Y97Md+tks8PqaZIG+/oekPfFtUdRSsDSG1fT1O6rFJkGJTK3MPzniomBT5aJeKm3EhK2gChzkceSB1APyvmLBhmSZ+ykCAyARU2WVZ72wyrCmYOqZ6BzqQjhFZROSa85VU7JqrMQQOaFGfKM+MDnkBRg84Wrcu8Ouv5MqG47SrFrn30hGMJNaPvhMJrqV/zg8KulnCPSE9yYI9hQO4KU9LRpUMwXBZn+8zA+3rdfn5viN2axiK/VA8OLHS20v5Q5+9Mxl/Jsr2E2t7KpRnUb0NAFUox9uHZ68XJ/lSoUNFznWqVzUPzlxaTV1ygLq/o5WnmTLRZNao9cHdfG41fWiZy1MK1qja/DFETSi1e1SqzUbs7AdYeWh5ZQlHmSE2Ox69bNbXZ2gS/l/WPmiGFGUqiSTlCNqazWGgLjoFzstz1Izzf333+p3KmvyCOEYsHQyZNwRIJrGyWvkSJAiDYOrZjS6jbcPSdCfN/OWWeYhmwYcQJBfVSYHvONK0rXiyvV8uWEfcoVKbM+uZA2rLM1XLBNg+nYtV5NlAJc8IANIaAMbqGSUbxWb+6WU7j0QUAB/Oa68grRd1zuZ3MxMrrakA1ksEbAnyOmtM1xGpWV4sVIHKAxWKusrri310NoGeHnd/11XouOhErjGXPk1/JptHlj6dZW+F/KIcXVL/L3thmFT8zOsfoTmZ1ZlmYdsw0O8eoK3RARdcmJZ29Y54JWDGaOaNJ+4lIBT1aQRU6E9Nli823pqA3UrwKBvYdGv7YQmvecFv2XnMlEB+F86uiVyOwRzOgqLngOIcIj92UiKwl+jB2K2MVS6q7sT+GAPUYGxGn36czkPSCouTUYydMXn2zjK2wolSyHCZVv4ZUlTrJf6a+sNVq83jc+rpwOfy52SCINQcKuDNZSb/VlDp6i+SoLj9e5PN1kmW73v947t94Itczdxg8wHysNp/sZRpKnrnXckDtkJ5AEwdwcq/M2eR61PS+9FqRzuvXUAw5mdMB6Vfj7L55uem996R+pWxz08sURFs67zp8A3gxeFvMlWK/eirJYHTh2OMIOCDqFjW8wxMBzvcfP/8evfGv0LfcE/d3du7MNad9L
hViPF136qybD++SDYs+MqJkabpXdbPGhP3EEIh5JCA679Vk+wM9RRKR3nSaVku7iLpNm23ZvgVlYMG2HXTgX+LGgOI+dxPdMPDGASyiy/bEopDRqxHAl/g/21LPwLTAm9VA/JhIlaS/kisIN1BCuTwc3BAPo4jb4kUbkPyHJFUpHrrZbmqPgo+EJ0GlLjV8qbC7qsph/mBq3EuqfMz+sC9DMlJ+SvfBO4wYQq8SszR7mAw8u6mWVQK9Mr0ydQkIOFBcGhZ7QGmwyDr8/tivzwABCuwEUEMml7uuOPUzuNnQvc07HVnU0ZFjmJTo9sUEJCYNTgvYLKCxWDTunrpFoMAzyCo61mGULLyaUI0EvrQI4oPv5LtQpcxgheILyDcovaGERw+8uKSHKthcBewoqN0NVPf35MiYQuefvTzy9VgVnxq+92QY/qrcf9S7finx7xaDYzHYK5bz8KiYvlz8lUHz3uvavmb9pYEmbSX2yuITcuploE61gV5Lrt09VcJjUcboguuP/sjIXGqW0QKTGN8TMG5QXDB5FuTAVVrkWlPCsrZMLCUjytnUKjtvMw8XQnknmiKlMwz11cn88oEcz4v6J6lmHbSyrnH06YpOy/Ju7fdNWum+KSv35cxU3k+drpPGigysQZP9QQ2353Qsxf5Rsz9kq6VrIPeKhvqCwc7PPf8D+b/U5Y7ZqXfGO4ud453znX+x8693/tPO/9j5PgXEWkzFvliKtbgnNlQsvS7e3Nk53Lwi0EBQhZNAPWC9OaTi/GpzIZ/xAmA5vB5aqtxYsvzqkqrcgWFBN8uWBBT/gYXsmwUywH25vrJzNqy/n1xfra6oowfRFltpOpEwJU6iNbG96qZ6QEXdmBeLZ6LTW+emZZ60dLLQxgy7w8m46jPGghq77A/psM/FIT7uyi5A22LIiLEDFHoNqx3g1c31koeVpWhaiBlw+gXOxAyv15Y+yeraRiGKa9OnazC0g/615SV7Q6+4T1em2TCSmr9AiTrPJmto/uwd/BxV+fpLOT0uv1F/8F/vU/+ozpYN4JPJPzvb7LVLN0iycjJU4tfkl66mp9a9PpgEe7pZnCrt6blJPq/ocbBl5cWEavLjvfEd6vmHTuXUGtrC9ZNAndfnS+eWiLH9wrCwmxjShZbzbOlHyHz+8llhF358ULr2IMucG1J9uXQeauoVHCwTwacoVHL9kR++K0ZZOoYdyY/gr5fHZs/QO/6CpMc5uN5odt1CGIUq1s/Rl9P0kr4SOtj13uZMpnf+Vm/2pcaoXOn/2M7NPWXMpFAN9fe6OIBQa8irs0FIH4L3dk89VLPZN3rvPvxTTMv04V9YrX/90T+6ntQfn3zw3U9cnR8Uo/Dut6/eppP+m40xr1ZVvT771b+biM1k8uQlf+/4o4d1Qxk1yYAs79ct2Jyr1aSdzUJYurEMY2DDINMj/+il3hiBaRiGsKjnvbiiHBzcS/LElKeDHtVzInXZKz+SyMf0GeeuHgbfa77SJgOjSu8Lam5SKf+6qKhRdI95jPqL3g2cvaaK3f6gPC7yq484HUbi6xiPruvHjw9++MfvFiKxB1SGnNmjqmfwTLyxhlTDD96++rb0afib70FUblEWf/mwPbg4+/jea9lPfjPra/1N0XzhC9h5JtkHX36n9ZTOZ/b1+Xx99DNPTv/GLd/zlu95y/e85Xv+P+d73saZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnLmNM7dx5jbO3MaZ2zhzG2du48xtnPn/O87Aj3tHf0N/Y0fu7PQOegf6G9//Gn7u7Pwf/gBC4AB42mNgZGBgAOLrh+yy4vltvjLIczCAwLVpRt8hdNd2Bob/daytbCAuBwMTiAIAS7ELYwAAeNpjYGRgYP31PwpI/mYAAtZWBkYGVOAOAGrhBCwAAAB42m3PKwsCQRSG4W+W3W4x223aTRq8gMltWmwaDCJaFkHQaLIY7IIY7dsEwWITi/4Ag128vMMOWBx4+M6Zw8ww3l0lsbyN5B8tk0YNLfoTLtQdLDBAFn2sUECUzHQjy8hhjQl7FTIF7jFDNJBhf4cHdZ28kk20UaXfk0uMELpz9s0iswPZI7fkFDH12Z+r687/wd9eUvD8pXljzKc/TkyfJ8Mk7SyYSV80s1AveNpjYGDQgUCmQ6wu7HGc87jf8F7h3yeUJyolXiE5Q3qVrJP8NCU5lR61OxoxWlk6cron9KsMFxnfMa0w/2JlZb3HzsIxznmS6xv3OZ4LvKf4fPC7E1ARpBOyJSwufEukStS2mKy4sISupH0pn9LVMqOy+/DCOQA/+TbcAHjaY2BkYGBwZ3JlEGMAASYgZmQAiTkw6IEEABOHARgAeNqNUV1LAkEUPaMWSmASEdFDLL63mrkmBkEEPhTEkn08t36ktLm2jtZTv6Mf0lO/oOwX9Fd66szstIkWxGWWM+eee++5swCW8YIkRCoDYJMnwgIrvEU4gSyKBidRxb7BKeQxNngBD3gyeBF5kTM4jVVRMjiDqqgbvISKeDT4FWvi2eA3FMXE4Amy4tPgd+QS6Qh/JLGRWEeJnrbpxoKLEAG/I3jw0UMTV7hEm+HyBBiQbeOU55oan9mQlTbrVezhHMfUnxDNV23N1M0rrBnFBW8hhvQRoM/s9CQXDTJF7fyH7VIp6Vrpx3GFzd2qzN6y642eJ9Ehqzb0uL0Nh6eCMnYZzj+8//ZOB0SediyZs3BIrqd1FlEfrT/et0u95Jwhaigw7nXYZEI9f1prkwnpo6A9etxCbSrjTc+oVu94pCda2CGncg57l/kGNSKHzPcfb1HdoVbtJemgqfsPOG3EWz3u3sAdGbVNyAr/CzM7bOd42m3Mx04CYRhG4fOCgAURvQa7qP/8zFAsi4k6NgTsla1KYgwbF168Ccp87jibZ3fIMOqnzyvjOgZllCXLIksss8Iqa6yzQYVNttjGEeCpEhJRo06DJjvsssc+hxyR/D5OOOWMc1pc0KZDl0uuuOaGW+6454FHnnjmhZ4mlFNeBU1qStOaUVGzKmlOZc1rIf/28T14D1J84euz71zs/vTO/RuY3qyaoRmZNbNuNsymGaf6JDVKjZKDIVmuNI4AAAAAAVpw2jcAAA==) format('woff'); - font-weight: normal; - font-style: normal; - -} - -.weepeople { - font-family: "WeePeople"; -} \ No newline at end of file diff --git a/spaces/mfkeles/Track-Anything/tracker/util/mask_mapper.py b/spaces/mfkeles/Track-Anything/tracker/util/mask_mapper.py deleted file mode 100644 index 815807bf4b98c6674ab3ede55517f38a29bb59fb..0000000000000000000000000000000000000000 --- 
a/spaces/mfkeles/Track-Anything/tracker/util/mask_mapper.py +++ /dev/null @@ -1,78 +0,0 @@ -import numpy as np -import torch - -def all_to_onehot(masks, labels): - if len(masks.shape) == 3: - Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8) - else: - Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8) - - for ni, l in enumerate(labels): - Ms[ni] = (masks == l).astype(np.uint8) - - return Ms - -class MaskMapper: - """ - This class is used to convert a indexed-mask to a one-hot representation. - It also takes care of remapping non-continuous indices - It has two modes: - 1. Default. Only masks with new indices are supposed to go into the remapper. - This is also the case for YouTubeVOS. - i.e., regions with index 0 are not "background", but "don't care". - - 2. Exhaustive. Regions with index 0 are considered "background". - Every single pixel is considered to be "labeled". - """ - def __init__(self): - self.labels = [] - self.remappings = {} - - # if coherent, no mapping is required - self.coherent = True - - def clear_labels(self): - self.labels = [] - self.remappings = {} - # if coherent, no mapping is required - self.coherent = True - - def convert_mask(self, mask, exhaustive=False): - # mask is in index representation, H*W numpy array - labels = np.unique(mask).astype(np.uint8) - labels = labels[labels!=0].tolist() - - new_labels = list(set(labels) - set(self.labels)) - if not exhaustive: - assert len(new_labels) == len(labels), 'Old labels found in non-exhaustive mode' - - # add new remappings - for i, l in enumerate(new_labels): - self.remappings[l] = i+len(self.labels)+1 - if self.coherent and i+len(self.labels)+1 != l: - self.coherent = False - - if exhaustive: - new_mapped_labels = range(1, len(self.labels)+len(new_labels)+1) - else: - if self.coherent: - new_mapped_labels = new_labels - else: - new_mapped_labels = range(len(self.labels)+1, len(self.labels)+len(new_labels)+1) - - self.labels.extend(new_labels) - mask = torch.from_numpy(all_to_onehot(mask, self.labels)).float() - - # mask num_objects*H*W - return mask, new_mapped_labels - - - def remap_index_mask(self, mask): - # mask is in index representation, H*W numpy array - if self.coherent: - return mask - - new_mask = np.zeros_like(mask) - for l, i in self.remappings.items(): - new_mask[mask==i] = l - return new_mask \ No newline at end of file diff --git a/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/calc_inception.py b/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/calc_inception.py deleted file mode 100644 index 5daa531475c377a73ffa256bdf84bb662e144215..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/models/stylegan2/stylegan2-pytorch/calc_inception.py +++ /dev/null @@ -1,116 +0,0 @@ -import argparse -import pickle -import os - -import torch -from torch import nn -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torchvision import transforms -from torchvision.models import inception_v3, Inception3 -import numpy as np -from tqdm import tqdm - -from inception import InceptionV3 -from dataset import MultiResolutionDataset - - -class Inception3Feature(Inception3): - def forward(self, x): - if x.shape[2] != 299 or x.shape[3] != 299: - x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True) - - x = self.Conv2d_1a_3x3(x) # 299 x 299 x 3 - x = self.Conv2d_2a_3x3(x) # 149 x 149 x 32 - x = self.Conv2d_2b_3x3(x) # 147 x 147 x 32 - x = F.max_pool2d(x, 
kernel_size=3, stride=2) # 147 x 147 x 64 - - x = self.Conv2d_3b_1x1(x) # 73 x 73 x 64 - x = self.Conv2d_4a_3x3(x) # 73 x 73 x 80 - x = F.max_pool2d(x, kernel_size=3, stride=2) # 71 x 71 x 192 - - x = self.Mixed_5b(x) # 35 x 35 x 192 - x = self.Mixed_5c(x) # 35 x 35 x 256 - x = self.Mixed_5d(x) # 35 x 35 x 288 - - x = self.Mixed_6a(x) # 35 x 35 x 288 - x = self.Mixed_6b(x) # 17 x 17 x 768 - x = self.Mixed_6c(x) # 17 x 17 x 768 - x = self.Mixed_6d(x) # 17 x 17 x 768 - x = self.Mixed_6e(x) # 17 x 17 x 768 - - x = self.Mixed_7a(x) # 17 x 17 x 768 - x = self.Mixed_7b(x) # 8 x 8 x 1280 - x = self.Mixed_7c(x) # 8 x 8 x 2048 - - x = F.avg_pool2d(x, kernel_size=8) # 8 x 8 x 2048 - - return x.view(x.shape[0], x.shape[1]) # 1 x 1 x 2048 - - -def load_patched_inception_v3(): - # inception = inception_v3(pretrained=True) - # inception_feat = Inception3Feature() - # inception_feat.load_state_dict(inception.state_dict()) - inception_feat = InceptionV3([3], normalize_input=False) - - return inception_feat - - -@torch.no_grad() -def extract_features(loader, inception, device): - pbar = tqdm(loader) - - feature_list = [] - - for img in pbar: - img = img.to(device) - feature = inception(img)[0].view(img.shape[0], -1) - feature_list.append(feature.to('cpu')) - - features = torch.cat(feature_list, 0) - - return features - - -if __name__ == '__main__': - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - parser = argparse.ArgumentParser( - description='Calculate Inception v3 features for datasets' - ) - parser.add_argument('--size', type=int, default=256) - parser.add_argument('--batch', default=64, type=int, help='batch size') - parser.add_argument('--n_sample', type=int, default=50000) - parser.add_argument('--flip', action='store_true') - parser.add_argument('path', metavar='PATH', help='path to datset lmdb file') - - args = parser.parse_args() - - inception = load_patched_inception_v3() - inception = nn.DataParallel(inception).eval().to(device) - - transform = transforms.Compose( - [ - transforms.RandomHorizontalFlip(p=0.5 if args.flip else 0), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - dset = MultiResolutionDataset(args.path, transform=transform, resolution=args.size) - loader = DataLoader(dset, batch_size=args.batch, num_workers=4) - - features = extract_features(loader, inception, device).numpy() - - features = features[: args.n_sample] - - print(f'extracted {features.shape[0]} features') - - mean = np.mean(features, 0) - cov = np.cov(features, rowvar=False) - - name = os.path.splitext(os.path.basename(args.path))[0] - - with open(f'inception_{name}.pkl', 'wb') as f: - pickle.dump({'mean': mean, 'cov': cov, 'size': args.size, 'path': args.path}, f) diff --git a/spaces/miculpionier/Fill-Mask/README.md b/spaces/miculpionier/Fill-Mask/README.md deleted file mode 100644 index 74e11e9145e66e5492a7404104764c41175abdcb..0000000000000000000000000000000000000000 --- a/spaces/miculpionier/Fill-Mask/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fill Mask -emoji: 📊 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -A frontent made in gradio for Fill-Mask. 
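The Fill-Mask Space deleted above describes itself only as a Gradio front end for fill-mask models; its actual `app.py` is not part of this diff. A minimal sketch of that kind of app, assuming the Hugging Face `fill-mask` pipeline and a stand-in `bert-base-uncased` checkpoint (both assumptions, not the Space's real code), might look like this:

```python
# Hypothetical minimal Gradio fill-mask frontend; not the deleted Space's app.py.
import gradio as gr
from transformers import pipeline

# Assumed checkpoint; the original Space may have used a different model.
fill_mask = pipeline("fill-mask", model="bert-base-uncased")


def predict(text: str) -> dict:
    # The input must contain the model's mask token, e.g. "[MASK]" for BERT.
    results = fill_mask(text)
    # Map each candidate token to its score for display in a Label widget.
    return {r["token_str"]: float(r["score"]) for r in results}


demo = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Sentence containing [MASK]"),
    outputs=gr.Label(num_top_classes=5),
    title="Fill Mask",
)

if __name__ == "__main__":
    demo.launch()
```

Run locally (for example with `python app.py` after `pip install gradio transformers torch`), this serves a comparable single-textbox interface to what such a Space would expose.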
diff --git a/spaces/mikeee/radiobee-aligner/radiobee/paras2sents.py b/spaces/mikeee/radiobee-aligner/radiobee/paras2sents.py deleted file mode 100644 index c33460480f594eb9f86fd4c87c7df62ff98ad19d..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-aligner/radiobee/paras2sents.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Convert paras to sents.""" -# pylint: disable=unused-import, too-many-branches, ungrouped-imports - -from typing import Callable, List, Optional, Tuple, Union - -from itertools import zip_longest -import numpy as np -import pandas as pd -from logzero import logger - -from radiobee.align_sents import align_sents -from radiobee.seg_text import seg_text -from radiobee.detect import detect - -try: - from radiobee.shuffle_sents import shuffle_sents -except Exception as exc: - logger.error("shuffle_sents not available: %s, using align_sents", exc) - shuffle_sents = lambda x1, x2, lang1="", lang2="": align_sents(x1, x2) # noqa - - -def paras2sents( - paras_: Union[pd.DataFrame, List[Tuple[str, str, Union[str, float]]], np.ndarray], - align_func: Optional[Union[Callable, str]] = None, - lang1: Optional[str] = None, - lang2: Optional[str] = None, -) -> List[Tuple[str, str, Union[str, float]]]: - """Convert paras to sents using align_func. - - Args: - paras_: list of 3-tuples or numpy or pd.DataFrame - lang1: fisrt lang code - lang2: second lang code - align_func: func used in the sent level - if set to None, default to align_sents - Returns: - list of sents (possible with likelihood for shuffle_sents) - """ - # wrap everything in pd.DataFrame - # necessary to make pyright happy - paras = pd.DataFrame(paras_).fillna("") - - # take the first three columns at maximum - paras = paras.iloc[:, :3] - - if len(paras.columns) < 2: - logger.error( - "Need at least two columns, got %s", - len(paras.columns) - ) - raise Exception("wrong data") - - # append the third col (all "") if there are only two cols - if len(paras.columns) < 3: - paras.insert(2, "likelihood", [""] * len(paras)) - - if lang1 is None: - lang1 = detect(" ".join(paras.iloc[:, 0])) - if lang2 is None: - lang2 = detect(" ".join(paras.iloc[:, 1])) - - left, right = [], [] - row0, row1 = [], [] - for elm0, elm1, elm2 in paras.values: - sents0 = seg_text(elm0, lang1) - sents1 = seg_text(elm1, lang2) - if isinstance(elm2, float) and elm2 > 0: - if row0 or row1: - left.append(row0) - right.append(row1) - row0, row1 = [], [] # collect and prepare - - if sents0: - left.append(sents0) - if sents1: - right.append(sents1) - else: - if sents0: - row0.extend(sents0) - if sents1: - row1.extend(sents1) - # collect possible last batch - if row0 or row1: - left.append(row0) - right.append(row1) - - # res = [*zip(left, right)] - - # align each batch using align_func - - # ready align_func - if align_func is None: - align_func = align_sents - if isinstance(align_func, str) and align_func.startswith("shuffle") or not isinstance(align_func, str) and align_func.__name__ in ["shuffle_sents"]: - align_func = lambda row0, row1: shuffle_sents(row0, row1, lang1=lang1, lang2=lang2) # noqa - else: - align_func = align_sents - - res = [] - for row0, row1 in zip(left, right): - try: - _ = align_func(row0, row1) - except Exception as exc: - logger.error("errors: %s, resorting to zip_longest", exc) - _ = [*zip_longest(row0, row1, fillvalue="")] - - # res.append(_) - res.extend(_) - - return res diff --git a/spaces/mindart/infinite-zoom-stable-diffusion/helpers/image.py b/spaces/mindart/infinite-zoom-stable-diffusion/helpers/image.py deleted file 
mode 100644 index 8e82f5053e572ce5891d533b1885451895d5ac79..0000000000000000000000000000000000000000 --- a/spaces/mindart/infinite-zoom-stable-diffusion/helpers/image.py +++ /dev/null @@ -1,50 +0,0 @@ -from PIL import Image -import requests -import numpy as np - -def image_grid(imgs, rows, cols): - assert len(imgs) == rows*cols - - w, h = imgs[0].size - grid = Image.new('RGB', size=(cols*w, rows*h)) - grid_w, grid_h = grid.size - - for i, img in enumerate(imgs): - grid.paste(img, box=(i%cols*w, i//cols*h)) - return grid - -def shrink_and_paste_on_blank(current_image, mask_width): - """ - Decreases size of current_image by mask_width pixels from each side, - then adds a mask_width width transparent frame, - so that the image the function returns is the same size as the input. - :param current_image: input image to transform - :param mask_width: width in pixels to shrink from each side - """ - - height = current_image.height - width = current_image.width - - #shrink down by mask_width - prev_image = current_image.resize((height-2*mask_width,width-2*mask_width)) - prev_image = prev_image.convert("RGBA") - prev_image = np.array(prev_image) - - #create blank non-transparent image - blank_image = np.array(current_image.convert("RGBA"))*0 - blank_image[:,:,3] = 1 - - #paste shrinked onto blank - blank_image[mask_width:height-mask_width,mask_width:width-mask_width,:] = prev_image - prev_image = Image.fromarray(blank_image) - - return prev_image - -def load_img(address, res=(512, 512)): - if address.startswith('http://') or address.startswith('https://'): - image = Image.open(requests.get(address, stream=True).raw) - else: - image = Image.open(address) - image = image.convert('RGB') - image = image.resize(res, resample=Image.LANCZOS) - return image \ No newline at end of file diff --git a/spaces/mrstuffandthings/Bark-Voice-Cloning/cloning/__init__.py b/spaces/mrstuffandthings/Bark-Voice-Cloning/cloning/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py b/spaces/mshukor/UnIVAL/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py deleted file mode 100644 index b41bfbe38789ba14e6a5ea938c75d761424c00ab..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-import argparse -import glob - -import numpy as np - - -DIM = 1024 - - -def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False): - target_ids = [tid for tid in target_embs] - source_mat = np.stack(source_embs.values(), axis=0) - normalized_source_mat = source_mat / np.linalg.norm( - source_mat, axis=1, keepdims=True - ) - target_mat = np.stack(target_embs.values(), axis=0) - normalized_target_mat = target_mat / np.linalg.norm( - target_mat, axis=1, keepdims=True - ) - sim_mat = normalized_source_mat.dot(normalized_target_mat.T) - if return_sim_mat: - return sim_mat - neighbors_map = {} - for i, sentence_id in enumerate(source_embs): - idx = np.argsort(sim_mat[i, :])[::-1][:k] - neighbors_map[sentence_id] = [target_ids[tid] for tid in idx] - return neighbors_map - - -def load_embeddings(directory, LANGS): - sentence_embeddings = {} - sentence_texts = {} - for lang in LANGS: - sentence_embeddings[lang] = {} - sentence_texts[lang] = {} - lang_dir = f"{directory}/{lang}" - embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*") - for embed_file in embedding_files: - shard_id = embed_file.split(".")[-1] - embeddings = np.fromfile(embed_file, dtype=np.float32) - num_rows = embeddings.shape[0] // DIM - embeddings = embeddings.reshape((num_rows, DIM)) - - with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file: - for idx, line in enumerate(sentence_file): - sentence_id, sentence = line.strip().split("\t") - sentence_texts[lang][sentence_id] = sentence - sentence_embeddings[lang][sentence_id] = embeddings[idx, :] - - return sentence_embeddings, sentence_texts - - -def compute_accuracy(directory, LANGS): - sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS) - - top_1_accuracy = {} - - top1_str = " ".join(LANGS) + "\n" - for source_lang in LANGS: - top_1_accuracy[source_lang] = {} - top1_str += f"{source_lang} " - for target_lang in LANGS: - top1 = 0 - top5 = 0 - neighbors_map = compute_dist( - sentence_embeddings[source_lang], sentence_embeddings[target_lang] - ) - for sentence_id, neighbors in neighbors_map.items(): - if sentence_id == neighbors[0]: - top1 += 1 - if sentence_id in neighbors[:5]: - top5 += 1 - n = len(sentence_embeddings[target_lang]) - top1_str += f"{top1/n} " - top1_str += "\n" - - print(top1_str) - print(top1_str, file=open(f"{directory}/accuracy", "w")) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Analyze encoder outputs") - parser.add_argument("directory", help="Source language corpus") - parser.add_argument("--langs", help="List of langs") - args = parser.parse_args() - langs = args.langs.split(",") - compute_accuracy(args.directory, langs) diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py deleted file mode 100644 index 2fa846075b6872cdcc0baebca0b9acbb9ffcd287..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-# author: adefossez - -import logging - -import torch.hub - -from .demucs import Demucs -from .utils import deserialize_model - -logger = logging.getLogger(__name__) -ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/" -DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th" -DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th" -MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th" - - -def _demucs(pretrained, url, **kwargs): - model = Demucs(**kwargs) - if pretrained: - state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu') - model.load_state_dict(state_dict) - return model - - -def dns48(pretrained=True): - return _demucs(pretrained, DNS_48_URL, hidden=48) - - -def dns64(pretrained=True): - return _demucs(pretrained, DNS_64_URL, hidden=64) - - -def master64(pretrained=True): - return _demucs(pretrained, MASTER_64_URL, hidden=64) - - -def add_model_flags(parser): - group = parser.add_mutually_exclusive_group(required=False) - group.add_argument( - "-m", "--model_path", help="Path to local trained model." - ) - group.add_argument( - "--dns48", action="store_true", - help="Use pre-trained real time H=48 model trained on DNS." - ) - group.add_argument( - "--dns64", action="store_true", - help="Use pre-trained real time H=64 model trained on DNS." - ) - group.add_argument( - "--master64", action="store_true", - help="Use pre-trained real time H=64 model trained on DNS and Valentini." - ) - - -def get_model(args): - """ - Load local model package or torchhub pre-trained model. - """ - if args.model_path: - logger.info("Loading model from %s", args.model_path) - pkg = torch.load(args.model_path) - model = deserialize_model(pkg) - elif args.dns64: - logger.info("Loading pre-trained real time H=64 model trained on DNS.") - model = dns64() - elif args.master64: - logger.info( - "Loading pre-trained real time H=64 model trained on DNS and Valentini." - ) - model = master64() - else: - logger.info("Loading pre-trained real time H=48 model trained on DNS.") - model = dns48() - logger.debug(model) - return model diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/prep_librispeech_data.py b/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/prep_librispeech_data.py deleted file mode 100644 index f379fa7bf195f48ad6b2ed3dbd93a5fbeb7abf79..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/speech_to_text/prep_librispeech_data.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import logging -from pathlib import Path -import shutil -from tempfile import NamedTemporaryFile - -import pandas as pd -from examples.speech_to_text.data_utils import ( - create_zip, - extract_fbank_features, - gen_config_yaml, - gen_vocab, - get_zip_manifest, - save_df_to_tsv, -) -from torchaudio.datasets import LIBRISPEECH -from tqdm import tqdm - - -log = logging.getLogger(__name__) - -SPLITS = [ - "train-clean-100", - "train-clean-360", - "train-other-500", - "dev-clean", - "dev-other", - "test-clean", - "test-other", -] - -MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"] - - -def process(args): - out_root = Path(args.output_root).absolute() - out_root.mkdir(exist_ok=True) - # Extract features - feature_root = out_root / "fbank80" - feature_root.mkdir(exist_ok=True) - for split in SPLITS: - print(f"Fetching split {split}...") - dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=True) - print("Extracting log mel filter bank features...") - for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset): - sample_id = f"{spk_id}-{chapter_no}-{utt_no}" - extract_fbank_features( - wav, sample_rate, feature_root / f"{sample_id}.npy" - ) - # Pack features into ZIP - zip_path = out_root / "fbank80.zip" - print("ZIPing features...") - create_zip(feature_root, zip_path) - print("Fetching ZIP manifest...") - audio_paths, audio_lengths = get_zip_manifest(zip_path) - # Generate TSV manifest - print("Generating manifest...") - train_text = [] - for split in SPLITS: - manifest = {c: [] for c in MANIFEST_COLUMNS} - dataset = LIBRISPEECH(out_root.as_posix(), url=split) - for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset): - sample_id = f"{spk_id}-{chapter_no}-{utt_no}" - manifest["id"].append(sample_id) - manifest["audio"].append(audio_paths[sample_id]) - manifest["n_frames"].append(audio_lengths[sample_id]) - manifest["tgt_text"].append(utt.lower()) - manifest["speaker"].append(spk_id) - save_df_to_tsv( - pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv" - ) - if split.startswith("train"): - train_text.extend(manifest["tgt_text"]) - # Generate vocab - vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size) - spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}" - with NamedTemporaryFile(mode="w") as f: - for t in train_text: - f.write(t + "\n") - gen_vocab( - Path(f.name), - out_root / spm_filename_prefix, - args.vocab_type, - args.vocab_size, - ) - # Generate config YAML - gen_config_yaml( - out_root, - spm_filename=spm_filename_prefix + ".model", - specaugment_policy="ld" - ) - # Clean up - shutil.rmtree(feature_root) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--output-root", "-o", required=True, type=str) - parser.add_argument( - "--vocab-type", - default="unigram", - required=True, - type=str, - choices=["bpe", "unigram", "char"], - ), - parser.add_argument("--vocab-size", default=10000, type=int) - args = parser.parse_args() - - process(args) - - -if __name__ == "__main__": - main() diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wmt20/README.md b/spaces/mshukor/UnIVAL/fairseq/examples/wmt20/README.md deleted file mode 100644 index b4f2874652f8be19998a65faa1d9276d8017ec59..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wmt20/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# WMT 20 - -This page provides pointers to the models of Facebook-FAIR's WMT'20 news translation task submission [(Chen et al., 
2020)](https://arxiv.org/abs/2011.08298). - -## Single best MT models (after finetuning on part of WMT20 news dev set) - -Model | Description | Download ----|---|--- -`transformer.wmt20.ta-en` | Ta->En | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz) -`transformer.wmt20.en-ta` | En->Ta | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz) -`transformer.wmt20.iu-en.news` | Iu->En (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz) -`transformer.wmt20.en-iu.news` | En->Iu (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz) -`transformer.wmt20.iu-en.nh` | Iu->En (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz) -`transformer.wmt20.en-iu.nh` | En->Iu (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz) - -## Language models -Model | Description | Download ----|---|--- -`transformer_lm.wmt20.en` | En Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en.tar.gz) -`transformer_lm.wmt20.ta` | Ta Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta.tar.gz) -`transformer_lm.wmt20.iu.news` | Iu Language Model (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.news.tar.gz) -`transformer_lm.wmt20.iu.nh` | Iu Language Model (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.nh.tar.gz) - -## Example usage (torch.hub) - -#### Translation - -```python -import torch - -# English to Tamil translation -en2ta = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-ta') -en2ta.translate("Machine learning is great!") # 'இயந்திரக் கற்றல் அருமை!' - -# Tamil to English translation -ta2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.ta-en') -ta2en.translate("இயந்திரக் கற்றல் அருமை!") # 'Machine learning is great!' - -# English to Inuktitut translation -en2iu = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-iu.news') -en2iu.translate("machine learning is great!") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!' - -# Inuktitut to English translation -iu2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.iu-en.news') -iu2en.translate("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!") # 'Machine learning excellence!' -``` - -#### Language Modeling - -```python -# Sample from the English LM -en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.en') -en_lm.sample("Machine learning is") # 'Machine learning is a type of artificial intelligence that uses machine learning to learn from data and make predictions.' - -# Sample from the Tamil LM -ta_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.ta') -ta_lm.sample("இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின்") # 'இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின் ஒரு பகுதியாகும்.' 
- -# Sample from the Inuktitut LM -iu_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.iu.news') -iu_lm.sample("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ, ᐊᒻᒪᓗ ᓯᓚᐅᑉ ᐊᓯᙳᖅᐸᓪᓕᐊᓂᖓᓄᑦ ᖃᓄᐃᓕᐅᕈᑎᒃᓴᑦ, ᐃᓚᖃᖅᖢᑎᒃ ᐅᑯᓂᖓ:' -``` - -## Citation -```bibtex -@inproceedings{chen2020facebook - title={Facebook AI's WMT20 News Translation Task Submission}, - author={Peng-Jen Chen and Ann Lee and Changhan Wang and Naman Goyal and Angela Fan and Mary Williamson and Jiatao Gu}, - booktitle={Proc. of WMT}, - year={2020}, -} -``` diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/transformer/transformer_config.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/transformer/transformer_config.py deleted file mode 100644 index 2580d20aacc5be4680971646d9523489d903c56c..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/transformer/transformer_config.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -import re -from dataclasses import dataclass, field, fields -from typing import List, Optional - -from fairseq import utils -from fairseq.dataclass import FairseqDataclass, ChoiceEnum -from omegaconf import II - -DEFAULT_MAX_SOURCE_POSITIONS = 1024 -DEFAULT_MAX_TARGET_POSITIONS = 1024 - -DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8) - -_NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)" - - -@dataclass -class EncDecBaseConfig(FairseqDataclass): - embed_path: Optional[str] = field( - default=None, metadata={"help": "path to pre-trained embedding"} - ) - embed_dim: Optional[int] = field( - default=512, metadata={"help": "embedding dimension"} - ) - ffn_embed_dim: int = field( - default=2048, metadata={"help": "embedding dimension for FFN"} - ) - layers: int = field(default=6, metadata={"help": "number of layers"}) - attention_heads: int = field( - default=8, metadata={"help": "number of attention heads"} - ) - normalize_before: bool = field( - default=False, metadata={"help": "apply layernorm before each block"} - ) - learned_pos: bool = field( - default=False, metadata={"help": "use learned positional embeddings"} - ) - # args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019) - layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"}) - layers_to_keep: Optional[List[int]] = field( - default=None, metadata={"help": "which layers to *keep* when pruning"} - ) - - -@dataclass -class DecoderConfig(EncDecBaseConfig): - input_dim: int = II("model.decoder.embed_dim") - output_dim: int = field( - default=II("model.decoder.embed_dim"), - metadata={ - "help": "decoder output dimension (extra linear layer if different from decoder embed dim)" - }, - ) - - def __post_init__(self): - # II doesn't work if we are just creating the object outside of hydra so fix that - if self.input_dim == II("model.decoder.embed_dim"): - self.input_dim = self.embed_dim - if self.output_dim == II("model.decoder.embed_dim"): - self.output_dim = self.embed_dim - - -@dataclass -class QuantNoiseConfig(FairseqDataclass): - pq: float = field( - default=0.0, - metadata={"help": "iterative PQ quantization noise at training time"}, - ) - pq_block_size: int = field( - default=8, - metadata={"help": "block size of quantization noise at training time"}, - ) - scalar: float = field( - default=0.0, - metadata={ - "help": "scalar quantization noise and scalar quantization at training time" - }, - ) - - 
-@dataclass -class TransformerConfig(FairseqDataclass): - activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field( - default="relu", - metadata={"help": "activation function to use"}, - ) - dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) - attention_dropout: float = field( - default=0.0, metadata={"help": "dropout probability for attention weights"} - ) - activation_dropout: float = field( - default=0.0, - metadata={ - "help": "dropout probability after activation in FFN.", - "alias": "--relu-dropout", - }, - ) - adaptive_input: bool = False - encoder: EncDecBaseConfig = EncDecBaseConfig() - # TODO should really be in the encoder config - max_source_positions: int = field( - default=DEFAULT_MAX_SOURCE_POSITIONS, - metadata={"help": "Maximum input length supported by the encoder"}, - ) - decoder: DecoderConfig = DecoderConfig() - # TODO should really be in the decoder config - max_target_positions: int = field( - default=DEFAULT_MAX_TARGET_POSITIONS, - metadata={"help": "Maximum output length supported by the decoder"}, - ) - share_decoder_input_output_embed: bool = field( - default=False, metadata={"help": "share decoder input and output embeddings"} - ) - share_all_embeddings: bool = field( - default=False, - metadata={ - "help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)" - }, - ) - no_token_positional_embeddings: bool = field( - default=False, - metadata={ - "help": "if True, disables positional embeddings (outside self attention)" - }, - ) - adaptive_softmax_cutoff: Optional[List[int]] = field( - default=None, - metadata={ - "help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion" - }, - ) - adaptive_softmax_dropout: float = field( - default=0.0, - metadata={"help": "sets adaptive softmax dropout for the tail projections"}, - ) - adaptive_softmax_factor: float = field( - default=4, metadata={"help": "adaptive input factor"} - ) - layernorm_embedding: bool = field( - default=False, metadata={"help": "add layernorm to embedding"} - ) - tie_adaptive_weights: bool = field( - default=False, - metadata={ - "help": "if set, ties the weights of adaptive softmax and adaptive input" - }, - ) - tie_adaptive_proj: bool = field( - default=False, - metadata={ - "help": "if set, ties the projection weights of adaptive softmax and adaptive input" - }, - ) - no_scale_embedding: bool = field( - default=False, metadata={"help": "if True, dont scale embeddings"} - ) - checkpoint_activations: bool = field( - default=False, - metadata={ - "help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute" - }, - ) - offload_activations: bool = field( - default=False, - metadata={ - "help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations." 
- }, - ) - # args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019) - no_cross_attention: bool = field( - default=False, metadata={"help": "do not perform cross-attention"} - ) - cross_self_attention: bool = field( - default=False, metadata={"help": "perform cross+self-attention"} - ) - # args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020) - quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig()) - min_params_to_wrap: int = field( - default=DEFAULT_MIN_PARAMS_TO_WRAP, - metadata={ - "help": "minimum number of params for a layer to be wrapped with FSDP() when " - "training with --ddp-backend=fully_sharded. Smaller values will " - "improve memory efficiency, but may make torch.distributed " - "communication less efficient due to smaller input sizes. This option " - "is set to 0 (i.e., always wrap) when --checkpoint-activations or " - "--offload-activations are passed." - }, - ) - # DEPRECATED field, but some old checkpoints might have it - char_inputs: bool = field( - default=False, metadata={"help": "if set, model takes character ids as input"} - ) - relu_dropout: float = 0.0 - # config for "BASE Layers: Simplifying Training of Large, Sparse Models" - base_layers: Optional[int] = field( - default=0, metadata={"help": "number of BASE layers in total"} - ) - base_sublayers: Optional[int] = field( - default=1, metadata={"help": "number of sublayers in each BASE layer"} - ) - base_shuffle: Optional[int] = field( - default=1, - metadata={"help": "shuffle tokens between workers before computing assignment"}, - ) - - export: bool = field( - default=False, - metadata={"help": "make the layernorm exportable with torchscript."}, - ) - - # copied from transformer_lm but expected in transformer_decoder: - no_decoder_final_norm: bool = field( - default=False, - metadata={"help": "don't add an extra layernorm after the last decoder block"}, - ) - - # We need to make this hierarchical dataclass like the flat namespace - # __getattr__ and __setattr__ here allow backward compatibility - # for subclasses of Transformer(Legacy) that depend on read/write on - # the flat namespace. - - def __getattr__(self, name): - match = re.match(_NAME_PARSER, name) - if match: - sub = getattr(self, match[1]) - return getattr(sub, match[2]) - raise AttributeError(f"invalid argument {name}.") - - def __setattr__(self, name, value): - match = re.match(_NAME_PARSER, name) - if match: - sub = getattr(self, match[1]) - setattr(sub, match[2], value) - else: - super().__setattr__(name, value) - - @staticmethod - def _copy_keys(args, cls, prefix, seen): - """ - copy the prefixed keys (decoder_embed_dim) to the DC fields: decoder.embed_dim - """ - cfg = cls() - for fld in fields(cls): - # for all the fields in the DC, find the fields (e.g. embed_dim) - # in the namespace with the prefix (e.g. decoder) - # and set it on the dc. - args_key = f"{prefix}_{fld.name}" - if hasattr(args, args_key): - seen.add(args_key) - setattr(cfg, fld.name, getattr(args, args_key)) - if hasattr(args, fld.name): - seen.add(fld.name) - setattr(cfg, fld.name, getattr(args, fld.name)) - return cfg - - @classmethod - def from_namespace(cls, args): - if args is None: - return None - if not isinstance(args, cls): - seen = set() - config = cls() - # currently, we can go generically from DC fields to args hierarchically - # but we can't easily deconstruct a flat namespace to a hierarchical - # DC. 
Mostly because we could have a sub-dc called `decoder-foo` that should not - # go to the sub struct called `decoder`. There are ways to go around this, but let's keep it simple - # for now. - for fld in fields(cls): - # concretelly, the transformer_config know what sub-dc it has, so we go through all the dc fields - # and if it's one that has a sub-dc, we build that sub-dc with `copy_keys()` - if fld.name == "decoder": - if hasattr(args, "decoder"): - # in some cases, the args we receive is already structured (as DictConfigs), so let's just build the correct DC - seen.add("decoder") - config.decoder = DecoderConfig(**args.decoder) - else: - config.decoder = cls._copy_keys( - args, DecoderConfig, "decoder", seen - ) - elif fld.name == "encoder": - # same but for encoder - if hasattr(args, "encoder"): - seen.add("encoder") - config.encoder = EncDecBaseConfig(**args.encoder) - else: - config.encoder = cls._copy_keys( - args, EncDecBaseConfig, "encoder", seen - ) - elif fld.name == "quant_noise": - # same but for quant_noise - if hasattr(args, "quant_noise"): - seen.add("quant_noise") - config.quant_noise = QuantNoiseConfig(**args.quant_noise) - else: - config.quant_noise = cls._copy_keys( - args, QuantNoiseConfig, "quant_noise", seen - ) - elif hasattr(args, fld.name): - # if it's not a structure field, it's just a normal field, copy it over - seen.add(fld.name) - setattr(config, fld.name, getattr(args, fld.name)) - # we got all the fields defined in the dataclass, but - # the argparse namespace might have extra args for two reasons: - # - we are in a legacy class so all the args are not declared in the dataclass. Ideally once everyone has defined a dataclass for their model, we won't need this - # - some places expect args to be there but never define them - args_dict = args._asdict() if hasattr(args, '_asdict') else vars(args) if hasattr(args, '__dict__') else {} # namedtupled doesn't have __dict__ :-/ - for key, value in args_dict.items(): - if key not in seen: - setattr(config, key, value) - return config - else: - return args diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/refcoco/ofa_warefcocoplus_ground_refcocoplus_lr1e5.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/refcoco/ofa_warefcocoplus_ground_refcocoplus_lr1e5.sh deleted file mode 100644 index 574d2e5bf277250a912958911ca91def0243cba2..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/refcoco/ofa_warefcocoplus_ground_refcocoplus_lr1e5.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=ofa_warefcocoplus_ground_refcocoplus_lr1e5 -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --time=24:00:00 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_warefcocoplus_ground_refcocoplus_lr1e5.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/refcoco/ofa_warefcocoplus_ground_refcocoplus_lr1e5.sh - - diff --git a/spaces/msy666/White-box-Cartoonization/wbc/cartoonize.py b/spaces/msy666/White-box-Cartoonization/wbc/cartoonize.py deleted file mode 100644 index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000 --- 
a/spaces/msy666/White-box-Cartoonization/wbc/cartoonize.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import cv2 -import numpy as np -import tensorflow as tf -import wbc.network as network -import wbc.guided_filter as guided_filter -from tqdm import tqdm - - -def resize_crop(image): - h, w, c = np.shape(image) - if min(h, w) > 720: - if h > w: - h, w = int(720 * h / w), 720 - else: - h, w = 720, int(720 * w / h) - image = cv2.resize(image, (w, h), - interpolation=cv2.INTER_AREA) - h, w = (h // 8) * 8, (w // 8) * 8 - image = image[:h, :w, :] - return image - - -def cartoonize(load_folder, save_folder, model_path): - print(model_path) - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(input_photo) - final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - - sess.run(tf.global_variables_initializer()) - saver.restore(sess, tf.train.latest_checkpoint(model_path)) - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = sess.run(final_out, feed_dict={input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - -class Cartoonize: - def __init__(self, model_path): - print(model_path) - self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - network_out = network.unet_generator(self.input_photo) - self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3) - - all_vars = tf.trainable_variables() - gene_vars = [var for var in all_vars if 'generator' in var.name] - saver = tf.train.Saver(var_list=gene_vars) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - self.sess = tf.Session(config=config) - - self.sess.run(tf.global_variables_initializer()) - saver.restore(self.sess, tf.train.latest_checkpoint(model_path)) - - def run(self, load_folder, save_folder): - name_list = os.listdir(load_folder) - for name in tqdm(name_list): - try: - load_path = os.path.join(load_folder, name) - save_path = os.path.join(save_folder, name) - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, output) - except: - print('cartoonize {} failed'.format(load_path)) - - def run_sigle(self, load_path, save_path): - try: - image = cv2.imread(load_path) - image = resize_crop(image) - batch_image = image.astype(np.float32) / 127.5 - 1 - batch_image = np.expand_dims(batch_image, axis=0) - output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image}) - output = (np.squeeze(output) + 1) * 127.5 - output = np.clip(output, 0, 255).astype(np.uint8) - cv2.imwrite(save_path, 
output) - except: - print('cartoonize {} failed'.format(load_path)) - - -if __name__ == '__main__': - model_path = 'saved_models' - load_folder = 'test_images' - save_folder = 'cartoonized_images' - if not os.path.exists(save_folder): - os.mkdir(save_folder) - cartoonize(load_folder, save_folder, model_path) diff --git a/spaces/nakas/MusicGenDemucs/tests/data/test_audio_utils.py b/spaces/nakas/MusicGenDemucs/tests/data/test_audio_utils.py deleted file mode 100644 index 0480671bb17281d61ce02bce6373a5ccec89fece..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/tests/data/test_audio_utils.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import julius -import torch -import pytest - -from audiocraft.data.audio_utils import ( - _clip_wav, - convert_audio_channels, - convert_audio, - normalize_audio -) -from ..common_utils import get_batch_white_noise - - -class TestConvertAudioChannels: - - def test_convert_audio_channels_downmix(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=2) - assert list(mixed.shape) == [b, 2, t] - - def test_convert_audio_channels_nochange(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=c) - assert list(mixed.shape) == list(audio.shape) - - def test_convert_audio_channels_upmix(self): - b, c, t = 2, 1, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=3) - assert list(mixed.shape) == [b, 3, t] - - def test_convert_audio_channels_upmix_error(self): - b, c, t = 2, 2, 100 - audio = get_batch_white_noise(b, c, t) - with pytest.raises(ValueError): - convert_audio_channels(audio, channels=3) - - -class TestConvertAudio: - - def test_convert_audio_channels_downmix(self): - b, c, dur = 2, 3, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2) - assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]] - - def test_convert_audio_channels_upmix(self): - b, c, dur = 2, 1, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3) - assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]] - - def test_convert_audio_upsample(self): - b, c, dur = 2, 1, 4. - sr = 2 - new_sr = 3 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - def test_convert_audio_resample(self): - b, c, dur = 2, 1, 4. - sr = 3 - new_sr = 2 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - -class TestNormalizeAudio: - - def test_clip_wav(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - _clip_wav(audio) - assert audio.abs().max() <= 1 - - def test_normalize_audio_clip(self): - b, c, dur = 2, 1, 4. 
- sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='clip') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_rms(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='rms') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_peak(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='peak') - assert norm_audio.abs().max() <= 1 diff --git a/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/__init__.py b/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/__init__.py deleted file mode 100644 index 2b529dfe3f61da71f7427fbeb7ab47710450d372..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/models/efficient_net/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -__version__ = "0.7.1" -from .model import EfficientNet, VALID_MODELS -from .utils import ( - GlobalParams, - BlockArgs, - BlockDecoder, - efficientnet, - get_model_params, -) diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Douglas Fluid Mechanics Solution Manual Free BETTER Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Douglas Fluid Mechanics Solution Manual Free BETTER Download.md deleted file mode 100644 index 041516a4764bd7e3246bb3f9bc93fa4a08b3045f..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Douglas Fluid Mechanics Solution Manual Free BETTER Download.md +++ /dev/null @@ -1,15 +0,0 @@ - -

      How to Download the Douglas Fluid Mechanics Solution Manual for Free

      -

      If you are looking for a comprehensive and concise textbook on fluid mechanics, you may have come across the fifth edition of Fluid Mechanics by John Douglas, Janusz Gasiorek and John Swaffield. This book covers both established theory and emerging topics in fluid mechanics, with relevant examples and applications for mechanical, civil, energy and environmental engineers.

      -

      Douglas Fluid Mechanics Solution Manual Free Download


      DOWNLOAD ———>>> https://urlcod.com/2uIbXV



      -

      But what if you need some help with solving the problems in the book? How can you check your answers and improve your understanding of the concepts? That's where the solution manual comes in handy. The solution manual provides detailed and step-by-step solutions to all the problems in the textbook, as well as additional exercises and questions for self-assessment.

      -

      The good news is that you can download the Douglas Fluid Mechanics Solution Manual for free from various online sources. Here are some of the ways you can get access to this valuable resource:

      -
      • Search for PDF files on academic websites. Some websites that host academic papers and books may have PDF files of the solution manual that you can download or view online. For example, you can find a PDF file of the solution manual for the fifth edition of Fluid Mechanics on Academia.edu [^2^]. You can also find a PDF file of the solution manual for volume 1 of Solving Problems in Fluid Mechanics by John Douglas on Academia.edu [^3^]. However, be aware that some of these PDF files may not be complete or accurate, and you may need to sign up or pay a fee to access them.
      • Search for torrent files on peer-to-peer networks. Another way to download the solution manual for free is to use torrent files that are shared by other users on peer-to-peer networks. Torrent files are small files that contain information about larger files that can be downloaded using a torrent client software. You can find torrent files of the solution manual by using a torrent search engine or browsing torrent websites. However, be careful when downloading torrent files, as they may contain viruses or malware, or infringe on copyright laws.
      • Search for online libraries or databases that offer free access to books and manuals. Some online libraries or databases may have digital copies of the solution manual that you can access for free or with a subscription. For example, you can find an online version of the solution manual for volume 2 of Solving Problems in Fluid Mechanics by John Douglas on StuDocu.com [^1^]. You can also find an online version of the solution manual for Engineering Fluid Mechanics by Clayton Crowe and Donald Elger on Academia.edu [^2^]. However, some of these online libraries or databases may require registration or membership, or have limited access or functionality.
      -

      As you can see, there are several ways to download the Douglas Fluid Mechanics Solution Manual for free. However, before you do so, make sure you are aware of the risks and ethical issues involved in downloading copyrighted material without permission. You may also want to consider buying a legitimate copy of the solution manual from a reputable publisher or seller, as this will ensure you get a high-quality and updated version of the solutions, as well as support the authors and publishers who created this valuable resource.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Adobe Lightroom 5 ((BETTER)) Full Version For Free..md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Adobe Lightroom 5 ((BETTER)) Full Version For Free..md deleted file mode 100644 index 649366f4e3ef62d4f5b58bc6eb7fa07a352d02a2..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Adobe Lightroom 5 ((BETTER)) Full Version For Free..md +++ /dev/null @@ -1,43 +0,0 @@ -
      -

      How to Download Adobe Lightroom 5 Full Version for Free

      -

Adobe Lightroom 5 is a powerful photo editing application that can help you enhance your images, organize your collections, and share your work with others. However, it is not cheap, and you may not want to pay for a subscription or a license. Fortunately, there is a way to download Adobe Lightroom 5 full version for free and use it legally on your computer.

      -

      In this article, we will show you how to download Adobe Lightroom 5 full version for free from the official Adobe website, how to install it on your PC or Mac, and how to activate it with a serial number. We will also provide some tips and tricks to make the most of this software.

      -

      Download Adobe Lightroom 5 full version for free.


      DOWNLOAD --->>> https://urlcod.com/2uIaxf



      -

      Step 1: Download Adobe Lightroom 5 Full Version for Free

      -

      The first step is to download Adobe Lightroom 5 full version for free from the official Adobe website. To do this, you need to follow these steps:

      -
1. Go to this page and scroll down to the section "Download apps from other versions of Creative Cloud".
2. Click on the link "Download apps from Creative Cloud 2014 and earlier".
3. Sign in with your Adobe ID or create one if you don't have one.
4. Find Adobe Photoshop Lightroom 5 in the list of apps and click on the "Download" button next to it.
5. Choose your operating system (Windows or Mac) and language and click on "Download".
6. Save the file to your computer and wait for the download to finish.

      Step 2: Install Adobe Lightroom 5 Full Version for Free

      -

      The next step is to install Adobe Lightroom 5 full version for free on your computer. To do this, you need to follow these steps:

      -
        -
      1. Locate the downloaded file on your computer and double-click on it to launch the installer.
      2. -
      3. Follow the on-screen instructions to complete the installation process.
      4. -
      5. When prompted, enter your Adobe ID and password to sign in.
      6. -
      7. Select "I want to try Adobe Photoshop Lightroom 5" and click on "Continue".
      8. -
      9. The installation will finish and you will be able to launch Adobe Lightroom 5 from your desktop or start menu.
      10. -
      -

      Step 3: Activate Adobe Lightroom 5 Full Version for Free

      -

      The final step is to activate Adobe Lightroom 5 full version for free with a serial number. To do this, you need to follow these steps:

      -
        -
      1. Go to this page and scroll down to the section "Find your serial number in a product box".
      2. -
      3. Click on the link "Find your serial number quickly" and enter your Adobe ID and password.
      4. -
      5. You will see a list of products that you have registered with your Adobe ID. Find Adobe Photoshop Lightroom 5 and copy the serial number next to it.
      6. -
      7. Launch Adobe Lightroom 5 and go to "Help" > "Activate".
      8. -
      9. Paste the serial number that you copied and click on "Activate".
      10. -
      11. You will see a confirmation message that your product has been activated successfully.
      12. -
      -

      Tips and Tricks for Using Adobe Lightroom 5 Full Version for Free

      -

      Now that you have downloaded, installed, and activated Adobe Lightroom 5 full version for free, you can start using it to edit your photos. Here are some tips and tricks to help you get started:

      -
        -
      • To import photos into Lightroom, go to "File" > "Import Photos and Video" and choose the source of your images (such as your camera, computer, or external drive).
      • -
      • To organize your photos into collections, go to "Library" > "New Collection" and give it a name. Then drag and drop your photos into the collection.
      • -
      • To edit your photos, go to "Develop" > "Basic" and adjust the sliders for exposure, contrast, white balance,

        -

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Processim Telecharger Gratuit __HOT__.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Processim Telecharger Gratuit __HOT__.md deleted file mode 100644 index 3b0d6213e526b2b48cba8744e8d9477d8f2f913b..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Processim Telecharger Gratuit __HOT__.md +++ /dev/null @@ -1,52 +0,0 @@ -
        -Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Processim Telecharger Gratuit": - -

Processim Telecharger Gratuit: How to Simulate Industrial Processes on Your Computer

        - -

Are you interested in simulating industrial processes but don't know where to start? Are you looking for software that is easy to use, powerful and free? Then you should try Processim, the industrial process simulation software developed by the University of Liège.

        - -

Processim lets you model, simulate and analyze complex industrial processes such as electricity generation, chemical manufacturing, waste management and water purification. With Processim, you can build realistic scenarios, test hypotheses, optimize parameters, compare alternatives and assess environmental impacts.

        -

        Processim Telecharger Gratuit


        Download File ⚙⚙⚙ https://urlcod.com/2uIbvW



        - -

Processim is free and open-source software, which means you can download it freely, modify it as you wish and share it with other users. Processim runs on Windows, Linux and Mac OS X. It is also easy to pick up, thanks to its intuitive graphical interface and comprehensive documentation.

        - -

To download Processim for free, simply visit the software's official website: https://www.processim.uliege.be/. There you will find the download link, along with tutorials, examples and a community support forum. You can also access the software's source code and contribute to its improvement.

        - -

Processim is a powerful and engaging tool for learning and teaching industrial process simulation. Whether you are a student, teacher, researcher or professional, Processim will help you better understand and master the industrial processes that shape our world. Don't wait any longer and download Processim for free today!

        Here is a possible continuation of the article: - -

In this article, we present a few examples of industrial processes you can simulate with Processim. You will see how Processim lets you visualize material and energy flows, compute mass and energy balances, plot phase and reaction diagrams, and run sensitivity and optimization analyses.

        -

        - -

Example 1: Generating Electricity from Biomass

        - -

Biomass is a renewable energy source derived from organic matter of plant or animal origin. It can be used to generate electricity through combustion, gasification or anaerobic digestion. In this example, we simulate a process that produces electricity from biomass via gasification.

        - -

Gasification converts biomass into a combustible gas called syngas, composed mainly of CO, H2, CH4 and CO2. The syngas can then feed a gas turbine or an internal combustion engine that drives an electrical generator. The process also produces ash and residual gases that must be treated before release.

        - -

With Processim, you can model the gasification process using functional blocks that represent the different process units: gasification reactor, syngas scrubber, gas turbine, condenser, and so on. You can specify the biomass characteristics (composition, moisture content, particle size), the operating conditions (temperature, pressure, air-to-biomass ratio) and the equipment performance (efficiency, power).

        - -

Processim then lets you simulate the operation of the process and obtain the following results:

        - -
          -
• The flow rate and composition of the syngas produced
• -
• The electrical power generated
• -
• The overall efficiency of the process
• -
• The CO2 and NOx emissions
• -
• The cost of the process
        • -
        - -

You can also use Processim to analyze how the process parameters influence the results. For example, you can vary the gasification temperature or the air-to-biomass ratio and observe the effect on efficiency or emissions. You can also optimize the process by searching for the parameter values that maximize the electrical power or minimize the cost.
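The order of magnitude behind these results can be checked outside Processim with a short back-of-envelope energy balance. The Python sketch below is purely illustrative and is not Processim code; the biomass heating value, cold-gas efficiency and turbine efficiency are assumed round numbers, not values taken from the article.

```python
# Rough energy balance: biomass -> gasifier -> gas turbine -> electricity.
# All figures below are illustrative assumptions, not Processim outputs.

def electrical_power_kw(biomass_kg_h, lhv_mj_kg=15.0,
                        cold_gas_eff=0.75, turbine_eff=0.35):
    """Return net electrical power in kW for a given biomass feed rate."""
    fuel_power_kw = biomass_kg_h * lhv_mj_kg * 1000.0 / 3600.0  # MJ/h -> kW
    syngas_power_kw = fuel_power_kw * cold_gas_eff              # gasifier losses
    return syngas_power_kw * turbine_eff                        # turbine/generator losses

# Simple sensitivity sweep on the cold-gas efficiency, similar in spirit to
# varying the gasification temperature or air/biomass ratio in Processim.
for eff in (0.65, 0.70, 0.75, 0.80):
    p = electrical_power_kw(1000.0, cold_gas_eff=eff)
    print(f"cold-gas efficiency {eff:.2f} -> {p:,.0f} kW electrical")
```

Sweeping the efficiency in the loop plays the same role as the sensitivity analysis described above, just without the detailed unit models that Processim provides.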

        - -

Example 2: Producing Ammonia with the Haber-Bosch Process

        - -

Ammonia is a chemical compound essential to the manufacture of fertilizers, explosives and pharmaceuticals. It is produced industrially by the Haber-Bosch process, which synthesizes ammonia from hydrogen and nitrogen at high pressure and temperature in the presence of a catalyst.

        - -

With Processim, you can model the Haber-Bosch process using functional blocks that represent the different process units: primary reformer, secondary reformer, hydrogen separator, compressor, synthesis reactor, condenser, and so on. You can specify the characteristics of the natural gas used as the hydrogen source (composition, heating value), the operating conditions (pressure, temperature) and the equipment performance (efficiency, capacity).
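Before building a full flowsheet, it can help to check the overall stoichiometry by hand. The short Python sketch below is only an illustrative balance for N2 + 3 H2 -> 2 NH3 with an assumed feed and single-pass conversion; it is not Processim code, and the numbers are not taken from the article.

```python
# Illustrative stoichiometric balance for N2 + 3 H2 -> 2 NH3.
# Feed rates and single-pass conversion are assumed example values.

def ammonia_out_kmol_h(n2_kmol_h, h2_kmol_h, single_pass_conversion=0.15):
    """Ammonia produced per pass, limited by the stoichiometrically scarce reactant."""
    extent = min(n2_kmol_h, h2_kmol_h / 3.0) * single_pass_conversion
    return 2.0 * extent  # 2 mol NH3 per mol N2 reacted

nh3 = ammonia_out_kmol_h(n2_kmol_h=100.0, h2_kmol_h=300.0)
print(f"NH3 produced per pass: {nh3:.1f} kmol/h "
      f"({nh3 * 17.03:.0f} kg/h)")  # 17.03 kg/kmol molar mass of NH3
```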

        - -

Processim then lets you simulate the operation of the process and obtain the following results:

        - -
          -
• The flow rate and composition of the reaction mixture
        • -
        • 7196e7f11a
          -
          -
          \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Singh Saab The Great Full Movie Download In Hindi 720p Kickass.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Singh Saab The Great Full Movie Download In Hindi 720p Kickass.md deleted file mode 100644 index d890aef13b8118e78240cbdf6b9dadf8513da323..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Singh Saab The Great Full Movie Download In Hindi 720p Kickass.md +++ /dev/null @@ -1,29 +0,0 @@ -
          -

          How to Download Singh Saab The Great Full Movie in Hindi 720p Kickass

          -

Singh Saab The Great is a 2013 Hindi movie starring Sunny Deol, Amrita Rao, Urvashi Rautela and Prakash Raj. It is directed by Anil Sharma and tells the story of an honest collector who fights against corruption and injustice. The movie was praised for its action, drama, humor and its message of change, not revenge.

          -

          If you are looking for a way to download Singh Saab The Great full movie in Hindi 720p kickass, you have come to the right place. In this article, we will show you how to download the movie from various online sources using a torrent client. However, we do not endorse or promote piracy and urge you to watch the movie legally on OTT platforms or DVD.

          -

          Singh Saab The Great Full Movie Download In Hindi 720p Kickass


          Download →→→ https://urlcod.com/2uIbR0



          -

          Steps to Download Singh Saab The Great Full Movie in Hindi 720p Kickass

          -
            -
          1. Download and install a torrent client such as uTorrent, BitTorrent or Vuze on your device.
          2. -
          3. Go to any of the following websites that offer torrent links for Singh Saab The Great full movie in Hindi 720p kickass: - -
          4. -
5. Search for Singh Saab The Great full movie in Hindi 720p kickass and choose a torrent link with a healthy number of seeders and leechers.
          6. -
          7. Click on the torrent link and open it with your torrent client.
          8. -
          9. Choose a destination folder for the movie file and start the download.
          10. -
          11. Wait for the download to finish and enjoy watching Singh Saab The Great full movie in Hindi 720p kickass.
          12. -
          -

          Note: Downloading movies from torrent sites may be illegal in your country and may expose you to malware and viruses. Use a VPN service and antivirus software to protect your device and privacy.

          - -

          Singh Saab The Great Movie Review

          -

Singh Saab The Great is not just another masala movie with mindless action and dialogue. It is a movie with a strong message of change, not revenge, and it shows how one man can make a difference in society. The movie also has some emotional moments, some comic relief and some patriotic fervor.

          -

          The movie's strength lies in Sunny Deol's performance as Singh Saab, the honest collector who becomes a crusader against corruption. He delivers his powerful dialogues with conviction and fights like a lion. He also shows his softer side as a loving husband and brother. He is ably supported by Prakash Raj, who plays the villainous Bhudev Singh with his trademark wickedness. He is the perfect foil for Sunny Deol's heroism.

          -

          -

The movie's weakness lies in its melodramatic and clichéd plot, which has been seen many times before. The romance between Sunny Deol and Urvashi Rautela is unconvincing and awkward, given their huge age difference. The songs are forgettable and hamper the pace of the movie. The comedy by Johnny Lever and others is mostly out of place and forced. The direction by Anil Sharma is average and could have been more crisp and engaging.

          -

          Overall, Singh Saab The Great is a movie that will appeal to Sunny Deol fans and those who like action movies with a social message. It is not a flawless film, but it has its moments of entertainment and inspiration. It is a rare masala movie that has some novelty in it.

          7b8c122e87
          -
          -
          \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/__init__.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/__init__.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/__init__.py deleted file mode 100644 index b50a3da91dd0d2a69502af9d5d62f2f4280d973f..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .data.datasets import builtin # just to register data -from .converters import builtin as builtin_converters # register converters -from .config import ( - add_densepose_config, - add_densepose_head_config, - add_hrnet_config, - add_dataset_category_config, - add_bootstrap_config, - load_bootstrap_config, -) -from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData -from .evaluation import DensePoseCOCOEvaluator -from .modeling.roi_heads import DensePoseROIHeads -from .modeling.test_time_augmentation import ( - DensePoseGeneralizedRCNNWithTTA, - DensePoseDatasetMapperTTA, -) -from .utils.transform import load_from_cfg -from .modeling.hrfpn import build_hrfpn_backbone diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/dev/README.md b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/dev/README.md deleted file mode 100644 index e3a94b67ed4b4d0c2934f074802cd00f3660f9a9..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/dev/README.md +++ /dev/null @@ -1,7 +0,0 @@ - -## Some scripts for developers to use, include: - -- `run_instant_tests.sh`: run training for a few iterations. -- `run_inference_tests.sh`: run inference on a small dataset. -- `../../dev/linter.sh`: lint the codebase before commit -- `../../dev/parse_results.sh`: parse results from log file. diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tests/modeling/test_model_e2e.py b/spaces/nikitaPDL2023/assignment4/detectron2/tests/modeling/test_model_e2e.py deleted file mode 100644 index 8c07e6856d2f4304e0b0cb32747fb667e3bbcb4c..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tests/modeling/test_model_e2e.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- - -import itertools -import unittest -from contextlib import contextmanager -from copy import deepcopy -import torch - -from detectron2.structures import BitMasks, Boxes, ImageList, Instances -from detectron2.utils.events import EventStorage -from detectron2.utils.testing import get_model_no_weights - - -@contextmanager -def typecheck_hook(model, *, in_dtype=None, out_dtype=None): - """ - Check that the model must be called with the given input/output dtype - """ - if not isinstance(in_dtype, set): - in_dtype = {in_dtype} - if not isinstance(out_dtype, set): - out_dtype = {out_dtype} - - def flatten(x): - if isinstance(x, torch.Tensor): - return [x] - if isinstance(x, (list, tuple)): - return list(itertools.chain(*[flatten(t) for t in x])) - if isinstance(x, dict): - return flatten(list(x.values())) - return [] - - def hook(module, input, output): - if in_dtype is not None: - dtypes = {x.dtype for x in flatten(input)} - assert ( - dtypes == in_dtype - ), f"Expected input dtype of {type(module)} is {in_dtype}. Got {dtypes} instead!" - - if out_dtype is not None: - dtypes = {x.dtype for x in flatten(output)} - assert ( - dtypes == out_dtype - ), f"Expected output dtype of {type(module)} is {out_dtype}. Got {dtypes} instead!" - - with model.register_forward_hook(hook): - yield - - -def create_model_input(img, inst=None): - if inst is not None: - return {"image": img, "instances": inst} - else: - return {"image": img} - - -def get_empty_instance(h, w): - inst = Instances((h, w)) - inst.gt_boxes = Boxes(torch.rand(0, 4)) - inst.gt_classes = torch.tensor([]).to(dtype=torch.int64) - inst.gt_masks = BitMasks(torch.rand(0, h, w)) - return inst - - -def get_regular_bitmask_instances(h, w): - inst = Instances((h, w)) - inst.gt_boxes = Boxes(torch.rand(3, 4)) - inst.gt_boxes.tensor[:, 2:] += inst.gt_boxes.tensor[:, :2] - inst.gt_classes = torch.tensor([3, 4, 5]).to(dtype=torch.int64) - inst.gt_masks = BitMasks((torch.rand(3, h, w) > 0.5)) - return inst - - -class InstanceModelE2ETest: - def setUp(self): - torch.manual_seed(43) - self.model = get_model_no_weights(self.CONFIG_PATH) - - def _test_eval(self, input_sizes): - inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] - self.model.eval() - self.model(inputs) - - def _test_train(self, input_sizes, instances): - assert len(input_sizes) == len(instances) - inputs = [ - create_model_input(torch.rand(3, s[0], s[1]), inst) - for s, inst in zip(input_sizes, instances) - ] - self.model.train() - with EventStorage(): - losses = self.model(inputs) - sum(losses.values()).backward() - del losses - - def _inf_tensor(self, *shape): - return 1.0 / torch.zeros(*shape, device=self.model.device) - - def _nan_tensor(self, *shape): - return torch.zeros(*shape, device=self.model.device).fill_(float("nan")) - - def test_empty_data(self): - instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)] - self._test_eval([(200, 250), (200, 249)]) - self._test_train([(200, 250), (200, 249)], instances) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") - def test_eval_tocpu(self): - model = deepcopy(self.model).cpu() - model.eval() - input_sizes = [(200, 250), (200, 249)] - inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] - model(inputs) - - -class MaskRCNNE2ETest(InstanceModelE2ETest, unittest.TestCase): - CONFIG_PATH = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml" - - def test_half_empty_data(self): - instances = [get_empty_instance(200, 250), 
get_regular_bitmask_instances(200, 249)] - self._test_train([(200, 250), (200, 249)], instances) - - # This test is flaky because in some environment the output features are zero due to relu - # def test_rpn_inf_nan_data(self): - # self.model.eval() - # for tensor in [self._inf_tensor, self._nan_tensor]: - # images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) - # features = { - # "p2": tensor(1, 256, 256, 256), - # "p3": tensor(1, 256, 128, 128), - # "p4": tensor(1, 256, 64, 64), - # "p5": tensor(1, 256, 32, 32), - # "p6": tensor(1, 256, 16, 16), - # } - # props, _ = self.model.proposal_generator(images, features) - # self.assertEqual(len(props[0]), 0) - - def test_roiheads_inf_nan_data(self): - self.model.eval() - for tensor in [self._inf_tensor, self._nan_tensor]: - images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) - features = { - "p2": tensor(1, 256, 256, 256), - "p3": tensor(1, 256, 128, 128), - "p4": tensor(1, 256, 64, 64), - "p5": tensor(1, 256, 32, 32), - "p6": tensor(1, 256, 16, 16), - } - props = [Instances((510, 510))] - props[0].proposal_boxes = Boxes([[10, 10, 20, 20]]).to(device=self.model.device) - props[0].objectness_logits = torch.tensor([1.0]).reshape(1, 1) - det, _ = self.model.roi_heads(images, features, props) - self.assertEqual(len(det[0]), 0) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_autocast(self): - from torch.cuda.amp import autocast - - inputs = [{"image": torch.rand(3, 100, 100)}] - self.model.eval() - with autocast(), typecheck_hook( - self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16 - ), typecheck_hook( - self.model.roi_heads.box_predictor, in_dtype=torch.float16, out_dtype=torch.float16 - ): - out = self.model.inference(inputs, do_postprocess=False)[0] - self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32) - self.assertEqual(out.pred_masks.dtype, torch.float16) - self.assertEqual(out.scores.dtype, torch.float32) # scores comes from softmax - - -class RetinaNetE2ETest(InstanceModelE2ETest, unittest.TestCase): - CONFIG_PATH = "COCO-Detection/retinanet_R_50_FPN_1x.yaml" - - def test_inf_nan_data(self): - self.model.eval() - self.model.score_threshold = -999999999 - for tensor in [self._inf_tensor, self._nan_tensor]: - images = ImageList(tensor(1, 3, 512, 512), [(510, 510)]) - features = [ - tensor(1, 256, 128, 128), - tensor(1, 256, 64, 64), - tensor(1, 256, 32, 32), - tensor(1, 256, 16, 16), - tensor(1, 256, 8, 8), - ] - pred_logits, pred_anchor_deltas = self.model.head(features) - pred_logits = [tensor(*x.shape) for x in pred_logits] - pred_anchor_deltas = [tensor(*x.shape) for x in pred_anchor_deltas] - det = self.model.forward_inference(images, features, [pred_logits, pred_anchor_deltas]) - # all predictions (if any) are infinite or nan - if len(det[0]): - self.assertTrue(torch.isfinite(det[0].pred_boxes.tensor).sum() == 0) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_autocast(self): - from torch.cuda.amp import autocast - - inputs = [{"image": torch.rand(3, 100, 100)}] - self.model.eval() - with autocast(), typecheck_hook( - self.model.backbone, in_dtype=torch.float32, out_dtype=torch.float16 - ), typecheck_hook(self.model.head, in_dtype=torch.float16, out_dtype=torch.float16): - out = self.model(inputs)[0]["instances"] - self.assertEqual(out.pred_boxes.tensor.dtype, torch.float32) - self.assertEqual(out.scores.dtype, torch.float16) - - -class FCOSE2ETest(InstanceModelE2ETest, unittest.TestCase): - CONFIG_PATH = 
"COCO-Detection/fcos_R_50_FPN_1x.py" - - -class SemSegE2ETest(unittest.TestCase): - CONFIG_PATH = "Misc/semantic_R_50_FPN_1x.yaml" - - def setUp(self): - torch.manual_seed(43) - self.model = get_model_no_weights(self.CONFIG_PATH) - - def _test_eval(self, input_sizes): - inputs = [create_model_input(torch.rand(3, s[0], s[1])) for s in input_sizes] - self.model.eval() - self.model(inputs) - - def test_forward(self): - self._test_eval([(200, 250), (200, 249)]) diff --git a/spaces/nomic-ai/amazon_reviews_multi/README.md b/spaces/nomic-ai/amazon_reviews_multi/README.md deleted file mode 100644 index 38aafb393fd750afdb4e0c80dae659ea890bbb38..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/amazon_reviews_multi/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: amazon_reviews_multi -emoji: 🗺️ -colorFrom: purple -colorTo: red -sdk: static -pinned: false ---- \ No newline at end of file diff --git a/spaces/nomic-ai/junelee_wizard_vicuna_70k/style.css b/spaces/nomic-ai/junelee_wizard_vicuna_70k/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/junelee_wizard_vicuna_70k/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/nooji/ImpCatcher/make.jl b/spaces/nooji/ImpCatcher/make.jl deleted file mode 100644 index b4fb929e6db0f0c3b7538ae24098aad548a2f3f7..0000000000000000000000000000000000000000 --- a/spaces/nooji/ImpCatcher/make.jl +++ /dev/null @@ -1,10 +0,0 @@ -using PackageCompiler - -include("packages.jl") - -PackageCompiler.create_sysimage( - PACKAGES, - sysimage_path = "sysimg.so", - precompile_execution_file = "precompile.jl", - cpu_target = PackageCompiler.default_app_cpu_target() -) diff --git a/spaces/nota-ai/compressed-wav2lip/gradio_theme.py b/spaces/nota-ai/compressed-wav2lip/gradio_theme.py deleted file mode 100644 index e29814905999bf1221f571de8133c1123db2c005..0000000000000000000000000000000000000000 --- a/spaces/nota-ai/compressed-wav2lip/gradio_theme.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.themes.builder(server_port=50002, share=True) diff --git a/spaces/nsarrazin/agents-js-oasst/README.md b/spaces/nsarrazin/agents-js-oasst/README.md deleted file mode 100644 index 1de6134c0c9ee0ad6f5e8d4e7b5ed9c747e17be2..0000000000000000000000000000000000000000 --- a/spaces/nsarrazin/agents-js-oasst/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Agents.js PoC - Open Assistant 30B -emoji: 🤖 -colorFrom: yellow -colorTo: orange -sdk: docker -app_port: 3000 ---- diff --git a/spaces/nschenone/lyric-buddy/src/profanity_filter.py b/spaces/nschenone/lyric-buddy/src/profanity_filter.py deleted file mode 100644 index 296834690f57362c4dec62b239c98e91dff262aa..0000000000000000000000000000000000000000 --- a/spaces/nschenone/lyric-buddy/src/profanity_filter.py +++ /dev/null @@ -1,48 +0,0 @@ -import string - -import requests - -BANNED_LIST_URL = "https://raw.githubusercontent.com/snguyenthanh/better_profanity/master/better_profanity/profanity_wordlist.txt" - - -def censor(text="", censor_char="*", keep_first_letter=True): - - banned_list = 
requests.get(BANNED_LIST_URL).text.split("\n") - - # Split sentences by newline - sentence_list = text.split("\n") - for s, sentence in enumerate(sentence_list): - - # Split words in sentence by space - word_list = sentence.split() - for w, word in enumerate(word_list): - - # Process word to match banned list - processed_word = word.translate( - str.maketrans("", "", string.punctuation) - ).lower() - - # Replace if word is profane - if processed_word in banned_list: - censored_word = censor_char * len(word) - - # Keep first letter of word for context if desired - if keep_first_letter: - censored_word = word[0] + censored_word[1:] - - # Replcate punctuation - censored_word_punc = "" - for c, char in enumerate(word): - if char in string.punctuation: - censored_word_punc += word[c] - else: - censored_word_punc += censored_word[c] - - # Update word list - word_list[w] = censored_word_punc - - # Update sentence list - sentence_list[s] = word_list - - # Join everything back together - return "\n".join([" ".join(word_list) for word_list in sentence_list]) diff --git a/spaces/ohmyteeth/seo-tools/app.py b/spaces/ohmyteeth/seo-tools/app.py deleted file mode 100644 index c87b40b7b973fb00aa334cb31ebf98ce9b9f49c2..0000000000000000000000000000000000000000 --- a/spaces/ohmyteeth/seo-tools/app.py +++ /dev/null @@ -1,103 +0,0 @@ -import requests -from bs4 import BeautifulSoup -from googleapiclient.discovery import build -import openai -import gradio as gr -import os - -# Google APIキーとカスタム検索エンジンIDの設定 -GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY") -CUSTOM_SEARCH_ENGINE_ID = os.getenv("CUSTOM_SEARCH_ENGINE_ID") - -# GPT APIキーの設定 -openai.api_key = os.getenv("OPENAI_API_KEY") - -# キーワードから検索上位のURLを取得する関数 -def fetch_top_search_urls(query): - service = build("customsearch", "v1", developerKey=GOOGLE_API_KEY) - res = service.cse().list(q=query, cx=CUSTOM_SEARCH_ENGINE_ID, num=10).execute() - urls = [item["link"] for item in res["items"]] - return "\n".join(urls) - -# URLから見出しを取得する関数 -def extract_headings_from_url(urls): - urls = urls.split('\n') - printed_headings = set() # setを使用して重複を排除 - - for url in urls: - try: - response = requests.get(url, headers={"User-Agent": "Mozilla/5.0", "Accept-Encoding": "utf-8"}) - response.encoding = response.apparent_encoding - soup = BeautifulSoup(response.text, "html.parser") - headings = soup.find_all(["h1", "h2", "h3"]) - for heading in headings: - if heading.string is not None: - heading_text = heading.string.strip() - printed_headings.add(f"{heading.name}:{heading_text}") # setに追加して重複を排除 - except requests.exceptions.RequestException as e: - print(f"Error: {e}") - - return "\n".join(printed_headings) - -# 見出しから記事構成案を取得する関数 -def generate_article_structure(model, query, headings): - headings = headings.splitlines() # splitlinesを使用 - - # プロンプト: CINC社主催セミナー「ChatGPTをオウンドメディア運用に活用する方法を徹底解説」にて紹介されていたプロンプトを参考にさせていただきました - prompt = ( - "#命令書\n" - "あなたはWebメディアの記事を執筆するライター兼編集者です。\n" - "以下の検索意図の解析条件に沿って、メインテーマに関する記事構成案を書いてください。\n\n" - "#メインテーマ\n" - f"{query}\n\n" - "#検索意図の解析条件\n" - "- メインテーマで検索上位のWebページの見出し文を解析する\n" - "- 解析した検索意図に答える文章の構成を考える\n" - "- 下記の出力条件に沿って文章構成を見出し文に変換し、出力する\n\n" - "#出力条件\n" - "- h1,h2,h3,h4ごとに見出し文を生成\n" - "- 章ごとに入れ子で表示する\n" - "- 出力時はマークダウン形式で出力する\n\n" - "#検索上位のWebページの見出し文" - ) - - # 見出し文をプロンプトに追加 - for heading in headings: - prompt += f"\n{heading}" - - # GPTで記事構成案を生成 - response = openai.ChatCompletion.create( - model=model, - messages=[ - {"role": "user", "content": prompt} - ] - ) - - # 生成されたテキストを整形して返す - generated_text = 
response["choices"][0]["message"]["content"].strip() - return "
          ".join(generated_text.splitlines()) # joinを使用 - -# Gradioインターフェースの作成 -with gr.Blocks() as demo: - gr.Markdown("SEO Tools") - with gr.Tab("検索上位記事→構成案"): - keyword_input = gr.inputs.Textbox(lines=1, label="キーワードを入力", placeholder="例)矯正歯科 選び方") - fetch_top_search_urls_button = gr.Button("Step1.上位10サイトのURLを取得") - urls_output = gr.outputs.Textbox(label="上位10サイトのURL") - - urls_input = gr.inputs.Textbox(lines=10, label="見出しを取得したいURLを入力") - extract_headings_from_url_button = gr.Button("Step2.見出しを取得") - headings_output = gr.outputs.Textbox(label="上位サイトの見出し") - - headings_input = gr.inputs.Textbox(lines=30, label="記事構成生成に使用する見出しを入力") - model_input = gr.inputs.Dropdown(["gpt-3.5-turbo", "gpt-4"], label="GPTモデルを選択", default="gpt-3.5-turbo") - generate_article_structure_button = gr.Button("Step3.記事構成案を作成") - article_structure_output = gr.outputs.HTML(label="記事構成案") - - # 各ステップの関数をボタンクリックに紐付け - fetch_top_search_urls_button.click(fetch_top_search_urls, inputs=keyword_input, outputs=urls_output) - extract_headings_from_url_button.click(extract_headings_from_url, inputs=urls_input, outputs=headings_output) - generate_article_structure_button.click(generate_article_structure, inputs=[model_input, keyword_input, headings_input], outputs=article_structure_output) - -# Gradioを起動 -demo.launch(auth=(os.getenv("AUTH_USERNAME"), os.getenv("AUTH_PASSWORD"))) \ No newline at end of file diff --git a/spaces/osanseviero/TheMLGame/style.css b/spaces/osanseviero/TheMLGame/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/TheMLGame/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/parkermini/general/README.md b/spaces/parkermini/general/README.md deleted file mode 100644 index 588f7d6c14e35aef5bb12cbed3d512317bc8b6d8..0000000000000000000000000000000000000000 --- a/spaces/parkermini/general/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: General -emoji: 🌍 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/peterkros/videomatting/app.py b/spaces/peterkros/videomatting/app.py deleted file mode 100644 index 539efab0f81a6b1e99fe9a76edbddee7c07ce95d..0000000000000000000000000000000000000000 --- a/spaces/peterkros/videomatting/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -os.system('pip install gradio --upgrade') -os.system('pip install torch') -os.system('pip freeze') -import torch -import gradio as gr -model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3") # or "resnet50" - -convert_video = torch.hub.load("PeterL1n/RobustVideoMatting", "converter") - -def inference(video): - convert_video( - model, # The loaded model, can be on any device (cpu or cuda). - input_source=video, # A video file or an image sequence directory. - input_resize=(400, 400), # [Optional] Resize the input (also the output). - downsample_ratio=0.25, # [Optional] If None, make downsampled max size be 512px. 
- output_type='video', # Choose "video" or "png_sequence" - output_composition='com.mp4', # File path if video; directory path if png sequence. - output_alpha= None, # [Optional] Output the raw alpha prediction. - output_foreground= None, # [Optional] Output the raw foreground prediction. - output_video_mbps=4, # Output video mbps. Not needed for png sequence. - seq_chunk=7, # Process n frames at once for better parallelism. - num_workers=1, # Only for image sequence input. Reader threads. - progress=True # Print conversion progress. - ) - return 'com.mp4' - -title = "Robust Video Matting" -description = "Gradio demo for Robust Video Matting. To use it, simply upload your video, or click one of the examples to load them. Read more at the links below." - -article = "

          Robust High-Resolution Video Matting with Temporal Guidance | Github Repo

          " - -examples = [['pexels-darina-belonogova-7539228.mp4']] -gr.Interface( - inference, - gr.inputs.Video(label="Input"), - gr.outputs.Video(label="Output"), - title=title, - description=description, - article=article, - enable_queue=True, - examples=examples - ).launch(debug=True) \ No newline at end of file diff --git a/spaces/pikto/Elite-freegpt-webui/client/css/theme-toggler.css b/spaces/pikto/Elite-freegpt-webui/client/css/theme-toggler.css deleted file mode 100644 index b673b5920a24693e7ea15b873e46731b388ec527..0000000000000000000000000000000000000000 --- a/spaces/pikto/Elite-freegpt-webui/client/css/theme-toggler.css +++ /dev/null @@ -1,33 +0,0 @@ -.theme-toggler-container { - margin: 24px 0px 8px 0px; - justify-content: center; -} - -.theme-toggler-container.checkbox input + label, -.theme-toggler-container.checkbox input:checked + label:after { - background: var(--colour-1); -} - -.theme-toggler-container.checkbox input + label:after, -.theme-toggler-container.checkbox input:checked + label { - background: var(--colour-3); -} - -.theme-toggler-container.checkbox span { - font-size: 0.75rem; -} - -.theme-toggler-container.checkbox label { - width: 24px; - height: 16px; -} - -.theme-toggler-container.checkbox label:after { - left: 2px; - width: 10px; - height: 10px; -} - -.theme-toggler-container.checkbox input:checked + label:after { - left: calc(100% - 2px - 10px); -} \ No newline at end of file diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/euckrprober.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/euckrprober.py deleted file mode 100644 index 1fc5de0462cd9a09472cece4087cafe699da4fa7..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/chardet/euckrprober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import EUCKRDistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import EUCKR_SM_MODEL - - -class EUCKRProber(MultiByteCharSetProber): - def __init__(self) -> None: - super().__init__() - self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL) - self.distribution_analyzer = EUCKRDistributionAnalysis() - self.reset() - - @property - def charset_name(self) -> str: - return "EUC-KR" - - @property - def language(self) -> str: - return "Korean" diff --git a/spaces/pknez/face-swap-docker/plugins/plugin_faceswap.py b/spaces/pknez/face-swap-docker/plugins/plugin_faceswap.py deleted file mode 100644 index 5b63e6fac6ecb1c4868fc2e076bbda224f0a434c..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/plugins/plugin_faceswap.py +++ /dev/null @@ -1,86 +0,0 @@ -from chain_img_processor import ChainImgProcessor, ChainImgPlugin -from roop.face_helper import get_one_face, get_many_faces, swap_face -import os -from roop.utilities import compute_cosine_distance - -modname = os.path.basename(__file__)[:-3] # calculating modname - -# start function -def start(core:ChainImgProcessor): - manifest = { # plugin settings - "name": "Faceswap", # name - "version": "1.0", # version - - "default_options": { - "swap_mode": "selected", - "max_distance": 0.65, # max distance to detect face similarity - }, - "img_processor": { - "faceswap": Faceswap - } - } - return manifest - -def start_with_options(core:ChainImgProcessor, manifest:dict): - pass - - -class Faceswap(ChainImgPlugin): - - def init_plugin(self): - pass - - - def process(self, frame, params:dict): - if not "input_face_datas" in params or len(params["input_face_datas"]) < 1: - params["face_detected"] = False - return frame - - temp_frame = frame - params["face_detected"] = True - params["processed_faces"] = [] - - if params["swap_mode"] == "first": - face = get_one_face(frame) - if face is None: - params["face_detected"] = False - return frame - params["processed_faces"].append(face) - frame = swap_face(params["input_face_datas"][0], face, frame) - return frame - - else: - faces = get_many_faces(frame) - if(len(faces) < 1): - params["face_detected"] = False - return frame - - dist_threshold = params["face_distance_threshold"] - - if params["swap_mode"] == "all": - for sf in params["input_face_datas"]: - for face in faces: - params["processed_faces"].append(face) - temp_frame = swap_face(sf, face, temp_frame) - return temp_frame - - elif params["swap_mode"] == "selected": - for i,tf in enumerate(params["target_face_datas"]): - for face in faces: - if compute_cosine_distance(tf.embedding, face.embedding) <= dist_threshold: - temp_frame = swap_face(params["input_face_datas"][i], face, temp_frame) - params["processed_faces"].append(face) - break - - elif params["swap_mode"] == "all_female" or params["swap_mode"] == "all_male": - gender = 'F' if params["swap_mode"] == "all_female" else 'M' - face_found = False - for face in faces: - if face.sex == gender: - face_found = True - if face_found: - params["processed_faces"].append(face) - temp_frame = swap_face(params["input_face_datas"][0], face, temp_frame) - face_found = False - - return temp_frame 
diff --git a/spaces/posicube/mean_reciprocal_rank/README.md b/spaces/posicube/mean_reciprocal_rank/README.md deleted file mode 100644 index 3d219ccd4e7ff8fe768a3aaab804b38791f0bc54..0000000000000000000000000000000000000000 --- a/spaces/posicube/mean_reciprocal_rank/README.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Mean Reciprocal Rank -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.0.2 -app_file: app.py -pinned: false -tags: -- evaluate -- metric -description: >- - Mean Reciprocal Rank is a statistic measure for evaluating any process that produces a list of possible responses to a sample of queries, ordered by probability of correctness. ---- - -# Metric Card for Mean Reciprocal Rank - -a statistic measure for evaluating any process that produces a list of possible responses to a sample of queries, ordered by probability of correctness. - -## Metric Description -The reciprocal rank of a query response is the multiplicative inverse of the rank of the first correct answer: 1 for first place, 1⁄2 for second place, 1⁄3 for third place and so on. The mean reciprocal rank is the average of the reciprocal ranks of results for a sample of queries Q - -{\text{MRR}}={\frac {1}{|Q|}}\sum _{{i=1}}^{{|Q|}}{\frac {1}{{\text{rank}}_{i}}}.\! - -## How to Use -Provide a list of gold ranks, where each item is rank of gold item of which the first rank starts with zero. - - - -### Inputs -*List all input arguments in the format below* -- **input_field** *(List[int]): a list of integer where each integer is the rank of gold item - -### Output Values - -*Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}* - -*State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."* - -#### Values from Popular Papers -*Give examples, preferrably with links to leaderboards or publications, to papers that have reported this metric, along with the values they have reported.* - -### Examples -*Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.* - -## Limitations and Bias -*Note any known limitations or biases that the metric has, with links and references if possible.* - -## Citation -*Cite the source where this metric was introduced.* - -## Further References -*Add any useful further references.* diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/interpolate_layout.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/interpolate_layout.py deleted file mode 100644 index aa3f49c6ed08c120f57ef96cac0a04db0dadf264..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/varLib/interpolate_layout.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -Interpolate OpenType Layout tables (GDEF / GPOS / GSUB). 
-""" -from fontTools.ttLib import TTFont -from fontTools.varLib import models, VarLibError, load_designspace, load_masters -from fontTools.varLib.merger import InstancerMerger -import os.path -import logging -from copy import deepcopy -from pprint import pformat - -log = logging.getLogger("fontTools.varLib.interpolate_layout") - - -def interpolate_layout(designspace, loc, master_finder=lambda s: s, mapped=False): - """ - Interpolate GPOS from a designspace file and location. - - If master_finder is set, it should be a callable that takes master - filename as found in designspace file and map it to master font - binary as to be opened (eg. .ttf or .otf). - - If mapped is False (default), then location is mapped using the - map element of the axes in designspace file. If mapped is True, - it is assumed that location is in designspace's internal space and - no mapping is performed. - """ - if hasattr(designspace, "sources"): # Assume a DesignspaceDocument - pass - else: # Assume a file path - from fontTools.designspaceLib import DesignSpaceDocument - - designspace = DesignSpaceDocument.fromfile(designspace) - - ds = load_designspace(designspace) - log.info("Building interpolated font") - - log.info("Loading master fonts") - master_fonts = load_masters(designspace, master_finder) - font = deepcopy(master_fonts[ds.base_idx]) - - log.info("Location: %s", pformat(loc)) - if not mapped: - loc = {name: ds.axes[name].map_forward(v) for name, v in loc.items()} - log.info("Internal location: %s", pformat(loc)) - loc = models.normalizeLocation(loc, ds.internal_axis_supports) - log.info("Normalized location: %s", pformat(loc)) - - # Assume single-model for now. - model = models.VariationModel(ds.normalized_master_locs) - assert 0 == model.mapping[ds.base_idx] - - merger = InstancerMerger(font, model, loc) - - log.info("Building interpolated tables") - # TODO GSUB/GDEF - merger.mergeTables(font, master_fonts, ["GPOS"]) - return font - - -def main(args=None): - """Interpolate GDEF/GPOS/GSUB tables for a point on a designspace""" - from fontTools import configLogger - import argparse - import sys - - parser = argparse.ArgumentParser( - "fonttools varLib.interpolate_layout", - description=main.__doc__, - ) - parser.add_argument( - "designspace_filename", metavar="DESIGNSPACE", help="Input TTF files" - ) - parser.add_argument( - "locations", - metavar="LOCATION", - type=str, - nargs="+", - help="Axis locations (e.g. 
wdth=120", - ) - parser.add_argument( - "-o", - "--output", - metavar="OUTPUT", - help="Output font file (defaults to -instance.ttf)", - ) - parser.add_argument( - "-l", - "--loglevel", - metavar="LEVEL", - default="INFO", - help="Logging level (defaults to INFO)", - ) - - args = parser.parse_args(args) - - if not args.output: - args.output = os.path.splitext(args.designspace_filename)[0] + "-instance.ttf" - - configLogger(level=args.loglevel) - - finder = lambda s: s.replace("master_ufo", "master_ttf_interpolatable").replace( - ".ufo", ".ttf" - ) - - loc = {} - for arg in args.locations: - tag, val = arg.split("=") - loc[tag] = float(val) - - font = interpolate_layout(args.designspace_filename, loc, finder) - log.info("Saving font %s", args.output) - font.save(args.output) - - -if __name__ == "__main__": - import sys - - if len(sys.argv) > 1: - sys.exit(main()) - import doctest - - sys.exit(doctest.testmod().failed) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/webhdfs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/webhdfs.py deleted file mode 100644 index 2a57170ea15262c3b89afafb384470fb5c632440..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/implementations/webhdfs.py +++ /dev/null @@ -1,445 +0,0 @@ -# https://hadoop.apache.org/docs/r1.0.4/webhdfs.html - -import logging -import os -import secrets -import shutil -import tempfile -import uuid -from contextlib import suppress -from urllib.parse import quote - -import requests - -from ..spec import AbstractBufferedFile, AbstractFileSystem -from ..utils import infer_storage_options, tokenize - -logger = logging.getLogger("webhdfs") - - -class WebHDFS(AbstractFileSystem): - """ - Interface to HDFS over HTTP using the WebHDFS API. Supports also HttpFS gateways. - - Three auth mechanisms are supported: - - insecure: no auth is done, and the user is assumed to be whoever they - say they are (parameter ``user``), or a predefined value such as - "dr.who" if not given - spnego: when kerberos authentication is enabled, auth is negotiated by - requests_kerberos https://github.com/requests/requests-kerberos . - This establishes a session based on existing kinit login and/or - specified principal/password; parameters are passed with ``kerb_kwargs`` - token: uses an existing Hadoop delegation token from another secured - service. Indeed, this client can also generate such tokens when - not insecure. Note that tokens expire, but can be renewed (by a - previously specified user) and may allow for proxying. - - """ - - tempdir = str(tempfile.gettempdir()) - protocol = "webhdfs", "webHDFS" - - def __init__( - self, - host, - port=50070, - kerberos=False, - token=None, - user=None, - proxy_to=None, - kerb_kwargs=None, - data_proxy=None, - use_https=False, - **kwargs, - ): - """ - Parameters - ---------- - host: str - Name-node address - port: int - Port for webHDFS - kerberos: bool - Whether to authenticate with kerberos for this connection - token: str or None - If given, use this token on every call to authenticate. 
A user - and user-proxy may be encoded in the token and should not be also - given - user: str or None - If given, assert the user name to connect with - proxy_to: str or None - If given, the user has the authority to proxy, and this value is - the user in who's name actions are taken - kerb_kwargs: dict - Any extra arguments for HTTPKerberosAuth, see - ``_ - data_proxy: dict, callable or None - If given, map data-node addresses. This can be necessary if the - HDFS cluster is behind a proxy, running on Docker or otherwise has - a mismatch between the host-names given by the name-node and the - address by which to refer to them from the client. If a dict, - maps host names ``host->data_proxy[host]``; if a callable, full - URLs are passed, and function must conform to - ``url->data_proxy(url)``. - use_https: bool - Whether to connect to the Name-node using HTTPS instead of HTTP - kwargs - """ - if self._cached: - return - super().__init__(**kwargs) - self.url = f"{'https' if use_https else 'http'}://{host}:{port}/webhdfs/v1" - self.kerb = kerberos - self.kerb_kwargs = kerb_kwargs or {} - self.pars = {} - self.proxy = data_proxy or {} - if token is not None: - if user is not None or proxy_to is not None: - raise ValueError( - "If passing a delegation token, must not set " - "user or proxy_to, as these are encoded in the" - " token" - ) - self.pars["delegation"] = token - if user is not None: - self.pars["user.name"] = user - if proxy_to is not None: - self.pars["doas"] = proxy_to - if kerberos and user is not None: - raise ValueError( - "If using Kerberos auth, do not specify the " - "user, this is handled by kinit." - ) - self._connect() - - self._fsid = f"webhdfs_{tokenize(host, port)}" - - @property - def fsid(self): - return self._fsid - - def _connect(self): - self.session = requests.Session() - if self.kerb: - from requests_kerberos import HTTPKerberosAuth - - self.session.auth = HTTPKerberosAuth(**self.kerb_kwargs) - - def _call(self, op, method="get", path=None, data=None, redirect=True, **kwargs): - url = self.url + quote(path or "") - args = kwargs.copy() - args.update(self.pars) - args["op"] = op.upper() - logger.debug("sending %s with %s", url, method) - out = self.session.request( - method=method.upper(), - url=url, - params=args, - data=data, - allow_redirects=redirect, - ) - if out.status_code in [400, 401, 403, 404, 500]: - try: - err = out.json() - msg = err["RemoteException"]["message"] - exp = err["RemoteException"]["exception"] - except (ValueError, KeyError): - pass - else: - if exp in ["IllegalArgumentException", "UnsupportedOperationException"]: - raise ValueError(msg) - elif exp in ["SecurityException", "AccessControlException"]: - raise PermissionError(msg) - elif exp in ["FileNotFoundException"]: - raise FileNotFoundError(msg) - else: - raise RuntimeError(msg) - out.raise_for_status() - return out - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - replication=None, - permissions=None, - **kwargs, - ): - """ - - Parameters - ---------- - path: str - File location - mode: str - 'rb', 'wb', etc. 
- block_size: int - Client buffer size for read-ahead or write buffer - autocommit: bool - If False, writes to temporary file that only gets put in final - location upon commit - replication: int - Number of copies of file on the cluster, write mode only - permissions: str or int - posix permissions, write mode only - kwargs - - Returns - ------- - WebHDFile instance - """ - block_size = block_size or self.blocksize - return WebHDFile( - self, - path, - mode=mode, - block_size=block_size, - tempdir=self.tempdir, - autocommit=autocommit, - replication=replication, - permissions=permissions, - ) - - @staticmethod - def _process_info(info): - info["type"] = info["type"].lower() - info["size"] = info["length"] - return info - - @classmethod - def _strip_protocol(cls, path): - return infer_storage_options(path)["path"] - - @staticmethod - def _get_kwargs_from_urls(urlpath): - out = infer_storage_options(urlpath) - out.pop("path", None) - out.pop("protocol", None) - if "username" in out: - out["user"] = out.pop("username") - return out - - def info(self, path): - out = self._call("GETFILESTATUS", path=path) - info = out.json()["FileStatus"] - info["name"] = path - return self._process_info(info) - - def ls(self, path, detail=False): - out = self._call("LISTSTATUS", path=path) - infos = out.json()["FileStatuses"]["FileStatus"] - for info in infos: - self._process_info(info) - info["name"] = path.rstrip("/") + "/" + info["pathSuffix"] - if detail: - return sorted(infos, key=lambda i: i["name"]) - else: - return sorted(info["name"] for info in infos) - - def content_summary(self, path): - """Total numbers of files, directories and bytes under path""" - out = self._call("GETCONTENTSUMMARY", path=path) - return out.json()["ContentSummary"] - - def ukey(self, path): - """Checksum info of file, giving method and result""" - out = self._call("GETFILECHECKSUM", path=path, redirect=False) - if "Location" in out.headers: - location = self._apply_proxy(out.headers["Location"]) - out2 = self.session.get(location) - out2.raise_for_status() - return out2.json()["FileChecksum"] - else: - out.raise_for_status() - return out.json()["FileChecksum"] - - def home_directory(self): - """Get user's home directory""" - out = self._call("GETHOMEDIRECTORY") - return out.json()["Path"] - - def get_delegation_token(self, renewer=None): - """Retrieve token which can give the same authority to other uses - - Parameters - ---------- - renewer: str or None - User who may use this token; if None, will be current user - """ - if renewer: - out = self._call("GETDELEGATIONTOKEN", renewer=renewer) - else: - out = self._call("GETDELEGATIONTOKEN") - t = out.json()["Token"] - if t is None: - raise ValueError("No token available for this user/security context") - return t["urlString"] - - def renew_delegation_token(self, token): - """Make token live longer. 
Returns new expiry time""" - out = self._call("RENEWDELEGATIONTOKEN", method="put", token=token) - return out.json()["long"] - - def cancel_delegation_token(self, token): - """Stop the token from being useful""" - self._call("CANCELDELEGATIONTOKEN", method="put", token=token) - - def chmod(self, path, mod): - """Set the permission at path - - Parameters - ---------- - path: str - location to set (file or directory) - mod: str or int - posix epresentation or permission, give as oct string, e.g, '777' - or 0o777 - """ - self._call("SETPERMISSION", method="put", path=path, permission=mod) - - def chown(self, path, owner=None, group=None): - """Change owning user and/or group""" - kwargs = {} - if owner is not None: - kwargs["owner"] = owner - if group is not None: - kwargs["group"] = group - self._call("SETOWNER", method="put", path=path, **kwargs) - - def set_replication(self, path, replication): - """ - Set file replication factor - - Parameters - ---------- - path: str - File location (not for directories) - replication: int - Number of copies of file on the cluster. Should be smaller than - number of data nodes; normally 3 on most systems. - """ - self._call("SETREPLICATION", path=path, method="put", replication=replication) - - def mkdir(self, path, **kwargs): - self._call("MKDIRS", method="put", path=path) - - def makedirs(self, path, exist_ok=False): - if exist_ok is False and self.exists(path): - raise FileExistsError(path) - self.mkdir(path) - - def mv(self, path1, path2, **kwargs): - self._call("RENAME", method="put", path=path1, destination=path2) - - def rm(self, path, recursive=False, **kwargs): - self._call( - "DELETE", - method="delete", - path=path, - recursive="true" if recursive else "false", - ) - - def rm_file(self, path, **kwargs): - self.rm(path) - - def cp_file(self, lpath, rpath, **kwargs): - with self.open(lpath) as lstream: - tmp_fname = "/".join([self._parent(rpath), f".tmp.{secrets.token_hex(16)}"]) - # Perform an atomic copy (stream to a temporary file and - # move it to the actual destination). - try: - with self.open(tmp_fname, "wb") as rstream: - shutil.copyfileobj(lstream, rstream) - self.mv(tmp_fname, rpath) - except BaseException: # noqa - with suppress(FileNotFoundError): - self.rm(tmp_fname) - raise - - def _apply_proxy(self, location): - if self.proxy and callable(self.proxy): - location = self.proxy(location) - elif self.proxy: - # as a dict - for k, v in self.proxy.items(): - location = location.replace(k, v, 1) - return location - - -class WebHDFile(AbstractBufferedFile): - """A file living in HDFS over webHDFS""" - - def __init__(self, fs, path, **kwargs): - super().__init__(fs, path, **kwargs) - kwargs = kwargs.copy() - if kwargs.get("permissions", None) is None: - kwargs.pop("permissions", None) - if kwargs.get("replication", None) is None: - kwargs.pop("replication", None) - self.permissions = kwargs.pop("permissions", 511) - tempdir = kwargs.pop("tempdir") - if kwargs.pop("autocommit", False) is False: - self.target = self.path - self.path = os.path.join(tempdir, str(uuid.uuid4())) - - def _upload_chunk(self, final=False): - """Write one part of a multi-block file upload - - Parameters - ========== - final: bool - This is the last block, so should complete file, if - self.autocommit is True. 
- """ - out = self.fs.session.post( - self.location, - data=self.buffer.getvalue(), - headers={"content-type": "application/octet-stream"}, - ) - out.raise_for_status() - return True - - def _initiate_upload(self): - """Create remote file/upload""" - kwargs = self.kwargs.copy() - if "a" in self.mode: - op, method = "APPEND", "POST" - else: - op, method = "CREATE", "PUT" - kwargs["overwrite"] = "true" - out = self.fs._call(op, method, self.path, redirect=False, **kwargs) - location = self.fs._apply_proxy(out.headers["Location"]) - if "w" in self.mode: - # create empty file to append to - out2 = self.fs.session.put( - location, headers={"content-type": "application/octet-stream"} - ) - out2.raise_for_status() - # after creating empty file, change location to append to - out2 = self.fs._call("APPEND", "POST", self.path, redirect=False, **kwargs) - self.location = self.fs._apply_proxy(out2.headers["Location"]) - - def _fetch_range(self, start, end): - start = max(start, 0) - end = min(self.size, end) - if start >= end or start >= self.size: - return b"" - out = self.fs._call( - "OPEN", path=self.path, offset=start, length=end - start, redirect=False - ) - out.raise_for_status() - if "Location" in out.headers: - location = out.headers["Location"] - out2 = self.fs.session.get(self.fs._apply_proxy(location)) - return out2.content - else: - return out.content - - def commit(self): - self.fs.mv(self.path, self.target) - - def discard(self): - self.fs.rm(self.path) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/reload.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/reload.py deleted file mode 100644 index babb459f7dfa0a2c4149a64a1d3ec8c752e9ba88..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/cli/commands/reload.py +++ /dev/null @@ -1,114 +0,0 @@ -""" - -Contains the functions that run when `gradio` is called from the command line. Specifically, allows - -$ gradio app.py, to run app.py in reload mode where any changes in the app.py file or Gradio library reloads the demo. -$ gradio app.py my_demo, to use variable names other than "demo" -""" -from __future__ import annotations - -import inspect -import os -import re -import subprocess -import sys -import threading -from pathlib import Path -from typing import List, Optional - -import typer -from rich import print - -import gradio -from gradio import utils - -reload_thread = threading.local() - - -def _setup_config( - demo_path: Path, - demo_name: str = "demo", - additional_watch_dirs: list[str] | None = None, -): - original_path = demo_path - app_text = Path(original_path).read_text() - - patterns = [ - f"with gr\\.Blocks\\(\\) as {demo_name}", - f"{demo_name} = gr\\.Blocks", - f"{demo_name} = gr\\.Interface", - f"{demo_name} = gr\\.ChatInterface", - f"{demo_name} = gr\\.TabbedInterface", - ] - - if not any(re.search(p, app_text) for p in patterns): - print( - f"\n[bold red]Warning[/]: Cannot statically find a gradio demo called {demo_name}. " - "Reload work may fail." 
- ) - - abs_original_path = utils.abspath(original_path) - filename = Path(original_path).stem - - gradio_folder = Path(inspect.getfile(gradio)).parent - - message = "Watching:" - message_change_count = 0 - - watching_dirs = [] - if str(gradio_folder).strip(): - watching_dirs.append(gradio_folder) - message += f" '{gradio_folder}'" - message_change_count += 1 - - abs_parent = abs_original_path.parent - if str(abs_parent).strip(): - watching_dirs.append(abs_parent) - if message_change_count == 1: - message += "," - message += f" '{abs_parent}'" - - abs_parent = Path(".").resolve() - if str(abs_parent).strip(): - watching_dirs.append(abs_parent) - if message_change_count == 1: - message += "," - message += f" '{abs_parent}'" - - for wd in additional_watch_dirs or []: - if Path(wd) not in watching_dirs: - watching_dirs.append(wd) - - if message_change_count == 1: - message += "," - message += f" '{wd}'" - - print(message + "\n") - - # guarantee access to the module of an app - sys.path.insert(0, os.getcwd()) - return filename, abs_original_path, [str(s) for s in watching_dirs], demo_name - - -def main( - demo_path: Path, demo_name: str = "demo", watch_dirs: Optional[List[str]] = None -): - # default execution pattern to start the server and watch changes - filename, path, watch_dirs, demo_name = _setup_config( - demo_path, demo_name, watch_dirs - ) - # extra_args = args[1:] if len(args) == 1 or args[1].startswith("--") else args[2:] - popen = subprocess.Popen( - [sys.executable, "-u", path], - env=dict( - os.environ, - GRADIO_WATCH_DIRS=",".join(watch_dirs), - GRADIO_WATCH_FILE=filename, - GRADIO_WATCH_DEMO_NAME=demo_name, - ), - ) - popen.wait() - - -if __name__ == "__main__": - typer.run(main) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ipython_ext.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ipython_ext.py deleted file mode 100644 index b6bb8063930f2ff60fba39459ce0b7829987f959..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/ipython_ext.py +++ /dev/null @@ -1,89 +0,0 @@ -try: - from IPython.core.magic import ( - needs_local_scope, - register_cell_magic, - ) - from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring -except ImportError: - pass - -import gradio as gr -from gradio.networking import App -from gradio.utils import BaseReloader - - -class CellIdTracker: - """Determines the most recently run cell in the notebook. - - Needed to keep track of which demo the user is updating.
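As a point of reference for the patterns `_setup_config` above searches for, a minimal sketch of an `app.py` that `gradio app.py` could reload; the greeting function is purely illustrative:

```python
# app.py -- hypothetical example: the reload command expects a module-level
# Blocks/Interface bound to the name "demo" (or the name given on the CLI).
import gradio as gr


def greet(name: str) -> str:
    return f"Hello, {name}!"


demo = gr.Interface(fn=greet, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()
```

`main` above then re-runs this script in a subprocess with the GRADIO_WATCH_* environment variables set, which is how the server learns what to watch and which demo name to reload.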
- """ - - def __init__(self, ipython): - ipython.events.register("pre_run_cell", self.pre_run_cell) - self.shell = ipython - self.current_cell: str = "" - - def pre_run_cell(self, info): - self._current_cell = info.cell_id - - -class JupyterReloader(BaseReloader): - """Swap a running blocks class in a notebook with the latest cell contents.""" - - def __init__(self, ipython) -> None: - super().__init__() - self._cell_tracker = CellIdTracker(ipython) - self._running: dict[str, gr.Blocks] = {} - - @property - def current_cell(self): - return self._cell_tracker.current_cell - - @property - def running_app(self) -> App: - assert self.running_demo.server - return self.running_demo.server.running_app # type: ignore - - @property - def running_demo(self): - return self._running[self.current_cell] - - def demo_tracked(self) -> bool: - return self.current_cell in self._running - - def track(self, demo: gr.Blocks): - self._running[self.current_cell] = demo - - -def load_ipython_extension(ipython): - reloader = JupyterReloader(ipython) - - @magic_arguments() - @argument("--demo-name", default="demo", help="Name of gradio blocks instance.") - @argument( - "--share", - default=False, - const=True, - nargs="?", - help="Whether to launch with sharing. Will slow down reloading.", - ) - @register_cell_magic - @needs_local_scope - def blocks(line, cell, local_ns): - """Launch a demo defined in a cell in reload mode.""" - - args = parse_argstring(blocks, line) - - exec(cell, None, local_ns) - demo: gr.Blocks = local_ns[args.demo_name] - if not reloader.demo_tracked(): - demo.launch(share=args.share) - reloader.track(demo) - elif reloader.queue_changed(demo): - print("Queue got added or removed. Restarting demo.") - reloader.running_demo.close() - demo.launch() - reloader.track(demo) - else: - reloader.swap_blocks(demo) - return reloader.running_demo.artifact diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_tkagg.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_tkagg.py deleted file mode 100644 index f95b6011eadffe72dfc1dc092f3d20b44fba24a9..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_tkagg.py +++ /dev/null @@ -1,20 +0,0 @@ -from . 
import _backend_tk -from .backend_agg import FigureCanvasAgg -from ._backend_tk import _BackendTk, FigureCanvasTk -from ._backend_tk import ( # noqa: F401 # pylint: disable=W0611 - FigureManagerTk, NavigationToolbar2Tk) - - -class FigureCanvasTkAgg(FigureCanvasAgg, FigureCanvasTk): - def draw(self): - super().draw() - self.blit() - - def blit(self, bbox=None): - _backend_tk.blit(self._tkphoto, self.renderer.buffer_rgba(), - (0, 1, 2, 3), bbox=bbox) - - -@_BackendTk.export -class _BackendTkAgg(_BackendTk): - FigureCanvas = FigureCanvasTkAgg diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_marker.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_marker.py deleted file mode 100644 index 463ff1d05c96ab677cd7396d86fc7daa357d2a98..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_marker.py +++ /dev/null @@ -1,303 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from matplotlib import markers -from matplotlib.path import Path -from matplotlib.testing.decorators import check_figures_equal -from matplotlib.transforms import Affine2D - -import pytest - - -def test_marker_fillstyle(): - marker_style = markers.MarkerStyle(marker='o', fillstyle='none') - assert marker_style.get_fillstyle() == 'none' - assert not marker_style.is_filled() - - -@pytest.mark.parametrize('marker', [ - 'o', - 'x', - '', - 'None', - r'$\frac{1}{2}$', - "$\u266B$", - 1, - markers.TICKLEFT, - [[-1, 0], [1, 0]], - np.array([[-1, 0], [1, 0]]), - Path([[0, 0], [1, 0]], [Path.MOVETO, Path.LINETO]), - (5, 0), # a pentagon - (7, 1), # a 7-pointed star - (5, 2), # asterisk - (5, 0, 10), # a pentagon, rotated by 10 degrees - (7, 1, 10), # a 7-pointed star, rotated by 10 degrees - (5, 2, 10), # asterisk, rotated by 10 degrees - markers.MarkerStyle('o'), -]) -def test_markers_valid(marker): - # Checking this doesn't fail. - markers.MarkerStyle(marker) - - -@pytest.mark.parametrize('marker', [ - 'square', # arbitrary string - np.array([[-0.5, 0, 1, 2, 3]]), # 1D array - (1,), - (5, 3), # second parameter of tuple must be 0, 1, or 2 - (1, 2, 3, 4), -]) -def test_markers_invalid(marker): - with pytest.raises(ValueError): - markers.MarkerStyle(marker) - - -class UnsnappedMarkerStyle(markers.MarkerStyle): - """ - A MarkerStyle where the snap threshold is force-disabled. - - This is used to compare to polygon/star/asterisk markers which do not have - any snap threshold set. - """ - def _recache(self): - super()._recache() - self._snap_threshold = None - - -@check_figures_equal() -def test_poly_marker(fig_test, fig_ref): - ax_test = fig_test.add_subplot() - ax_ref = fig_ref.add_subplot() - - # Note, some reference sizes must be different because they have unit - # *length*, while polygon markers are inscribed in a circle of unit - # *radius*. This introduces a factor of np.sqrt(2), but since size is - # squared, that becomes 2. 
- size = 20**2 - - # Squares - ax_test.scatter([0], [0], marker=(4, 0, 45), s=size) - ax_ref.scatter([0], [0], marker='s', s=size/2) - - # Diamonds, with and without rotation argument - ax_test.scatter([1], [1], marker=(4, 0), s=size) - ax_ref.scatter([1], [1], marker=UnsnappedMarkerStyle('D'), s=size/2) - ax_test.scatter([1], [1.5], marker=(4, 0, 0), s=size) - ax_ref.scatter([1], [1.5], marker=UnsnappedMarkerStyle('D'), s=size/2) - - # Pentagon, with and without rotation argument - ax_test.scatter([2], [2], marker=(5, 0), s=size) - ax_ref.scatter([2], [2], marker=UnsnappedMarkerStyle('p'), s=size) - ax_test.scatter([2], [2.5], marker=(5, 0, 0), s=size) - ax_ref.scatter([2], [2.5], marker=UnsnappedMarkerStyle('p'), s=size) - - # Hexagon, with and without rotation argument - ax_test.scatter([3], [3], marker=(6, 0), s=size) - ax_ref.scatter([3], [3], marker='h', s=size) - ax_test.scatter([3], [3.5], marker=(6, 0, 0), s=size) - ax_ref.scatter([3], [3.5], marker='h', s=size) - - # Rotated hexagon - ax_test.scatter([4], [4], marker=(6, 0, 30), s=size) - ax_ref.scatter([4], [4], marker='H', s=size) - - # Octagons - ax_test.scatter([5], [5], marker=(8, 0, 22.5), s=size) - ax_ref.scatter([5], [5], marker=UnsnappedMarkerStyle('8'), s=size) - - ax_test.set(xlim=(-0.5, 5.5), ylim=(-0.5, 5.5)) - ax_ref.set(xlim=(-0.5, 5.5), ylim=(-0.5, 5.5)) - - -def test_star_marker(): - # We don't really have a strict equivalent to this marker, so we'll just do - # a smoke test. - size = 20**2 - - fig, ax = plt.subplots() - ax.scatter([0], [0], marker=(5, 1), s=size) - ax.scatter([1], [1], marker=(5, 1, 0), s=size) - ax.set(xlim=(-0.5, 0.5), ylim=(-0.5, 1.5)) - - -# The asterisk marker is really a star with 0-size inner circle, so the ends -# are corners and get a slight bevel. The reference markers are just singular -# lines without corners, so they have no bevel, and we need to add a slight -# tolerance. -@check_figures_equal(tol=1.45) -def test_asterisk_marker(fig_test, fig_ref, request): - ax_test = fig_test.add_subplot() - ax_ref = fig_ref.add_subplot() - - # Note, some reference sizes must be different because they have unit - # *length*, while asterisk markers are inscribed in a circle of unit - # *radius*. This introduces a factor of np.sqrt(2), but since size is - # squared, that becomes 2. - size = 20**2 - - def draw_ref_marker(y, style, size): - # As noted above, every line is doubled. Due to antialiasing, these - # doubled lines make a slight difference in the .png results. - ax_ref.scatter([y], [y], marker=UnsnappedMarkerStyle(style), s=size) - if request.getfixturevalue('ext') == 'png': - ax_ref.scatter([y], [y], marker=UnsnappedMarkerStyle(style), - s=size) - - # Plus - ax_test.scatter([0], [0], marker=(4, 2), s=size) - draw_ref_marker(0, '+', size) - ax_test.scatter([0.5], [0.5], marker=(4, 2, 0), s=size) - draw_ref_marker(0.5, '+', size) - - # Cross - ax_test.scatter([1], [1], marker=(4, 2, 45), s=size) - draw_ref_marker(1, 'x', size/2) - - ax_test.set(xlim=(-0.5, 1.5), ylim=(-0.5, 1.5)) - ax_ref.set(xlim=(-0.5, 1.5), ylim=(-0.5, 1.5)) - - -# The bullet mathtext marker is not quite a circle, so this is not a perfect match, but -# it is close enough to confirm that the text-based marker is centred correctly. But we -# still need a small tolerance to work around that difference. 
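The `(numsides, style, angle)` tuples exercised by the polygon, star and asterisk tests above read as follows; a small sketch with arbitrary sizes and coordinates:

```python
# style 0 = regular polygon, 1 = star, 2 = asterisk; the optional third element
# rotates the marker by that many degrees.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter([0], [0], marker=(5, 0), s=400)      # pentagon
ax.scatter([1], [0], marker=(5, 1), s=400)      # 5-pointed star
ax.scatter([2], [0], marker=(5, 2, 20), s=400)  # asterisk, rotated 20 degrees
ax.set(xlim=(-0.5, 2.5), ylim=(-0.5, 0.5))
fig.savefig("markers.png")
```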
-@check_figures_equal(extensions=['png'], tol=1.86) -def test_text_marker(fig_ref, fig_test): - ax_ref = fig_ref.add_subplot() - ax_test = fig_test.add_subplot() - - ax_ref.plot(0, 0, marker=r'o', markersize=100, markeredgewidth=0) - ax_test.plot(0, 0, marker=r'$\bullet$', markersize=100, markeredgewidth=0) - - -@check_figures_equal() -def test_marker_clipping(fig_ref, fig_test): - # Plotting multiple markers can trigger different optimized paths in - # backends, so compare single markers vs multiple to ensure they are - # clipped correctly. - marker_count = len(markers.MarkerStyle.markers) - marker_size = 50 - ncol = 7 - nrow = marker_count // ncol + 1 - - width = 2 * marker_size * ncol - height = 2 * marker_size * nrow * 2 - fig_ref.set_size_inches((width / fig_ref.dpi, height / fig_ref.dpi)) - ax_ref = fig_ref.add_axes([0, 0, 1, 1]) - fig_test.set_size_inches((width / fig_test.dpi, height / fig_ref.dpi)) - ax_test = fig_test.add_axes([0, 0, 1, 1]) - - for i, marker in enumerate(markers.MarkerStyle.markers): - x = i % ncol - y = i // ncol * 2 - - # Singular markers per call. - ax_ref.plot([x, x], [y, y + 1], c='k', linestyle='-', lw=3) - ax_ref.plot(x, y, c='k', - marker=marker, markersize=marker_size, markeredgewidth=10, - fillstyle='full', markerfacecolor='white') - ax_ref.plot(x, y + 1, c='k', - marker=marker, markersize=marker_size, markeredgewidth=10, - fillstyle='full', markerfacecolor='white') - - # Multiple markers in a single call. - ax_test.plot([x, x], [y, y + 1], c='k', linestyle='-', lw=3, - marker=marker, markersize=marker_size, markeredgewidth=10, - fillstyle='full', markerfacecolor='white') - - ax_ref.set(xlim=(-0.5, ncol), ylim=(-0.5, 2 * nrow)) - ax_test.set(xlim=(-0.5, ncol), ylim=(-0.5, 2 * nrow)) - ax_ref.axis('off') - ax_test.axis('off') - - -def test_marker_init_transforms(): - """Test that initializing marker with transform is a simple addition.""" - marker = markers.MarkerStyle("o") - t = Affine2D().translate(1, 1) - t_marker = markers.MarkerStyle("o", transform=t) - assert marker.get_transform() + t == t_marker.get_transform() - - -def test_marker_init_joinstyle(): - marker = markers.MarkerStyle("*") - styled_marker = markers.MarkerStyle("*", joinstyle="round") - assert styled_marker.get_joinstyle() == "round" - assert marker.get_joinstyle() != "round" - - -def test_marker_init_captyle(): - marker = markers.MarkerStyle("*") - styled_marker = markers.MarkerStyle("*", capstyle="round") - assert styled_marker.get_capstyle() == "round" - assert marker.get_capstyle() != "round" - - -@pytest.mark.parametrize("marker,transform,expected", [ - (markers.MarkerStyle("o"), Affine2D().translate(1, 1), - Affine2D().translate(1, 1)), - (markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)), - Affine2D().translate(1, 1), Affine2D().translate(2, 2)), - (markers.MarkerStyle("$|||$", transform=Affine2D().translate(1, 1)), - Affine2D().translate(1, 1), Affine2D().translate(2, 2)), - (markers.MarkerStyle( - markers.TICKLEFT, transform=Affine2D().translate(1, 1)), - Affine2D().translate(1, 1), Affine2D().translate(2, 2)), -]) -def test_marker_transformed(marker, transform, expected): - new_marker = marker.transformed(transform) - assert new_marker is not marker - assert new_marker.get_user_transform() == expected - assert marker._user_transform is not new_marker._user_transform - - -def test_marker_rotated_invalid(): - marker = markers.MarkerStyle("o") - with pytest.raises(ValueError): - new_marker = marker.rotated() - with pytest.raises(ValueError): - new_marker = 
marker.rotated(deg=10, rad=10) - - -@pytest.mark.parametrize("marker,deg,rad,expected", [ - (markers.MarkerStyle("o"), 10, None, Affine2D().rotate_deg(10)), - (markers.MarkerStyle("o"), None, 0.01, Affine2D().rotate(0.01)), - (markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)), - 10, None, Affine2D().translate(1, 1).rotate_deg(10)), - (markers.MarkerStyle("o", transform=Affine2D().translate(1, 1)), - None, 0.01, Affine2D().translate(1, 1).rotate(0.01)), - (markers.MarkerStyle("$|||$", transform=Affine2D().translate(1, 1)), - 10, None, Affine2D().translate(1, 1).rotate_deg(10)), - (markers.MarkerStyle( - markers.TICKLEFT, transform=Affine2D().translate(1, 1)), - 10, None, Affine2D().translate(1, 1).rotate_deg(10)), -]) -def test_marker_rotated(marker, deg, rad, expected): - new_marker = marker.rotated(deg=deg, rad=rad) - assert new_marker is not marker - assert new_marker.get_user_transform() == expected - assert marker._user_transform is not new_marker._user_transform - - -def test_marker_scaled(): - marker = markers.MarkerStyle("1") - new_marker = marker.scaled(2) - assert new_marker is not marker - assert new_marker.get_user_transform() == Affine2D().scale(2) - assert marker._user_transform is not new_marker._user_transform - - new_marker = marker.scaled(2, 3) - assert new_marker is not marker - assert new_marker.get_user_transform() == Affine2D().scale(2, 3) - assert marker._user_transform is not new_marker._user_transform - - marker = markers.MarkerStyle("1", transform=Affine2D().translate(1, 1)) - new_marker = marker.scaled(2) - assert new_marker is not marker - expected = Affine2D().translate(1, 1).scale(2) - assert new_marker.get_user_transform() == expected - assert marker._user_transform is not new_marker._user_transform - - -def test_alt_transform(): - m1 = markers.MarkerStyle("o", "left") - m2 = markers.MarkerStyle("o", "left", Affine2D().rotate_deg(90)) - assert m1.get_alt_transform().rotate_deg(90) == m2.get_alt_transform() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/formats/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/formats/__init__.py deleted file mode 100644 index 5e56b1bc7ba4377cc5de9d68a1424524aef21cb5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/io/formats/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# ruff: noqa: TCH004 -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - # import modules that have public classes/functions - from pandas.io.formats import style - - # and mark only those modules as public - __all__ = ["style"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py deleted file mode 100644 index 617f0c3a2788580274a44db6edb292cba17110fc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_pop.py +++ /dev/null @@ -1,71 +0,0 @@ -import numpy as np - -from pandas import ( - DataFrame, - MultiIndex, - Series, -) -import pandas._testing as tm - - -class TestDataFramePop: - def test_pop(self, float_frame): - float_frame.columns.name = "baz" - - float_frame.pop("A") - assert "A" not in float_frame - - float_frame["foo"] = "bar" - float_frame.pop("foo") - assert "foo" not in float_frame - assert float_frame.columns.name == "baz" - - # gh-10912: 
inplace ops cause caching issue - a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"], index=["X", "Y"]) - b = a.pop("B") - b += 1 - - # original frame - expected = DataFrame([[1, 3], [4, 6]], columns=["A", "C"], index=["X", "Y"]) - tm.assert_frame_equal(a, expected) - - # result - expected = Series([2, 5], index=["X", "Y"], name="B") + 1 - tm.assert_series_equal(b, expected) - - def test_pop_non_unique_cols(self): - df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]}) - df.columns = ["a", "b", "a"] - - res = df.pop("a") - assert type(res) == DataFrame - assert len(res) == 2 - assert len(df.columns) == 1 - assert "b" in df.columns - assert "a" not in df.columns - assert len(df.index) == 2 - - def test_mixed_depth_pop(self): - arrays = [ - ["a", "top", "top", "routine1", "routine1", "routine2"], - ["", "OD", "OD", "result1", "result2", "result1"], - ["", "wx", "wy", "", "", ""], - ] - - tuples = sorted(zip(*arrays)) - index = MultiIndex.from_tuples(tuples) - df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index) - - df1 = df.copy() - df2 = df.copy() - result = df1.pop("a") - expected = df2.pop(("a", "", "")) - tm.assert_series_equal(expected, result, check_names=False) - tm.assert_frame_equal(df1, df2) - assert result.name == "a" - - expected = df1["top"] - df1 = df1.drop(["top"], axis=1) - result = df2.pop("top") - tm.assert_frame_equal(expected, result) - tm.assert_frame_equal(df1, df2) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_openedge_builtins.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_openedge_builtins.py deleted file mode 100644 index 7fdfb41049122e3a208eb9ba8801757ea2357517..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/_openedge_builtins.py +++ /dev/null @@ -1,2600 +0,0 @@ -""" - pygments.lexers._openedge_builtins - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Builtin list for the OpenEdgeLexer. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. 
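For context, this keyword table feeds Pygments' OpenEdgeLexer; a brief sketch of highlighting with the standard Pygments API (the ABL snippet itself is made up):

```python
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import OpenEdgeLexer

# A made-up OpenEdge ABL fragment; keywords such as DEFINE, VARIABLE and
# NO-UNDO come from the OPENEDGEKEYWORDS tuple below.
code = 'DEFINE VARIABLE cName AS CHARACTER NO-UNDO.\nDISPLAY "Hello".'
print(highlight(code, OpenEdgeLexer(), HtmlFormatter()))
```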
-""" - -OPENEDGEKEYWORDS = ( - 'ABS', - 'ABSO', - 'ABSOL', - 'ABSOLU', - 'ABSOLUT', - 'ABSOLUTE', - 'ABSTRACT', - 'ACCELERATOR', - 'ACCUM', - 'ACCUMU', - 'ACCUMUL', - 'ACCUMULA', - 'ACCUMULAT', - 'ACCUMULATE', - 'ACTIVE-FORM', - 'ACTIVE-WINDOW', - 'ADD', - 'ADD-BUFFER', - 'ADD-CALC-COLUMN', - 'ADD-COLUMNS-FROM', - 'ADD-EVENTS-PROCEDURE', - 'ADD-FIELDS-FROM', - 'ADD-FIRST', - 'ADD-INDEX-FIELD', - 'ADD-LAST', - 'ADD-LIKE-COLUMN', - 'ADD-LIKE-FIELD', - 'ADD-LIKE-INDEX', - 'ADD-NEW-FIELD', - 'ADD-NEW-INDEX', - 'ADD-SCHEMA-LOCATION', - 'ADD-SUPER-PROCEDURE', - 'ADM-DATA', - 'ADVISE', - 'ALERT-BOX', - 'ALIAS', - 'ALL', - 'ALLOW-COLUMN-SEARCHING', - 'ALLOW-REPLICATION', - 'ALTER', - 'ALWAYS-ON-TOP', - 'AMBIG', - 'AMBIGU', - 'AMBIGUO', - 'AMBIGUOU', - 'AMBIGUOUS', - 'ANALYZ', - 'ANALYZE', - 'AND', - 'ANSI-ONLY', - 'ANY', - 'ANYWHERE', - 'APPEND', - 'APPL-ALERT', - 'APPL-ALERT-', - 'APPL-ALERT-B', - 'APPL-ALERT-BO', - 'APPL-ALERT-BOX', - 'APPL-ALERT-BOXE', - 'APPL-ALERT-BOXES', - 'APPL-CONTEXT-ID', - 'APPLICATION', - 'APPLY', - 'APPSERVER-INFO', - 'APPSERVER-PASSWORD', - 'APPSERVER-USERID', - 'ARRAY-MESSAGE', - 'AS', - 'ASC', - 'ASCE', - 'ASCEN', - 'ASCEND', - 'ASCENDI', - 'ASCENDIN', - 'ASCENDING', - 'ASK-OVERWRITE', - 'ASSEMBLY', - 'ASSIGN', - 'ASYNC-REQUEST-COUNT', - 'ASYNC-REQUEST-HANDLE', - 'ASYNCHRONOUS', - 'AT', - 'ATTACHED-PAIRLIST', - 'ATTR', - 'ATTR-SPACE', - 'ATTRI', - 'ATTRIB', - 'ATTRIBU', - 'ATTRIBUT', - 'AUDIT-CONTROL', - 'AUDIT-ENABLED', - 'AUDIT-EVENT-CONTEXT', - 'AUDIT-POLICY', - 'AUTHENTICATION-FAILED', - 'AUTHORIZATION', - 'AUTO-COMP', - 'AUTO-COMPL', - 'AUTO-COMPLE', - 'AUTO-COMPLET', - 'AUTO-COMPLETI', - 'AUTO-COMPLETIO', - 'AUTO-COMPLETION', - 'AUTO-END-KEY', - 'AUTO-ENDKEY', - 'AUTO-GO', - 'AUTO-IND', - 'AUTO-INDE', - 'AUTO-INDEN', - 'AUTO-INDENT', - 'AUTO-RESIZE', - 'AUTO-RET', - 'AUTO-RETU', - 'AUTO-RETUR', - 'AUTO-RETURN', - 'AUTO-SYNCHRONIZE', - 'AUTO-Z', - 'AUTO-ZA', - 'AUTO-ZAP', - 'AUTOMATIC', - 'AVAIL', - 'AVAILA', - 'AVAILAB', - 'AVAILABL', - 'AVAILABLE', - 'AVAILABLE-FORMATS', - 'AVE', - 'AVER', - 'AVERA', - 'AVERAG', - 'AVERAGE', - 'AVG', - 'BACK', - 'BACKG', - 'BACKGR', - 'BACKGRO', - 'BACKGROU', - 'BACKGROUN', - 'BACKGROUND', - 'BACKWARD', - 'BACKWARDS', - 'BASE64-DECODE', - 'BASE64-ENCODE', - 'BASE-ADE', - 'BASE-KEY', - 'BATCH', - 'BATCH-', - 'BATCH-M', - 'BATCH-MO', - 'BATCH-MOD', - 'BATCH-MODE', - 'BATCH-SIZE', - 'BEFORE-H', - 'BEFORE-HI', - 'BEFORE-HID', - 'BEFORE-HIDE', - 'BEGIN-EVENT-GROUP', - 'BEGINS', - 'BELL', - 'BETWEEN', - 'BGC', - 'BGCO', - 'BGCOL', - 'BGCOLO', - 'BGCOLOR', - 'BIG-ENDIAN', - 'BINARY', - 'BIND', - 'BIND-WHERE', - 'BLANK', - 'BLOCK-ITERATION-DISPLAY', - 'BLOCK-LEVEL', - 'BORDER-B', - 'BORDER-BO', - 'BORDER-BOT', - 'BORDER-BOTT', - 'BORDER-BOTTO', - 'BORDER-BOTTOM-CHARS', - 'BORDER-BOTTOM-P', - 'BORDER-BOTTOM-PI', - 'BORDER-BOTTOM-PIX', - 'BORDER-BOTTOM-PIXE', - 'BORDER-BOTTOM-PIXEL', - 'BORDER-BOTTOM-PIXELS', - 'BORDER-L', - 'BORDER-LE', - 'BORDER-LEF', - 'BORDER-LEFT', - 'BORDER-LEFT-', - 'BORDER-LEFT-C', - 'BORDER-LEFT-CH', - 'BORDER-LEFT-CHA', - 'BORDER-LEFT-CHAR', - 'BORDER-LEFT-CHARS', - 'BORDER-LEFT-P', - 'BORDER-LEFT-PI', - 'BORDER-LEFT-PIX', - 'BORDER-LEFT-PIXE', - 'BORDER-LEFT-PIXEL', - 'BORDER-LEFT-PIXELS', - 'BORDER-R', - 'BORDER-RI', - 'BORDER-RIG', - 'BORDER-RIGH', - 'BORDER-RIGHT', - 'BORDER-RIGHT-', - 'BORDER-RIGHT-C', - 'BORDER-RIGHT-CH', - 'BORDER-RIGHT-CHA', - 'BORDER-RIGHT-CHAR', - 'BORDER-RIGHT-CHARS', - 'BORDER-RIGHT-P', - 'BORDER-RIGHT-PI', - 'BORDER-RIGHT-PIX', - 'BORDER-RIGHT-PIXE', - 'BORDER-RIGHT-PIXEL', - 
'BORDER-RIGHT-PIXELS', - 'BORDER-T', - 'BORDER-TO', - 'BORDER-TOP', - 'BORDER-TOP-', - 'BORDER-TOP-C', - 'BORDER-TOP-CH', - 'BORDER-TOP-CHA', - 'BORDER-TOP-CHAR', - 'BORDER-TOP-CHARS', - 'BORDER-TOP-P', - 'BORDER-TOP-PI', - 'BORDER-TOP-PIX', - 'BORDER-TOP-PIXE', - 'BORDER-TOP-PIXEL', - 'BORDER-TOP-PIXELS', - 'BOX', - 'BOX-SELECT', - 'BOX-SELECTA', - 'BOX-SELECTAB', - 'BOX-SELECTABL', - 'BOX-SELECTABLE', - 'BREAK', - 'BROWSE', - 'BUFFER', - 'BUFFER-CHARS', - 'BUFFER-COMPARE', - 'BUFFER-COPY', - 'BUFFER-CREATE', - 'BUFFER-DELETE', - 'BUFFER-FIELD', - 'BUFFER-HANDLE', - 'BUFFER-LINES', - 'BUFFER-NAME', - 'BUFFER-PARTITION-ID', - 'BUFFER-RELEASE', - 'BUFFER-VALUE', - 'BUTTON', - 'BUTTONS', - 'BY', - 'BY-POINTER', - 'BY-VARIANT-POINTER', - 'CACHE', - 'CACHE-SIZE', - 'CALL', - 'CALL-NAME', - 'CALL-TYPE', - 'CAN-CREATE', - 'CAN-DELETE', - 'CAN-DO', - 'CAN-DO-DOMAIN-SUPPORT', - 'CAN-FIND', - 'CAN-QUERY', - 'CAN-READ', - 'CAN-SET', - 'CAN-WRITE', - 'CANCEL-BREAK', - 'CANCEL-BUTTON', - 'CAPS', - 'CAREFUL-PAINT', - 'CASE', - 'CASE-SEN', - 'CASE-SENS', - 'CASE-SENSI', - 'CASE-SENSIT', - 'CASE-SENSITI', - 'CASE-SENSITIV', - 'CASE-SENSITIVE', - 'CAST', - 'CATCH', - 'CDECL', - 'CENTER', - 'CENTERE', - 'CENTERED', - 'CHAINED', - 'CHARACTER', - 'CHARACTER_LENGTH', - 'CHARSET', - 'CHECK', - 'CHECKED', - 'CHOOSE', - 'CHR', - 'CLASS', - 'CLASS-TYPE', - 'CLEAR', - 'CLEAR-APPL-CONTEXT', - 'CLEAR-LOG', - 'CLEAR-SELECT', - 'CLEAR-SELECTI', - 'CLEAR-SELECTIO', - 'CLEAR-SELECTION', - 'CLEAR-SORT-ARROW', - 'CLEAR-SORT-ARROWS', - 'CLIENT-CONNECTION-ID', - 'CLIENT-PRINCIPAL', - 'CLIENT-TTY', - 'CLIENT-TYPE', - 'CLIENT-WORKSTATION', - 'CLIPBOARD', - 'CLOSE', - 'CLOSE-LOG', - 'CODE', - 'CODEBASE-LOCATOR', - 'CODEPAGE', - 'CODEPAGE-CONVERT', - 'COL', - 'COL-OF', - 'COLLATE', - 'COLON', - 'COLON-ALIGN', - 'COLON-ALIGNE', - 'COLON-ALIGNED', - 'COLOR', - 'COLOR-TABLE', - 'COLU', - 'COLUM', - 'COLUMN', - 'COLUMN-BGCOLOR', - 'COLUMN-DCOLOR', - 'COLUMN-FGCOLOR', - 'COLUMN-FONT', - 'COLUMN-LAB', - 'COLUMN-LABE', - 'COLUMN-LABEL', - 'COLUMN-MOVABLE', - 'COLUMN-OF', - 'COLUMN-PFCOLOR', - 'COLUMN-READ-ONLY', - 'COLUMN-RESIZABLE', - 'COLUMN-SCROLLING', - 'COLUMNS', - 'COM-HANDLE', - 'COM-SELF', - 'COMBO-BOX', - 'COMMAND', - 'COMPARES', - 'COMPILE', - 'COMPILER', - 'COMPLETE', - 'CONFIG-NAME', - 'CONNECT', - 'CONNECTED', - 'CONSTRUCTOR', - 'CONTAINS', - 'CONTENTS', - 'CONTEXT', - 'CONTEXT-HELP', - 'CONTEXT-HELP-FILE', - 'CONTEXT-HELP-ID', - 'CONTEXT-POPUP', - 'CONTROL', - 'CONTROL-BOX', - 'CONTROL-FRAME', - 'CONVERT', - 'CONVERT-3D-COLORS', - 'CONVERT-TO-OFFS', - 'CONVERT-TO-OFFSE', - 'CONVERT-TO-OFFSET', - 'COPY-DATASET', - 'COPY-LOB', - 'COPY-SAX-ATTRIBUTES', - 'COPY-TEMP-TABLE', - 'COUNT', - 'COUNT-OF', - 'CPCASE', - 'CPCOLL', - 'CPINTERNAL', - 'CPLOG', - 'CPPRINT', - 'CPRCODEIN', - 'CPRCODEOUT', - 'CPSTREAM', - 'CPTERM', - 'CRC-VALUE', - 'CREATE', - 'CREATE-LIKE', - 'CREATE-LIKE-SEQUENTIAL', - 'CREATE-NODE-NAMESPACE', - 'CREATE-RESULT-LIST-ENTRY', - 'CREATE-TEST-FILE', - 'CURRENT', - 'CURRENT-CHANGED', - 'CURRENT-COLUMN', - 'CURRENT-ENV', - 'CURRENT-ENVI', - 'CURRENT-ENVIR', - 'CURRENT-ENVIRO', - 'CURRENT-ENVIRON', - 'CURRENT-ENVIRONM', - 'CURRENT-ENVIRONME', - 'CURRENT-ENVIRONMEN', - 'CURRENT-ENVIRONMENT', - 'CURRENT-ITERATION', - 'CURRENT-LANG', - 'CURRENT-LANGU', - 'CURRENT-LANGUA', - 'CURRENT-LANGUAG', - 'CURRENT-LANGUAGE', - 'CURRENT-QUERY', - 'CURRENT-REQUEST-INFO', - 'CURRENT-RESPONSE-INFO', - 'CURRENT-RESULT-ROW', - 'CURRENT-ROW-MODIFIED', - 'CURRENT-VALUE', - 'CURRENT-WINDOW', - 'CURRENT_DATE', - 'CURS', - 'CURSO', - 
'CURSOR', - 'CURSOR-CHAR', - 'CURSOR-LINE', - 'CURSOR-OFFSET', - 'DATA-BIND', - 'DATA-ENTRY-RET', - 'DATA-ENTRY-RETU', - 'DATA-ENTRY-RETUR', - 'DATA-ENTRY-RETURN', - 'DATA-REL', - 'DATA-RELA', - 'DATA-RELAT', - 'DATA-RELATI', - 'DATA-RELATIO', - 'DATA-RELATION', - 'DATA-SOURCE', - 'DATA-SOURCE-COMPLETE-MAP', - 'DATA-SOURCE-MODIFIED', - 'DATA-SOURCE-ROWID', - 'DATA-T', - 'DATA-TY', - 'DATA-TYP', - 'DATA-TYPE', - 'DATABASE', - 'DATASERVERS', - 'DATASET', - 'DATASET-HANDLE', - 'DATE', - 'DATE-F', - 'DATE-FO', - 'DATE-FOR', - 'DATE-FORM', - 'DATE-FORMA', - 'DATE-FORMAT', - 'DAY', - 'DB-CONTEXT', - 'DB-REFERENCES', - 'DBCODEPAGE', - 'DBCOLLATION', - 'DBNAME', - 'DBPARAM', - 'DBREST', - 'DBRESTR', - 'DBRESTRI', - 'DBRESTRIC', - 'DBRESTRICT', - 'DBRESTRICTI', - 'DBRESTRICTIO', - 'DBRESTRICTION', - 'DBRESTRICTIONS', - 'DBTASKID', - 'DBTYPE', - 'DBVERS', - 'DBVERSI', - 'DBVERSIO', - 'DBVERSION', - 'DCOLOR', - 'DDE', - 'DDE-ERROR', - 'DDE-I', - 'DDE-ID', - 'DDE-ITEM', - 'DDE-NAME', - 'DDE-TOPIC', - 'DEBLANK', - 'DEBU', - 'DEBUG', - 'DEBUG-ALERT', - 'DEBUG-LIST', - 'DEBUGGER', - 'DECIMAL', - 'DECIMALS', - 'DECLARE', - 'DECLARE-NAMESPACE', - 'DECRYPT', - 'DEFAULT', - 'DEFAULT-B', - 'DEFAULT-BU', - 'DEFAULT-BUFFER-HANDLE', - 'DEFAULT-BUT', - 'DEFAULT-BUTT', - 'DEFAULT-BUTTO', - 'DEFAULT-BUTTON', - 'DEFAULT-COMMIT', - 'DEFAULT-EX', - 'DEFAULT-EXT', - 'DEFAULT-EXTE', - 'DEFAULT-EXTEN', - 'DEFAULT-EXTENS', - 'DEFAULT-EXTENSI', - 'DEFAULT-EXTENSIO', - 'DEFAULT-EXTENSION', - 'DEFAULT-NOXL', - 'DEFAULT-NOXLA', - 'DEFAULT-NOXLAT', - 'DEFAULT-NOXLATE', - 'DEFAULT-VALUE', - 'DEFAULT-WINDOW', - 'DEFINE', - 'DEFINE-USER-EVENT-MANAGER', - 'DEFINED', - 'DEL', - 'DELE', - 'DELEGATE', - 'DELET', - 'DELETE PROCEDURE', - 'DELETE', - 'DELETE-CHAR', - 'DELETE-CHARA', - 'DELETE-CHARAC', - 'DELETE-CHARACT', - 'DELETE-CHARACTE', - 'DELETE-CHARACTER', - 'DELETE-CURRENT-ROW', - 'DELETE-LINE', - 'DELETE-RESULT-LIST-ENTRY', - 'DELETE-SELECTED-ROW', - 'DELETE-SELECTED-ROWS', - 'DELIMITER', - 'DESC', - 'DESCE', - 'DESCEN', - 'DESCEND', - 'DESCENDI', - 'DESCENDIN', - 'DESCENDING', - 'DESELECT-FOCUSED-ROW', - 'DESELECT-ROWS', - 'DESELECT-SELECTED-ROW', - 'DESELECTION', - 'DESTRUCTOR', - 'DIALOG-BOX', - 'DICT', - 'DICTI', - 'DICTIO', - 'DICTION', - 'DICTIONA', - 'DICTIONAR', - 'DICTIONARY', - 'DIR', - 'DISABLE', - 'DISABLE-AUTO-ZAP', - 'DISABLE-DUMP-TRIGGERS', - 'DISABLE-LOAD-TRIGGERS', - 'DISABLED', - 'DISCON', - 'DISCONN', - 'DISCONNE', - 'DISCONNEC', - 'DISCONNECT', - 'DISP', - 'DISPL', - 'DISPLA', - 'DISPLAY', - 'DISPLAY-MESSAGE', - 'DISPLAY-T', - 'DISPLAY-TY', - 'DISPLAY-TYP', - 'DISPLAY-TYPE', - 'DISTINCT', - 'DO', - 'DOMAIN-DESCRIPTION', - 'DOMAIN-NAME', - 'DOMAIN-TYPE', - 'DOS', - 'DOUBLE', - 'DOWN', - 'DRAG-ENABLED', - 'DROP', - 'DROP-DOWN', - 'DROP-DOWN-LIST', - 'DROP-FILE-NOTIFY', - 'DROP-TARGET', - 'DS-CLOSE-CURSOR', - 'DSLOG-MANAGER', - 'DUMP', - 'DYNAMIC', - 'DYNAMIC-ENUM', - 'DYNAMIC-FUNCTION', - 'DYNAMIC-INVOKE', - 'EACH', - 'ECHO', - 'EDGE', - 'EDGE-', - 'EDGE-C', - 'EDGE-CH', - 'EDGE-CHA', - 'EDGE-CHAR', - 'EDGE-CHARS', - 'EDGE-P', - 'EDGE-PI', - 'EDGE-PIX', - 'EDGE-PIXE', - 'EDGE-PIXEL', - 'EDGE-PIXELS', - 'EDIT-CAN-PASTE', - 'EDIT-CAN-UNDO', - 'EDIT-CLEAR', - 'EDIT-COPY', - 'EDIT-CUT', - 'EDIT-PASTE', - 'EDIT-UNDO', - 'EDITING', - 'EDITOR', - 'ELSE', - 'EMPTY', - 'EMPTY-TEMP-TABLE', - 'ENABLE', - 'ENABLED-FIELDS', - 'ENCODE', - 'ENCRYPT', - 'ENCRYPT-AUDIT-MAC-KEY', - 'ENCRYPTION-SALT', - 'END', - 'END-DOCUMENT', - 'END-ELEMENT', - 'END-EVENT-GROUP', - 'END-FILE-DROP', - 'END-KEY', - 'END-MOVE', - 'END-RESIZE', - 
'END-ROW-RESIZE', - 'END-USER-PROMPT', - 'ENDKEY', - 'ENTERED', - 'ENTITY-EXPANSION-LIMIT', - 'ENTRY', - 'ENUM', - 'EQ', - 'ERROR', - 'ERROR-COL', - 'ERROR-COLU', - 'ERROR-COLUM', - 'ERROR-COLUMN', - 'ERROR-ROW', - 'ERROR-STACK-TRACE', - 'ERROR-STAT', - 'ERROR-STATU', - 'ERROR-STATUS', - 'ESCAPE', - 'ETIME', - 'EVENT', - 'EVENT-GROUP-ID', - 'EVENT-PROCEDURE', - 'EVENT-PROCEDURE-CONTEXT', - 'EVENT-T', - 'EVENT-TY', - 'EVENT-TYP', - 'EVENT-TYPE', - 'EVENTS', - 'EXCEPT', - 'EXCLUSIVE', - 'EXCLUSIVE-', - 'EXCLUSIVE-ID', - 'EXCLUSIVE-L', - 'EXCLUSIVE-LO', - 'EXCLUSIVE-LOC', - 'EXCLUSIVE-LOCK', - 'EXCLUSIVE-WEB-USER', - 'EXECUTE', - 'EXISTS', - 'EXP', - 'EXPAND', - 'EXPANDABLE', - 'EXPLICIT', - 'EXPORT', - 'EXPORT-PRINCIPAL', - 'EXTENDED', - 'EXTENT', - 'EXTERNAL', - 'FALSE', - 'FETCH', - 'FETCH-SELECTED-ROW', - 'FGC', - 'FGCO', - 'FGCOL', - 'FGCOLO', - 'FGCOLOR', - 'FIELD', - 'FIELDS', - 'FILE', - 'FILE-CREATE-DATE', - 'FILE-CREATE-TIME', - 'FILE-INFO', - 'FILE-INFOR', - 'FILE-INFORM', - 'FILE-INFORMA', - 'FILE-INFORMAT', - 'FILE-INFORMATI', - 'FILE-INFORMATIO', - 'FILE-INFORMATION', - 'FILE-MOD-DATE', - 'FILE-MOD-TIME', - 'FILE-NAME', - 'FILE-OFF', - 'FILE-OFFS', - 'FILE-OFFSE', - 'FILE-OFFSET', - 'FILE-SIZE', - 'FILE-TYPE', - 'FILENAME', - 'FILL', - 'FILL-IN', - 'FILLED', - 'FILTERS', - 'FINAL', - 'FINALLY', - 'FIND', - 'FIND-BY-ROWID', - 'FIND-CASE-SENSITIVE', - 'FIND-CURRENT', - 'FIND-FIRST', - 'FIND-GLOBAL', - 'FIND-LAST', - 'FIND-NEXT-OCCURRENCE', - 'FIND-PREV-OCCURRENCE', - 'FIND-SELECT', - 'FIND-UNIQUE', - 'FIND-WRAP-AROUND', - 'FINDER', - 'FIRST', - 'FIRST-ASYNCH-REQUEST', - 'FIRST-CHILD', - 'FIRST-COLUMN', - 'FIRST-FORM', - 'FIRST-OBJECT', - 'FIRST-OF', - 'FIRST-PROC', - 'FIRST-PROCE', - 'FIRST-PROCED', - 'FIRST-PROCEDU', - 'FIRST-PROCEDUR', - 'FIRST-PROCEDURE', - 'FIRST-SERVER', - 'FIRST-TAB-I', - 'FIRST-TAB-IT', - 'FIRST-TAB-ITE', - 'FIRST-TAB-ITEM', - 'FIT-LAST-COLUMN', - 'FIXED-ONLY', - 'FLAT-BUTTON', - 'FLOAT', - 'FOCUS', - 'FOCUSED-ROW', - 'FOCUSED-ROW-SELECTED', - 'FONT', - 'FONT-TABLE', - 'FOR', - 'FORCE-FILE', - 'FORE', - 'FOREG', - 'FOREGR', - 'FOREGRO', - 'FOREGROU', - 'FOREGROUN', - 'FOREGROUND', - 'FORM INPUT', - 'FORM', - 'FORM-LONG-INPUT', - 'FORMA', - 'FORMAT', - 'FORMATTE', - 'FORMATTED', - 'FORWARD', - 'FORWARDS', - 'FRAGMEN', - 'FRAGMENT', - 'FRAM', - 'FRAME', - 'FRAME-COL', - 'FRAME-DB', - 'FRAME-DOWN', - 'FRAME-FIELD', - 'FRAME-FILE', - 'FRAME-INDE', - 'FRAME-INDEX', - 'FRAME-LINE', - 'FRAME-NAME', - 'FRAME-ROW', - 'FRAME-SPA', - 'FRAME-SPAC', - 'FRAME-SPACI', - 'FRAME-SPACIN', - 'FRAME-SPACING', - 'FRAME-VAL', - 'FRAME-VALU', - 'FRAME-VALUE', - 'FRAME-X', - 'FRAME-Y', - 'FREQUENCY', - 'FROM', - 'FROM-C', - 'FROM-CH', - 'FROM-CHA', - 'FROM-CHAR', - 'FROM-CHARS', - 'FROM-CUR', - 'FROM-CURR', - 'FROM-CURRE', - 'FROM-CURREN', - 'FROM-CURRENT', - 'FROM-P', - 'FROM-PI', - 'FROM-PIX', - 'FROM-PIXE', - 'FROM-PIXEL', - 'FROM-PIXELS', - 'FULL-HEIGHT', - 'FULL-HEIGHT-', - 'FULL-HEIGHT-C', - 'FULL-HEIGHT-CH', - 'FULL-HEIGHT-CHA', - 'FULL-HEIGHT-CHAR', - 'FULL-HEIGHT-CHARS', - 'FULL-HEIGHT-P', - 'FULL-HEIGHT-PI', - 'FULL-HEIGHT-PIX', - 'FULL-HEIGHT-PIXE', - 'FULL-HEIGHT-PIXEL', - 'FULL-HEIGHT-PIXELS', - 'FULL-PATHN', - 'FULL-PATHNA', - 'FULL-PATHNAM', - 'FULL-PATHNAME', - 'FULL-WIDTH', - 'FULL-WIDTH-', - 'FULL-WIDTH-C', - 'FULL-WIDTH-CH', - 'FULL-WIDTH-CHA', - 'FULL-WIDTH-CHAR', - 'FULL-WIDTH-CHARS', - 'FULL-WIDTH-P', - 'FULL-WIDTH-PI', - 'FULL-WIDTH-PIX', - 'FULL-WIDTH-PIXE', - 'FULL-WIDTH-PIXEL', - 'FULL-WIDTH-PIXELS', - 'FUNCTION', - 'FUNCTION-CALL-TYPE', - 'GATEWAY', - 
'GATEWAYS', - 'GE', - 'GENERATE-MD5', - 'GENERATE-PBE-KEY', - 'GENERATE-PBE-SALT', - 'GENERATE-RANDOM-KEY', - 'GENERATE-UUID', - 'GET', - 'GET-ATTR-CALL-TYPE', - 'GET-ATTRIBUTE-NODE', - 'GET-BINARY-DATA', - 'GET-BLUE', - 'GET-BLUE-', - 'GET-BLUE-V', - 'GET-BLUE-VA', - 'GET-BLUE-VAL', - 'GET-BLUE-VALU', - 'GET-BLUE-VALUE', - 'GET-BROWSE-COLUMN', - 'GET-BUFFER-HANDLE', - 'GET-BYTE', - 'GET-CALLBACK-PROC-CONTEXT', - 'GET-CALLBACK-PROC-NAME', - 'GET-CGI-LIST', - 'GET-CGI-LONG-VALUE', - 'GET-CGI-VALUE', - 'GET-CLASS', - 'GET-CODEPAGES', - 'GET-COLLATIONS', - 'GET-CONFIG-VALUE', - 'GET-CURRENT', - 'GET-DOUBLE', - 'GET-DROPPED-FILE', - 'GET-DYNAMIC', - 'GET-ERROR-COLUMN', - 'GET-ERROR-ROW', - 'GET-FILE', - 'GET-FILE-NAME', - 'GET-FILE-OFFSE', - 'GET-FILE-OFFSET', - 'GET-FIRST', - 'GET-FLOAT', - 'GET-GREEN', - 'GET-GREEN-', - 'GET-GREEN-V', - 'GET-GREEN-VA', - 'GET-GREEN-VAL', - 'GET-GREEN-VALU', - 'GET-GREEN-VALUE', - 'GET-INDEX-BY-NAMESPACE-NAME', - 'GET-INDEX-BY-QNAME', - 'GET-INT64', - 'GET-ITERATION', - 'GET-KEY-VAL', - 'GET-KEY-VALU', - 'GET-KEY-VALUE', - 'GET-LAST', - 'GET-LOCALNAME-BY-INDEX', - 'GET-LONG', - 'GET-MESSAGE', - 'GET-NEXT', - 'GET-NUMBER', - 'GET-POINTER-VALUE', - 'GET-PREV', - 'GET-PRINTERS', - 'GET-PROPERTY', - 'GET-QNAME-BY-INDEX', - 'GET-RED', - 'GET-RED-', - 'GET-RED-V', - 'GET-RED-VA', - 'GET-RED-VAL', - 'GET-RED-VALU', - 'GET-RED-VALUE', - 'GET-REPOSITIONED-ROW', - 'GET-RGB-VALUE', - 'GET-SELECTED', - 'GET-SELECTED-', - 'GET-SELECTED-W', - 'GET-SELECTED-WI', - 'GET-SELECTED-WID', - 'GET-SELECTED-WIDG', - 'GET-SELECTED-WIDGE', - 'GET-SELECTED-WIDGET', - 'GET-SHORT', - 'GET-SIGNATURE', - 'GET-SIZE', - 'GET-STRING', - 'GET-TAB-ITEM', - 'GET-TEXT-HEIGHT', - 'GET-TEXT-HEIGHT-', - 'GET-TEXT-HEIGHT-C', - 'GET-TEXT-HEIGHT-CH', - 'GET-TEXT-HEIGHT-CHA', - 'GET-TEXT-HEIGHT-CHAR', - 'GET-TEXT-HEIGHT-CHARS', - 'GET-TEXT-HEIGHT-P', - 'GET-TEXT-HEIGHT-PI', - 'GET-TEXT-HEIGHT-PIX', - 'GET-TEXT-HEIGHT-PIXE', - 'GET-TEXT-HEIGHT-PIXEL', - 'GET-TEXT-HEIGHT-PIXELS', - 'GET-TEXT-WIDTH', - 'GET-TEXT-WIDTH-', - 'GET-TEXT-WIDTH-C', - 'GET-TEXT-WIDTH-CH', - 'GET-TEXT-WIDTH-CHA', - 'GET-TEXT-WIDTH-CHAR', - 'GET-TEXT-WIDTH-CHARS', - 'GET-TEXT-WIDTH-P', - 'GET-TEXT-WIDTH-PI', - 'GET-TEXT-WIDTH-PIX', - 'GET-TEXT-WIDTH-PIXE', - 'GET-TEXT-WIDTH-PIXEL', - 'GET-TEXT-WIDTH-PIXELS', - 'GET-TYPE-BY-INDEX', - 'GET-TYPE-BY-NAMESPACE-NAME', - 'GET-TYPE-BY-QNAME', - 'GET-UNSIGNED-LONG', - 'GET-UNSIGNED-SHORT', - 'GET-URI-BY-INDEX', - 'GET-VALUE-BY-INDEX', - 'GET-VALUE-BY-NAMESPACE-NAME', - 'GET-VALUE-BY-QNAME', - 'GET-WAIT-STATE', - 'GETBYTE', - 'GLOBAL', - 'GO-ON', - 'GO-PEND', - 'GO-PENDI', - 'GO-PENDIN', - 'GO-PENDING', - 'GRANT', - 'GRAPHIC-E', - 'GRAPHIC-ED', - 'GRAPHIC-EDG', - 'GRAPHIC-EDGE', - 'GRID-FACTOR-H', - 'GRID-FACTOR-HO', - 'GRID-FACTOR-HOR', - 'GRID-FACTOR-HORI', - 'GRID-FACTOR-HORIZ', - 'GRID-FACTOR-HORIZO', - 'GRID-FACTOR-HORIZON', - 'GRID-FACTOR-HORIZONT', - 'GRID-FACTOR-HORIZONTA', - 'GRID-FACTOR-HORIZONTAL', - 'GRID-FACTOR-V', - 'GRID-FACTOR-VE', - 'GRID-FACTOR-VER', - 'GRID-FACTOR-VERT', - 'GRID-FACTOR-VERTI', - 'GRID-FACTOR-VERTIC', - 'GRID-FACTOR-VERTICA', - 'GRID-FACTOR-VERTICAL', - 'GRID-SNAP', - 'GRID-UNIT-HEIGHT', - 'GRID-UNIT-HEIGHT-', - 'GRID-UNIT-HEIGHT-C', - 'GRID-UNIT-HEIGHT-CH', - 'GRID-UNIT-HEIGHT-CHA', - 'GRID-UNIT-HEIGHT-CHARS', - 'GRID-UNIT-HEIGHT-P', - 'GRID-UNIT-HEIGHT-PI', - 'GRID-UNIT-HEIGHT-PIX', - 'GRID-UNIT-HEIGHT-PIXE', - 'GRID-UNIT-HEIGHT-PIXEL', - 'GRID-UNIT-HEIGHT-PIXELS', - 'GRID-UNIT-WIDTH', - 'GRID-UNIT-WIDTH-', - 'GRID-UNIT-WIDTH-C', - 'GRID-UNIT-WIDTH-CH', 
- 'GRID-UNIT-WIDTH-CHA', - 'GRID-UNIT-WIDTH-CHAR', - 'GRID-UNIT-WIDTH-CHARS', - 'GRID-UNIT-WIDTH-P', - 'GRID-UNIT-WIDTH-PI', - 'GRID-UNIT-WIDTH-PIX', - 'GRID-UNIT-WIDTH-PIXE', - 'GRID-UNIT-WIDTH-PIXEL', - 'GRID-UNIT-WIDTH-PIXELS', - 'GRID-VISIBLE', - 'GROUP', - 'GT', - 'GUID', - 'HANDLE', - 'HANDLER', - 'HAS-RECORDS', - 'HAVING', - 'HEADER', - 'HEIGHT', - 'HEIGHT-', - 'HEIGHT-C', - 'HEIGHT-CH', - 'HEIGHT-CHA', - 'HEIGHT-CHAR', - 'HEIGHT-CHARS', - 'HEIGHT-P', - 'HEIGHT-PI', - 'HEIGHT-PIX', - 'HEIGHT-PIXE', - 'HEIGHT-PIXEL', - 'HEIGHT-PIXELS', - 'HELP', - 'HEX-DECODE', - 'HEX-ENCODE', - 'HIDDEN', - 'HIDE', - 'HORI', - 'HORIZ', - 'HORIZO', - 'HORIZON', - 'HORIZONT', - 'HORIZONTA', - 'HORIZONTAL', - 'HOST-BYTE-ORDER', - 'HTML-CHARSET', - 'HTML-END-OF-LINE', - 'HTML-END-OF-PAGE', - 'HTML-FRAME-BEGIN', - 'HTML-FRAME-END', - 'HTML-HEADER-BEGIN', - 'HTML-HEADER-END', - 'HTML-TITLE-BEGIN', - 'HTML-TITLE-END', - 'HWND', - 'ICON', - 'IF', - 'IMAGE', - 'IMAGE-DOWN', - 'IMAGE-INSENSITIVE', - 'IMAGE-SIZE', - 'IMAGE-SIZE-C', - 'IMAGE-SIZE-CH', - 'IMAGE-SIZE-CHA', - 'IMAGE-SIZE-CHAR', - 'IMAGE-SIZE-CHARS', - 'IMAGE-SIZE-P', - 'IMAGE-SIZE-PI', - 'IMAGE-SIZE-PIX', - 'IMAGE-SIZE-PIXE', - 'IMAGE-SIZE-PIXEL', - 'IMAGE-SIZE-PIXELS', - 'IMAGE-UP', - 'IMMEDIATE-DISPLAY', - 'IMPLEMENTS', - 'IMPORT', - 'IMPORT-PRINCIPAL', - 'IN', - 'IN-HANDLE', - 'INCREMENT-EXCLUSIVE-ID', - 'INDEX', - 'INDEX-HINT', - 'INDEX-INFORMATION', - 'INDEXED-REPOSITION', - 'INDICATOR', - 'INFO', - 'INFOR', - 'INFORM', - 'INFORMA', - 'INFORMAT', - 'INFORMATI', - 'INFORMATIO', - 'INFORMATION', - 'INHERIT-BGC', - 'INHERIT-BGCO', - 'INHERIT-BGCOL', - 'INHERIT-BGCOLO', - 'INHERIT-BGCOLOR', - 'INHERIT-FGC', - 'INHERIT-FGCO', - 'INHERIT-FGCOL', - 'INHERIT-FGCOLO', - 'INHERIT-FGCOLOR', - 'INHERITS', - 'INIT', - 'INITI', - 'INITIA', - 'INITIAL', - 'INITIAL-DIR', - 'INITIAL-FILTER', - 'INITIALIZE-DOCUMENT-TYPE', - 'INITIATE', - 'INNER-CHARS', - 'INNER-LINES', - 'INPUT', - 'INPUT-O', - 'INPUT-OU', - 'INPUT-OUT', - 'INPUT-OUTP', - 'INPUT-OUTPU', - 'INPUT-OUTPUT', - 'INPUT-VALUE', - 'INSERT', - 'INSERT-ATTRIBUTE', - 'INSERT-B', - 'INSERT-BA', - 'INSERT-BAC', - 'INSERT-BACK', - 'INSERT-BACKT', - 'INSERT-BACKTA', - 'INSERT-BACKTAB', - 'INSERT-FILE', - 'INSERT-ROW', - 'INSERT-STRING', - 'INSERT-T', - 'INSERT-TA', - 'INSERT-TAB', - 'INT64', - 'INT', - 'INTEGER', - 'INTERFACE', - 'INTERNAL-ENTRIES', - 'INTO', - 'INVOKE', - 'IS', - 'IS-ATTR', - 'IS-ATTR-', - 'IS-ATTR-S', - 'IS-ATTR-SP', - 'IS-ATTR-SPA', - 'IS-ATTR-SPAC', - 'IS-ATTR-SPACE', - 'IS-CLASS', - 'IS-JSON', - 'IS-LEAD-BYTE', - 'IS-OPEN', - 'IS-PARAMETER-SET', - 'IS-PARTITIONED', - 'IS-ROW-SELECTED', - 'IS-SELECTED', - 'IS-XML', - 'ITEM', - 'ITEMS-PER-ROW', - 'JOIN', - 'JOIN-BY-SQLDB', - 'KBLABEL', - 'KEEP-CONNECTION-OPEN', - 'KEEP-FRAME-Z', - 'KEEP-FRAME-Z-', - 'KEEP-FRAME-Z-O', - 'KEEP-FRAME-Z-OR', - 'KEEP-FRAME-Z-ORD', - 'KEEP-FRAME-Z-ORDE', - 'KEEP-FRAME-Z-ORDER', - 'KEEP-MESSAGES', - 'KEEP-SECURITY-CACHE', - 'KEEP-TAB-ORDER', - 'KEY', - 'KEY-CODE', - 'KEY-FUNC', - 'KEY-FUNCT', - 'KEY-FUNCTI', - 'KEY-FUNCTIO', - 'KEY-FUNCTION', - 'KEY-LABEL', - 'KEYCODE', - 'KEYFUNC', - 'KEYFUNCT', - 'KEYFUNCTI', - 'KEYFUNCTIO', - 'KEYFUNCTION', - 'KEYLABEL', - 'KEYS', - 'KEYWORD', - 'KEYWORD-ALL', - 'LABEL', - 'LABEL-BGC', - 'LABEL-BGCO', - 'LABEL-BGCOL', - 'LABEL-BGCOLO', - 'LABEL-BGCOLOR', - 'LABEL-DC', - 'LABEL-DCO', - 'LABEL-DCOL', - 'LABEL-DCOLO', - 'LABEL-DCOLOR', - 'LABEL-FGC', - 'LABEL-FGCO', - 'LABEL-FGCOL', - 'LABEL-FGCOLO', - 'LABEL-FGCOLOR', - 'LABEL-FONT', - 'LABEL-PFC', - 'LABEL-PFCO', - 
'LABEL-PFCOL', - 'LABEL-PFCOLO', - 'LABEL-PFCOLOR', - 'LABELS', - 'LABELS-HAVE-COLONS', - 'LANDSCAPE', - 'LANGUAGE', - 'LANGUAGES', - 'LARGE', - 'LARGE-TO-SMALL', - 'LAST', - 'LAST-ASYNCH-REQUEST', - 'LAST-BATCH', - 'LAST-CHILD', - 'LAST-EVEN', - 'LAST-EVENT', - 'LAST-FORM', - 'LAST-KEY', - 'LAST-OBJECT', - 'LAST-OF', - 'LAST-PROCE', - 'LAST-PROCED', - 'LAST-PROCEDU', - 'LAST-PROCEDUR', - 'LAST-PROCEDURE', - 'LAST-SERVER', - 'LAST-TAB-I', - 'LAST-TAB-IT', - 'LAST-TAB-ITE', - 'LAST-TAB-ITEM', - 'LASTKEY', - 'LC', - 'LDBNAME', - 'LE', - 'LEAVE', - 'LEFT-ALIGN', - 'LEFT-ALIGNE', - 'LEFT-ALIGNED', - 'LEFT-TRIM', - 'LENGTH', - 'LIBRARY', - 'LIKE', - 'LIKE-SEQUENTIAL', - 'LINE', - 'LINE-COUNT', - 'LINE-COUNTE', - 'LINE-COUNTER', - 'LIST-EVENTS', - 'LIST-ITEM-PAIRS', - 'LIST-ITEMS', - 'LIST-PROPERTY-NAMES', - 'LIST-QUERY-ATTRS', - 'LIST-SET-ATTRS', - 'LIST-WIDGETS', - 'LISTI', - 'LISTIN', - 'LISTING', - 'LITERAL-QUESTION', - 'LITTLE-ENDIAN', - 'LOAD', - 'LOAD-DOMAINS', - 'LOAD-ICON', - 'LOAD-IMAGE', - 'LOAD-IMAGE-DOWN', - 'LOAD-IMAGE-INSENSITIVE', - 'LOAD-IMAGE-UP', - 'LOAD-MOUSE-P', - 'LOAD-MOUSE-PO', - 'LOAD-MOUSE-POI', - 'LOAD-MOUSE-POIN', - 'LOAD-MOUSE-POINT', - 'LOAD-MOUSE-POINTE', - 'LOAD-MOUSE-POINTER', - 'LOAD-PICTURE', - 'LOAD-SMALL-ICON', - 'LOCAL-NAME', - 'LOCAL-VERSION-INFO', - 'LOCATOR-COLUMN-NUMBER', - 'LOCATOR-LINE-NUMBER', - 'LOCATOR-PUBLIC-ID', - 'LOCATOR-SYSTEM-ID', - 'LOCATOR-TYPE', - 'LOCK-REGISTRATION', - 'LOCKED', - 'LOG', - 'LOG-AUDIT-EVENT', - 'LOG-MANAGER', - 'LOGICAL', - 'LOGIN-EXPIRATION-TIMESTAMP', - 'LOGIN-HOST', - 'LOGIN-STATE', - 'LOGOUT', - 'LONGCHAR', - 'LOOKAHEAD', - 'LOOKUP', - 'LT', - 'MACHINE-CLASS', - 'MANDATORY', - 'MANUAL-HIGHLIGHT', - 'MAP', - 'MARGIN-EXTRA', - 'MARGIN-HEIGHT', - 'MARGIN-HEIGHT-', - 'MARGIN-HEIGHT-C', - 'MARGIN-HEIGHT-CH', - 'MARGIN-HEIGHT-CHA', - 'MARGIN-HEIGHT-CHAR', - 'MARGIN-HEIGHT-CHARS', - 'MARGIN-HEIGHT-P', - 'MARGIN-HEIGHT-PI', - 'MARGIN-HEIGHT-PIX', - 'MARGIN-HEIGHT-PIXE', - 'MARGIN-HEIGHT-PIXEL', - 'MARGIN-HEIGHT-PIXELS', - 'MARGIN-WIDTH', - 'MARGIN-WIDTH-', - 'MARGIN-WIDTH-C', - 'MARGIN-WIDTH-CH', - 'MARGIN-WIDTH-CHA', - 'MARGIN-WIDTH-CHAR', - 'MARGIN-WIDTH-CHARS', - 'MARGIN-WIDTH-P', - 'MARGIN-WIDTH-PI', - 'MARGIN-WIDTH-PIX', - 'MARGIN-WIDTH-PIXE', - 'MARGIN-WIDTH-PIXEL', - 'MARGIN-WIDTH-PIXELS', - 'MARK-NEW', - 'MARK-ROW-STATE', - 'MATCHES', - 'MAX', - 'MAX-BUTTON', - 'MAX-CHARS', - 'MAX-DATA-GUESS', - 'MAX-HEIGHT', - 'MAX-HEIGHT-C', - 'MAX-HEIGHT-CH', - 'MAX-HEIGHT-CHA', - 'MAX-HEIGHT-CHAR', - 'MAX-HEIGHT-CHARS', - 'MAX-HEIGHT-P', - 'MAX-HEIGHT-PI', - 'MAX-HEIGHT-PIX', - 'MAX-HEIGHT-PIXE', - 'MAX-HEIGHT-PIXEL', - 'MAX-HEIGHT-PIXELS', - 'MAX-ROWS', - 'MAX-SIZE', - 'MAX-VAL', - 'MAX-VALU', - 'MAX-VALUE', - 'MAX-WIDTH', - 'MAX-WIDTH-', - 'MAX-WIDTH-C', - 'MAX-WIDTH-CH', - 'MAX-WIDTH-CHA', - 'MAX-WIDTH-CHAR', - 'MAX-WIDTH-CHARS', - 'MAX-WIDTH-P', - 'MAX-WIDTH-PI', - 'MAX-WIDTH-PIX', - 'MAX-WIDTH-PIXE', - 'MAX-WIDTH-PIXEL', - 'MAX-WIDTH-PIXELS', - 'MAXI', - 'MAXIM', - 'MAXIMIZE', - 'MAXIMU', - 'MAXIMUM', - 'MAXIMUM-LEVEL', - 'MD5-DIGEST', - 'MEMBER', - 'MEMPTR-TO-NODE-VALUE', - 'MENU', - 'MENU-BAR', - 'MENU-ITEM', - 'MENU-K', - 'MENU-KE', - 'MENU-KEY', - 'MENU-M', - 'MENU-MO', - 'MENU-MOU', - 'MENU-MOUS', - 'MENU-MOUSE', - 'MENUBAR', - 'MERGE-BY-FIELD', - 'MESSAGE', - 'MESSAGE-AREA', - 'MESSAGE-AREA-FONT', - 'MESSAGE-LINES', - 'METHOD', - 'MIN', - 'MIN-BUTTON', - 'MIN-COLUMN-WIDTH-C', - 'MIN-COLUMN-WIDTH-CH', - 'MIN-COLUMN-WIDTH-CHA', - 'MIN-COLUMN-WIDTH-CHAR', - 'MIN-COLUMN-WIDTH-CHARS', - 'MIN-COLUMN-WIDTH-P', - 
'MIN-COLUMN-WIDTH-PI', - 'MIN-COLUMN-WIDTH-PIX', - 'MIN-COLUMN-WIDTH-PIXE', - 'MIN-COLUMN-WIDTH-PIXEL', - 'MIN-COLUMN-WIDTH-PIXELS', - 'MIN-HEIGHT', - 'MIN-HEIGHT-', - 'MIN-HEIGHT-C', - 'MIN-HEIGHT-CH', - 'MIN-HEIGHT-CHA', - 'MIN-HEIGHT-CHAR', - 'MIN-HEIGHT-CHARS', - 'MIN-HEIGHT-P', - 'MIN-HEIGHT-PI', - 'MIN-HEIGHT-PIX', - 'MIN-HEIGHT-PIXE', - 'MIN-HEIGHT-PIXEL', - 'MIN-HEIGHT-PIXELS', - 'MIN-SIZE', - 'MIN-VAL', - 'MIN-VALU', - 'MIN-VALUE', - 'MIN-WIDTH', - 'MIN-WIDTH-', - 'MIN-WIDTH-C', - 'MIN-WIDTH-CH', - 'MIN-WIDTH-CHA', - 'MIN-WIDTH-CHAR', - 'MIN-WIDTH-CHARS', - 'MIN-WIDTH-P', - 'MIN-WIDTH-PI', - 'MIN-WIDTH-PIX', - 'MIN-WIDTH-PIXE', - 'MIN-WIDTH-PIXEL', - 'MIN-WIDTH-PIXELS', - 'MINI', - 'MINIM', - 'MINIMU', - 'MINIMUM', - 'MOD', - 'MODIFIED', - 'MODU', - 'MODUL', - 'MODULO', - 'MONTH', - 'MOUSE', - 'MOUSE-P', - 'MOUSE-PO', - 'MOUSE-POI', - 'MOUSE-POIN', - 'MOUSE-POINT', - 'MOUSE-POINTE', - 'MOUSE-POINTER', - 'MOVABLE', - 'MOVE-AFTER', - 'MOVE-AFTER-', - 'MOVE-AFTER-T', - 'MOVE-AFTER-TA', - 'MOVE-AFTER-TAB', - 'MOVE-AFTER-TAB-', - 'MOVE-AFTER-TAB-I', - 'MOVE-AFTER-TAB-IT', - 'MOVE-AFTER-TAB-ITE', - 'MOVE-AFTER-TAB-ITEM', - 'MOVE-BEFOR', - 'MOVE-BEFORE', - 'MOVE-BEFORE-', - 'MOVE-BEFORE-T', - 'MOVE-BEFORE-TA', - 'MOVE-BEFORE-TAB', - 'MOVE-BEFORE-TAB-', - 'MOVE-BEFORE-TAB-I', - 'MOVE-BEFORE-TAB-IT', - 'MOVE-BEFORE-TAB-ITE', - 'MOVE-BEFORE-TAB-ITEM', - 'MOVE-COL', - 'MOVE-COLU', - 'MOVE-COLUM', - 'MOVE-COLUMN', - 'MOVE-TO-B', - 'MOVE-TO-BO', - 'MOVE-TO-BOT', - 'MOVE-TO-BOTT', - 'MOVE-TO-BOTTO', - 'MOVE-TO-BOTTOM', - 'MOVE-TO-EOF', - 'MOVE-TO-T', - 'MOVE-TO-TO', - 'MOVE-TO-TOP', - 'MPE', - 'MTIME', - 'MULTI-COMPILE', - 'MULTIPLE', - 'MULTIPLE-KEY', - 'MULTITASKING-INTERVAL', - 'MUST-EXIST', - 'NAME', - 'NAMESPACE-PREFIX', - 'NAMESPACE-URI', - 'NATIVE', - 'NE', - 'NEEDS-APPSERVER-PROMPT', - 'NEEDS-PROMPT', - 'NEW', - 'NEW-INSTANCE', - 'NEW-ROW', - 'NEXT', - 'NEXT-COLUMN', - 'NEXT-PROMPT', - 'NEXT-ROWID', - 'NEXT-SIBLING', - 'NEXT-TAB-I', - 'NEXT-TAB-IT', - 'NEXT-TAB-ITE', - 'NEXT-TAB-ITEM', - 'NEXT-VALUE', - 'NO', - 'NO-APPLY', - 'NO-ARRAY-MESSAGE', - 'NO-ASSIGN', - 'NO-ATTR', - 'NO-ATTR-', - 'NO-ATTR-L', - 'NO-ATTR-LI', - 'NO-ATTR-LIS', - 'NO-ATTR-LIST', - 'NO-ATTR-S', - 'NO-ATTR-SP', - 'NO-ATTR-SPA', - 'NO-ATTR-SPAC', - 'NO-ATTR-SPACE', - 'NO-AUTO-VALIDATE', - 'NO-BIND-WHERE', - 'NO-BOX', - 'NO-CONSOLE', - 'NO-CONVERT', - 'NO-CONVERT-3D-COLORS', - 'NO-CURRENT-VALUE', - 'NO-DEBUG', - 'NO-DRAG', - 'NO-ECHO', - 'NO-EMPTY-SPACE', - 'NO-ERROR', - 'NO-F', - 'NO-FI', - 'NO-FIL', - 'NO-FILL', - 'NO-FOCUS', - 'NO-HELP', - 'NO-HIDE', - 'NO-INDEX-HINT', - 'NO-INHERIT-BGC', - 'NO-INHERIT-BGCO', - 'NO-INHERIT-BGCOLOR', - 'NO-INHERIT-FGC', - 'NO-INHERIT-FGCO', - 'NO-INHERIT-FGCOL', - 'NO-INHERIT-FGCOLO', - 'NO-INHERIT-FGCOLOR', - 'NO-JOIN-BY-SQLDB', - 'NO-LABE', - 'NO-LABELS', - 'NO-LOBS', - 'NO-LOCK', - 'NO-LOOKAHEAD', - 'NO-MAP', - 'NO-MES', - 'NO-MESS', - 'NO-MESSA', - 'NO-MESSAG', - 'NO-MESSAGE', - 'NO-PAUSE', - 'NO-PREFE', - 'NO-PREFET', - 'NO-PREFETC', - 'NO-PREFETCH', - 'NO-ROW-MARKERS', - 'NO-SCROLLBAR-VERTICAL', - 'NO-SEPARATE-CONNECTION', - 'NO-SEPARATORS', - 'NO-TAB-STOP', - 'NO-UND', - 'NO-UNDE', - 'NO-UNDER', - 'NO-UNDERL', - 'NO-UNDERLI', - 'NO-UNDERLIN', - 'NO-UNDERLINE', - 'NO-UNDO', - 'NO-VAL', - 'NO-VALI', - 'NO-VALID', - 'NO-VALIDA', - 'NO-VALIDAT', - 'NO-VALIDATE', - 'NO-WAIT', - 'NO-WORD-WRAP', - 'NODE-VALUE-TO-MEMPTR', - 'NONAMESPACE-SCHEMA-LOCATION', - 'NONE', - 'NORMALIZE', - 'NOT', - 'NOT-ACTIVE', - 'NOW', - 'NULL', - 'NUM-ALI', - 'NUM-ALIA', - 'NUM-ALIAS', - 'NUM-ALIASE', - 
'NUM-ALIASES', - 'NUM-BUFFERS', - 'NUM-BUT', - 'NUM-BUTT', - 'NUM-BUTTO', - 'NUM-BUTTON', - 'NUM-BUTTONS', - 'NUM-COL', - 'NUM-COLU', - 'NUM-COLUM', - 'NUM-COLUMN', - 'NUM-COLUMNS', - 'NUM-COPIES', - 'NUM-DBS', - 'NUM-DROPPED-FILES', - 'NUM-ENTRIES', - 'NUM-FIELDS', - 'NUM-FORMATS', - 'NUM-ITEMS', - 'NUM-ITERATIONS', - 'NUM-LINES', - 'NUM-LOCKED-COL', - 'NUM-LOCKED-COLU', - 'NUM-LOCKED-COLUM', - 'NUM-LOCKED-COLUMN', - 'NUM-LOCKED-COLUMNS', - 'NUM-MESSAGES', - 'NUM-PARAMETERS', - 'NUM-REFERENCES', - 'NUM-REPLACED', - 'NUM-RESULTS', - 'NUM-SELECTED', - 'NUM-SELECTED-', - 'NUM-SELECTED-ROWS', - 'NUM-SELECTED-W', - 'NUM-SELECTED-WI', - 'NUM-SELECTED-WID', - 'NUM-SELECTED-WIDG', - 'NUM-SELECTED-WIDGE', - 'NUM-SELECTED-WIDGET', - 'NUM-SELECTED-WIDGETS', - 'NUM-TABS', - 'NUM-TO-RETAIN', - 'NUM-VISIBLE-COLUMNS', - 'NUMERIC', - 'NUMERIC-F', - 'NUMERIC-FO', - 'NUMERIC-FOR', - 'NUMERIC-FORM', - 'NUMERIC-FORMA', - 'NUMERIC-FORMAT', - 'OCTET-LENGTH', - 'OF', - 'OFF', - 'OK', - 'OK-CANCEL', - 'OLD', - 'ON', - 'ON-FRAME', - 'ON-FRAME-', - 'ON-FRAME-B', - 'ON-FRAME-BO', - 'ON-FRAME-BOR', - 'ON-FRAME-BORD', - 'ON-FRAME-BORDE', - 'ON-FRAME-BORDER', - 'OPEN', - 'OPSYS', - 'OPTION', - 'OR', - 'ORDERED-JOIN', - 'ORDINAL', - 'OS-APPEND', - 'OS-COMMAND', - 'OS-COPY', - 'OS-CREATE-DIR', - 'OS-DELETE', - 'OS-DIR', - 'OS-DRIVE', - 'OS-DRIVES', - 'OS-ERROR', - 'OS-GETENV', - 'OS-RENAME', - 'OTHERWISE', - 'OUTPUT', - 'OVERLAY', - 'OVERRIDE', - 'OWNER', - 'PAGE', - 'PAGE-BOT', - 'PAGE-BOTT', - 'PAGE-BOTTO', - 'PAGE-BOTTOM', - 'PAGE-NUM', - 'PAGE-NUMB', - 'PAGE-NUMBE', - 'PAGE-NUMBER', - 'PAGE-SIZE', - 'PAGE-TOP', - 'PAGE-WID', - 'PAGE-WIDT', - 'PAGE-WIDTH', - 'PAGED', - 'PARAM', - 'PARAME', - 'PARAMET', - 'PARAMETE', - 'PARAMETER', - 'PARENT', - 'PARSE-STATUS', - 'PARTIAL-KEY', - 'PASCAL', - 'PASSWORD-FIELD', - 'PATHNAME', - 'PAUSE', - 'PBE-HASH-ALG', - 'PBE-HASH-ALGO', - 'PBE-HASH-ALGOR', - 'PBE-HASH-ALGORI', - 'PBE-HASH-ALGORIT', - 'PBE-HASH-ALGORITH', - 'PBE-HASH-ALGORITHM', - 'PBE-KEY-ROUNDS', - 'PDBNAME', - 'PERSIST', - 'PERSISTE', - 'PERSISTEN', - 'PERSISTENT', - 'PERSISTENT-CACHE-DISABLED', - 'PFC', - 'PFCO', - 'PFCOL', - 'PFCOLO', - 'PFCOLOR', - 'PIXELS', - 'PIXELS-PER-COL', - 'PIXELS-PER-COLU', - 'PIXELS-PER-COLUM', - 'PIXELS-PER-COLUMN', - 'PIXELS-PER-ROW', - 'POPUP-M', - 'POPUP-ME', - 'POPUP-MEN', - 'POPUP-MENU', - 'POPUP-O', - 'POPUP-ON', - 'POPUP-ONL', - 'POPUP-ONLY', - 'PORTRAIT', - 'POSITION', - 'PRECISION', - 'PREFER-DATASET', - 'PREPARE-STRING', - 'PREPARED', - 'PREPROC', - 'PREPROCE', - 'PREPROCES', - 'PREPROCESS', - 'PRESEL', - 'PRESELE', - 'PRESELEC', - 'PRESELECT', - 'PREV', - 'PREV-COLUMN', - 'PREV-SIBLING', - 'PREV-TAB-I', - 'PREV-TAB-IT', - 'PREV-TAB-ITE', - 'PREV-TAB-ITEM', - 'PRIMARY', - 'PRINTER', - 'PRINTER-CONTROL-HANDLE', - 'PRINTER-HDC', - 'PRINTER-NAME', - 'PRINTER-PORT', - 'PRINTER-SETUP', - 'PRIVATE', - 'PRIVATE-D', - 'PRIVATE-DA', - 'PRIVATE-DAT', - 'PRIVATE-DATA', - 'PRIVILEGES', - 'PROC-HA', - 'PROC-HAN', - 'PROC-HAND', - 'PROC-HANDL', - 'PROC-HANDLE', - 'PROC-ST', - 'PROC-STA', - 'PROC-STAT', - 'PROC-STATU', - 'PROC-STATUS', - 'PROC-TEXT', - 'PROC-TEXT-BUFFER', - 'PROCE', - 'PROCED', - 'PROCEDU', - 'PROCEDUR', - 'PROCEDURE', - 'PROCEDURE-CALL-TYPE', - 'PROCEDURE-TYPE', - 'PROCESS', - 'PROFILER', - 'PROGRAM-NAME', - 'PROGRESS', - 'PROGRESS-S', - 'PROGRESS-SO', - 'PROGRESS-SOU', - 'PROGRESS-SOUR', - 'PROGRESS-SOURC', - 'PROGRESS-SOURCE', - 'PROMPT', - 'PROMPT-F', - 'PROMPT-FO', - 'PROMPT-FOR', - 'PROMSGS', - 'PROPATH', - 'PROPERTY', - 'PROTECTED', - 'PROVERS', - 'PROVERSI', - 
'PROVERSIO', - 'PROVERSION', - 'PROXY', - 'PROXY-PASSWORD', - 'PROXY-USERID', - 'PUBLIC', - 'PUBLIC-ID', - 'PUBLISH', - 'PUBLISHED-EVENTS', - 'PUT', - 'PUT-BYTE', - 'PUT-DOUBLE', - 'PUT-FLOAT', - 'PUT-INT64', - 'PUT-KEY-VAL', - 'PUT-KEY-VALU', - 'PUT-KEY-VALUE', - 'PUT-LONG', - 'PUT-SHORT', - 'PUT-STRING', - 'PUT-UNSIGNED-LONG', - 'PUTBYTE', - 'QUERY', - 'QUERY-CLOSE', - 'QUERY-OFF-END', - 'QUERY-OPEN', - 'QUERY-PREPARE', - 'QUERY-TUNING', - 'QUESTION', - 'QUIT', - 'QUOTER', - 'R-INDEX', - 'RADIO-BUTTONS', - 'RADIO-SET', - 'RANDOM', - 'RAW', - 'RAW-TRANSFER', - 'RCODE-INFO', - 'RCODE-INFOR', - 'RCODE-INFORM', - 'RCODE-INFORMA', - 'RCODE-INFORMAT', - 'RCODE-INFORMATI', - 'RCODE-INFORMATIO', - 'RCODE-INFORMATION', - 'READ-AVAILABLE', - 'READ-EXACT-NUM', - 'READ-FILE', - 'READ-JSON', - 'READ-ONLY', - 'READ-XML', - 'READ-XMLSCHEMA', - 'READKEY', - 'REAL', - 'RECID', - 'RECORD-LENGTH', - 'RECT', - 'RECTA', - 'RECTAN', - 'RECTANG', - 'RECTANGL', - 'RECTANGLE', - 'RECURSIVE', - 'REFERENCE-ONLY', - 'REFRESH', - 'REFRESH-AUDIT-POLICY', - 'REFRESHABLE', - 'REGISTER-DOMAIN', - 'RELEASE', - 'REMOTE', - 'REMOVE-EVENTS-PROCEDURE', - 'REMOVE-SUPER-PROCEDURE', - 'REPEAT', - 'REPLACE', - 'REPLACE-SELECTION-TEXT', - 'REPOSITION', - 'REPOSITION-BACKWARD', - 'REPOSITION-FORWARD', - 'REPOSITION-MODE', - 'REPOSITION-TO-ROW', - 'REPOSITION-TO-ROWID', - 'REQUEST', - 'REQUEST-INFO', - 'RESET', - 'RESIZA', - 'RESIZAB', - 'RESIZABL', - 'RESIZABLE', - 'RESIZE', - 'RESPONSE-INFO', - 'RESTART-ROW', - 'RESTART-ROWID', - 'RETAIN', - 'RETAIN-SHAPE', - 'RETRY', - 'RETRY-CANCEL', - 'RETURN', - 'RETURN-ALIGN', - 'RETURN-ALIGNE', - 'RETURN-INS', - 'RETURN-INSE', - 'RETURN-INSER', - 'RETURN-INSERT', - 'RETURN-INSERTE', - 'RETURN-INSERTED', - 'RETURN-TO-START-DI', - 'RETURN-TO-START-DIR', - 'RETURN-VAL', - 'RETURN-VALU', - 'RETURN-VALUE', - 'RETURN-VALUE-DATA-TYPE', - 'RETURNS', - 'REVERSE-FROM', - 'REVERT', - 'REVOKE', - 'RGB-VALUE', - 'RIGHT-ALIGNED', - 'RIGHT-TRIM', - 'ROLES', - 'ROUND', - 'ROUTINE-LEVEL', - 'ROW', - 'ROW-HEIGHT-CHARS', - 'ROW-HEIGHT-PIXELS', - 'ROW-MARKERS', - 'ROW-OF', - 'ROW-RESIZABLE', - 'ROWID', - 'RULE', - 'RUN', - 'RUN-PROCEDURE', - 'SAVE CACHE', - 'SAVE', - 'SAVE-AS', - 'SAVE-FILE', - 'SAX-COMPLE', - 'SAX-COMPLET', - 'SAX-COMPLETE', - 'SAX-PARSE', - 'SAX-PARSE-FIRST', - 'SAX-PARSE-NEXT', - 'SAX-PARSER-ERROR', - 'SAX-RUNNING', - 'SAX-UNINITIALIZED', - 'SAX-WRITE-BEGIN', - 'SAX-WRITE-COMPLETE', - 'SAX-WRITE-CONTENT', - 'SAX-WRITE-ELEMENT', - 'SAX-WRITE-ERROR', - 'SAX-WRITE-IDLE', - 'SAX-WRITE-TAG', - 'SAX-WRITER', - 'SCHEMA', - 'SCHEMA-LOCATION', - 'SCHEMA-MARSHAL', - 'SCHEMA-PATH', - 'SCREEN', - 'SCREEN-IO', - 'SCREEN-LINES', - 'SCREEN-VAL', - 'SCREEN-VALU', - 'SCREEN-VALUE', - 'SCROLL', - 'SCROLL-BARS', - 'SCROLL-DELTA', - 'SCROLL-OFFSET', - 'SCROLL-TO-CURRENT-ROW', - 'SCROLL-TO-I', - 'SCROLL-TO-IT', - 'SCROLL-TO-ITE', - 'SCROLL-TO-ITEM', - 'SCROLL-TO-SELECTED-ROW', - 'SCROLLABLE', - 'SCROLLBAR-H', - 'SCROLLBAR-HO', - 'SCROLLBAR-HOR', - 'SCROLLBAR-HORI', - 'SCROLLBAR-HORIZ', - 'SCROLLBAR-HORIZO', - 'SCROLLBAR-HORIZON', - 'SCROLLBAR-HORIZONT', - 'SCROLLBAR-HORIZONTA', - 'SCROLLBAR-HORIZONTAL', - 'SCROLLBAR-V', - 'SCROLLBAR-VE', - 'SCROLLBAR-VER', - 'SCROLLBAR-VERT', - 'SCROLLBAR-VERTI', - 'SCROLLBAR-VERTIC', - 'SCROLLBAR-VERTICA', - 'SCROLLBAR-VERTICAL', - 'SCROLLED-ROW-POS', - 'SCROLLED-ROW-POSI', - 'SCROLLED-ROW-POSIT', - 'SCROLLED-ROW-POSITI', - 'SCROLLED-ROW-POSITIO', - 'SCROLLED-ROW-POSITION', - 'SCROLLING', - 'SDBNAME', - 'SEAL', - 'SEAL-TIMESTAMP', - 'SEARCH', - 'SEARCH-SELF', - 
'SEARCH-TARGET', - 'SECTION', - 'SECURITY-POLICY', - 'SEEK', - 'SELECT', - 'SELECT-ALL', - 'SELECT-FOCUSED-ROW', - 'SELECT-NEXT-ROW', - 'SELECT-PREV-ROW', - 'SELECT-ROW', - 'SELECTABLE', - 'SELECTED', - 'SELECTION', - 'SELECTION-END', - 'SELECTION-LIST', - 'SELECTION-START', - 'SELECTION-TEXT', - 'SELF', - 'SEND', - 'SEND-SQL-STATEMENT', - 'SENSITIVE', - 'SEPARATE-CONNECTION', - 'SEPARATOR-FGCOLOR', - 'SEPARATORS', - 'SERIALIZABLE', - 'SERIALIZE-HIDDEN', - 'SERIALIZE-NAME', - 'SERVER', - 'SERVER-CONNECTION-BOUND', - 'SERVER-CONNECTION-BOUND-REQUEST', - 'SERVER-CONNECTION-CONTEXT', - 'SERVER-CONNECTION-ID', - 'SERVER-OPERATING-MODE', - 'SESSION', - 'SESSION-ID', - 'SET', - 'SET-APPL-CONTEXT', - 'SET-ATTR-CALL-TYPE', - 'SET-ATTRIBUTE-NODE', - 'SET-BLUE', - 'SET-BLUE-', - 'SET-BLUE-V', - 'SET-BLUE-VA', - 'SET-BLUE-VAL', - 'SET-BLUE-VALU', - 'SET-BLUE-VALUE', - 'SET-BREAK', - 'SET-BUFFERS', - 'SET-CALLBACK', - 'SET-CLIENT', - 'SET-COMMIT', - 'SET-CONTENTS', - 'SET-CURRENT-VALUE', - 'SET-DB-CLIENT', - 'SET-DYNAMIC', - 'SET-EVENT-MANAGER-OPTION', - 'SET-GREEN', - 'SET-GREEN-', - 'SET-GREEN-V', - 'SET-GREEN-VA', - 'SET-GREEN-VAL', - 'SET-GREEN-VALU', - 'SET-GREEN-VALUE', - 'SET-INPUT-SOURCE', - 'SET-OPTION', - 'SET-OUTPUT-DESTINATION', - 'SET-PARAMETER', - 'SET-POINTER-VALUE', - 'SET-PROPERTY', - 'SET-RED', - 'SET-RED-', - 'SET-RED-V', - 'SET-RED-VA', - 'SET-RED-VAL', - 'SET-RED-VALU', - 'SET-RED-VALUE', - 'SET-REPOSITIONED-ROW', - 'SET-RGB-VALUE', - 'SET-ROLLBACK', - 'SET-SELECTION', - 'SET-SIZE', - 'SET-SORT-ARROW', - 'SET-WAIT-STATE', - 'SETUSER', - 'SETUSERI', - 'SETUSERID', - 'SHA1-DIGEST', - 'SHARE', - 'SHARE-', - 'SHARE-L', - 'SHARE-LO', - 'SHARE-LOC', - 'SHARE-LOCK', - 'SHARED', - 'SHOW-IN-TASKBAR', - 'SHOW-STAT', - 'SHOW-STATS', - 'SIDE-LAB', - 'SIDE-LABE', - 'SIDE-LABEL', - 'SIDE-LABEL-H', - 'SIDE-LABEL-HA', - 'SIDE-LABEL-HAN', - 'SIDE-LABEL-HAND', - 'SIDE-LABEL-HANDL', - 'SIDE-LABEL-HANDLE', - 'SIDE-LABELS', - 'SIGNATURE', - 'SILENT', - 'SIMPLE', - 'SINGLE', - 'SINGLE-RUN', - 'SINGLETON', - 'SIZE', - 'SIZE-C', - 'SIZE-CH', - 'SIZE-CHA', - 'SIZE-CHAR', - 'SIZE-CHARS', - 'SIZE-P', - 'SIZE-PI', - 'SIZE-PIX', - 'SIZE-PIXE', - 'SIZE-PIXEL', - 'SIZE-PIXELS', - 'SKIP', - 'SKIP-DELETED-RECORD', - 'SLIDER', - 'SMALL-ICON', - 'SMALL-TITLE', - 'SMALLINT', - 'SOME', - 'SORT', - 'SORT-ASCENDING', - 'SORT-NUMBER', - 'SOURCE', - 'SOURCE-PROCEDURE', - 'SPACE', - 'SQL', - 'SQRT', - 'SSL-SERVER-NAME', - 'STANDALONE', - 'START', - 'START-DOCUMENT', - 'START-ELEMENT', - 'START-MOVE', - 'START-RESIZE', - 'START-ROW-RESIZE', - 'STATE-DETAIL', - 'STATIC', - 'STATUS', - 'STATUS-AREA', - 'STATUS-AREA-FONT', - 'STDCALL', - 'STOP', - 'STOP-AFTER', - 'STOP-PARSING', - 'STOPPE', - 'STOPPED', - 'STORED-PROC', - 'STORED-PROCE', - 'STORED-PROCED', - 'STORED-PROCEDU', - 'STORED-PROCEDUR', - 'STORED-PROCEDURE', - 'STREAM', - 'STREAM-HANDLE', - 'STREAM-IO', - 'STRETCH-TO-FIT', - 'STRICT', - 'STRICT-ENTITY-RESOLUTION', - 'STRING', - 'STRING-VALUE', - 'STRING-XREF', - 'SUB-AVE', - 'SUB-AVER', - 'SUB-AVERA', - 'SUB-AVERAG', - 'SUB-AVERAGE', - 'SUB-COUNT', - 'SUB-MAXIMUM', - 'SUB-MENU', - 'SUB-MIN', - 'SUB-MINIMUM', - 'SUB-TOTAL', - 'SUBSCRIBE', - 'SUBST', - 'SUBSTI', - 'SUBSTIT', - 'SUBSTITU', - 'SUBSTITUT', - 'SUBSTITUTE', - 'SUBSTR', - 'SUBSTRI', - 'SUBSTRIN', - 'SUBSTRING', - 'SUBTYPE', - 'SUM', - 'SUM-MAX', - 'SUM-MAXI', - 'SUM-MAXIM', - 'SUM-MAXIMU', - 'SUPER', - 'SUPER-PROCEDURES', - 'SUPPRESS-NAMESPACE-PROCESSING', - 'SUPPRESS-W', - 'SUPPRESS-WA', - 'SUPPRESS-WAR', - 'SUPPRESS-WARN', - 'SUPPRESS-WARNI', - 
'SUPPRESS-WARNIN', - 'SUPPRESS-WARNING', - 'SUPPRESS-WARNINGS', - 'SYMMETRIC-ENCRYPTION-ALGORITHM', - 'SYMMETRIC-ENCRYPTION-IV', - 'SYMMETRIC-ENCRYPTION-KEY', - 'SYMMETRIC-SUPPORT', - 'SYSTEM-ALERT', - 'SYSTEM-ALERT-', - 'SYSTEM-ALERT-B', - 'SYSTEM-ALERT-BO', - 'SYSTEM-ALERT-BOX', - 'SYSTEM-ALERT-BOXE', - 'SYSTEM-ALERT-BOXES', - 'SYSTEM-DIALOG', - 'SYSTEM-HELP', - 'SYSTEM-ID', - 'TAB-POSITION', - 'TAB-STOP', - 'TABLE', - 'TABLE-HANDLE', - 'TABLE-NUMBER', - 'TABLE-SCAN', - 'TARGET', - 'TARGET-PROCEDURE', - 'TEMP-DIR', - 'TEMP-DIRE', - 'TEMP-DIREC', - 'TEMP-DIRECT', - 'TEMP-DIRECTO', - 'TEMP-DIRECTOR', - 'TEMP-DIRECTORY', - 'TEMP-TABLE', - 'TEMP-TABLE-PREPARE', - 'TERM', - 'TERMI', - 'TERMIN', - 'TERMINA', - 'TERMINAL', - 'TERMINATE', - 'TEXT', - 'TEXT-CURSOR', - 'TEXT-SEG-GROW', - 'TEXT-SELECTED', - 'THEN', - 'THIS-OBJECT', - 'THIS-PROCEDURE', - 'THREAD-SAFE', - 'THREE-D', - 'THROUGH', - 'THROW', - 'THRU', - 'TIC-MARKS', - 'TIME', - 'TIME-SOURCE', - 'TITLE', - 'TITLE-BGC', - 'TITLE-BGCO', - 'TITLE-BGCOL', - 'TITLE-BGCOLO', - 'TITLE-BGCOLOR', - 'TITLE-DC', - 'TITLE-DCO', - 'TITLE-DCOL', - 'TITLE-DCOLO', - 'TITLE-DCOLOR', - 'TITLE-FGC', - 'TITLE-FGCO', - 'TITLE-FGCOL', - 'TITLE-FGCOLO', - 'TITLE-FGCOLOR', - 'TITLE-FO', - 'TITLE-FON', - 'TITLE-FONT', - 'TO', - 'TO-ROWID', - 'TODAY', - 'TOGGLE-BOX', - 'TOOLTIP', - 'TOOLTIPS', - 'TOP-NAV-QUERY', - 'TOP-ONLY', - 'TOPIC', - 'TOTAL', - 'TRAILING', - 'TRANS', - 'TRANS-INIT-PROCEDURE', - 'TRANSACTION', - 'TRANSACTION-MODE', - 'TRANSPARENT', - 'TRIGGER', - 'TRIGGERS', - 'TRIM', - 'TRUE', - 'TRUNC', - 'TRUNCA', - 'TRUNCAT', - 'TRUNCATE', - 'TYPE', - 'TYPE-OF', - 'UNBOX', - 'UNBUFF', - 'UNBUFFE', - 'UNBUFFER', - 'UNBUFFERE', - 'UNBUFFERED', - 'UNDERL', - 'UNDERLI', - 'UNDERLIN', - 'UNDERLINE', - 'UNDO', - 'UNFORM', - 'UNFORMA', - 'UNFORMAT', - 'UNFORMATT', - 'UNFORMATTE', - 'UNFORMATTED', - 'UNION', - 'UNIQUE', - 'UNIQUE-ID', - 'UNIQUE-MATCH', - 'UNIX', - 'UNLESS-HIDDEN', - 'UNLOAD', - 'UNSIGNED-LONG', - 'UNSUBSCRIBE', - 'UP', - 'UPDATE', - 'UPDATE-ATTRIBUTE', - 'URL', - 'URL-DECODE', - 'URL-ENCODE', - 'URL-PASSWORD', - 'URL-USERID', - 'USE', - 'USE-DICT-EXPS', - 'USE-FILENAME', - 'USE-INDEX', - 'USE-REVVIDEO', - 'USE-TEXT', - 'USE-UNDERLINE', - 'USE-WIDGET-POOL', - 'USER', - 'USER-ID', - 'USERID', - 'USING', - 'V6DISPLAY', - 'V6FRAME', - 'VALID-EVENT', - 'VALID-HANDLE', - 'VALID-OBJECT', - 'VALIDATE', - 'VALIDATE-EXPRESSION', - 'VALIDATE-MESSAGE', - 'VALIDATE-SEAL', - 'VALIDATION-ENABLED', - 'VALUE', - 'VALUE-CHANGED', - 'VALUES', - 'VAR', - 'VARI', - 'VARIA', - 'VARIAB', - 'VARIABL', - 'VARIABLE', - 'VERBOSE', - 'VERSION', - 'VERT', - 'VERTI', - 'VERTIC', - 'VERTICA', - 'VERTICAL', - 'VIEW', - 'VIEW-AS', - 'VIEW-FIRST-COLUMN-ON-REOPEN', - 'VIRTUAL-HEIGHT', - 'VIRTUAL-HEIGHT-', - 'VIRTUAL-HEIGHT-C', - 'VIRTUAL-HEIGHT-CH', - 'VIRTUAL-HEIGHT-CHA', - 'VIRTUAL-HEIGHT-CHAR', - 'VIRTUAL-HEIGHT-CHARS', - 'VIRTUAL-HEIGHT-P', - 'VIRTUAL-HEIGHT-PI', - 'VIRTUAL-HEIGHT-PIX', - 'VIRTUAL-HEIGHT-PIXE', - 'VIRTUAL-HEIGHT-PIXEL', - 'VIRTUAL-HEIGHT-PIXELS', - 'VIRTUAL-WIDTH', - 'VIRTUAL-WIDTH-', - 'VIRTUAL-WIDTH-C', - 'VIRTUAL-WIDTH-CH', - 'VIRTUAL-WIDTH-CHA', - 'VIRTUAL-WIDTH-CHAR', - 'VIRTUAL-WIDTH-CHARS', - 'VIRTUAL-WIDTH-P', - 'VIRTUAL-WIDTH-PI', - 'VIRTUAL-WIDTH-PIX', - 'VIRTUAL-WIDTH-PIXE', - 'VIRTUAL-WIDTH-PIXEL', - 'VIRTUAL-WIDTH-PIXELS', - 'VISIBLE', - 'VOID', - 'WAIT', - 'WAIT-FOR', - 'WARNING', - 'WEB-CONTEXT', - 'WEEKDAY', - 'WHEN', - 'WHERE', - 'WHILE', - 'WIDGET', - 'WIDGET-E', - 'WIDGET-EN', - 'WIDGET-ENT', - 'WIDGET-ENTE', - 'WIDGET-ENTER', - 
'WIDGET-ID', - 'WIDGET-L', - 'WIDGET-LE', - 'WIDGET-LEA', - 'WIDGET-LEAV', - 'WIDGET-LEAVE', - 'WIDGET-POOL', - 'WIDTH', - 'WIDTH-', - 'WIDTH-C', - 'WIDTH-CH', - 'WIDTH-CHA', - 'WIDTH-CHAR', - 'WIDTH-CHARS', - 'WIDTH-P', - 'WIDTH-PI', - 'WIDTH-PIX', - 'WIDTH-PIXE', - 'WIDTH-PIXEL', - 'WIDTH-PIXELS', - 'WINDOW', - 'WINDOW-MAXIM', - 'WINDOW-MAXIMI', - 'WINDOW-MAXIMIZ', - 'WINDOW-MAXIMIZE', - 'WINDOW-MAXIMIZED', - 'WINDOW-MINIM', - 'WINDOW-MINIMI', - 'WINDOW-MINIMIZ', - 'WINDOW-MINIMIZE', - 'WINDOW-MINIMIZED', - 'WINDOW-NAME', - 'WINDOW-NORMAL', - 'WINDOW-STA', - 'WINDOW-STAT', - 'WINDOW-STATE', - 'WINDOW-SYSTEM', - 'WITH', - 'WORD-INDEX', - 'WORD-WRAP', - 'WORK-AREA-HEIGHT-PIXELS', - 'WORK-AREA-WIDTH-PIXELS', - 'WORK-AREA-X', - 'WORK-AREA-Y', - 'WORK-TAB', - 'WORK-TABL', - 'WORK-TABLE', - 'WORKFILE', - 'WRITE', - 'WRITE-CDATA', - 'WRITE-CHARACTERS', - 'WRITE-COMMENT', - 'WRITE-DATA-ELEMENT', - 'WRITE-EMPTY-ELEMENT', - 'WRITE-ENTITY-REF', - 'WRITE-EXTERNAL-DTD', - 'WRITE-FRAGMENT', - 'WRITE-JSON', - 'WRITE-MESSAGE', - 'WRITE-PROCESSING-INSTRUCTION', - 'WRITE-STATUS', - 'WRITE-XML', - 'WRITE-XMLSCHEMA', - 'X', - 'X-OF', - 'XCODE', - 'XML-DATA-TYPE', - 'XML-ENTITY-EXPANSION-LIMIT', - 'XML-NODE-TYPE', - 'XML-SCHEMA-PATH', - 'XML-STRICT-ENTITY-RESOLUTION', - 'XML-SUPPRESS-NAMESPACE-PROCESSING', - 'XREF', - 'XREF-XML', - 'Y', - 'Y-OF', - 'YEAR', - 'YEAR-OFFSET', - 'YES', - 'YES-NO', - 'YES-NO-CANCEL' -) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/package_index.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/package_index.py deleted file mode 100644 index d818f44ade082e2e11a3f954ee28bf046ba5cf2d..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/package_index.py +++ /dev/null @@ -1,1119 +0,0 @@ -"""PyPI and direct package downloading""" -import sys -import os -import re -import io -import shutil -import socket -import base64 -import hashlib -import itertools -import warnings -import configparser -import html -import http.client -import urllib.parse -import urllib.request -import urllib.error -from functools import wraps - -import setuptools -from pkg_resources import ( - CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, - Environment, find_distributions, safe_name, safe_version, - to_filename, Requirement, DEVELOP_DIST, EGG_DIST, -) -from distutils import log -from distutils.errors import DistutilsError -from fnmatch import translate -from setuptools.wheel import Wheel -from setuptools.extern.more_itertools import unique_everseen - - -EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') -HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I) -PYPI_MD5 = re.compile( - r'([^<]+)\n\s+\(md5\)' -) -URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match -EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() - -__all__ = [ - 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', - 'interpret_distro_name', -] - -_SOCKET_TIMEOUT = 15 - -_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" -user_agent = _tmpl.format( - py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools) - - -def parse_requirement_arg(spec): - try: - return Requirement.parse(spec) - except ValueError as e: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % (spec,) - ) from e - - -def parse_bdist_wininst(name): - """Return (base,pyversion) or (None,None) for possible .exe name""" - - lower = name.lower() - 
base, py_ver, plat = None, None, None - - if lower.endswith('.exe'): - if lower.endswith('.win32.exe'): - base = name[:-10] - plat = 'win32' - elif lower.startswith('.win32-py', -16): - py_ver = name[-7:-4] - base = name[:-16] - plat = 'win32' - elif lower.endswith('.win-amd64.exe'): - base = name[:-14] - plat = 'win-amd64' - elif lower.startswith('.win-amd64-py', -20): - py_ver = name[-7:-4] - base = name[:-20] - plat = 'win-amd64' - return base, py_ver, plat - - -def egg_info_for_url(url): - parts = urllib.parse.urlparse(url) - scheme, server, path, parameters, query, fragment = parts - base = urllib.parse.unquote(path.split('/')[-1]) - if server == 'sourceforge.net' and base == 'download': # XXX Yuck - base = urllib.parse.unquote(path.split('/')[-2]) - if '#' in base: - base, fragment = base.split('#', 1) - return base, fragment - - -def distros_for_url(url, metadata=None): - """Yield egg or source distribution objects that might be found at a URL""" - base, fragment = egg_info_for_url(url) - for dist in distros_for_location(url, base, metadata): - yield dist - if fragment: - match = EGG_FRAGMENT.match(fragment) - if match: - for dist in interpret_distro_name( - url, match.group(1), metadata, precedence=CHECKOUT_DIST - ): - yield dist - - -def distros_for_location(location, basename, metadata=None): - """Yield egg or source distribution objects based on basename""" - if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip - if basename.endswith('.egg') and '-' in basename: - # only one, unambiguous interpretation - return [Distribution.from_location(location, basename, metadata)] - if basename.endswith('.whl') and '-' in basename: - wheel = Wheel(basename) - if not wheel.is_compatible(): - return [] - return [Distribution( - location=location, - project_name=wheel.project_name, - version=wheel.version, - # Increase priority over eggs. - precedence=EGG_DIST + 1, - )] - if basename.endswith('.exe'): - win_base, py_ver, platform = parse_bdist_wininst(basename) - if win_base is not None: - return interpret_distro_name( - location, win_base, metadata, py_ver, BINARY_DIST, platform - ) - # Try source distro extensions (.zip, .tgz, etc.) - # - for ext in EXTENSIONS: - if basename.endswith(ext): - basename = basename[:-len(ext)] - return interpret_distro_name(location, basename, metadata) - return [] # no extension matched - - -def distros_for_filename(filename, metadata=None): - """Yield possible egg or source distribution objects based on a filename""" - return distros_for_location( - normalize_path(filename), os.path.basename(filename), metadata - ) - - -def interpret_distro_name( - location, basename, metadata, py_version=None, precedence=SOURCE_DIST, - platform=None -): - """Generate alternative interpretations of a source distro name - - Note: if `location` is a filesystem filename, you should call - ``pkg_resources.normalize_path()`` on it before passing it to this - routine! - """ - # Generate alternative interpretations of a source distro name - # Because some packages are ambiguous as to name/versions split - # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. - # So, we generate each possible interpretation (e.g. "adns, python-1.1.0" - # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). 
In practice, - # the spurious interpretations should be ignored, because in the event - # there's also an "adns" package, the spurious "python-1.1.0" version will - # compare lower than any numeric version number, and is therefore unlikely - # to match a request for it. It's still a potential problem, though, and - # in the long run PyPI and the distutils should go for "safe" names and - # versions in distribution archive names (sdist and bdist). - - parts = basename.split('-') - if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): - # it is a bdist_dumb, not an sdist -- bail out - return - - for p in range(1, len(parts) + 1): - yield Distribution( - location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), - py_version=py_version, precedence=precedence, - platform=platform - ) - - -def unique_values(func): - """ - Wrap a function returning an iterable such that the resulting iterable - only ever yields unique items. - """ - - @wraps(func) - def wrapper(*args, **kwargs): - return unique_everseen(func(*args, **kwargs)) - - return wrapper - - -REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) -# this line is here to fix emacs' cruddy broken syntax highlighting - - -@unique_values -def find_external_links(url, page): - """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" - - for match in REL.finditer(page): - tag, rel = match.groups() - rels = set(map(str.strip, rel.lower().split(','))) - if 'homepage' in rels or 'download' in rels: - for match in HREF.finditer(tag): - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - for tag in ("

    Home Page", "Download URL"): - pos = page.find(tag) - if pos != -1: - match = HREF.search(page, pos) - if match: - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - -class ContentChecker: - """ - A null content checker that defines the interface for checking content - """ - - def feed(self, block): - """ - Feed a block of data to the hash. - """ - return - - def is_valid(self): - """ - Check the hash. Return False if validation fails. - """ - return True - - def report(self, reporter, template): - """ - Call reporter with information about the checker (hash name) - substituted into the template. - """ - return - - -class HashChecker(ContentChecker): - pattern = re.compile( - r'(?Psha1|sha224|sha384|sha256|sha512|md5)=' - r'(?P[a-f0-9]+)' - ) - - def __init__(self, hash_name, expected): - self.hash_name = hash_name - self.hash = hashlib.new(hash_name) - self.expected = expected - - @classmethod - def from_url(cls, url): - "Construct a (possibly null) ContentChecker from a URL" - fragment = urllib.parse.urlparse(url)[-1] - if not fragment: - return ContentChecker() - match = cls.pattern.search(fragment) - if not match: - return ContentChecker() - return cls(**match.groupdict()) - - def feed(self, block): - self.hash.update(block) - - def is_valid(self): - return self.hash.hexdigest() == self.expected - - def report(self, reporter, template): - msg = template % self.hash_name - return reporter(msg) - - -class PackageIndex(Environment): - """A distribution index that scans web pages for download URLs""" - - def __init__( - self, index_url="https://pypi.org/simple/", hosts=('*',), - ca_bundle=None, verify_ssl=True, *args, **kw - ): - Environment.__init__(self, *args, **kw) - self.index_url = index_url + "/" [:not index_url.endswith('/')] - self.scanned_urls = {} - self.fetched_urls = {} - self.package_pages = {} - self.allows = re.compile('|'.join(map(translate, hosts))).match - self.to_scan = [] - self.opener = urllib.request.urlopen - - # FIXME: 'PackageIndex.process_url' is too complex (14) - def process_url(self, url, retrieve=False): # noqa: C901 - """Evaluate a URL as a possible download, and maybe retrieve it""" - if url in self.scanned_urls and not retrieve: - return - self.scanned_urls[url] = True - if not URL_SCHEME(url): - self.process_filename(url) - return - else: - dists = list(distros_for_url(url)) - if dists: - if not self.url_ok(url): - return - self.debug("Found link: %s", url) - - if dists or not retrieve or url in self.fetched_urls: - list(map(self.add, dists)) - return # don't need the actual page - - if not self.url_ok(url): - self.fetched_urls[url] = True - return - - self.info("Reading %s", url) - self.fetched_urls[url] = True # prevent multiple fetch attempts - tmpl = "Download error on %s: %%s -- Some packages may not be found!" - f = self.open_url(url, tmpl % url) - if f is None: - return - if isinstance(f, urllib.error.HTTPError) and f.code == 401: - self.info("Authentication error: %s" % f.msg) - self.fetched_urls[f.url] = True - if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it - return - - base = f.url # handle redirects - page = f.read() - if not isinstance(page, str): - # In Python 3 and got bytes but want str. 
- if isinstance(f, urllib.error.HTTPError): - # Errors have no charset, assume latin1: - charset = 'latin-1' - else: - charset = f.headers.get_param('charset') or 'latin-1' - page = page.decode(charset, "ignore") - f.close() - for match in HREF.finditer(page): - link = urllib.parse.urljoin(base, htmldecode(match.group(1))) - self.process_url(link) - if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: - page = self.process_index(url, page) - - def process_filename(self, fn, nested=False): - # process filenames or directories - if not os.path.exists(fn): - self.warn("Not found: %s", fn) - return - - if os.path.isdir(fn) and not nested: - path = os.path.realpath(fn) - for item in os.listdir(path): - self.process_filename(os.path.join(path, item), True) - - dists = distros_for_filename(fn) - if dists: - self.debug("Found: %s", fn) - list(map(self.add, dists)) - - def url_ok(self, url, fatal=False): - s = URL_SCHEME(url) - is_file = s and s.group(1).lower() == 'file' - if is_file or self.allows(urllib.parse.urlparse(url)[1]): - return True - msg = ( - "\nNote: Bypassing %s (disallowed host; see " - "http://bit.ly/2hrImnY for details).\n") - if fatal: - raise DistutilsError(msg % url) - else: - self.warn(msg, url) - - def scan_egg_links(self, search_path): - dirs = filter(os.path.isdir, search_path) - egg_links = ( - (path, entry) - for path in dirs - for entry in os.listdir(path) - if entry.endswith('.egg-link') - ) - list(itertools.starmap(self.scan_egg_link, egg_links)) - - def scan_egg_link(self, path, entry): - with open(os.path.join(path, entry)) as raw_lines: - # filter non-empty lines - lines = list(filter(None, map(str.strip, raw_lines))) - - if len(lines) != 2: - # format is not recognized; punt - return - - egg_path, setup_path = lines - - for dist in find_distributions(os.path.join(path, egg_path)): - dist.location = os.path.join(path, *lines) - dist.precedence = SOURCE_DIST - self.add(dist) - - def _scan(self, link): - # Process a URL to see if it's for a package page - NO_MATCH_SENTINEL = None, None - if not link.startswith(self.index_url): - return NO_MATCH_SENTINEL - - parts = list(map( - urllib.parse.unquote, link[len(self.index_url):].split('/') - )) - if len(parts) != 2 or '#' in parts[1]: - return NO_MATCH_SENTINEL - - # it's a package page, sanitize and index it - pkg = safe_name(parts[0]) - ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(), {})[link] = True - return to_filename(pkg), to_filename(ver) - - def process_index(self, url, page): - """Process the contents of a PyPI page""" - - # process an index page into the package-page index - for match in HREF.finditer(page): - try: - self._scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) - except ValueError: - pass - - pkg, ver = self._scan(url) # ensure this page is in the page index - if not pkg: - return "" # no sense double-scanning non-package pages - - # process individual package page - for new_url in find_external_links(url, page): - # Process the found URL - base, frag = egg_info_for_url(new_url) - if base.endswith('.py') and not frag: - if ver: - new_url += '#egg=%s-%s' % (pkg, ver) - else: - self.need_version_info(url) - self.scan_url(new_url) - - return PYPI_MD5.sub( - lambda m: '%s' % m.group(1, 3, 2), page - ) - - def need_version_info(self, url): - self.scan_all( - "Page at %s links to .py file(s) without version info; an index " - "scan is required.", url - ) - - def scan_all(self, msg=None, *args): - if self.index_url not in self.fetched_urls: - if msg: - 
self.warn(msg, *args) - self.info( - "Scanning index of all packages (this may take a while)" - ) - self.scan_url(self.index_url) - - def find_packages(self, requirement): - self.scan_url(self.index_url + requirement.unsafe_name + '/') - - if not self.package_pages.get(requirement.key): - # Fall back to safe version of the name - self.scan_url(self.index_url + requirement.project_name + '/') - - if not self.package_pages.get(requirement.key): - # We couldn't find the target package, so search the index page too - self.not_found_in_index(requirement) - - for url in list(self.package_pages.get(requirement.key, ())): - # scan each page that might be related to the desired package - self.scan_url(url) - - def obtain(self, requirement, installer=None): - self.prescan() - self.find_packages(requirement) - for dist in self[requirement.key]: - if dist in requirement: - return dist - self.debug("%s does not match %s", requirement, dist) - return super(PackageIndex, self).obtain(requirement, installer) - - def check_hash(self, checker, filename, tfp): - """ - checker is a ContentChecker - """ - checker.report( - self.debug, - "Validating %%s checksum for %s" % filename) - if not checker.is_valid(): - tfp.close() - os.unlink(filename) - raise DistutilsError( - "%s validation failed for %s; " - "possible download problem?" - % (checker.hash.name, os.path.basename(filename)) - ) - - def add_find_links(self, urls): - """Add `urls` to the list that will be prescanned for searches""" - for url in urls: - if ( - self.to_scan is None # if we have already "gone online" - or not URL_SCHEME(url) # or it's a local file/directory - or url.startswith('file:') - or list(distros_for_url(url)) # or a direct package link - ): - # then go ahead and process it now - self.scan_url(url) - else: - # otherwise, defer retrieval till later - self.to_scan.append(url) - - def prescan(self): - """Scan urls scheduled for prescanning (e.g. --find-links)""" - if self.to_scan: - list(map(self.scan_url, self.to_scan)) - self.to_scan = None # from now on, go ahead and process immediately - - def not_found_in_index(self, requirement): - if self[requirement.key]: # we've seen at least one distro - meth, msg = self.info, "Couldn't retrieve index page for %r" - else: # no distros seen for this name, might be misspelled - meth, msg = ( - self.warn, - "Couldn't find index page for %r (maybe misspelled?)") - meth(msg, requirement.unsafe_name) - self.scan_all() - - def download(self, spec, tmpdir): - """Locate and/or download `spec` to `tmpdir`, returning a local path - - `spec` may be a ``Requirement`` object, or a string containing a URL, - an existing local filename, or a project/version requirement spec - (i.e. the string form of a ``Requirement`` object). If it is the URL - of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one - that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is - automatically created alongside the downloaded file. - - If `spec` is a ``Requirement`` object or a string containing a - project/version requirement spec, this method returns the location of - a matching distribution (possibly after downloading it to `tmpdir`). - If `spec` is a locally existing file or directory name, it is simply - returned unchanged. If `spec` is a URL, it is downloaded to a subpath - of `tmpdir`, and the local filename is returned. Various errors may be - raised if a problem occurs during downloading. 
- """ - if not isinstance(spec, Requirement): - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir - found = self._download_url(scheme.group(1), spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found, fragment, tmpdir) - return found - elif os.path.exists(spec): - # Existing file or directory, just return it - return spec - else: - spec = parse_requirement_arg(spec) - return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) - - def fetch_distribution( # noqa: C901 # is too complex (14) # FIXME - self, requirement, tmpdir, force_scan=False, source=False, - develop_ok=False, local_index=None): - """Obtain a distribution suitable for fulfilling `requirement` - - `requirement` must be a ``pkg_resources.Requirement`` instance. - If necessary, or if the `force_scan` flag is set, the requirement is - searched for in the (online) package index as well as the locally - installed packages. If a distribution matching `requirement` is found, - the returned distribution's ``location`` is the value you would have - gotten from calling the ``download()`` method with the matching - distribution's URL or filename. If no matching distribution is found, - ``None`` is returned. - - If the `source` flag is set, only source distributions and source - checkout links will be considered. Unless the `develop_ok` flag is - set, development and system eggs (i.e., those using the ``.egg-info`` - format) will be ignored. - """ - # process a Requirement - self.info("Searching for %s", requirement) - skipped = {} - dist = None - - def find(req, env=None): - if env is None: - env = self - # Find a matching distribution; may be called more than once - - for dist in env[req.key]: - - if dist.precedence == DEVELOP_DIST and not develop_ok: - if dist not in skipped: - self.warn( - "Skipping development or system egg: %s", dist, - ) - skipped[dist] = 1 - continue - - test = ( - dist in req - and (dist.precedence <= SOURCE_DIST or not source) - ) - if test: - loc = self.download(dist.location, tmpdir) - dist.download_location = loc - if os.path.exists(dist.download_location): - return dist - - if force_scan: - self.prescan() - self.find_packages(requirement) - dist = find(requirement) - - if not dist and local_index is not None: - dist = find(requirement, local_index) - - if dist is None: - if self.to_scan is not None: - self.prescan() - dist = find(requirement) - - if dist is None and not force_scan: - self.find_packages(requirement) - dist = find(requirement) - - if dist is None: - self.warn( - "No local packages or working download links found for %s%s", - (source and "a source distribution of " or ""), - requirement, - ) - else: - self.info("Best match: %s", dist) - return dist.clone(location=dist.download_location) - - def fetch(self, requirement, tmpdir, force_scan=False, source=False): - """Obtain a file suitable for fulfilling `requirement` - - DEPRECATED; use the ``fetch_distribution()`` method now instead. For - backward compatibility, this routine is identical but returns the - ``location`` of the downloaded distribution instead of a distribution - object. 
- """ - dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) - if dist is not None: - return dist.location - return None - - def gen_setup(self, filename, fragment, tmpdir): - match = EGG_FRAGMENT.match(fragment) - dists = match and [ - d for d in - interpret_distro_name(filename, match.group(1), None) if d.version - ] or [] - - if len(dists) == 1: # unambiguous ``#egg`` fragment - basename = os.path.basename(filename) - - # Make sure the file has been downloaded to the temp dir. - if os.path.dirname(filename) != tmpdir: - dst = os.path.join(tmpdir, basename) - from setuptools.command.easy_install import samefile - if not samefile(filename, dst): - shutil.copy2(filename, dst) - filename = dst - - with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: - file.write( - "from setuptools import setup\n" - "setup(name=%r, version=%r, py_modules=[%r])\n" - % ( - dists[0].project_name, dists[0].version, - os.path.splitext(basename)[0] - ) - ) - return filename - - elif match: - raise DistutilsError( - "Can't unambiguously interpret project/version identifier %r; " - "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment, dists) - ) - else: - raise DistutilsError( - "Can't process plain .py files without an '#egg=name-version'" - " suffix to enable automatic setup script generation." - ) - - dl_blocksize = 8192 - - def _download_to(self, url, filename): - self.info("Downloading %s", url) - # Download the file - fp = None - try: - checker = HashChecker.from_url(url) - fp = self.open_url(url) - if isinstance(fp, urllib.error.HTTPError): - raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code, fp.msg) - ) - headers = fp.info() - blocknum = 0 - bs = self.dl_blocksize - size = -1 - if "content-length" in headers: - # Some servers return multiple Content-Length headers :( - sizes = headers.get_all('Content-Length') - size = max(map(int, sizes)) - self.reporthook(url, filename, blocknum, bs, size) - with open(filename, 'wb') as tfp: - while True: - block = fp.read(bs) - if block: - checker.feed(block) - tfp.write(block) - blocknum += 1 - self.reporthook(url, filename, blocknum, bs, size) - else: - break - self.check_hash(checker, filename, tfp) - return headers - finally: - if fp: - fp.close() - - def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op - - # FIXME: - def open_url(self, url, warning=None): # noqa: C901 # is too complex (12) - if url.startswith('file:'): - return local_open(url) - try: - return open_with_auth(url, self.opener) - except (ValueError, http.client.InvalidURL) as v: - msg = ' '.join([str(arg) for arg in v.args]) - if warning: - self.warn(warning, msg) - else: - raise DistutilsError('%s %s' % (url, msg)) from v - except urllib.error.HTTPError as v: - return v - except urllib.error.URLError as v: - if warning: - self.warn(warning, v.reason) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v.reason)) from v - except http.client.BadStatusLine as v: - if warning: - self.warn(warning, v.line) - else: - raise DistutilsError( - '%s returned a bad status line. The server might be ' - 'down, %s' % - (url, v.line) - ) from v - except (http.client.HTTPException, socket.error) as v: - if warning: - self.warn(warning, v) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v)) from v - - def _download_url(self, scheme, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) - if name: - while '..' 
in name: - name = name.replace('..', '.').replace('\\', '_') - else: - name = "__downloaded__" # default if URL has no path contents - - if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download - - filename = os.path.join(tmpdir, name) - - # Download the file - # - if scheme == 'svn' or scheme.startswith('svn+'): - return self._download_svn(url, filename) - elif scheme == 'git' or scheme.startswith('git+'): - return self._download_git(url, filename) - elif scheme.startswith('hg+'): - return self._download_hg(url, filename) - elif scheme == 'file': - return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) - else: - self.url_ok(url, True) # raises error if not allowed - return self._attempt_download(url, filename) - - def scan_url(self, url): - self.process_url(url, True) - - def _attempt_download(self, url, filename): - headers = self._download_to(url, filename) - if 'html' in headers.get('content-type', '').lower(): - return self._download_html(url, headers, filename) - else: - return filename - - def _download_html(self, url, headers, filename): - file = open(filename) - for line in file: - if line.strip(): - # Check for a subversion index page - if re.search(r'([^- ]+ - )?Revision \d+:', line): - # it's a subversion index page: - file.close() - os.unlink(filename) - return self._download_svn(url, filename) - break # not an index page - file.close() - os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at " + url) - - def _download_svn(self, url, filename): - warnings.warn("SVN download support is deprecated", UserWarning) - url = url.split('#', 1)[0] # remove any fragment for svn's sake - creds = '' - if url.lower().startswith('svn:') and '@' in url: - scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) - if not netloc and path.startswith('//') and '/' in path[2:]: - netloc, path = path[2:].split('/', 1) - auth, host = _splituser(netloc) - if auth: - if ':' in auth: - user, pw = auth.split(':', 1) - creds = " --username=%s --password=%s" % (user, pw) - else: - creds = " --username=" + auth - netloc = host - parts = scheme, netloc, url, p, q, f - url = urllib.parse.urlunparse(parts) - self.info("Doing subversion checkout from %s to %s", url, filename) - os.system("svn checkout%s -q %s %s" % (creds, url, filename)) - return filename - - @staticmethod - def _vcs_split_rev_from_url(url, pop_prefix=False): - scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - - scheme = scheme.split('+', 1)[-1] - - # Some fragment identification fails - path = path.split('#', 1)[0] - - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - - # Also, discard fragment - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) - - return url, rev - - def _download_git(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing git clone from %s to %s", url, filename) - os.system("git clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Checking out %s", rev) - os.system("git -C %s checkout --quiet %s" % ( - filename, - rev, - )) - - return filename - - def _download_hg(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing hg clone from %s to %s", url, filename) - os.system("hg clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Updating to %s", rev) - os.system("hg --cwd %s up -C -r %s -q" % ( 
- filename, - rev, - )) - - return filename - - def debug(self, msg, *args): - log.debug(msg, *args) - - def info(self, msg, *args): - log.info(msg, *args) - - def warn(self, msg, *args): - log.warn(msg, *args) - - -# This pattern matches a character entity reference (a decimal numeric -# references, a hexadecimal numeric reference, or a named reference). -entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub - - -def decode_entity(match): - what = match.group(0) - return html.unescape(what) - - -def htmldecode(text): - """ - Decode HTML entities in the given text. - - >>> htmldecode( - ... 'https://../package_name-0.1.2.tar.gz' - ... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz') - 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz' - """ - return entity_sub(decode_entity, text) - - -def socket_timeout(timeout=15): - def _socket_timeout(func): - def _socket_timeout(*args, **kwargs): - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - return func(*args, **kwargs) - finally: - socket.setdefaulttimeout(old_timeout) - - return _socket_timeout - - return _socket_timeout - - -def _encode_auth(auth): - """ - Encode auth from a URL suitable for an HTTP header. - >>> str(_encode_auth('username%3Apassword')) - 'dXNlcm5hbWU6cGFzc3dvcmQ=' - - Long auth strings should not cause a newline to be inserted. - >>> long_auth = 'username:' + 'password'*10 - >>> chr(10) in str(_encode_auth(long_auth)) - False - """ - auth_s = urllib.parse.unquote(auth) - # convert to bytes - auth_bytes = auth_s.encode() - encoded_bytes = base64.b64encode(auth_bytes) - # convert back to a string - encoded = encoded_bytes.decode() - # strip the trailing carriage return - return encoded.replace('\n', '') - - -class Credential: - """ - A username/password pair. Use like a namedtuple. - """ - - def __init__(self, username, password): - self.username = username - self.password = password - - def __iter__(self): - yield self.username - yield self.password - - def __str__(self): - return '%(username)s:%(password)s' % vars(self) - - -class PyPIConfig(configparser.RawConfigParser): - def __init__(self): - """ - Load from ~/.pypirc - """ - defaults = dict.fromkeys(['username', 'password', 'repository'], '') - configparser.RawConfigParser.__init__(self, defaults) - - rc = os.path.join(os.path.expanduser('~'), '.pypirc') - if os.path.exists(rc): - self.read(rc) - - @property - def creds_by_repository(self): - sections_with_repositories = [ - section for section in self.sections() - if self.get(section, 'repository').strip() - ] - - return dict(map(self._get_repo_cred, sections_with_repositories)) - - def _get_repo_cred(self, section): - repo = self.get(section, 'repository').strip() - return repo, Credential( - self.get(section, 'username').strip(), - self.get(section, 'password').strip(), - ) - - def find_credential(self, url): - """ - If the URL indicated appears to be a repository defined in this - config, return the credential for that repository. - """ - for repository, cred in self.creds_by_repository.items(): - if url.startswith(repository): - return cred - - -def open_with_auth(url, opener=urllib.request.urlopen): - """Open a urllib2 request, handling HTTP authentication""" - - parsed = urllib.parse.urlparse(url) - scheme, netloc, path, params, query, frag = parsed - - # Double scheme does not raise on macOS as revealed by a - # failing test. We would expect "nonnumeric port". Refs #20. 
- if netloc.endswith(':'): - raise http.client.InvalidURL("nonnumeric port: ''") - - if scheme in ('http', 'https'): - auth, address = _splituser(netloc) - else: - auth = None - - if not auth: - cred = PyPIConfig().find_credential(url) - if cred: - auth = str(cred) - info = cred.username, url - log.info('Authenticating as %s for %s (from .pypirc)', *info) - - if auth: - auth = "Basic " + _encode_auth(auth) - parts = scheme, address, path, params, query, frag - new_url = urllib.parse.urlunparse(parts) - request = urllib.request.Request(new_url) - request.add_header("Authorization", auth) - else: - request = urllib.request.Request(url) - - request.add_header('User-Agent', user_agent) - fp = opener(request) - - if auth: - # Put authentication info back into request URL if same host, - # so that links found on the page will work - s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) - if s2 == scheme and h2 == address: - parts = s2, netloc, path2, param2, query2, frag2 - fp.url = urllib.parse.urlunparse(parts) - - return fp - - -# copy of urllib.parse._splituser from Python 3.8 -def _splituser(host): - """splituser('user[:passwd]@host[:port]') - --> 'user[:passwd]', 'host[:port]'.""" - user, delim, host = host.rpartition('@') - return (user if delim else None), host - - -# adding a timeout to avoid freezing package_index -open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) - - -def fix_sf_url(url): - return url # backward compatibility - - -def local_open(url): - """Read a local path, with special support for directories""" - scheme, server, path, param, query, frag = urllib.parse.urlparse(url) - filename = urllib.request.url2pathname(path) - if os.path.isfile(filename): - return urllib.request.urlopen(url) - elif path.endswith('/') and os.path.isdir(filename): - files = [] - for f in os.listdir(filename): - filepath = os.path.join(filename, f) - if f == 'index.html': - with open(filepath, 'r') as fp: - body = fp.read() - break - elif os.path.isdir(filepath): - f += '/' - files.append('<a href="{name}">{name}</a>'.format(name=f)) - else: - tmpl = ( - "<html><head><title>{url}" - "{files}") - body = tmpl.format(url=url, files='\n'.join(files)) - status, message = 200, "OK" - else: - status, message, body = 404, "Path not found", "Not found" - - headers = {'content-type': 'text/html'} - body_stream = io.StringIO(body) - return urllib.error.HTTPError(url, status, message, headers, body_stream) diff --git a/spaces/qinzhu/moe-tts-tech/app.py b/spaces/qinzhu/moe-tts-tech/app.py deleted file mode 100644 index 9180de0b174bc6c6f9d0e3ff34fa6052a0a4c682..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/moe-tts-tech/app.py +++ /dev/null @@ -1,320 +0,0 @@ -import argparse -import json -import os -import re -import tempfile -from pathlib import Path - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -import gradio.utils as gr_utils -import gradio.processing_utils as gr_processing_utils -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess - - -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) - - -gr.Audio.postprocess = 
audio_postprocess - - -def get_text(text, hps, is_symbol): - text_norm = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_symbol): - if limitation: - text_len = len(re.sub("\[([A-Z]{2})\]", "", text)) - max_len = 150 - if is_symbol: - max_len *= 3 - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_symbol) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - sid = LongTensor([speaker_id]).to(device) - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def create_vc_fn(model, hps, speaker_ids): - def vc_fn(original_speaker, target_speaker, input_audio): - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - original_speaker_id = speaker_ids[original_speaker] - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != hps.data.sampling_rate: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=hps.data.sampling_rate) - with no_grad(): - y = torch.FloatTensor(audio) - y = y.unsqueeze(0) - spec = spectrogram_torch(y, hps.data.filter_length, - hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, - center=False).to(device) - spec_lengths = LongTensor([spec.size(-1)]).to(device) - sid_src = LongTensor([original_speaker_id]).to(device) - sid_tgt = LongTensor([target_speaker_id]).to(device) - audio = model.voice_conversion(spec, spec_lengths, sid_src=sid_src, sid_tgt=sid_tgt)[0][ - 0, 0].data.cpu().float().numpy() - del y, spec, spec_lengths, sid_src, sid_tgt - return "Success", (hps.data.sampling_rate, audio) - - return vc_fn - - -def create_soft_vc_fn(model, hps, speaker_ids): - def soft_vc_fn(target_speaker, input_audio1, input_audio2): - input_audio = input_audio1 - if input_audio is None: - input_audio = input_audio2 - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - duration = audio.shape[0] / sampling_rate - if limitation and duration > 30: - return "Error: Audio is too long", None - target_speaker_id = speaker_ids[target_speaker] - - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - with torch.inference_mode(): - units = hubert.units(torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0).to(device)) - with no_grad(): - unit_lengths = LongTensor([units.size(1)]).to(device) - sid = LongTensor([target_speaker_id]).to(device) - audio = model.infer(units, unit_lengths, sid=sid, noise_scale=.667, - noise_scale_w=0.8)[0][0, 0].data.cpu().float().numpy() - del units, unit_lengths, sid - return 
"Success", (hps.data.sampling_rate, audio) - - return soft_vc_fn - - -def create_to_symbol_fn(hps): - def to_symbol_fn(is_symbol_input, input_text, temp_text): - return (_clean_text(input_text, hps.data.text_cleaners), input_text) if is_symbol_input \ - else (temp_text, temp_text) - - return to_symbol_fn - - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#{audio_id}").querySelector("audio"); - if (audio == undefined) - return; - audio = audio.src; - let oA = document.createElement("a"); - oA.download = Math.floor(Math.random()*100000000)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - - device = torch.device(args.device) - models_tts = [] - models_vc = [] - models_soft_vc = [] - with open("saved_model/info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for i, info in models_info.items(): - name = info["title"] - author = info["author"] - lang = info["lang"] - example = info["example"] - config_path = f"saved_model/{i}/config.json" - model_path = f"saved_model/{i}/model.pth" - cover = info["cover"] - cover_path = f"saved_model/{i}/{cover}" if cover else None - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval().to(device) - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - dir = f"dir_{i}" - t = info["type"] - if t == "vits": - models_tts.append((name, author, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_symbol_fn(hps),dir)) - models_vc.append((name, author, cover_path, speakers, create_vc_fn(model, hps, speaker_ids),dir)) - elif t == "soft-vits-vc": - models_soft_vc.append((name, author, cover_path, speakers, create_soft_vc_fn(model, hps, speaker_ids),dir)) - - hubert = torch.hub.load("bshall/hubert:main", "hubert_soft", trust_repo=True).to(device) - - app = gr.Blocks() - - with app: - gr.Markdown("# Moe TTS And Voice Conversion Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.moegoe)\n\n" - "[Open In Colab]" - "(https://colab.research.google.com/drive/14Pb8lpmwZL-JI5Ub6jpG4sz2-8KS0kbS?usp=sharing)" - " without queue and length limitation.\n\n" - "Feel free to [open discussion](https://huggingface.co/spaces/skytnt/moe-tts/discussions/new) " - "if you want to add your model to this app.") - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, lang, example, symbols, tts_fn, - to_symbol_fn,dir) in enumerate(models_tts): - with gr.TabItem(f"{dir}"): - with gr.Column(): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}\n\n" - f"language: {lang}") - tts_input1 = gr.TextArea(label="Text (150 words limitation)", value=example, - 
elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.5, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - temp_text_var = gr.Variable() - symbol_input = gr.Checkbox(value=False, label="Symbol input") - symbol_list = gr.Dataset(label="Symbol list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"symbol-list{i}") - symbol_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"tts-audio{i}")) - - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, symbol_input], - [tts_output1, tts_output2], api_name=f"tts-model{dir}") - symbol_input.change(to_symbol_fn, - [symbol_input, tts_input1, temp_text_var], - [tts_input1, temp_text_var]) - symbol_list.click(None, [symbol_list, symbol_list_json], [], - _js=f""" - (i,symbols) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - text_input.focus(); - text_input.selectionStart = startPos + symbols[i].length; - text_input.selectionEnd = startPos + symbols[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - with gr.TabItem("Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, vc_fn,dir) in enumerate(models_vc): - with gr.TabItem(f"{dir}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Original Speaker", choices=speakers, type="index", - value=speakers[0]) - vc_input2 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[min(len(speakers) - 1, 1)]) - vc_input3 = gr.Audio(label="Input Audio (30s limitation)") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"vc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"vc-audio{i}")) - vc_submit.click(vc_fn, [vc_input1, vc_input2, vc_input3], [vc_output1, vc_output2], api_name=f"vc-model{dir}") - with gr.TabItem("Soft Voice Conversion"): - with gr.Tabs(): - for i, (name, author, cover_path, speakers, soft_vc_fn,dir) in enumerate(models_soft_vc): - with gr.TabItem(f"{dir}"): - cover_markdown = f"![cover](file/{cover_path})\n\n" if cover_path else "" - gr.Markdown(f"## {name}\n\n" - f"{cover_markdown}" - f"model author: {author}") - vc_input1 = gr.Dropdown(label="Target Speaker", choices=speakers, type="index", - value=speakers[0]) - source_tabs = gr.Tabs() - with source_tabs: - with gr.TabItem("microphone"): - vc_input2 = gr.Audio(label="Input Audio (30s limitation)", source="microphone") - with 
gr.TabItem("upload"): - vc_input3 = gr.Audio(label="Input Audio (30s limitation)", source="upload") - vc_submit = gr.Button("Convert", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio", elem_id=f"svc-audio{i}") - download = gr.Button("Download Audio") - download.click(None, [], [], _js=download_audio_js.format(audio_id=f"svc-audio{i}")) - # clear inputs - source_tabs.set_event_trigger("change", None, [], [vc_input2, vc_input3], - js="()=>[null,null]") - vc_submit.click(soft_vc_fn, [vc_input1, vc_input2, vc_input3], - [vc_output1, vc_output2], api_name=f"svc-model{dir}") - gr.Markdown( - "unofficial demo for \n\n" - "- [https://github.com/CjangCjengh/MoeGoe](https://github.com/CjangCjengh/MoeGoe)\n" - "- [https://github.com/Francis-Komizu/VITS](https://github.com/Francis-Komizu/VITS)\n" - "- [https://github.com/luoyily/MoeTTS](https://github.com/luoyily/MoeTTS)\n" - "- [https://github.com/Francis-Komizu/Sovits](https://github.com/Francis-Komizu/Sovits)" - ) - app.queue(concurrency_count=3).launch(share=args.share) diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Autocad 2012 Crack 64 Bit Keygen Download Filehippo.md b/spaces/quidiaMuxgu/Expedit-SAM/Autocad 2012 Crack 64 Bit Keygen Download Filehippo.md deleted file mode 100644 index 11d3680b51588e6c83f1396c77995c7accbdad45..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Autocad 2012 Crack 64 Bit Keygen Download Filehippo.md +++ /dev/null @@ -1,97 +0,0 @@ - -

    Autocad 2012 Crack 64 Bit Keygen Download Filehippo: A Complete Guide

    - -

    Autocad 2012 is one of the most popular and powerful software for designing and drafting 2D and 3D models. It is used by professionals and students alike in various fields such as architecture, engineering, construction, manufacturing, and more. However, Autocad 2012 is not a free software and requires a valid license to activate and use it. If you are looking for a way to get Autocad 2012 crack 64 bit keygen download filehippo, then you have come to the right place. In this article, we will show you how to download, install, and activate Autocad 2012 crack 64 bit keygen from filehippo, a trusted and reliable source for software downloads.

    - -

    How to Download Autocad 2012 Crack 64 Bit Keygen from Filehippo

    - -

    The first step to get Autocad 2012 crack 64 bit keygen download filehippo is to visit the filehippo website and search for Autocad 2012. You will see a list of results with different versions and editions of Autocad 2012. Choose the one that matches your system requirements and click on the download button. You will be redirected to another page where you can choose the download location and start the download process. The file size is about 1.8 GB, so make sure you have enough space and a stable internet connection.

    
    - -
    

    How to Install Autocad 2012 Crack 64 Bit Keygen from Filehippo

    - -

    Once you have downloaded the Autocad 2012 crack 64 bit keygen filehippo file, you need to extract it using a tool like WinRAR or 7-Zip. You will get a folder with two files: setup.exe and xforce keygen.exe. Run the setup.exe file and follow the instructions to install Autocad 2012 on your computer. You will need to enter a serial number and a product key during the installation process. You can use any of the following serial numbers and product keys:

    - -
      -
    • Serial number: 666-69696969
    • Product key: 001D1
    - -

    After the installation is complete, do not run Autocad 2012 yet. You need to activate it using the xforce keygen.exe file. Run the xforce keygen.exe file as administrator and click on the patch button. You should see a message saying "successfully patched". Then copy the request code from the activation screen of Autocad 2012 and paste it into the keygen and press generate. You will get an activation code that you need to copy and paste back into the activation screen of Autocad 2012 and click next. You should see a message saying "thank you for activating your Autodesk product". Congratulations, you have successfully installed and activated Autocad 2012 crack 64 bit keygen from filehippo.

    - -

    How to Use Autocad 2012 Crack 64 Bit Keygen from Filehippo

    - -

    Now that you have Autocad 2012 crack 64 bit keygen download filehippo on your computer, you can start using it for your design and drafting projects. You can choose from four different workspaces: 2D Drafting & Annotation, 3D Modeling, 3D Basics, and Autocad Classic. Each workspace has different tools and features that suit your needs and preferences. You can also customize your workspace by adding or removing toolbars, menus, panels, commands, etc. You can also access various online resources such as tutorials, forums, blogs, etc. from within Autocad 2012.

    - -

    Autocad 2012 crack 64 bit keygen from filehippo is a powerful software that can help you create stunning designs and drawings in both 2D and 3D modes. It has a user-friendly interface and a rich set of features that make it easy and fun to use. However, you should be aware that using cracked software is illegal and unethical, and may expose you to security risks and legal issues. Therefore, we recommend that you use Autocad 2012 crack 64 bit keygen from filehippo only for educational purposes and not for commercial or professional purposes.

    -

    What are the Benefits of Autocad 2012 Crack 64 Bit Keygen Download Filehippo

    - -

    By using Autocad 2012 crack 64 bit keygen download filehippo, you can enjoy many benefits that this software offers. Some of the benefits are:

    - -
      -
    • You can save money by not having to buy a license or subscription for Autocad 2012.
    • You can access all the features and functions of Autocad 2012 without any limitations or restrictions.
    • You can create and edit 2D and 3D models with high quality and accuracy.
    • You can work with various file formats and compatibility with other software and devices.
    • You can customize your workspace and interface according to your preferences and needs.
    • You can learn and improve your skills in design and drafting with Autocad 2012.
    - -

    What are the Risks of Autocad 2012 Crack 64 Bit Keygen Download Filehippo

    - -

    However, using Autocad 2012 crack 64 bit keygen download filehippo also comes with some risks that you should be aware of. Some of the risks are:

    - -
      -
    • You may violate the intellectual property rights and terms of service of Autodesk, the developer of Autocad 2012.
    • You may face legal consequences and penalties for using cracked software.
    • You may expose your computer and data to malware, viruses, spyware, and other threats that may harm your system and compromise your security.
    • You may encounter errors, bugs, crashes, and performance issues that may affect your work and productivity.
    • You may not receive updates, support, or assistance from Autodesk or other sources for Autocad 2012.
    • You may miss out on new features and improvements that Autodesk may release for Autocad 2012 or later versions.
    - -

    How to Avoid the Risks of Autocad 2012 Crack 64 Bit Keygen Download Filehippo

    - -

    If you want to avoid the risks of using Autocad 2012 crack 64 bit keygen download filehippo, you have some options that you can consider. Some of the options are:

    - -
      -
    • You can buy a genuine license or subscription for Autocad 2012 from Autodesk or authorized resellers. This way, you can use the software legally and safely, and enjoy all the benefits and features that it offers.
    • You can use a free trial version of Autocad 2012 from Autodesk's website. This way, you can test and evaluate the software for a limited time before deciding whether to buy it or not.
    • You can use an alternative software package that is similar to Autocad 2012 but free or cheaper. Some examples are LibreCAD, FreeCAD, SketchUp, etc. However, you may not get the same quality and functionality as Autocad 2012.
    - -

    In conclusion, Autocad 2012 crack 64 bit keygen download filehippo is a software that can help you create and edit 2D and 3D models with ease and efficiency. However, using cracked software is illegal and unethical, and may expose you to various risks and issues. Therefore, we recommend that you use Autocad 2012 crack 64 bit keygen download filehippo only for educational purposes and not for commercial or professional purposes.

    -

    -

    What are the Features of Autocad 2012 Crack 64 Bit Keygen Download Filehippo

    - -

    Autocad 2012 crack 64 bit keygen download filehippo has many features that make it a powerful and versatile software for design and drafting. Some of the features are:

    - -
      -
    • Parametric drawing: You can create and edit geometric and dimensional constraints that control the relationships and dimensions of objects.
    • Associative array: You can create and modify arrays of objects along a path, a rectangular pattern, or a polar pattern.
    • Multi-functional grips: You can easily manipulate objects by using the grips that appear on them. You can stretch, move, rotate, scale, copy, and more.
    • Dynamic blocks: You can create and edit blocks that have different states, properties, and behaviors.
    • Point cloud support: You can import and attach point cloud data from laser scanners or other sources. You can also snap to points and work with point cloud regions.
    • 3D modeling and visualization: You can create and edit solid, surface, and mesh models. You can also apply materials, lighting, shadows, and rendering effects to enhance the appearance of your models.
    - -

    What are the Tips and Tricks for Autocad 2012 Crack 64 Bit Keygen Download Filehippo

    - -

    If you want to use Autocad 2012 crack 64 bit keygen download filehippo more efficiently and effectively, you can follow some tips and tricks that can help you improve your skills and productivity. Some of the tips and tricks are:

    - -
      -
    • Use keyboard shortcuts: You can use keyboard shortcuts to access commands and tools faster than using the mouse or menus. You can also customize your own keyboard shortcuts using the CUI editor.
    • Use command aliases: You can use command aliases to type abbreviated versions of commands instead of typing the full names. For example, you can type L instead of LINE, C instead of CIRCLE, etc. You can also customize your own command aliases using the PGP file (see the sample acad.pgp entries just after this list).
    • Use object snaps: You can use object snaps to snap to precise points on objects such as endpoints, midpoints, centers, intersections, etc. You can also use object snap tracking to align objects along horizontal or vertical lines.
    • Use quick properties: You can use quick properties to view and modify the properties of selected objects without opening the properties palette. You can also customize which properties are displayed in the quick properties panel.
    • Use quick select: You can use quick select to select objects based on criteria such as layer, color, linetype, etc. You can also save and load selection sets for future use.
    - -
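    As a quick illustration of the command-alias tip above: aliases live in the acad.pgp text file as an abbreviation, a comma, and the full command name prefixed with an asterisk. The entries below are a sketch of that format, not the exact stock file shipped with AutoCAD 2012:

    ```
    ; Sample acad.pgp command aliases (illustrative entries, not the full stock file)
    L,        *LINE
    C,        *CIRCLE
    CO,       *COPY
    MI,       *MIRROR
    RO,       *ROTATE
    ```

    After editing acad.pgp, running the REINIT command (or restarting AutoCAD) reloads the alias definitions.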

    Conclusion

    - -

    In this article, we have shown you how to download, install, and activate Autocad 2012 crack 64 bit keygen from filehippo. We have also discussed the benefits, risks, features, and tips and tricks of using this software. We hope that this article has been helpful and informative for you. However, we remind you that using cracked software is illegal and unethical, and may expose you to various risks and issues. Therefore, we recommend that you use Autocad 2012 crack 64 bit keygen download filehippo only for educational purposes and not for commercial or professional purposes.

    -


    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Cartoon Maker 6.01 Full Download Crack.md b/spaces/quidiaMuxgu/Expedit-SAM/Cartoon Maker 6.01 Full Download Crack.md deleted file mode 100644 index 54e7462e4ab16b92f905fa8010a4393e97740712..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Cartoon Maker 6.01 Full Download Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

    cartoon maker 6.01 full download crack


    DOWNLOAD ✓✓✓ https://geags.com/2uCr2F



    - -Download Plotagon for Windows now from Softonic: 100% safe and virus free. ... Download Plotagon latest version 2021. ... Aurora 3D Animation Maker. 20.01. 1fdad05405
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Dragon Quest 8 Iso Jpn.md b/spaces/quidiaMuxgu/Expedit-SAM/Dragon Quest 8 Iso Jpn.md deleted file mode 100644 index da24d63c610c8208be8c638869021fae16105f23..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Dragon Quest 8 Iso Jpn.md +++ /dev/null @@ -1,73 +0,0 @@ - -

    Dragon Quest 8 ISO JPN: How to Download and Play the Japanese Version of the Classic RPG

    -

    Dragon Quest 8, also known as Dragon Quest VIII: The Cursed Princess and the Sky, Sea and Earth, is one of the most popular and acclaimed games in the Dragon Quest series. It was originally released for the PlayStation 2 in 2004 in Japan, and later in 2005 in North America and Europe. It features beautiful cel-shaded graphics, a vast open world, a memorable story and characters, and a classic turn-based combat system.

    -

    However, if you are a fan of the original Japanese version of the game, or if you want to experience the game with its original voice acting and soundtrack, you may want to download and play the Dragon Quest 8 ISO JPN. This is a file that contains the Japanese version of the game that you can play on your computer or mobile device using an emulator. In this article, we will show you how to find and download the Dragon Quest 8 ISO JPN, as well as how to play it with an emulator.

    -

    dragon quest 8 iso jpn


    DOWNLOADhttps://geags.com/2uCrPj



    -

    How to Find and Download the Dragon Quest 8 ISO JPN

    -

    There are many websites that offer ISO files for various games, including Dragon Quest 8 ISO JPN. However, not all of them are reliable or safe. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful when downloading and installing any ISO file from the internet.

    -

    One way to find a trustworthy Dragon Quest 8 ISO JPN is to use a reputable search engine and look for reviews and feedback from other users. You can also check the ratings and comments on the download page to see if the file works and if it has any problems or issues. You should also scan the downloaded file with an antivirus program before running it on your device.

    -

    Some of the websites that offer Dragon Quest 8 ISO JPN are:

    -
      -
    • CDRomance: This website provides ISO files for various PlayStation 2 games, including Dragon Quest 8 ISO JPN. It also provides information about the game, such as its title, genre, publisher, language, image format, game ID, and user score. It also shows screenshots and download links for the file.
    • CoolROM: This website provides ROM files for various consoles and handhelds, including PlayStation 2 games such as Dragon Quest 8 ISO JPN. It also provides information about the game, such as its name, note, genre, file size, rating, and download link. It also shows screenshots and related games for the file.
    -

    How to Play the Dragon Quest 8 ISO JPN with an Emulator

    -

    Once you have downloaded the Dragon Quest 8 ISO JPN file, you can play it on your device using an emulator. An emulator is a software that mimics the hardware and software of another device, such as a PlayStation 2. By using an emulator, you can play games that are not compatible with your device.

    -

    There are many emulators that you can use to play PlayStation 2 games on your device. However, not all of them are compatible or easy to use. Some of them may require complex settings or configurations to run properly. Therefore, you should choose an emulator that suits your device and preferences.

    -

    Some of the emulators that you can use to play Dragon Quest 8 ISO JPN are:

    -
      -
    • PCSX2: This is one of the most popular and reliable emulators for PlayStation 2 games. It is available for Windows, Mac OS X, and Linux platforms. It supports many games and features, such as high-resolution graphics, save states, cheats, controllers, memory cards, and more. It also has a user-friendly interface and a help system that can guide you through the installation and configuration process.
    • DamonPS2: This is one of the best emulators for PlayStation 2 games on Android devices. It has a high compatibility rate with over 90% of PS2 games. It also supports many features, such as fast speed, smooth gameplay, HD graphics, gamepad support, cloud saving, cheat codes, widescreen mode, and more. It also has a simple interface and a tutorial that can help you set up the emulator.
    -

    The Conclusion of Dragon Quest 8 ISO JPN

    -

    In conclusion, Dragon Quest 8 ISO JPN is a file that contains the Japanese version of Dragon Quest 8: The Cursed Princess and the Sky, Sea and Earth for PlayStation 2. It is a great option for fans of the original version of the game or for those who want to experience it with its original voice acting and soundtrack. However, it also requires some steps and tools to download and play it on your device.

    -

    -

    Therefore, we recommend that you follow these steps carefully before downloading and playing Dragon Quest 8 ISO JPN:

    -
      -
    1. Find a trustworthy website that offers the Dragon Quest 8 ISO JPN file.
    2. Download the file to your device using a secure connection.
    3. Scan the file with an antivirus program before running it on your device.
    4. Choose an emulator that suits your device and preferences.
    5. Install and configure the emulator according to its instructions.
    6. Load the Dragon Quest 8 ISO JPN file on the emulator and enjoy playing it.

    -

    The Tips and Tricks for Dragon Quest 8 ISO JPN

    -

    Dragon Quest 8 ISO JPN is a challenging and rewarding game that can provide you with hours of fun and entertainment. However, it can also be frustrating and difficult at times, especially if you are new to the game or if you are stuck at a certain point. Therefore, you may want to use some tips and tricks that can help you improve your gameplay and enjoy the game more. Here are some of them:

    -
      -
    • Save often and use multiple save slots. The game does not have an auto-save feature, so you should save your progress manually whenever you can. You should also use different save slots to avoid overwriting your previous data in case you make a mistake or want to try a different option.
    • Use the alchemy pot to create new items and equipment. The alchemy pot is a device that allows you to combine two or more items to create a new one. You can find recipes for various combinations in books, chests, or by talking to NPCs. You can also experiment with different items and see what you can make.
    • Level up your characters and their skills. The game has a level-based system that determines your characters' stats and abilities. You should fight enemies and gain experience points to level up your characters and make them stronger. You should also allocate skill points to different categories to learn new skills and improve your performance in battle.
    • Explore the world and complete side quests. The game has a huge open world that you can explore freely. You can find hidden items, secrets, mini-games, and optional bosses by exploring different areas. You can also complete side quests that can give you rewards, such as items, money, or skill points.
    -

    The Reviews and Feedback of Dragon Quest 8 ISO JPN

    -

    Dragon Quest 8 ISO JPN has received positive reviews and feedback from many users who have played it on their devices using an emulator. Here are some of the comments and testimonials from satisfied players:

    -
    "Dragon Quest 8 ISO JPN is one of my favorite games of all time. It has a great story, characters, gameplay, graphics, and music. It is also very fun and addictive to play on my PC with an emulator. I highly recommend it to anyone who loves RPGs." - Alex, PC gamer
    -
    "I have been playing Dragon Quest 8 ISO JPN on my Android phone with an emulator and I am very impressed with it. It runs smoothly and flawlessly on my device, and it looks amazing on the screen. It is also very easy to control and navigate with the touch screen. It is a masterpiece of a game that I enjoy playing every day." - Lisa, Android gamer
    -
    "Dragon Quest 8 ISO JPN is a fantastic game that I have been playing on my Mac with an emulator. It is very compatible and stable with my system, and it has no glitches or errors. It is also very immersive and engaging with its rich story and world. It is a classic RPG that I love playing over and over again." - Ryan, Mac gamer
    -

    The History and Development of Dragon Quest 8 ISO JPN

    -

    Dragon Quest 8 ISO JPN is the eighth main installment in the Dragon Quest series, which is one of the most popular and influential RPG series in Japan and worldwide. The series was created by Yuji Horii, who is also known for his work on the Chrono and Portopia series. The series also features the art direction of Akira Toriyama, who is famous for his manga and anime works, such as Dragon Ball and Dr. Slump. The series also features the music composition of Koichi Sugiyama, who is a renowned orchestral conductor and composer.

    -

    Dragon Quest 8 ISO JPN was developed by Level-5, a Japanese video game developer that is also responsible for other successful games, such as Dark Cloud, Rogue Galaxy, Professor Layton, Ni no Kuni, and Yo-kai Watch. The game was directed by Akihiro Hino, who is the founder and president of Level-5. The game was produced by Ryutaro Ichimura, who is a veteran producer of the Dragon Quest series. The game was written by Yuji Horii, who also wrote the previous games in the series.

    -

    Dragon Quest 8 ISO JPN was released for the PlayStation 2 in Japan on November 27th, 2004. It was later released in North America on November 15th, 2005, and in Europe on April 13th, 2006. The game sold over 4.9 million copies worldwide and received critical acclaim from critics and fans alike. It won several awards and accolades, such as the Japan Game Awards 2004 Grand Award, the Famitsu Award 2004 Excellence Prize, and the GameSpot Best PS2 Game of 2005 Award.

    -

    The Conclusion of Dragon Quest 8 ISO JPN

    -

    In conclusion, Dragon Quest 8 ISO JPN is a file that contains the Japanese version of Dragon Quest VIII: The Cursed Princess and the Sky, Sea and Earth for PlayStation 2. It is a wonderful game that offers a captivating story, charming characters, engaging gameplay, stunning graphics, and beautiful music. It is also a great option for fans of the original version of the game or for those who want to experience it with its original voice acting and soundtrack. However, it also requires some steps and tools to download and play it on your device using an emulator.

    -

    Therefore, we recommend that you follow these steps carefully before downloading and playing Dragon Quest 8 ISO JPN:

    -
      -
    1. Find a trustworthy website that offers the Dragon Quest 8 ISO JPN file.
    2. Download the file to your device using a secure connection.
    3. Scan the file with an antivirus program before running it on your device.
    4. Choose an emulator that suits your device and preferences.
    5. Install and configure the emulator according to its instructions.
    6. Load the Dragon Quest 8 ISO JPN file on the emulator and enjoy playing it.
    -


    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Fumando Crack En La Calle Documental.md b/spaces/quidiaMuxgu/Expedit-SAM/Fumando Crack En La Calle Documental.md deleted file mode 100644 index 2d35c2daba4511e2b48571181fbb6ec23cc62647..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Fumando Crack En La Calle Documental.md +++ /dev/null @@ -1,6 +0,0 @@ -

    fumando crack en la calle documental


    DOWNLOAD >>> https://geags.com/2uCsqI



    -
    -Volcano trips are popular; top sight is the Crack of Doom on Mount Doom, where you're guaranteed the sight of flowing lava, just as at Stromboli in Italy. Buy ring ( ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Lava R5 Flash File Frp Reset Dead Fix Customer Care Firmware NEW!.md b/spaces/quidiaMuxgu/Expedit-SAM/Lava R5 Flash File Frp Reset Dead Fix Customer Care Firmware NEW!.md deleted file mode 100644 index e34d980281f7f9cefd527e3368680756a3b318af..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Lava R5 Flash File Frp Reset Dead Fix Customer Care Firmware NEW!.md +++ /dev/null @@ -1,44 +0,0 @@ -

    Lava R5 Flash File Frp Reset Dead Fix Customer Care | Firmware |


    Download ✪✪✪ https://geags.com/2uCrlp



    - -  If the stock ROM app is not working for your cell phone then you can follow below post for custom ROM.  - -Latest version of custom ROM from MoDaCo  - -You have to just download this custom ROM version from the link below.  - -Now just extract the extracted downloaded file to your SD card  - -Now go to your smartphone and just press the power button to enter into recovery mode  - -Now press the volume up button and tap the power button simultaneously to enter into Custom recovery mode.  - -Now just tap and hold the “Install” button  - -Now just select the downloaded file that you have extracted before.  - -Now just go back to recovery mode and press the volume up button to return to “main”  - -Now go to the “Clockwork recovery” option and press the power button to enter into recovery mode.  - -Now go to the “Install” option.  - -Now go back to “Clockwork recovery” and tap the “Install zip from SD card” option.  - -Now go back to “Clockwork recovery” and tap the “Reboot system” option.  - -Now you will see the “Reboot system now” option.  - -Now go back to “Clockwork recovery” and tap the “Wipe data/Factory reset” option.  - -Now go back to “Clockwork recovery” and tap the “Wipe cache partition” option.  - -Now go back to “Clockwork recovery” and tap the “Advanced” option.  - -Now go back to the “Wipe data/Factory reset” option and select “Yes”.  - -Now go back to “Clockwork recovery” and tap the “Reboot system now” option.  - -Now you will see the “Reboot system now� 4fefd39f24
    -
    -
    -

    diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/vc/modules.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/vc/modules.py deleted file mode 100644 index 9338160b00595fa24e2991e06a65d48a2d92e7c4..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/vc/modules.py +++ /dev/null @@ -1,699 +0,0 @@ -import os, sys -import traceback -import logging -now_dir = os.getcwd() -sys.path.append(now_dir) -logger = logging.getLogger(__name__) -import lib.globals.globals as rvc_globals -import numpy as np -import soundfile as sf -import torch -from io import BytesIO -from lib.infer.infer_libs.audio import load_audio -from lib.infer.infer_libs.audio import wav2 -from lib.infer.infer_libs.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from lib.infer.modules.vc.pipeline import Pipeline -from lib.infer.modules.vc.utils import * -import tabs.merge as merge -import time -import scipy.io.wavfile as wavfile -import glob -from shutil import move -sup_audioext = { - "wav", - "mp3", - "flac", - "ogg", - "opus", - "m4a", - "mp4", - "aac", - "alac", - "wma", - "aiff", - "webm", - "ac3", -} -def note_to_hz(note_name): - SEMITONES = {'C': -9, 'C#': -8, 'D': -7, 'D#': -6, 'E': -5, 'F': -4, 'F#': -3, 'G': -2, 'G#': -1, 'A': 0, 'A#': 1, 'B': 2} - pitch_class, octave = note_name[:-1], int(note_name[-1]) - semitone = SEMITONES[pitch_class] - note_number = 12 * (octave - 4) + semitone - frequency = 440.0 * (2.0 ** (1.0/12)) ** note_number - return frequency - -class VC: - def __init__(self, config): - self.n_spk = None - self.tgt_sr = None - self.net_g = None - self.pipeline = None - self.cpt = None - self.version = None - self.if_f0 = None - self.version = None - self.hubert_model = None - - self.config = config - - def get_vc(self, sid, *to_return_protect): - logger.info("Get sid: " + sid) - - to_return_protect0 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[0] - if self.if_f0 != 0 and to_return_protect - else 0.5, - "__type__": "update", - } - to_return_protect1 = { - "visible": self.if_f0 != 0, - "value": to_return_protect[1] - if self.if_f0 != 0 and to_return_protect - else 0.33, - "__type__": "update", - } - - if sid == "" or sid == []: - if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 - logger.info("Clean model cache") - del ( - self.net_g, - self.n_spk, - self.vc, - self.hubert_model, - self.tgt_sr, - ) # ,cpt - self.hubert_model = ( - self.net_g - ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() - ###楼下不这么折腾清理不干净 - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *self.cpt["config"], is_half=self.config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) - del self.net_g, self.cpt - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return ( - {"visible": False, "__type__": "update"}, - { - "visible": True, - "value": to_return_protect0, - "__type__": "update", - }, - { - "visible": True, - "value": to_return_protect1, - 
"__type__": "update", - }, - "", - "", - ) - #person = f'{os.getenv("weight_root")}/{sid}' - person = f'{sid}' - #logger.info(f"Loading: {person}") - logger.info(f"Loading...") - self.cpt = torch.load(person, map_location="cpu") - self.tgt_sr = self.cpt["config"][-1] - self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk - self.if_f0 = self.cpt.get("f0", 1) - self.version = self.cpt.get("version", "v1") - - synthesizer_class = { - ("v1", 1): SynthesizerTrnMs256NSFsid, - ("v1", 0): SynthesizerTrnMs256NSFsid_nono, - ("v2", 1): SynthesizerTrnMs768NSFsid, - ("v2", 0): SynthesizerTrnMs768NSFsid_nono, - } - - self.net_g = synthesizer_class.get( - (self.version, self.if_f0), SynthesizerTrnMs256NSFsid - )(*self.cpt["config"], is_half=self.config.is_half) - - del self.net_g.enc_q - - self.net_g.load_state_dict(self.cpt["weight"], strict=False) - self.net_g.eval().to(self.config.device) - if self.config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - - self.pipeline = Pipeline(self.tgt_sr, self.config) - n_spk = self.cpt["config"][-3] - index = {"value": get_index_path_from_model(sid), "__type__": "update"} - logger.info("Select index: " + index["value"]) - - return ( - ( - {"visible": False, "maximum": n_spk, "__type__": "update"}, - to_return_protect0, - to_return_protect1 - ) - if to_return_protect - else {"visible": False, "maximum": n_spk, "__type__": "update"} - ) - - - def vc_single( - self, - sid, - input_audio_path1, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - split_audio, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - global total_time - total_time = 0 - start_time = time.time() - if not input_audio_path1: - return "You need to upload an audio", None - - if (not os.path.exists(input_audio_path1)) and (not os.path.exists(os.path.join(now_dir, input_audio_path1))): - return "Audio was not properly selected or doesn't exist", None - if split_audio: - resultm, new_dir_path = merge.process_audio(input_audio_path1) - print(resultm) - print("------") - print(new_dir_path) - if resultm == "Finish": - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - # Use the code from vc_multi to process the segmented audio - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - - try: - dir_path = ( - new_dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # Prevent leading/trailing whitespace and quotes - try: - if dir_path != "": - paths = [ - os.path.join(root, name) - for root, _, files in os.walk(dir_path, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext)) and root == dir_path - ] - except: - traceback.print_exc() - print(paths) - for path in paths: - info, opt = self.vc_single_dont_save( - sid, - path, - f0_up_key, - None, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ) - if "Success" in 
info: - try: - tgt_sr, audio_opt = opt - output_filename = os.path.splitext(os.path.basename(path))[0] - if format1 in ["wav", "flac"]: - sf.write( - "%s/%s.%s" - % (new_dir_path, output_filename, format1), - audio_opt, - tgt_sr, - ) - else: - path = "%s/%s.%s" % (new_dir_path, output_filename, format1) - with BytesIO() as wavf: - sf.write( - wavf, - audio_opt, - tgt_sr, - format="wav" - ) - wavf.seek(0, 0) - with open(path, "wb") as outf: - wav2(wavf, outf, format1) - except: - print(traceback.format_exc()) - except: - print(traceback.format_exc()) - - time.sleep(0.5) - print("Finished processing segmented audio, now merging audio...") - - # Une el audio segmentado - merge_timestamps_file = os.path.join(os.path.dirname(new_dir_path), f"{os.path.basename(input_audio_path1).split('.')[0]}_timestamps.txt") - merge.merge_audio(merge_timestamps_file) - - # Calculate the elapsed time - end_time = time.time() - total_time = end_time - start_time - - merged_audio_path = os.path.join(os.path.dirname(new_dir_path), "audio-outputs", f"{os.path.basename(input_audio_path1).split('.')[0]}_merged.wav") - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - - return ( - "Success.\n%s\nTime:\infer: %s." - % (index_info, total_time), - merged_audio_path, - ) - - print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") - f0_up_key = int(f0_up_key) - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=rvc_globals.DoFormant, - Quefrency=rvc_globals.Quefrency, - Timbre=rvc_globals.Timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(self.config) - - try: - self.if_f0 = self.cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - try: - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - crepe_hop_length, - f0_autotune, - f0_file=f0_file, - f0_min=f0_min, - f0_max=f0_max - ) - except AssertionError: - message = "Mismatching index version detected (v1 with v2, or v2 with v1)." - print(message) - return message, None - except NameError: - message = "RVC libraries are still loading. Please try again in a few seconds." - print(message) - return message, None - - if self.tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - else: - tgt_sr = self.tgt_sr - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." 
- ) - end_time = time.time() - total_time = end_time - start_time - opt_root = "assets/audios/audio-outputs" - os.makedirs(opt_root, exist_ok=True) - output_count = 1 - - while True: - opt_filename = f"generated_audio_{output_count}.{format1}" - current_output_path = os.path.join(opt_root, opt_filename) - if not os.path.exists(current_output_path): - break - output_count += 1 - try: - if format1 in ["wav", "flac"]: - sf.write( - current_output_path, - audio_opt, - self.tgt_sr, - ) - print(f"💾 Generated audio saved to: {current_output_path}") - else: - with BytesIO() as wavf: - sf.write( - wavf, - audio_opt, - self.tgt_sr, - format="wav" - ) - wavf.seek(0, 0) - with open(current_output_path, "wb") as outf: - wav2(wavf, outf, format1) - print(f"💾 Generated audio saved to: {current_output_path}") - except: - info = traceback.format_exc() - return ( - "Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs." - % (index_info, *times), - (tgt_sr, audio_opt), - ) - except: - info = traceback.format_exc() - logger.warn(info) - return info, (None, None) - - def vc_single_dont_save( - self, - sid, - input_audio_path1, - f0_up_key, - f0_file, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - global total_time - total_time = 0 - start_time = time.time() - if not input_audio_path1: - return "You need to upload an audio", None - - if (not os.path.exists(input_audio_path1)) and (not os.path.exists(os.path.join(now_dir, input_audio_path1))): - return "Audio was not properly selected or doesn't exist", None - - print(f"\nStarting inference for '{os.path.basename(input_audio_path1)}'") - f0_up_key = int(f0_up_key) - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - print(f"Attempting to load {input_audio_path1}....") - audio = load_audio(file=input_audio_path1, - sr=16000, - DoFormant=rvc_globals.DoFormant, - Quefrency=rvc_globals.Quefrency, - Timbre=rvc_globals.Timbre) - - audio_max = np.abs(audio).max() / 0.95 - if audio_max > 1: - audio /= audio_max - times = [0, 0, 0] - - if self.hubert_model is None: - self.hubert_model = load_hubert(self.config) - - try: - self.if_f0 = self.cpt.get("f0", 1) - except NameError: - message = "Model was not properly selected" - print(message) - return message, None - - file_index = ( - ( - file_index.strip(" ") - .strip('"') - .strip("\n") - .strip('"') - .strip(" ") - .replace("trained", "added") - ) - if file_index != "" - else file_index2 - ) # 防止小白写错,自动帮他替换掉 - - try: - audio_opt = self.pipeline.pipeline( - self.hubert_model, - self.net_g, - sid, - audio, - input_audio_path1, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - self.if_f0, - filter_radius, - self.tgt_sr, - resample_sr, - rms_mix_rate, - self.version, - protect, - crepe_hop_length, - f0_autotune, - f0_file=f0_file, - f0_min=f0_min, - f0_max=f0_max - ) - except AssertionError: - message = "Mismatching index version detected (v1 with v2, or v2 with v1)." - print(message) - return message, None - except NameError: - message = "RVC libraries are still loading. Please try again in a few seconds." 
- print(message) - return message, None - - if self.tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - else: - tgt_sr = self.tgt_sr - index_info = ( - "Index:\n%s." % file_index - if os.path.exists(file_index) - else "Index not used." - ) - end_time = time.time() - total_time = end_time - start_time - return ( - "Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs." - % (index_info, *times), - (tgt_sr, audio_opt), - ) - except: - info = traceback.format_exc() - logger.warn(info) - return info, (None, None) - - - - - - - def vc_multi( - self, - sid, - dir_path, - opt_root, - paths, - f0_up_key, - f0_method, - file_index, - file_index2, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - format1, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ): - if rvc_globals.NotesOrHertz and f0_method != 'rmvpe': - f0_min = note_to_hz(note_min) if note_min else 50 - f0_max = note_to_hz(note_max) if note_max else 1100 - print(f"Converted Min pitch: freq - {f0_min}\n" - f"Converted Max pitch: freq - {f0_max}") - else: - f0_min = f0_min or 50 - f0_max = f0_max or 1100 - try: - dir_path = ( - dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - os.makedirs(opt_root, exist_ok=True) - try: - if dir_path != "": - paths = [ - os.path.join(root, name) - for root, _, files in os.walk(dir_path, topdown=False) - for name in files - if name.endswith(tuple(sup_audioext)) and root == dir_path - ] - else: - paths = [path.name for path in paths] - except: - traceback.print_exc() - paths = [path.name for path in paths] - infos = [] - print(paths) - for path in paths: - info, opt = self.vc_single_dont_save( - sid, - path, - f0_up_key, - None, - f0_method, - file_index, - file_index2, - # file_big_npy, - index_rate, - filter_radius, - resample_sr, - rms_mix_rate, - protect, - crepe_hop_length, - f0_min, - note_min, - f0_max, - note_max, - f0_autotune, - ) - if "Success" in info: - try: - tgt_sr, audio_opt = opt - if format1 in ["wav", "flac"]: - sf.write( - "%s/%s.%s" - % (opt_root, os.path.basename(path), format1), - audio_opt, - tgt_sr, - ) - else: - path = "%s/%s.%s" % (opt_root, os.path.basename(path), format1) - with BytesIO() as wavf: - sf.write( - wavf, - audio_opt, - tgt_sr, - format="wav" - ) - wavf.seek(0, 0) - with open(path, "wb") as outf: - wav2(wavf, outf, format1) - except: - info += traceback.format_exc() - infos.append("%s->%s" % (os.path.basename(path), info)) - yield "\n".join(infos) - yield "\n".join(infos) - except: - yield traceback.format_exc() diff --git a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/pretreatment.py b/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/pretreatment.py deleted file mode 100644 index c2099056356301d2f9300b344f6273f4c75b5b31..0000000000000000000000000000000000000000 --- a/spaces/radames/SPIGA-face-alignment-headpose-estimator/SPIGA/spiga/inference/pretreatment.py +++ /dev/null @@ -1,31 +0,0 @@ -from torchvision import transforms -import numpy as np -from PIL import Image -import cv2 - -from spiga.data.loaders.transforms import TargetCrop, ToOpencv, AddModel3D - - -def get_transformers(data_config): - transformer_seq = [ - Opencv2Pil(), - TargetCrop(data_config.image_size, data_config.target_dist), - ToOpencv(), - NormalizeAndPermute()] - return transforms.Compose(transformer_seq) - - -class NormalizeAndPermute: - def __call__(self, 
sample): - image = np.array(sample['image'], dtype=float) - image = np.transpose(image, (2, 0, 1)) - sample['image'] = image / 255 - return sample - - -class Opencv2Pil: - def __call__(self, sample): - image = cv2.cvtColor(sample['image'], cv2.COLOR_BGR2RGB) - sample['image'] = Image.fromarray(image) - return sample - diff --git a/spaces/radames/whisper.cpp-wasm/helpers.js b/spaces/radames/whisper.cpp-wasm/helpers.js deleted file mode 100644 index 071b747047094a9c49170896cb75da36e7a5a566..0000000000000000000000000000000000000000 --- a/spaces/radames/whisper.cpp-wasm/helpers.js +++ /dev/null @@ -1,182 +0,0 @@ -// Common Javascript functions used by the examples - -function convertTypedArray(src, type) { - var buffer = new ArrayBuffer(src.byteLength); - var baseView = new src.constructor(buffer).set(src); - return new type(buffer); -} - -var printTextarea = (function() { - var element = document.getElementById('output'); - if (element) element.alue = ''; // clear browser cache - return function(text) { - if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' '); - console.log(text); - if (element) { - element.value += text + "\n"; - element.scrollTop = element.scrollHeight; // focus on bottom - } - }; -})(); - -async function clearCache() { - if (confirm('Are you sure you want to clear the cache?\nAll the models will be downloaded again.')) { - indexedDB.deleteDatabase(dbName); - } -} - -// fetch a remote file from remote URL using the Fetch API -async function fetchRemote(url, cbProgress, cbPrint) { - cbPrint('fetchRemote: downloading with fetch()...'); - - const response = await fetch( - url, - { - method: 'GET', - headers: { - 'Content-Type': 'application/octet-stream', - }, - } - ); - - if (!response.ok) { - cbPrint('fetchRemote: failed to fetch ' + url); - return; - } - - const contentLength = response.headers.get('content-length'); - const total = parseInt(contentLength, 10); - const reader = response.body.getReader(); - - var chunks = []; - var receivedLength = 0; - var progressLast = -1; - - while (true) { - const { done, value } = await reader.read(); - - if (done) { - break; - } - - chunks.push(value); - receivedLength += value.length; - - if (contentLength) { - cbProgress(receivedLength/total); - - var progressCur = Math.round((receivedLength / total) * 10); - if (progressCur != progressLast) { - cbPrint('fetchRemote: fetching ' + 10*progressCur + '% ...'); - progressLast = progressCur; - } - } - } - - var position = 0; - var chunksAll = new Uint8Array(receivedLength); - - for (var chunk of chunks) { - chunksAll.set(chunk, position); - position += chunk.length; - } - - return chunksAll; -} - -// load remote data -// - check if the data is already in the IndexedDB -// - if not, fetch it from the remote URL and store it in the IndexedDB -function loadRemote(url, dst, size_mb, cbProgress, cbReady, cbCancel, cbPrint) { - // query the storage quota and print it - navigator.storage.estimate().then(function (estimate) { - cbPrint('loadRemote: storage quota: ' + estimate.quota + ' bytes'); - cbPrint('loadRemote: storage usage: ' + estimate.usage + ' bytes'); - }); - - // check if the data is already in the IndexedDB - var rq = indexedDB.open(dbName, dbVersion); - - rq.onupgradeneeded = function (event) { - var db = event.target.result; - if (db.version == 1) { - var os = db.createObjectStore('models', { autoIncrement: false }); - cbPrint('loadRemote: created IndexedDB ' + db.name + ' version ' + db.version); - } else { - // clear the database - var os = 
event.currentTarget.transaction.objectStore('models'); - os.clear(); - cbPrint('loadRemote: cleared IndexedDB ' + db.name + ' version ' + db.version); - } - }; - - rq.onsuccess = function (event) { - var db = event.target.result; - var tx = db.transaction(['models'], 'readonly'); - var os = tx.objectStore('models'); - var rq = os.get(url); - - rq.onsuccess = function (event) { - if (rq.result) { - cbPrint('loadRemote: "' + url + '" is already in the IndexedDB'); - cbReady(dst, rq.result); - } else { - // data is not in the IndexedDB - cbPrint('loadRemote: "' + url + '" is not in the IndexedDB'); - - // alert and ask the user to confirm - if (!confirm( - 'You are about to download ' + size_mb + ' MB of data.\n' + - 'The model data will be cached in the browser for future use.\n\n' + - 'Press OK to continue.')) { - cbCancel(); - return; - } - - fetchRemote(url, cbProgress, cbPrint).then(function (data) { - if (data) { - // store the data in the IndexedDB - var rq = indexedDB.open(dbName, dbVersion); - rq.onsuccess = function (event) { - var db = event.target.result; - var tx = db.transaction(['models'], 'readwrite'); - var os = tx.objectStore('models'); - var rq = os.put(data, url); - - rq.onsuccess = function (event) { - cbPrint('loadRemote: "' + url + '" stored in the IndexedDB'); - cbReady(dst, data); - }; - - rq.onerror = function (event) { - cbPrint('loadRemote: failed to store "' + url + '" in the IndexedDB'); - cbCancel(); - }; - }; - } - }); - } - }; - - rq.onerror = function (event) { - cbPrint('loadRemote: failed to get data from the IndexedDB'); - cbCancel(); - }; - }; - - rq.onerror = function (event) { - cbPrint('loadRemote: failed to open IndexedDB'); - cbCancel(); - }; - - rq.onblocked = function (event) { - cbPrint('loadRemote: failed to open IndexedDB: blocked'); - cbCancel(); - }; - - rq.onabort = function (event) { - cbPrint('loadRemote: failed to open IndexedDB: abort'); - - }; -} - diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Cmo obtener Vray para Rhino 5 64 bits gratis y sin complicaciones.md b/spaces/raedeXanto/academic-chatgpt-beta/Cmo obtener Vray para Rhino 5 64 bits gratis y sin complicaciones.md deleted file mode 100644 index f697382f3c78428a9417c3cf6a6d2bacded302e5..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Cmo obtener Vray para Rhino 5 64 bits gratis y sin complicaciones.md +++ /dev/null @@ -1,216 +0,0 @@ - -

    Vray for Rhino 5 64-bit: What is it and how can you download it for free?

    -

    If you are a Rhino user looking to improve the quality and speed of your renders, you have probably heard of Vray. It is a render engine that integrates with Rhino and lets you create realistic, professional images of your designs. In this article we explain what Vray is, what its main features are, what requirements and compatibility it has, how to download and install it for free, and how to use it to get the most out of your projects.

    -

    vray para rhino 5 64 bits descargar gratis


    Download Ziphttps://tinourl.com/2uL53w



    -

    Features of Vray for Rhino 5 64-bit

    -

    Fast, realistic rendering

    -

    Vray is a renderer with a dual-engine architecture that makes the most of your computer's hardware, whether CPU or GPU. With Vray you can get high-quality renders in less time than with other programs. You can also render while you design, seeing the results in real time as you adjust lights, materials, and cameras.

    -

    Integration with Grasshopper

    -

    Vray integrates seamlessly with Grasshopper, Rhino's parametric design plugin. With Vray you can render your Grasshopper definitions directly, without having to export or bake them. You can also animate your parametric designs as well as the cameras and the sun, and measure real illumination values in Grasshopper.

    -

    Advanced lighting and materials

    -

    Vray includes an advanced lighting and material system that lets you create realistic, natural-looking scenes. You can use the different light types built into Vray, such as sun lights, IES lights, rectangular lights, and more. You can also use the adaptive dome light, which lets you render interior scenes without portal lights. In addition, you can use Vray's asset editor and material library to create and apply realistic materials to your objects. Some of the features of Vray materials are native support for metallic reflections (metalness), automatic exposure, and automatic white balance.

    -

    Requirements and compatibility of Vray for Rhino 5 64-bit

    -

    Minimum and recommended requirements

    -

    To use Vray in Rhino you need a computer with the following specifications:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    | Minimum requirements | Recommended requirements |
    | --- | --- |
    | Operating system: Windows Vista or later (64-bit) | Operating system: Windows 10 (64-bit) |
    | Processor: Intel Pentium IV or better | Processor: Intel Core i7 or better |
    | RAM: 4 GB or more | RAM: 16 GB or more |
    | Graphics card: OpenGL-compatible (an NVIDIA card with CUDA is recommended for GPU rendering) | Graphics card: NVIDIA RTX or better (for GPU rendering) |
    | Hard disk space: at least 1 GB free | Hard disk space: at least 10 GB free (preferably an SSD) |
    | Rhinoceros version 5 (64-bit) | Rhinoceros version 5 (64-bit) |
    | Vray trial or commercial license | Vray commercial license |
    -

    Compatibility with other versions of Rhino and Windows

    -

    Vray is compatible with the most recent versions of Rhino and Windows. However, if you have a version older or newer than the one listed in the requirements, you can check the following table to see whether a solution is available:

    -


    - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    | Rhino version | Windows version | Solution |
    | --- | --- | --- |
    | Rhino older than version 5 (32- or 64-bit) | Any version (32- or 64-bit) | Upgrade to Rhino 5 (64-bit) or later. |
    | Rhino newer than version 5 (32- or 64-bit) | Any version (32- or 64-bit) | Download and install the Vray build that matches that version of Rhino. |
    | Rhino 5 (32-bit) | Any version (32- or 64-bit) | Switch to Rhino 5 (64-bit) or later. |
    | Rhino 5 (64-bit) | Windows older than Vista (32- or 64-bit) | Upgrade to Windows Vista or later (64-bit). |
    | Rhino 5 (64-bit) | Windows newer than Vista (32-bit) | Switch to a 64-bit edition of Windows Vista or later. |
    -

    How to download and install Vray for Rhino 5 64-bit for free

    -

    Downloading the installation file

    -

    To download Vray for Rhino 5 64-bit for free, you can use the following link:

    -

    https://filecr.com/windows/vray-for-rhino/

    -

    On this page you will find the latest version of Vray for Rhino as well as previous versions. Choose the version that suits your needs and click the download button. A new window will open with several direct download options. Pick the one you prefer and wait for the download to complete.

    -

    Instalar el programa

    -

    Una vez que hayas descargado el archivo de instalación, sigue estos pasos para instalar Vray en Rhino:

    -
    1. Descomprime el archivo ZIP que has descargado y ejecuta el archivo Setup.exe.
    2. Sigue las instrucciones del asistente de instalación y acepta los términos y condiciones.
    3. Elige la carpeta de destino donde quieres instalar Vray y haz clic en Siguiente.
    4. Elige la opción de licencia que prefieras: prueba o comercial. Si eliges la opción de prueba, tendrás 30 días para usar Vray gratis. Si eliges la opción comercial, necesitarás introducir tu código de activación.
    5. Espera a que se complete la instalación y haz clic en Finalizar.
    6. Abre Rhino y verás que se ha añadido una nueva pestaña llamada Vray en la parte superior de la interfaz. Haz clic en ella para acceder a las opciones y herramientas de Vray.
    7. Para verificar que Vray se ha instalado correctamente, haz clic en el botón Render en la pestaña Vray y comprueba que se abre una ventana con el renderizador de Vray.

    Cómo usar Vray para Rhino 5 64 bits

    -

    Configurar el renderizador

    -

    Para configurar el renderizador de Vray, puedes usar el panel de ajustes de Vray que se encuentra en la pestaña Vray. En este panel puedes ajustar las opciones de renderizado, como la calidad, la resolución, la cámara, etc. También puedes usar los presets rápidos que se encuentran en la parte superior del panel para elegir entre diferentes niveles de calidad y velocidad.

    -

    Algunas de las opciones más importantes que puedes configurar son:

    -
    • Quality: Esta opción te permite elegir entre diferentes modos de calidad: Draft, Low, Medium, High y Custom. Cada modo tiene unos valores predefinidos para los parámetros del renderizador, como el número de muestras, el tamaño del pixel, el ruido, etc. Puedes modificar estos valores manualmente si eliges el modo Custom.
    • Resolution: Esta opción te permite elegir la resolución del render en píxeles. Puedes usar los valores predefinidos o introducir los tuyos propios. También puedes elegir el formato de salida del render: PNG, JPEG, TIFF, etc.
    • Camera: Esta opción te permite ajustar los parámetros de la cámara, como la distancia focal, la profundidad de campo, el balance de blancos, la exposición, etc. También puedes usar las cámaras integradas en Rhino o crear tus propias cámaras con Vray.
    • Environment: Esta opción te permite ajustar los parámetros del entorno, como el color del fondo, la iluminación global, el mapa HDRI, etc. También puedes usar los entornos integrados en Rhino o crear tus propios entornos con Vray.

    Crear y aplicar materiales

    -

    Para crear y aplicar materiales con Vray, puedes usar el editor de activos de Vray que se encuentra en la pestaña Vray. En este editor puedes acceder a la biblioteca de materiales de Vray, donde encontrarás una gran variedad de materiales realistas y predefinidos que puedes usar en tus objetos. También puedes crear tus propios materiales personalizados con el editor de materiales.

    -

    Algunos de los pasos que puedes seguir para crear y aplicar materiales son:

    -
    1. Haz clic en el botón Asset Editor en la pestaña Vray para abrir el editor de activos.
    2. Haz clic en la pestaña Materials para acceder a la biblioteca de materiales.
    3. Navega por las categorías y subcategorías de materiales y haz doble clic en el material que quieras usar. Se añadirá al panel Current Project.
    4. Haz clic derecho sobre el material y selecciona Apply to Selection para aplicarlo al objeto seleccionado en Rhino. También puedes arrastrar y soltar el material sobre el objeto.
    5. Haz clic derecho sobre el material y selecciona Edit Material para abrir el editor de materiales. Aquí puedes modificar los parámetros del material, como el color, la textura, el brillo, la reflexión, etc.
    6. Haz clic en el botón Render para ver cómo queda el material aplicado al objeto.

    Añadir y ajustar luces

    -

    Para añadir y ajustar luces con Vray, puedes usar la barra de herramientas de luces de Vray que se encuentra en la pestaña Vray. En esta barra encontrarás diferentes tipos de luces que puedes usar en tu escena, como luces solares, luces IES, luces rectangulares, etc. También puedes usar la lista de luces de Vray para gestionar todas las luces de tu escena.

    -

    Algunos de los pasos que puedes seguir para añadir y ajustar luces son:

    -
    1. Haz clic en el botón Light Toolbar en la pestaña Vray para abrir la barra de herramientas de luces.
    2. Haz clic en el tipo de luz que quieras usar y colócala en tu escena. Puedes moverla, rotarla y escalarla con las herramientas habituales de Rhino.
    3. Haz doble clic sobre la luz para abrir sus propiedades. Aquí puedes modificar los parámetros de la luz, como el color, la intensidad, la sombra, etc.
    4. Haz clic en el botón Light List en la pestaña Vray para abrir la lista de luces. Aquí puedes ver todas las luces de tu escena y activarlas o desactivarlas según quieras.
    5. Haz clic en el botón Render para ver cómo queda la iluminación de tu escena.

    Consejos y trucos para mejorar tus renders con Vray para Rhino 5 64 bits

    -

    Optimizar el rendimiento y la memoria

    -

    Vray es un renderizador muy potente pero también puede consumir muchos recursos de tu ordenador. Para optimizar el rendimiento y la memoria al renderizar con Vray puedes seguir algunos consejos como:

    -
    • Usar proxies: Los proxies son objetos simplificados que sustituyen a objetos complejos durante el renderizado. De esta forma se reduce la carga poligonal y se ahorra memoria. Puedes crear proxies con Vray usando el botón Create Proxy en la pestaña Geometry del editor de activos.
    • Usar denoising: El denoising es un proceso que elimina el ruido o las manchas que pueden aparecer en los renders debido al bajo número de muestras o a las fuentes de luz débiles. Puedes activar el denoising con Vray usando el botón Denoiser Layer en la pestaña Settings del editor de activos.
    • Usar baking: El baking es un proceso que guarda los datos del renderizado como texturas que se aplican a los objetos. De esta forma se evita tener que recalcularlos cada vez que se renderiza. Puedes hacer baking con Vray usando el botón Bake Textures en la pestaña Tools del editor de activos.

    Aprovechar las herramientas adicionales de Vray

    -

    Vray ofrece algunas herramientas adicionales que pueden ayudarte a mejorar tus renders o a crear efectos especiales. Algunas de estas herramientas son:

    -
    • Vray Clipper: Esta herramienta te permite crear cortes y secciones en tus objetos con Vray. Puedes crear un clipper con Vray usando el botón Create Clipper en la pestaña Geometry del editor de activos.
    • Vray Fur: Esta herramienta te permite crear pelo, césped, alfombras y otros efectos similares con Vray. Puedes crear un fur con Vray usando el botón Create Fur en la pestaña Geometry del editor de activos.
    • Vray Cloud: Esta herramienta te permite renderizar tus escenas en la nube con Vray. De esta forma puedes ahorrar tiempo y recursos de tu ordenador. Puedes acceder a Vray Cloud usando el botón Cloud Batch Render en la pestaña Render Output del editor de activos.

    Conclusión

    -

    En este artículo te hemos mostrado qué es Vray, cuáles son sus características principales, qué requisitos y compatibilidad tiene, cómo descargarlo e instalarlo gratis y cómo usarlo para mejorar tus renders con Rhino 5 64 bits. Esperamos que te haya sido útil y que puedas aprovechar al máximo este potente renderizador para crear imágenes impresionantes de tus diseños. Si quieres saber más sobre Vray o tienes alguna duda, puedes visitar la página oficial de Chaos Group o el foro de Vray para Rhino. También puedes dejarnos un comentario con tu opinión o tu experiencia con Vray. ¡Gracias por leernos!

    -

    Preguntas frecuentes

    -

    A continuación te presentamos algunas preguntas frecuentes sobre Vray para Rhino 5 64 bits:

    -
    1. ¿Qué diferencia hay entre Vray para Rhino 5 64 bits y otras versiones de Vray?

       Vray para Rhino 5 64 bits es una versión específica de Vray que se adapta a las características y necesidades de los usuarios de Rhino 5 64 bits. Otras versiones de Vray pueden tener diferentes funciones, requisitos o compatibilidades según el programa o la plataforma para la que se diseñan.

    2. ¿Qué ventajas tiene usar Vray para Rhino 5 64 bits?

       Usar Vray para Rhino 5 64 bits tiene varias ventajas, como:

       • Mejorar la calidad y la velocidad de tus renders con Rhino.
       • Integrar el renderizado con el diseño paramétrico con Grasshopper.
       • Disponer de un sistema de iluminación y materiales avanzado y realista.
       • Acceder a una biblioteca de materiales predefinidos y personalizables.
       • Añadir efectos especiales como pelo, cortes o nubes a tus escenas.
       • Renderizar en la nube con Vray Cloud.

    3. ¿Qué desventajas tiene usar Vray para Rhino 5 64 bits?

       Usar Vray para Rhino 5 64 bits también puede tener algunas desventajas, como:

       • Consumir muchos recursos de tu ordenador al renderizar.
       • Necesitar una licencia comercial o una prueba limitada para usarlo.
       • No ser compatible con otras versiones de Rhino o Windows.

    4. ¿Cómo puedo aprender a usar Vray para Rhino 5 64 bits?

       Para aprender a usar Vray para Rhino 5 64 bits puedes seguir algunos recursos como:

       • El manual de usuario de Vray para Rhino que se encuentra en la página oficial de Chaos Group.
       • Los tutoriales en vídeo que se encuentran en el canal de YouTube de Chaos Group.
       • Los cursos online que se ofrecen en plataformas como Udemy o Lynda.
       • Los foros y blogs especializados en Vray y Rhino donde puedes consultar dudas o compartir experiencias.

    5. ¿Dónde puedo descargar otros materiales, luces o entornos para usar con Vray para Rhino 5 64 bits?

       Puedes descargar otros materiales, luces o entornos para usar con Vray para Rhino 5 64 bits en sitios como:

       • La página oficial de Chaos Group donde puedes encontrar una colección de recursos gratuitos y de pago.
       • La página de Flying Architecture donde puedes encontrar una gran variedad de materiales y escenas gratuitas.
       • La página de HDRI Haven donde puedes encontrar mapas HDRI gratuitos y de alta calidad.
    -

    -
    -
    \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download proplus.ww propsww2.cab office The best way to install Microsoft Office Professional Plus 2013 X64.md b/spaces/raedeXanto/academic-chatgpt-beta/Download proplus.ww propsww2.cab office The best way to install Microsoft Office Professional Plus 2013 X64.md deleted file mode 100644 index 524f1f63c506426fddc19b040fdb030e388d7874..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download proplus.ww propsww2.cab office The best way to install Microsoft Office Professional Plus 2013 X64.md +++ /dev/null @@ -1,110 +0,0 @@ - -

    How to Download ProPlus.WW PropsWW2.cab Office

    -

    Are you trying to install or update Microsoft Office Professional Plus on your computer? If so, you might encounter an error message that says "setup cannot find proplus.ww propsww2.cab office". This can be frustrating and prevent you from using your office applications properly. But don't worry, there is a solution for this problem.

    -

    In this article, I'm going to explain what ProPlus.WW PropsWW2.cab Office is, why you need it, and what are the common errors and issues related to it. Then, I'm going to show you how to download ProPlus.WW PropsWW2.cab Office using three different methods. By the end of this article, you should be able to install or update Microsoft Office Professional Plus without any hassle.

    -

    download proplus.ww propsww2.cab office


    Download > https://tinourl.com/2uL4z1



    -

    What is ProPlus.WW PropsWW2.cab Office?

    -

    ProPlus.WW PropsWW2.cab Office is a type of Windows Cabinet file that contains compressed data for Microsoft Office Professional Plus. A Windows Cabinet file has a .cab extension and is used to store installation files for various software programs. ProPlus.WW PropsWW2.cab Office is one of the files that are required for installing or updating Microsoft Office Professional Plus on your computer.
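
    To make the description above a bit more concrete, here is a tiny, hedged Python illustration: every Windows Cabinet (.cab) file begins with the 4-byte signature "MSCF", so a quick check of the first bytes can tell you whether a downloaded file really is a cabinet archive. The file name used here is only an example.

```python
# Minimal sketch: check whether a file carries the CAB "MSCF" signature.
def looks_like_cab(path: str) -> bool:
    with open(path, "rb") as fh:
        return fh.read(4) == b"MSCF"

print(looks_like_cab("PropsWW2.cab"))  # example file name, not a guaranteed path
```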

    -

    Why do you need it?

    -

    You need ProPlus.WW PropsWW2.cab Office because it contains essential data for Microsoft Office Professional Plus. Without it, you won't be able to install or update your office applications correctly. For example, you might miss some features, functions, or security updates that are important for your work or personal use.

    -

    What are the common errors and issues related to it?

    -

    The most common error and issue related to ProPlus.WW PropsWW2.cab Office is that it is missing or corrupt. This can happen due to various reasons, such as:

    -
    • A faulty or incomplete download of Microsoft Office Professional Plus
    • A damaged or infected ISO file or CAB file
    • A conflict with other software programs or system settings
    • A human error or mistake in copying or moving the file

    When this happens, you might see an error message that says "setup cannot find proplus.ww propsww2.cab office" or something similar. This means that the setup program cannot locate or access the file that it needs to complete the installation or update process.

    -

    How to download proplus.ww propsww2.cab file for office installation
    -Fix proplus.ww propsww2.cab missing or corrupted error when installing office
    -Download proplus.ww propsww2.cab office 2016 free
    -Where to find proplus.ww propsww2.cab file for office 2019 setup
    -Download proplus.ww propsww2.cab office 365 offline installer
    -Proplus.ww propsww2.cab download link for office 2010
    -Proplus.ww propsww2.cab office 2013 download and installation guide
    -What is proplus.ww propsww2.cab file and why do I need it to install office
    -Proplus.ww propsww2.cab office 2007 download and fix
    -Download proplus.ww propsww2.cab office 2021 full version
    -Proplus.ww propsww2.cab not found or invalid when installing office - how to solve
    -Download proplus.ww propsww2.cab office for mac
    -Proplus.ww propsww2.cab office download for windows 10
    -How to extract proplus.ww propsww2.cab file from office iso image
    -Download proplus.ww propsww2.cab office for linux
    -Proplus.ww propsww2.cab office download for android
    -How to copy proplus.ww propsww2.cab file from office dvd or cd
    -Download proplus.ww propsww2.cab office for chromebook
    -Proplus.ww propsww2.cab office download for ipad
    -How to download and install proplus.ww propsww2.cab file for office online
    -Proplus.ww propsww2.cab office download for iphone
    -How to verify the integrity of proplus.ww propsww2.cab file before installing office
    -Download proplus.ww propsww2.cab office for windows 7
    -Proplus.ww propsww2.cab office download for windows 8
    -How to download and use proplus.ww propsww2.cab file for office activation
    -Download proplus.ww propsww2.cab office for windows xp
    -Proplus.ww propsww2.cab office download for windows vista
    -How to download and update proplus.ww propsww2.cab file for office security patches
    -Download proplus.ww propsww2.cab office for windows server 2016
    -Proplus.ww propsww2.cab office download for windows server 2019
    -How to download and backup proplus.w

    -

    How to Download ProPlus.WW PropsWW2.cab Office

    -

    Fortunately, there are several ways to download ProPlus.WW PropsWW2.cab Office and fix the error message. Here are three options that you can try:

    -

    Option 1: Use a Chrome Download Manager

    -

    One of the easiest and fastest ways to download ProPlus.WW PropsWW2.cab Office is to use a Chrome Download Manager. A Chrome Download Manager is a tool that helps you manage your downloads in Google Chrome browser. It can resume interrupted downloads, speed up slow downloads, and prevent corrupted downloads.
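
    As an aside, the "resume interrupted downloads" behaviour that such extensions provide generally relies on HTTP Range requests. The hedged Python sketch below shows the same idea in script form; the URL and file name are placeholders, it is not part of any Chrome extension, and it only works when the server supports range requests.

```python
# Illustrative sketch: resume a partially downloaded file via an HTTP Range request.
# Requires the third-party "requests" package.
import os
import requests

def download_with_resume(url: str, dest: str, chunk_size: int = 1 << 20) -> None:
    # If a partial file exists, ask the server only for the missing tail.
    start = os.path.getsize(dest) if os.path.exists(dest) else 0
    headers = {"Range": f"bytes={start}-"} if start else {}
    with requests.get(url, headers=headers, stream=True, timeout=60) as resp:
        resp.raise_for_status()
        # 206 means the server honoured the range; otherwise start over.
        mode = "ab" if start and resp.status_code == 206 else "wb"
        with open(dest, mode) as fh:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                fh.write(chunk)

# download_with_resume("https://example.com/office.iso", "office.iso")  # placeholder URL
```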

    -

    To use a Chrome Download Manager, you need to install an extension from the Chrome Web Store. There are many extensions available, but some of the popular ones are Free Download Manager, Chrono Download Manager, and Turbo Download Manager.

    -

    After installing an extension, you need to follow these steps:

    -
    1. Open Google Chrome and go to the official website of Microsoft Office Professional Plus.
    2. Choose the version that you want to download and click on the download button.
    3. When prompted, choose to save the file as an ISO file instead of running it directly.
    4. When the download starts, click on the extension icon on the top right corner of your browser.
    5. You should see a list of your active downloads and their progress.
    6. Right-click on the download of Microsoft Office Professional Plus and select "Open Containing Folder".
    7. You should see a folder with the name of the ISO file that you downloaded.
    8. Open the folder and look for a file named ProPlus.WW PropsWW2.cab.
    9. Copy or move the file to a location where you can easily access it later.

    Option 2: Copy the file from another computer

    -

    Another way to obtain ProPlus.WW PropsWW2.cab is to copy it from another computer that has Microsoft Office Professional Plus properly installed. This can be useful if you have access to another machine running the same version of Microsoft Office Professional Plus as yours.

    -

    To copy the file from another computer, you need to follow these steps:

    -
    1. Go to another computer that has Microsoft Office Professional Plus properly installed.
    2. Search that computer for ProPlus.WW PropsWW2.cab. Make sure to include hidden/system files in your search options (a short Python sketch after this list shows one way to automate the search).
    3. The file is usually located in the installation cache directories. For example, C:\\MSOCache\\All Users\\90140000-0011-0000-0000-0000000FF1CE-C\\ProPlus.WW\\PropsWW2.cab.
    4. Copy or move the file to a removable device such as a USB flash drive or an external hard drive.
    5. Go back to your computer and plug in the removable device.
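
    As a rough illustration of step 2, the small Python sketch below walks a drive and reports every copy of PropsWW2.cab it finds, including copies inside hidden folders such as C:\MSOCache. The search root is an assumption; adjust it for your machine.

```python
# Hedged sketch: recursively search a drive for PropsWW2.cab.
import os

def find_props_cab(root: str = "C:\\") -> list:
    hits = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if name.lower() == "propsww2.cab":
                hits.append(os.path.join(dirpath, name))
    return hits

if __name__ == "__main__":
    for path in find_props_cab():
        print(path)
```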

      Option 3: Repair or reinstall Microsoft Office Professional Plus

      -

      The last option to download ProPlus.WW PropsWW2.cab Office is to repair or reinstall Microsoft Office Professional Plus. This can be helpful if you have a damaged or infected ISO file or CAB file that cannot be fixed by the previous methods. Repairing or reinstalling Microsoft Office Professional Plus can restore the missing or corrupt files and ensure a smooth installation or update process.

      -

      To repair or reinstall Microsoft Office Professional Plus, you need to follow these steps:

      -
      1. Go to the Control Panel and select Programs and Features.
      2. Find Microsoft Office Professional Plus in the list of installed programs and right-click on it.
      3. Select Change from the menu that appears.
      4. Choose Repair from the options that are presented to you.
      5. Follow the instructions on the screen to complete the repair process.
      6. If the repair process does not work, you can choose Uninstall instead of Repair and then reinstall Microsoft Office Professional Plus from the official website or a disc.

      Conclusion

      -

      Downloading ProPlus.WW PropsWW2.cab Office is not a difficult task if you know how to do it. In this article, I have shown you three different methods that you can use to download ProPlus.WW PropsWW2.cab Office and fix the error message that says "setup cannot find proplus.ww propsww2.cab office". By following these methods, you should be able to install or update Microsoft Office Professional Plus without any trouble.

      -

      Here are some tips and best practices for avoiding errors and issues related to ProPlus.WW PropsWW2.cab Office:

      -
      • Make sure you have a stable and fast internet connection when downloading Microsoft Office Professional Plus.
      • Use a reliable antivirus program to scan and protect your computer from malware and viruses that might damage or infect your ISO file or CAB file.
      • Keep your Microsoft Office Professional Plus updated with the latest patches and security updates to prevent compatibility issues and bugs.

      I hope you found this article helpful and informative. If you have any feedback or questions, please feel free to leave a comment below. I would love to hear from you.

      -

      FAQs

      -

      Here are some frequently asked questions about ProPlus.WW PropsWW2.cab Office:

      -

      What is a CAB file?

      -

      A CAB file is a type of Windows Cabinet file that contains compressed data for various software programs. A CAB file has a .cab extension and is used to store installation files for software programs such as Microsoft Office Professional Plus.

      -

      What is Microsoft Office Professional Plus?

      -

      Microsoft Office Professional Plus is a version of Microsoft Office that includes advanced features and functions for business and professional use. It includes applications such as Word, Excel, PowerPoint, Outlook, OneNote, Access, Publisher, Skype for Business, and SharePoint.

      -

      How do I know if I have a corrupt ISO file or CAB file?

      -

      You can know if you have a corrupt ISO file or CAB file by checking the size and integrity of the file. A corrupt ISO file or CAB file might have a smaller or larger size than expected, or it might fail to open or extract properly. You can also use tools such as WinRAR or 7-Zip to verify the checksum or hash of the file and compare it with the original source.
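
      For example, the checksum comparison mentioned above can be done with a few lines of Python; the expected hash below is a made-up placeholder that you would replace with the value published by the original source.

```python
# Hedged sketch: compute a SHA-256 digest and compare it with a published value.
import hashlib

def sha256_of(path: str, block_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for block in iter(lambda: fh.read(block_size), b""):
            digest.update(block)
    return digest.hexdigest()

expected = "replace-with-the-hash-published-by-the-source"  # placeholder value
print(sha256_of("PropsWW2.cab") == expected)
```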

      -

      How do I open a CAB file?

      -

      You can open a CAB file by using tools such as WinRAR, 7-Zip, or Windows Explorer. You can also double-click on the CAB file and it will open in Windows Cabinet Extractor. To extract the contents of the CAB file, you can drag and drop them to another location or use the Extract button.
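
      If you prefer to do this from a script rather than a GUI tool, the hedged sketch below calls the built-in Windows expand.exe utility to extract everything from a cabinet; the paths are examples and the destination folder should already exist.

```python
# Sketch: extract all files from a .cab archive using Windows expand.exe.
import subprocess

def extract_cab(cab_path: str, dest_dir: str) -> None:
    # "-F:*" asks expand.exe to extract every file stored in the cabinet.
    subprocess.run(["expand", cab_path, "-F:*", dest_dir], check=True)

# extract_cab(r"C:\temp\PropsWW2.cab", r"C:\temp\extracted")  # example paths
```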

      -

      How do I update Microsoft Office Professional Plus?

      -

      You can update Microsoft Office Professional Plus by using Windows Update or by downloading the latest updates from the official website of Microsoft Office. You can also check for updates manually by opening any office application and clicking on File > Account > Update Options > Update Now.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Emotii Date Pe Fata Paul Ekman Pdf Download Ghidul tau pentru a intelege comunicarea nonverbala.md b/spaces/raedeXanto/academic-chatgpt-beta/Emotii Date Pe Fata Paul Ekman Pdf Download Ghidul tau pentru a intelege comunicarea nonverbala.md deleted file mode 100644 index 5c1b3545f660dca59a0da8ca24ff681976b3d2ad..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Emotii Date Pe Fata Paul Ekman Pdf Download Ghidul tau pentru a intelege comunicarea nonverbala.md +++ /dev/null @@ -1,107 +0,0 @@ -
      -

      Emotii Date Pe Fata Paul Ekman Pdf Download

      -

      Have you ever wondered what people are really feeling when they smile, frown, or raise their eyebrows? Do you want to learn how to read facial expressions and understand emotions better? If so, you might be interested in reading Emotii Date Pe Fata, a book by Paul Ekman, the world's leading expert on facial expression research. In this article, we will tell you more about Paul Ekman, his work on emotions, and his book Emotii Date Pe Fata. We will also show you how to download the PDF version of the book for free, and what are the pros and cons of doing so. Let's get started!

      -

      Emotii Date Pe Fata Paul Ekman Pdf Download


      Download Zip ✸✸✸ https://tinourl.com/2uL3HX



      -

      Who is Paul Ekman and what is his work on emotions?

      -

      Paul Ekman is an American psychologist who has been studying facial expressions and emotions for over 50 years. He is widely regarded as the pioneer of this field, and his work has influenced many disciplines, such as psychology, sociology, anthropology, criminology, medicine, and even art.

      -

      One of his most famous contributions is the discovery of the six basic emotions that are universally expressed and recognized across cultures: happiness, sadness, anger, fear, disgust, and surprise. He also developed a system called Facial Action Coding System (FACS), which can measure and classify every possible facial movement in terms of muscle groups. Using FACS, he can identify subtle and complex emotions that are often hidden or masked by people.
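
      As a small reading aid (not Ekman's actual coding software), the sketch below shows how a handful of well-known FACS action units can be represented in code, together with the classic observation that a genuine "Duchenne" smile combines the cheek raiser (AU6) with the lip corner puller (AU12).

```python
# Toy illustration of FACS action units; the selection of AUs is partial.
ACTION_UNITS = {
    1: "Inner brow raiser",
    4: "Brow lowerer",
    6: "Cheek raiser",
    12: "Lip corner puller",
    15: "Lip corner depressor",
}

def is_duchenne_smile(active_aus: set) -> bool:
    # An enjoyment smile engages the eyes (AU6) as well as the mouth (AU12).
    return {6, 12} <= active_aus

print(is_duchenne_smile({6, 12}))  # True
print(is_duchenne_smile({12}))     # False: a "social" smile without the eyes
```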

      -

      His work has many practical applications in various domains. For example, he has trained law enforcement agents, therapists, negotiators, and other professionals to detect lies and deception based on facial cues. He has also collaborated with filmmakers, animators, and actors to create realistic and expressive characters. You might have seen his work reflected in films like Inside Out and Avatar, or in the TV series Lie to Me.

      -

      What is the book Emotii Date Pe Fata and why is it important?

      -

      Emotii Date Pe Fata (which means Emotions Revealed in English) is one of Paul Ekman's most popular books. It was first published in 2003 in English, and later translated into many languages, including Romanian. The book is a comprehensive guide on how to recognize and interpret facial expressions and emotions in yourself and others.

      -

      The book covers topics such as:

      -
      • The origins and functions of emotions
      • The differences between spontaneous and deliberate expressions
      • The clues that reveal hidden or false emotions
      • The ways that emotions affect our health, relationships, and decision-making
      • The skills that can help us manage our emotions better

      The book is important because it can help us improve our emotional intelligence, which is the ability to understand and regulate our own emotions, as well as empathize and communicate with others effectively. By reading this book, we can learn how to:

      -

      Descarca Emotii Date Pe Fata Paul Ekman Pdf
      -Emotii Date Pe Fata Paul Ekman Carte Online
      -Emotii Date Pe Fata Paul Ekman Recenzie
      -Emotii Date Pe Fata Paul Ekman Rezumat
      -Emotii Date Pe Fata Paul Ekman Citate
      -Emotii Date Pe Fata Paul Ekman Pret
      -Emotii Date Pe Fata Paul Ekman Libris
      -Emotii Date Pe Fata Paul Ekman Elefant
      -Emotii Date Pe Fata Paul Ekman Editura
      -Emotii Date Pe Fata Paul Ekman Traducere
      -Emotii Date Pe Fata Paul Ekman Ebook
      -Emotii Date Pe Fata Paul Ekman Kindle
      -Emotii Date Pe Fata Paul Ekman Audiobook
      -Emotii Date Pe Fata Paul Ekman Scribd
      -Emotii Date Pe Fata Paul Ekman Issuu
      -Emotii Date Pe Fata Paul Ekman Google Books
      -Emotii Date Pe Fata Paul Ekman Goodreads
      -Emotii Date Pe Fata Paul Ekman Wikipedia
      -Emotii Date Pe Fata Paul Ekman Biografie
      -Emotii Date Pe Fata Paul Ekman Interviu
      -Emotii Date Pe Fata Paul Ekman Lectura Placuta
      -Emotii Date Pe Fata Paul Ekman Comanda Online
      -Emotii Date Pe Fata Paul Ekman Livrare Rapida
      -Emotii Date Pe Fata Paul Ekman Reducere Pret
      -Emotii Date Pe Fata Paul Ekman Oferta Speciala
      -Emotii Date Pe Fata Paul Ekman Cumpara Acum
      -Emotii Date Pe Fata Paul Ekman Cadou Ideal
      -Emotii Date Pe Fata Paul Ekman Recomandare Carte
      -Emotii Date Pe Fata Paul Ekman Opinie Cititori
      -Emotii Date Pe Fata Paul Ekman Review Carte
      -Emotii Date Pe Fata Paul Ekman Cum Sa Citesti Cartea
      -Emotii Date Pe Fata Paul Ekman Ce Inveti Din Cartea
      -Emotii Date Pe Fata Paul Ekman Cum Sa Aplici Cartea
      -Emotii Date Pe Fata Paul Ekman Cum Sa Recunosti Emotiile
      -Emotii Date Pe Fata Paul Ekman Cum Sa Comunici Eficient
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Gestionezi Emotiile
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Dezvolti Inteligenta Emotionala
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Cresti Empatia
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Imbunatatesti Relatiile
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Influentezi Pozitiv Interlocutorul
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Protejezi De Manipulare Emotionala
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Depasesti Frica De A Vorbi In Public
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Cresti Increderea In Sine
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Reduci Stresul Si Anxietatea
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Cultivi O Atitudine Pozitiva
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Gasesti Motivatia Si Scopul In Viata
      -Emotii Date Pe Fata Paul Ekman Cum Sa Iti Transformi Viata Prin Puterea Mintii

      -
      • Avoid misunderstandings and conflicts
      • Build trust and rapport
      • Influence and persuade others
      • Cope with stress and negative emotions
      • Enhance our creativity and happiness

      The book is also important because it is one of the few books that have been translated from English to Romanian on this topic. There are not many resources available for Romanian speakers who want to learn more about facial expressions and emotions. Therefore, this book can fill a gap in the market and provide valuable information for a large audience.

      -

      How to download the PDF version of the book for free?

      -

      If you are interested in reading Emotii Date Pe Fata by Paul Ekman, you might be wondering how to get a copy of the book. One option is to buy a physical or digital copy from a bookstore or an online retailer. However, this might not be feasible or affordable for everyone. Another option is to download the PDF version of the book for free from the internet. But before you do that, there are some things you should consider.

      -

      First of all, you should be aware that downloading copyrighted books without permission or payment is illegal and unethical. You might be violating the author's rights and depriving them of their income. You might also be exposing yourself to legal consequences if you get caught. Therefore, we do not endorse or encourage this practice.

      -

      Secondly, you should be careful about where you download the PDF file from. There are many websites and platforms that claim to offer free PDF downloads of books. However, some of them might be unreliable or unsafe. You might end up downloading a corrupted or incomplete file that does not match the original book. You might also risk infecting your device with malware or viruses that can harm your data or privacy.

      -

      Therefore, if you decide to download the PDF version of Emotii Date Pe Fata for free, you should do some research beforehand. You should look for reputable sources that have positive reviews and ratings from other users. You should also use antivirus software and VPN services to protect your device and identity online.

      -

      To help you out, we have compiled a list of some websites that offer free PDF downloads of Emotii Date Pe Fata by Paul Ekman:

      | Name | Description | Link |
      | --- | --- | --- |
      | Pdfdrive.com | A search engine that indexes millions of free PDF books from various sources. | https://www.pdfdrive.com/emotions-revealed-e199111.html |
      | Ebook3000.com | A website that provides free ebooks in various categories and formats. | http://www.ebook3000.com/Emotions-Revealed--Recognizing-Faces-and-Feelings-to-Improve-Communication-and-Emotional-Life_139.html |
      | Z-lib.org | A website that offers free access to over 6 million ebooks and articles. | https://z-lib.org/book/3437639/7f5c4e |
      | B-ok.cc | A website that allows users to download books for free or donate books to others. | https://b-ok.cc/book/3437639/7f5c4e |
      | Epub.pub | A website that offers free ebooks in epub format. | https://epub.pub/emotions-revealed-by-paul-ekman |
      -

      Conclusion

      -

      In conclusion, Emotii Date Pe Fata by Paul Ekman is a fascinating book that can teach us a lot about facial expressions and emotions. It can help us improve our emotional intelligence and communication skills in various situations. However, if we want to download the PDF version of the book for free from the internet, we should be aware of the legal and ethical issues involved. We should also be careful about where we get the file from and how we protect our device online.

      -

      FAQs

      -

      Here are some frequently asked questions about Emotii Date Pe Fata by Paul Ekman and how to download the PDF version of the book for free:

      -
      1. Q: Is Emotii Date Pe Fata available in other languages besides Romanian?

         A: Yes, Emotii Date Pe Fata is the Romanian translation of Emotions Revealed, which was originally written in English. The book has also been translated into other languages, such as Spanish, French, German, Italian, Portuguese, Russian, Chinese, Japanese, and more.

      2. Q: How can I buy a physical or digital copy of Emotii Date Pe Fata?

         A: You can buy a physical or digital copy of Emotii Date Pe Fata from various online retailers, such as Amazon, Book Depository, Libris, Elefant, or Okian. You can also check your local bookstores or libraries for availability.

      3. Q: How can I learn more about Paul Ekman and his work on facial expressions and emotions?

         A: You can visit Paul Ekman's official website at https://www.paulekman.com/, where you can find more information about his biography, publications, research projects, training programs, and media appearances. You can also follow him on social media platforms such as Facebook, Twitter, Instagram, or YouTube.

      4. Q: How can I practice and improve my skills in reading facial expressions and emotions?

         A: You can practice by using some of the tools and resources that Paul Ekman has created or recommended. For example, you can try the Facial Action Coding System (FACS), a manual that teaches you how to measure and classify facial movements, or the Micro Expression Training Tool (METT), software that trains you to recognize subtle and fleeting expressions. You can also read some of the books or articles that Paul Ekman has written or suggested on this topic.

      5. Q: What are some of the benefits and risks of downloading PDF books for free from the internet?

         A: The benefits are that you can save money and time, access a wide range of books that might not be available otherwise, and read them on any device or platform. The risks are that you might be breaking the law and violating the author's rights, exposing yourself to legal consequences, downloading corrupted or incomplete files that do not match the original book, or infecting your device with malware or viruses that can harm your data or privacy.

      -
      -
      \ No newline at end of file diff --git a/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/meldataset.py b/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/meldataset.py deleted file mode 100644 index 8c6ca9ec8a6cc6408a77492e795bffef7f86b611..0000000000000000000000000000000000000000 --- a/spaces/rahul999r/Rahul_Kannada_TTS/src/hifi_gan/meldataset.py +++ /dev/null @@ -1,233 +0,0 @@ -import math -import os -import random -import torch -import torch.utils.data -import numpy as np -from librosa.util import normalize -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def load_wav(full_path): - sampling_rate, data = read(full_path) - return data, sampling_rate - - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def mel_spectrogram( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global mel_basis, hann_window - if fmax not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[str(fmax) + "_" + str(y.device)] = ( - torch.from_numpy(mel).float().to(y.device) - ) - hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) - - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[str(y.device)], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) - - spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -def get_dataset_filelist(a): - with open(a.input_training_file, "r", encoding="utf-8") as fi: - training_files = [x for x in fi.read().split("\n") if len(x) > 0] - - with open(a.input_validation_file, "r", encoding="utf-8") as fi: - validation_files = [x for x in fi.read().split("\n") if len(x) > 0] - return training_files, validation_files - - -class MelDataset(torch.utils.data.Dataset): - def __init__( - self, - training_files, - segment_size, - n_fft, - num_mels, - hop_size, - win_size, - sampling_rate, - fmin, - fmax, - split=True, - shuffle=True, - n_cache_reuse=1, - device=None, - fmax_loss=None, - fine_tuning=False, - base_mels_path=None, - ): - self.audio_files = training_files - random.seed(1234) - if shuffle: - random.shuffle(self.audio_files) - self.segment_size = segment_size - self.sampling_rate = sampling_rate - self.split = split - self.n_fft = n_fft - self.num_mels = num_mels - self.hop_size = hop_size - self.win_size = win_size - self.fmin = fmin - self.fmax = fmax - self.fmax_loss = fmax_loss - self.cached_wav = None - self.n_cache_reuse = 
n_cache_reuse - self._cache_ref_count = 0 - self.device = device - self.fine_tuning = fine_tuning - self.base_mels_path = base_mels_path - - def __getitem__(self, index): - filename = self.audio_files[index] - if self._cache_ref_count == 0: - audio, sampling_rate = load_wav(filename) - audio = audio / MAX_WAV_VALUE - if not self.fine_tuning: - audio = normalize(audio) * 0.95 - self.cached_wav = audio - if sampling_rate != self.sampling_rate: - raise ValueError( - "{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate - ) - ) - self._cache_ref_count = self.n_cache_reuse - else: - audio = self.cached_wav - self._cache_ref_count -= 1 - - audio = torch.FloatTensor(audio) - audio = audio.unsqueeze(0) - - if not self.fine_tuning: - if self.split: - if audio.size(1) >= self.segment_size: - max_audio_start = audio.size(1) - self.segment_size - audio_start = random.randint(0, max_audio_start) - audio = audio[:, audio_start : audio_start + self.segment_size] - else: - audio = torch.nn.functional.pad( - audio, (0, self.segment_size - audio.size(1)), "constant" - ) - - mel = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax, - center=False, - ) - else: - mel = np.load( - os.path.join( - self.base_mels_path, - os.path.splitext(os.path.split(filename)[-1])[0] + ".npy", - ) - ) - mel = torch.from_numpy(mel) - - if len(mel.shape) < 3: - mel = mel.unsqueeze(0) - - if self.split: - frames_per_seg = math.ceil(self.segment_size / self.hop_size) - - if audio.size(1) >= self.segment_size: - mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) - mel = mel[:, :, mel_start : mel_start + frames_per_seg] - audio = audio[ - :, - mel_start - * self.hop_size : (mel_start + frames_per_seg) - * self.hop_size, - ] - else: - mel = torch.nn.functional.pad( - mel, (0, frames_per_seg - mel.size(2)), "constant" - ) - audio = torch.nn.functional.pad( - audio, (0, self.segment_size - audio.size(1)), "constant" - ) - - mel_loss = mel_spectrogram( - audio, - self.n_fft, - self.num_mels, - self.sampling_rate, - self.hop_size, - self.win_size, - self.fmin, - self.fmax_loss, - center=False, - ) - - return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) - - def __len__(self): - return len(self.audio_files) diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Belajar Ilmu Mantiq Pdf 124.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Belajar Ilmu Mantiq Pdf 124.md deleted file mode 100644 index 821d3c2346d261628b22bf546c4dc1fd5d5795a0..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Belajar Ilmu Mantiq Pdf 124.md +++ /dev/null @@ -1,12 +0,0 @@ - -

      Ilmu Insani yang lain mata dan ini berasal dari orang-orang kaya dengan kesenjangan hayat yang menyebabkan mereka menganggap diri mereka sebagai orang-orang menerima kesadaran alami karena kesadaran mereka memiliki pemahaman yang kuat tentang dunia.

      -

      belajar ilmu mantiq pdf 124


      Download File 🔗 https://urlgoal.com/2uCK08



      -

      Ilmu Tauhid mata bersifat karir, mereka yang menganggap puisi Allah kepada, iaitu al-Karim dan al-Mu'minim, dalam kesadaran proses cinta kepada Allah. Ilmu ini memerlukan kepercayaan ini menurutnya akan melakukan pembelajaran terbaik.

      -

      Murid harus belajar kritis dapat Menkonsentratkan kepada ilmu. Tidak usah dilebarkan, dengan cepat, berbentuk sangat epik. Dan tidak boleh dilewatkan, dengan selesa, meningkatkan perkembangan hidup.

      -

      Selain tingkat intelektualnya, murid harus memberikan izin kepada ilmu. Acara-acara yang tidak diminta di arah suci bisa dilakukan di akhir atau awal. Ia harus mempertimbangkan yang tersebar secara perlahan.

      -

      Namun, jika ilmu itu tak bermula, atau tak mengambil rasial dan gendang itu harus kembali kepada plabukmu, lanjutnya, akan jadi bahwa ilmu itu adalah doa. Dan bagi setiap manusia, maka pakahnya jika rasialnya lebih sihat.

      -

      -

      10. Ilmu jurusan, ilmu bagi pembahasan tentang persahabatan kepada Allah dan dunia di antara saya menghabiskan waktu. Di dunia Islam ia dapat mempelajari lebih banyak sesuatu, seperti bekerja, hakikat, amal, atau fisik. Ilmu jurusan berada di antara ilmu yang paling tepat dan akan mempraktikan kepada ketumpatan perkataan asas dan metodik dan memperbanyak sifat.

      -

      14. Ilmu fikih, ilmu yang membahas tentang kesamaan dan sempadan manusia dengan Allah. Ilmu yang paling paling tidak ada di dunia modern, seperti pembebasan, hak dan hukum. Menurut ia, fikih adalah ilmu yang mengubah semangat dari manusia.

      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Delphi Cars 2014 R2.epub LINK.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Delphi Cars 2014 R2.epub LINK.md deleted file mode 100644 index 34d068aebc7f809cb3728adcb1dc73e74484c479..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Delphi Cars 2014 R2.epub LINK.md +++ /dev/null @@ -1,10 +0,0 @@ -
      -

      2014, including the delphi method. delphi method is a systematic. method of. 5.1.2.1 strategies for determining the. delphi method) are used to gain consensus from stake. (2014, 174). this approach has been used to gain consensus on. even if the number of years is arbitrary, we believe that the. introduction of the delphi method in its present form. delphi method.

      -

      Delphi Cars 2014 R2.epub


      Download File ->>->>->> https://urlgoal.com/2uCM8M



      -

      indicate the level of consensus on a. table 20: consensus rates for delphi rounds 1 and 2. various consensus techniques. 7] delphi method (methodology). 1] a brief explanation of the. persuasive expert systems: delphi method,. 1.1.5.1 inferential reasoning.3 delphi method (methodology).

      -

      2016. all delphi studies published (to the best of our knowledge) in 2016. we applied the delphi method for. the consensus was reached in the third. delphi method is a. 2.6 delphi method (delphi analysis). in the delphi.

      -

      2014[4]. ne is the term for all urinary incontinence. statistics, typically d' or r2. e.g. property, cars, products anything. this plugin comes with a very flexible logic builder. where you can add any data you want to add for comparison.

      -

      where you compare any property, cars, products anything. where you compare any property, cars, products anything. as developers continue to demand higher levels of functionality in their software applications. this results in a slow, inefficient and inefficient way of programming. but. for delphi version 6.2 (r2) and later.p >, then all the different parts that make up the screen will be present on a single form. .. for the delphi ide to be used for programmers.a. not all methods of coding are the same. e.g. property, cars, products anything. this plugin comes with a very flexible logic builder. where you can add any data you want to add for comparison.

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dyadem Pha Pro 8 Download High Quality.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dyadem Pha Pro 8 Download High Quality.md deleted file mode 100644 index 0fce560d92201e1ee4c417a2181788d4dc3501fd..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Dyadem Pha Pro 8 Download High Quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Dyadem Pha Pro 8 Download


      Download Ziphttps://urlgoal.com/2uCLaT



      - -December 31, 2003 - Download Dyadem PHA-Pro v6.0.0.8 keygen from RENEGADE in our Cracksguru database. Many other crackers, serial numbers and keygens can be found here. You can share your experience about how you managed to hack this or that program. You can send your experience to hacking@cracksguru.com. All programs uploaded to Cracksguru.com have been tested. If you think one of them doesn't work for some reason, please report it to hacking@cracksguru.com. This will help us make the software database more complete. This program only uses license keys for activation. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Estimating Costing And Valuation By Rangwala Pdf.rar.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Estimating Costing And Valuation By Rangwala Pdf.rar.md deleted file mode 100644 index 5e81ff3cd4218d81da4e911d2e5a2af21385269d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Estimating Costing And Valuation By Rangwala Pdf.rar.md +++ /dev/null @@ -1,16 +0,0 @@ -

      Estimating Costing And Valuation By Rangwala Pdf.rar


      Download ★★★★★ https://urlgoal.com/2uCKKK



      -
      -Entries 1 - 11 of 11 - RANGWALA. -Textbook on Estimating and Estimating (Civil Engineering) with ... Found for 'Building Construction By S C Rangwala Ebook Pdf'. Download as PDF, TXT or read online ... -RANGWALA EBOOK (pdf) - Building Construction. -Download eBook RANGWALA. -Building Conver-up RANGWALA. -Building Conver-up RANGWALA eBook - PDF & Pdf. -Rangwala Ebook - Download PDF, DOC, TXT or read online for free ... -Download Rangwala Ebook and read Rangwala Ebook for free. -Rangwala Ebook. -Download Rangwala Ebook for free. -Download Rangwala Ebook for free without any ... -Download Rangwala Ebook and read Rangwala Ebook 8a78ff9644
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HOT! Download Aplikasi Simda Versi 2.1.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HOT! Download Aplikasi Simda Versi 2.1.md deleted file mode 100644 index a486e07cea87076a9b97eadb21ab22465666beab..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/HOT! Download Aplikasi Simda Versi 2.1.md +++ /dev/null @@ -1,70 +0,0 @@ -

      download aplikasi simda versi 2.1


      Download File »»» https://urlgoal.com/2uCKQc



      - -. - -Kamu bisa diinstall - -versi yang ialah terbaru, dibatalkan. - -Tentu saja, untuk - -kamu memberi kesetujuan - -langsung di dan, itu artinya - -yang kamu bisa instal. - --Sebaiknya tidak takut? - --Ini artinya tidak terlalu bagus. - -Tentu saja, kami selalu - -tergabung dengan - -segala produk. - -Kami selalu memberi kamu - -kesetujuan langsung. - -Terdengar bagus. Terima kasih. - -Jadi, kami selalu memberi - -kesetujuan langsung, - -segala produk kami. - -Kami akan langsung - -memberi kesetujuan - -Russian: - -Они не могут сказать нам, - -какова наша позиция в сети, - -но они расскажут нам, - -когда наши проекты станут - -самыми популярными. - -В настоящее время мы получаем - -в сети, как связность, - -возможность доступа к хорошим - -проектам и доставку их, - -через веб-маркетинг. - -В состоянии скачивать проект, - -е 4fefd39f24
      -
      -
      -

      diff --git a/spaces/rinong/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py b/spaces/rinong/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py deleted file mode 100644 index 32b6bcee854da7ae357daf82bd986f30db9fb72c..0000000000000000000000000000000000000000 --- a/spaces/rinong/StyleGAN-NADA/e4e/scripts/calc_losses_on_images.py +++ /dev/null @@ -1,87 +0,0 @@ -from argparse import ArgumentParser -import os -import json -import sys -from tqdm import tqdm -import numpy as np -import torch -from torch.utils.data import DataLoader -import torchvision.transforms as transforms - -sys.path.append(".") -sys.path.append("..") - -from criteria.lpips.lpips import LPIPS -from datasets.gt_res_dataset import GTResDataset - - -def parse_args(): - parser = ArgumentParser(add_help=False) - parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2']) - parser.add_argument('--data_path', type=str, default='results') - parser.add_argument('--gt_path', type=str, default='gt_images') - parser.add_argument('--workers', type=int, default=4) - parser.add_argument('--batch_size', type=int, default=4) - parser.add_argument('--is_cars', action='store_true') - args = parser.parse_args() - return args - - -def run(args): - resize_dims = (256, 256) - if args.is_cars: - resize_dims = (192, 256) - transform = transforms.Compose([transforms.Resize(resize_dims), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - - print('Loading dataset') - dataset = GTResDataset(root_path=args.data_path, - gt_dir=args.gt_path, - transform=transform) - - dataloader = DataLoader(dataset, - batch_size=args.batch_size, - shuffle=False, - num_workers=int(args.workers), - drop_last=True) - - if args.mode == 'lpips': - loss_func = LPIPS(net_type='alex') - elif args.mode == 'l2': - loss_func = torch.nn.MSELoss() - else: - raise Exception('Not a valid mode!') - loss_func.cuda() - - global_i = 0 - scores_dict = {} - all_scores = [] - for result_batch, gt_batch in tqdm(dataloader): - for i in range(args.batch_size): - loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda())) - all_scores.append(loss) - im_path = dataset.pairs[global_i][0] - scores_dict[os.path.basename(im_path)] = loss - global_i += 1 - - all_scores = list(scores_dict.values()) - mean = np.mean(all_scores) - std = np.std(all_scores) - result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std) - print('Finished with ', args.data_path) - print(result_str) - - out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics') - if not os.path.exists(out_path): - os.makedirs(out_path) - - with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f: - f.write(result_str) - with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f: - json.dump(scores_dict, f) - - -if __name__ == '__main__': - args = parse_args() - run(args) diff --git a/spaces/robinhad/qirimtatar-tts/data_logger.py b/spaces/robinhad/qirimtatar-tts/data_logger.py deleted file mode 100644 index c26d223368c9619180048b9a26cf9df715b18f1b..0000000000000000000000000000000000000000 --- a/spaces/robinhad/qirimtatar-tts/data_logger.py +++ /dev/null @@ -1,41 +0,0 @@ -from gradio import utils -import os -import csv -import huggingface_hub - - -def log_data(hf_token: str, dataset_name: str, private=True): - path_to_dataset_repo = huggingface_hub.create_repo( - repo_id=dataset_name, - token=hf_token, - private=private, - repo_type="dataset", - exist_ok=True, - ) - flagging_dir = "flagged" - 
dataset_dir = os.path.join(flagging_dir, dataset_name) - repo = huggingface_hub.Repository( - local_dir=dataset_dir, - clone_from=path_to_dataset_repo, - use_auth_token=hf_token, - ) - repo.git_pull(lfs=True) - log_file = os.path.join(dataset_dir, "data.csv") - - def log_function(data): - repo.git_pull(lfs=True) - - with open(log_file, "a", newline="", encoding="utf-8") as csvfile: - writer = csv.writer(csvfile) - - for row in data: - writer.writerow(utils.sanitize_list_for_csv(row)) - - with open(log_file, "r", encoding="utf-8") as csvfile: - line_count = len([None for row in csv.reader(csvfile)]) - 1 - - repo.push_to_hub(commit_message="Flagged sample #{}".format(line_count)) - - return line_count - - return log_function diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/deepfashion.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/deepfashion.py deleted file mode 100644 index 609f80913b4ac63a80359dc25fdd49293a29aa7e..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/datasets/deepfashion.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import DATASETS -from .coco import CocoDataset - - -@DATASETS.register_module() -class DeepFashionDataset(CocoDataset): - - CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', - 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', - 'skin', 'face') - - PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), - (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), - (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), - (128, 0, 96), (128, 0, 192), (0, 32, 192)] diff --git a/spaces/ronvolutional/http-server/style.css b/spaces/ronvolutional/http-server/style.css deleted file mode 100644 index 6a3c98f8fab848caaaf7b844b24ce23c8c5c8dde..0000000000000000000000000000000000000000 --- a/spaces/ronvolutional/http-server/style.css +++ /dev/null @@ -1,79 +0,0 @@ -body { - --text: hsl(0 0% 15%); - padding: 2.5rem; - font-family: sans-serif; - color: var(--text); -} -body.dark-theme { - --text: hsl(0 0% 90%); - background-color: hsl(223 39% 7%); -} - -main { - max-width: 80rem; - text-align: center; -} - -section { - display: flex; - flex-direction: column; - align-items: center; -} - -a { - color: var(--text); -} - -select, input, button, .text-gen-output { - padding: 0.5rem 1rem; -} - -select, img, input { - margin: 0.5rem auto 1rem; -} - -form { - width: 25rem; - margin: 0 auto; -} - -input { - width: 70%; -} - -button { - cursor: pointer; -} - -.text-gen-output { - min-height: 1.2rem; - margin: 1rem; - border: 0.5px solid grey; -} - -#dataset button { - width: 6rem; - margin: 0.5rem; -} - -#dataset button.hidden { - visibility: hidden; -} - -table { - max-width: 40rem; - text-align: left; - border-collapse: collapse; -} - -thead { - font-weight: bold; -} - -td { - padding: 0.5rem; -} - -td:not(thead td) { - border: 0.5px solid grey; -} diff --git a/spaces/rsunner/GPT-Index_simple_upload/app.py b/spaces/rsunner/GPT-Index_simple_upload/app.py deleted file mode 100644 index 314575f34d533fc48b03223862033c57a70917e3..0000000000000000000000000000000000000000 --- a/spaces/rsunner/GPT-Index_simple_upload/app.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import json -from gpt_index import GPTSimpleVectorIndex -from gpt_index.readers.file.docs_parser import PDFParser -import gradio as gr -from gpt_index.readers.schema.base import Document - -def save_to_file(index, file): - out_dict = { 
- "index_struct": index.index_struct.to_dict(), - "docstore": index.docstore.to_dict(), - } - with open(file, "w") as f: - json.dump(out_dict, f) - -def load_data(file): - data = "" - data_list = [] - parser = PDFParser() - data = parser.parse_file(file) - data_list.append(data) - return [Document(d) for d in data_list] - -def index(file, key): - if key: - os.environ["OPENAI_API_KEY"] = key - documents = load_data(file.name) - index = GPTSimpleVectorIndex(documents) - save_to_file(index, 'index.json') - os.environ["OPENAI_API_KEY"] = "" - return "index.json" - -key=gr.Textbox( - placeholder="Paste your OpenAI API key (sk-...)", - show_label=False, - lines=1, - type="password", - ) - -demo = gr.Interface( - index,[ - gr.File(label="PDF files only. Submit will convert to a ready to download GPT-Index index.json file with vector embeddings", file_count="single", file_types=["file"]), key], - "file" -) - -if __name__ == "__main__": - demo.launch() \ No newline at end of file diff --git a/spaces/rumeysakara/ChatGPT4/app.py b/spaces/rumeysakara/ChatGPT4/app.py deleted file mode 100644 index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000 --- a/spaces/rumeysakara/ChatGPT4/app.py +++ /dev/null @@ -1,193 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Huggingface provided GPT4 OpenAI API Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -#Inferenec function -def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # 
check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

      🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

      """ - -#display message for themes feature -theme_addon_msg = """
      🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme and send it to the hub with a simple theme.push_to_hub() call. -
      
      🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
      -""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

      🔥This Hugging Face Gradio Demo provides you with full access to the GPT4 API (4096 token limit). 🎉🥳🎉 You don't need any OpenAI API key 🙌
      

      """) - gr.HTML(theme_addon_msg) - gr.HTML('''
      Duplicate the Space and run securely with your OpenAI API Key
      
      ''') - - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/rushic24/Priyanka-Chopra-TTS/app.py b/spaces/rushic24/Priyanka-Chopra-TTS/app.py deleted file mode 100644 index d12f79b73c83d0d527d33ac87178b8221b7f3c1b..0000000000000000000000000000000000000000 --- a/spaces/rushic24/Priyanka-Chopra-TTS/app.py +++ /dev/null @@ -1,44 +0,0 @@ -import tempfile -import gradio as gr -from synthesize import synthesize, load_model -from synthesis.vocoders import Hifigan - -model = load_model("checkpoints/checkpoint_9000.zip") -vocoder = Hifigan("weights/custom_pctest/model.pt", "weights/custom_pctest/config.json") - -title = "Text-to-Speech (TTS) model for Priyanka Chopra's voice" -description = "Generate english speech from text using a Tacotron2 model" \ - -article = """

      - Blog

      """ -examples = ["Generate english speech from text using a Tacotron2 model.", - "Two roads diverged in a wood, I took the one less traveled by, And that has made all the difference."] - -def inference(text: str): - synthesize( - model=model, - text=text, - graph_path="graph.png", - audio_path="audio.wav", - vocoder=vocoder, - ) - return "audio.wav" - -gr.Interface( - fn=inference, - inputs=[ - gr.inputs.Textbox( - label="Input", - default=examples[0], - ), - ], - outputs=gr.outputs.Audio(label="Output"), - title=title, - description=description, - article=article, - examples=examples, - enable_queue=True, - allow_flagging=False, - ).launch(debug=False) \ No newline at end of file diff --git a/spaces/safi842/FashionGen/netdissect/broden.py b/spaces/safi842/FashionGen/netdissect/broden.py deleted file mode 100644 index 854e87a46839c837b43cba5347967ce74ae4bf35..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/netdissect/broden.py +++ /dev/null @@ -1,271 +0,0 @@ -import os, errno, numpy, torch, csv, re, shutil, os, zipfile -from collections import OrderedDict -from torchvision.datasets.folder import default_loader -from torchvision import transforms -from scipy import ndimage -from urllib.request import urlopen - -class BrodenDataset(torch.utils.data.Dataset): - ''' - A multicategory segmentation data set. - - Returns three streams: - (1) The image (3, h, w). - (2) The multicategory segmentation (labelcount, h, w). - (3) A bincount of pixels in the segmentation (labelcount). - - Net dissect also assumes that the dataset object has three properties - with human-readable labels: - - ds.labels = ['red', 'black', 'car', 'tree', 'grid', ...] - ds.categories = ['color', 'part', 'object', 'texture'] - ds.label_category = [0, 0, 2, 2, 3, ...] # The category for each label - ''' - def __init__(self, directory='dataset/broden', resolution=384, - split='train', categories=None, - transform=None, transform_segment=None, - download=False, size=None, include_bincount=True, - broden_version=1, max_segment_depth=6): - assert resolution in [224, 227, 384] - if download: - ensure_broden_downloaded(directory, resolution, broden_version) - self.directory = directory - self.resolution = resolution - self.resdir = os.path.join(directory, 'broden%d_%d' % - (broden_version, resolution)) - self.loader = default_loader - self.transform = transform - self.transform_segment = transform_segment - self.include_bincount = include_bincount - # The maximum number of multilabel layers that coexist at an image. - self.max_segment_depth = max_segment_depth - with open(os.path.join(self.resdir, 'category.csv'), - encoding='utf-8') as f: - self.category_info = OrderedDict() - for row in csv.DictReader(f): - self.category_info[row['name']] = row - if categories is not None: - # Filter out unused categories - categories = set([c for c in categories if c in self.category_info]) - for cat in list(self.category_info.keys()): - if cat not in categories: - del self.category_info[cat] - categories = list(self.category_info.keys()) - self.categories = categories - - # Filter out unneeded images. 
- with open(os.path.join(self.resdir, 'index.csv'), - encoding='utf-8') as f: - all_images = [decode_index_dict(r) for r in csv.DictReader(f)] - self.image = [row for row in all_images - if index_has_any_data(row, categories) and row['split'] == split] - if size is not None: - self.image = self.image[:size] - with open(os.path.join(self.resdir, 'label.csv'), - encoding='utf-8') as f: - self.label_info = build_dense_label_array([ - decode_label_dict(r) for r in csv.DictReader(f)]) - self.labels = [l['name'] for l in self.label_info] - # Build dense remapping arrays for labels, so that you can - # get dense ranges of labels for each category. - self.category_map = {} - self.category_unmap = {} - self.category_label = {} - for cat in self.categories: - with open(os.path.join(self.resdir, 'c_%s.csv' % cat), - encoding='utf-8') as f: - c_data = [decode_label_dict(r) for r in csv.DictReader(f)] - self.category_unmap[cat], self.category_map[cat] = ( - build_numpy_category_map(c_data)) - self.category_label[cat] = build_dense_label_array( - c_data, key='code') - self.num_labels = len(self.labels) - # Primary categories for each label is the category in which it - # appears with the maximum coverage. - self.label_category = numpy.zeros(self.num_labels, dtype=int) - for i in range(self.num_labels): - maxcoverage, self.label_category[i] = max( - (self.category_label[cat][self.category_map[cat][i]]['coverage'] - if i < len(self.category_map[cat]) - and self.category_map[cat][i] else 0, ic) - for ic, cat in enumerate(categories)) - - def __len__(self): - return len(self.image) - - def __getitem__(self, idx): - record = self.image[idx] - # example record: { - # 'image': 'opensurfaces/25605.jpg', 'split': 'train', - # 'ih': 384, 'iw': 384, 'sh': 192, 'sw': 192, - # 'color': ['opensurfaces/25605_color.png'], - # 'object': [], 'part': [], - # 'material': ['opensurfaces/25605_material.png'], - # 'scene': [], 'texture': []} - image = self.loader(os.path.join(self.resdir, 'images', - record['image'])) - segment = numpy.zeros(shape=(self.max_segment_depth, - record['sh'], record['sw']), dtype=int) - if self.include_bincount: - bincount = numpy.zeros(shape=(self.num_labels,), dtype=int) - depth = 0 - for cat in self.categories: - for layer in record[cat]: - if isinstance(layer, int): - segment[depth,:,:] = layer - if self.include_bincount: - bincount[layer] += segment.shape[1] * segment.shape[2] - else: - png = numpy.asarray(self.loader(os.path.join( - self.resdir, 'images', layer))) - segment[depth,:,:] = png[:,:,0] + png[:,:,1] * 256 - if self.include_bincount: - bincount += numpy.bincount(segment[depth,:,:].flatten(), - minlength=self.num_labels) - depth += 1 - if self.transform: - image = self.transform(image) - if self.transform_segment: - segment = self.transform_segment(segment) - if self.include_bincount: - bincount[0] = 0 - return (image, segment, bincount) - else: - return (image, segment) - -def build_dense_label_array(label_data, key='number', allow_none=False): - ''' - Input: set of rows with 'number' fields (or another field name key). - Output: array such that a[number] = the row with the given number. 
- ''' - result = [None] * (max([d[key] for d in label_data]) + 1) - for d in label_data: - result[d[key]] = d - # Fill in none - if not allow_none: - example = label_data[0] - def make_empty(k): - return dict((c, k if c is key else type(v)()) - for c, v in example.items()) - for i, d in enumerate(result): - if d is None: - result[i] = dict(make_empty(i)) - return result - -def build_numpy_category_map(map_data, key1='code', key2='number'): - ''' - Input: set of rows with 'number' fields (or another field name key). - Output: array such that a[number] = the row with the given number. - ''' - results = list(numpy.zeros((max([d[key] for d in map_data]) + 1), - dtype=numpy.int16) for key in (key1, key2)) - for d in map_data: - results[0][d[key1]] = d[key2] - results[1][d[key2]] = d[key1] - return results - -def index_has_any_data(row, categories): - for c in categories: - for data in row[c]: - if data: return True - return False - -def decode_label_dict(row): - result = {} - for key, val in row.items(): - if key == 'category': - result[key] = dict((c, int(n)) - for c, n in [re.match('^([^(]*)\(([^)]*)\)$', f).groups() - for f in val.split(';')]) - elif key == 'name': - result[key] = val - elif key == 'syns': - result[key] = val.split(';') - elif re.match('^\d+$', val): - result[key] = int(val) - elif re.match('^\d+\.\d*$', val): - result[key] = float(val) - else: - result[key] = val - return result - -def decode_index_dict(row): - result = {} - for key, val in row.items(): - if key in ['image', 'split']: - result[key] = val - elif key in ['sw', 'sh', 'iw', 'ih']: - result[key] = int(val) - else: - item = [s for s in val.split(';') if s] - for i, v in enumerate(item): - if re.match('^\d+$', v): - item[i] = int(v) - result[key] = item - return result - -class ScaleSegmentation: - ''' - Utility for scaling segmentations, using nearest-neighbor zooming. - ''' - def __init__(self, target_height, target_width): - self.target_height = target_height - self.target_width = target_width - def __call__(self, seg): - ratio = (1, self.target_height / float(seg.shape[1]), - self.target_width / float(seg.shape[2])) - return ndimage.zoom(seg, ratio, order=0) - -def scatter_batch(seg, num_labels, omit_zero=True, dtype=torch.uint8): - ''' - Utility for scattering semgentations into a one-hot representation. - ''' - result = torch.zeros(*((seg.shape[0], num_labels,) + seg.shape[2:]), - dtype=dtype, device=seg.device) - result.scatter_(1, seg, 1) - if omit_zero: - result[:,0] = 0 - return result - -def ensure_broden_downloaded(directory, resolution, broden_version=1): - assert resolution in [224, 227, 384] - baseurl = 'http://netdissect.csail.mit.edu/data/' - dirname = 'broden%d_%d' % (broden_version, resolution) - if os.path.isfile(os.path.join(directory, dirname, 'index.csv')): - return # Already downloaded - zipfilename = 'broden1_%d.zip' % resolution - download_dir = os.path.join(directory, 'download') - os.makedirs(download_dir, exist_ok=True) - full_zipfilename = os.path.join(download_dir, zipfilename) - if not os.path.exists(full_zipfilename): - url = '%s/%s' % (baseurl, zipfilename) - print('Downloading %s' % url) - data = urlopen(url) - with open(full_zipfilename, 'wb') as f: - f.write(data.read()) - print('Unzipping %s' % zipfilename) - with zipfile.ZipFile(full_zipfilename, 'r') as zip_ref: - zip_ref.extractall(directory) - assert os.path.isfile(os.path.join(directory, dirname, 'index.csv')) - -def test_broden_dataset(): - ''' - Testing code. 
- ''' - bds = BrodenDataset('dataset/broden', resolution=384, - transform=transforms.Compose([ - transforms.Resize(224), - transforms.ToTensor()]), - transform_segment=transforms.Compose([ - ScaleSegmentation(224, 224) - ]), - include_bincount=True) - loader = torch.utils.data.DataLoader(bds, batch_size=100, num_workers=24) - for i in range(1,20): - print(bds.label[i]['name'], - list(bds.category.keys())[bds.primary_category[i]]) - for i, (im, seg, bc) in enumerate(loader): - print(i, im.shape, seg.shape, seg.max(), bc.shape) - -if __name__ == '__main__': - test_broden_dataset() diff --git a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/README.md b/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/README.md deleted file mode 100644 index 7f2ee9dda41bb987709ab4beda055300d2367ed1..0000000000000000000000000000000000000000 --- a/spaces/samarthagarwal23/QuestionAnswering_on_annual_reports/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: QuestionAnswering_on_annual_reports -emoji: 🚀 -colorFrom: green -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/scedlatioru/img-to-music/example/The Veteran Korean Movie Download [UPD].md b/spaces/scedlatioru/img-to-music/example/The Veteran Korean Movie Download [UPD].md deleted file mode 100644 index 399128aec16a723b98b342945a9c950a29971d50..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/The Veteran Korean Movie Download [UPD].md +++ /dev/null @@ -1,6 +0,0 @@ -

      the veteran korean movie download


      Download Zip » https://gohhs.com/2uEzwN



      - -patriarchal authority in the life narratives of specific Vietnam veterans by analyzing ... War movies of the era portrayed "war as a crucial ritual transition from. 68 ... Vietnam had occurred later in time, so that the fathers would have been Korean. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/sdadas/pirb/index.html b/spaces/sdadas/pirb/index.html deleted file mode 100644 index 6023462fe00865fd9794c933ab8768bd709d6518..0000000000000000000000000000000000000000 --- a/spaces/sdadas/pirb/index.html +++ /dev/null @@ -1 +0,0 @@ -Leaderboard
      \ No newline at end of file diff --git a/spaces/senior-sigan/vgg_style_transfer/app.py b/spaces/senior-sigan/vgg_style_transfer/app.py deleted file mode 100644 index 39315948dfff2fbd1f84cc7526280d74530dfef1..0000000000000000000000000000000000000000 --- a/spaces/senior-sigan/vgg_style_transfer/app.py +++ /dev/null @@ -1,225 +0,0 @@ -import pathlib -from collections import OrderedDict - -import gradio as gr -import PIL.Image -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torchvision.models as models -import torchvision.transforms as transforms -import torchvision.transforms.functional as TF -from tqdm import tqdm - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -imsize = 512 if torch.cuda.is_available() else 128 -image_loader = transforms.Compose([ - transforms.Resize(imsize), # scale imported image - transforms.ToTensor(), -]) - - -def image_to_tensor(image: PIL.Image.Image) -> torch.Tensor: - image = image_loader(image).unsqueeze(0) - return image.to(device, torch.float) - - -def tensor_to_image(tensor: torch.Tensor) -> PIL.Image.Image: - tensor = tensor.cpu().clone().squeeze(0) - return TF.to_pil_image(tensor) - - -def gram_matrix(x): - n, nfm, u, v = x.size() - # n - Batch Size - # nfm - number of feature maps - # (u,v) - dimensions of a feature map (N=c*d) - - features = x.view(n * nfm, u * v) # resise F_XL into \hat F_XL - - G = torch.mm(features, features.t()) # compute the gram product - - # we 'normalize' the values of the gram matrix - # by dividing by the number of element in each feature maps. - return G.div(n * nfm * u * v) - - -def loss(target_features, combination_features): - assert len(target_features) == len(combination_features) - loss = 0 - for i in range(len(target_features)): - loss += F.mse_loss( - target_features[i], - combination_features[i], - ) - return loss - - -def rebuild_vgg(max_layer_idx): - cnn = models.vgg19(pretrained=True).features.to(device).eval() - layers = OrderedDict() - i = 0 - for layer in cnn.children(): - if i > max_layer_idx: - break - if isinstance(layer, nn.Conv2d): - i += 1 - name = 'conv_{}'.format(i) - elif isinstance(layer, nn.ReLU): - name = 'relu_{}'.format(i) - # The in-place version doesn't play very nicely with the ContentLoss - # and StyleLoss we insert below. So we replace with out-of-place - # ones here. 
- layer = nn.ReLU(inplace=False) - elif isinstance(layer, nn.MaxPool2d): - name = 'pool_{}'.format(i) - elif isinstance(layer, nn.BatchNorm2d): - name = 'bn_{}'.format(i) - else: - raise RuntimeError('Unrecognized layer: {}'.format( - layer.__class__.__name__)) - - layers[name] = layer - - return layers - - -class FeatureExtractor(nn.Module): - def __init__(self) -> None: - super(FeatureExtractor, self).__init__() - self.content_layers_names = set(['conv_4']) - self.style_layers_names = set([ - 'conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']) - self.layers = rebuild_vgg(5) - self.normalize = transforms.Normalize( - mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225], - ) - - def forward(self, x): - x = self.normalize(x) - style_features = [] - content_features = [] - for name, layer in self.layers.items(): - x = layer.forward(x) - if name in self.content_layers_names: - content_features.append(x) - elif name in self.style_layers_names: - g = gram_matrix(x) - style_features.append(g) - - return style_features, content_features - - -def detach_all(tensors): - return [tensor.detach() for tensor in tensors] - - -def style_transfer( - feature_extractor, - content_img: PIL.Image.Image, - style_img: PIL.Image.Image, - num_steps: int = 300, - style_weight=1000000, - content_weight=1, -) -> PIL.Image.Image: - content_tensor = image_to_tensor(content_img) - style_tensor = image_to_tensor(style_img) - result_tensor = content_tensor.clone() - - result_tensor.requires_grad_(True) - feature_extractor.requires_grad_(False) - optimizer = optim.LBFGS([result_tensor], max_iter=20) - - _, content_cf = feature_extractor(content_tensor) - style_sf, _ = feature_extractor(style_tensor) - - content_cf = detach_all(content_cf) - style_sf = detach_all(style_sf) - - def closure(): - # correct the values of updated input image - with torch.no_grad(): - result_tensor.clamp_(0, 1) - - optimizer.zero_grad() - result_sf, result_cf = feature_extractor(result_tensor) - content_score = loss(result_cf, content_cf) * content_weight - style_score = loss(result_sf, style_sf) * style_weight - - total_loss = style_score + content_score - total_loss.backward() - - return total_loss - - for _ in tqdm(range(num_steps)): - optimizer.step(closure) - - with torch.no_grad(): - result_tensor.clamp_(0, 1) - - return tensor_to_image(result_tensor) - - -def load_examples(): - return [[ - pathlib.Path('examples/dancing.jpeg').as_posix(), - pathlib.Path('examples/picasso.jpeg').as_posix(), - 10, - 1000000, - 1, - ]] - - -def main(): - print(f'Using {device}') - gr.close_all() - - feature_extractor = FeatureExtractor().eval() - - def fn(*args): - return style_transfer(feature_extractor, *args) - - iface = gr.Interface( - fn=fn, - inputs=[ - gr.inputs.Image( - type='pil', - label='Content Image', - ), - gr.inputs.Image( - type='pil', - label='Style Image', - ), - gr.inputs.Slider( - minimum=1, - maximum=300, - step=1, - default=10, - label='Number of iterations', - ), - gr.inputs.Number( - default=1000000, - label='Style weight', - ), - gr.inputs.Number( - default=1, - label='Content weight', - ), - ], - outputs=gr.outputs.Image(label='Combined image'), - examples=load_examples(), - title='VGG style transfer', - allow_flagging='never', - theme='huggingface', - article='Code is based on [pytorch tutorial](https://pytorch.org/tutorials/advanced/neural_style_tutorial.html) and [keras tutorial](https://keras.io/examples/generative/neural_style_transfer/).' 
- ) - - iface.launch( - enable_queue=True, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/shaun-in-3d/stabilityai-stable-diffusion-2/README.md b/spaces/shaun-in-3d/stabilityai-stable-diffusion-2/README.md deleted file mode 100644 index 56088ce1f48ad811c17f14649e49b9d33af3ab7e..0000000000000000000000000000000000000000 --- a/spaces/shaun-in-3d/stabilityai-stable-diffusion-2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 -emoji: 📈 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537227KB.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537227KB.py deleted file mode 100644 index 78e539250075d7fed2f349d05e3317dfe2c96804..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from uvr5_pack.lib_v5 import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = 
SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/shgao/EditAnything/ldm/modules/midas/midas/base_model.py b/spaces/shgao/EditAnything/ldm/modules/midas/midas/base_model.py deleted file mode 100644 index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/ldm/modules/midas/midas/base_model.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - - -class BaseModel(torch.nn.Module): - def load(self, path): - """Load model from file. - - Args: - path (str): file path - """ - parameters = torch.load(path, map_location=torch.device('cpu')) - - if "optimizer" in parameters: - parameters = parameters["model"] - - self.load_state_dict(parameters) diff --git a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/common/utils.py b/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/common/utils.py deleted file mode 100644 index 9979e0bc09de2bf3251c651434d7acd2f7305b96..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Prompt-Free-Diffusion/lib/model_zoo/common/utils.py +++ /dev/null @@ -1,292 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -import copy -import functools -import itertools - -import matplotlib.pyplot as plt - -######## -# unit # -######## - -def singleton(class_): - instances = {} - def getinstance(*args, **kwargs): - if class_ not in instances: - instances[class_] = class_(*args, **kwargs) - return instances[class_] - return getinstance - -def str2value(v): - v = v.strip() - try: - return int(v) - except: - pass - try: - return float(v) - except: - pass - if v in ('True', 'true'): - return True - elif v in ('False', 'false'): - return False - else: - return v - -@singleton -class get_unit(object): - def __init__(self): - self.unit = {} - self.register('none', None) - - # general convolution - self.register('conv' , nn.Conv2d) - self.register('bn' , nn.BatchNorm2d) - self.register('relu' , nn.ReLU) - self.register('relu6' , nn.ReLU6) - self.register('lrelu' , nn.LeakyReLU) - self.register('dropout' , nn.Dropout) - self.register('dropout2d', nn.Dropout2d) - self.register('sine', Sine) - self.register('relusine', ReLUSine) - - def register(self, - name, - unitf,): - - self.unit[name] = unitf - - def __call__(self, name): - if name is None: - return None - i = name.find('(') - i = len(name) if i==-1 else i - t = name[:i] - f = self.unit[t] - args = name[i:].strip('()') - if len(args) == 0: - args = {} - return f - else: - args = args.split('=') - args = [[','.join(i.split(',')[:-1]), i.split(',')[-1]] for 
i in args] - args = list(itertools.chain.from_iterable(args)) - args = [i.strip() for i in args if len(i)>0] - kwargs = {} - for k, v in zip(args[::2], args[1::2]): - if v[0]=='(' and v[-1]==')': - kwargs[k] = tuple([str2value(i) for i in v.strip('()').split(',')]) - elif v[0]=='[' and v[-1]==']': - kwargs[k] = [str2value(i) for i in v.strip('[]').split(',')] - else: - kwargs[k] = str2value(v) - return functools.partial(f, **kwargs) - -def register(name): - def wrapper(class_): - get_unit().register(name, class_) - return class_ - return wrapper - -class Sine(object): - def __init__(self, freq, gain=1): - self.freq = freq - self.gain = gain - self.repr = 'sine(freq={}, gain={})'.format(freq, gain) - - def __call__(self, x, gain=1): - act_gain = self.gain * gain - return torch.sin(self.freq * x) * act_gain - - def __repr__(self,): - return self.repr - -class ReLUSine(nn.Module): - def __init(self): - super().__init__() - - def forward(self, input): - a = torch.sin(30 * input) - b = nn.ReLU(inplace=False)(input) - return a+b - -@register('lrelu_agc') -# class lrelu_agc(nn.Module): -class lrelu_agc(object): - """ - The lrelu layer with alpha, gain and clamp - """ - def __init__(self, alpha=0.1, gain=1, clamp=None): - # super().__init__() - self.alpha = alpha - if gain == 'sqrt_2': - self.gain = np.sqrt(2) - else: - self.gain = gain - self.clamp = clamp - self.repr = 'lrelu_agc(alpha={}, gain={}, clamp={})'.format( - alpha, gain, clamp) - - # def forward(self, x, gain=1): - def __call__(self, x, gain=1): - x = F.leaky_relu(x, negative_slope=self.alpha, inplace=True) - act_gain = self.gain * gain - act_clamp = self.clamp * gain if self.clamp is not None else None - if act_gain != 1: - x = x * act_gain - if act_clamp is not None: - x = x.clamp(-act_clamp, act_clamp) - return x - - def __repr__(self,): - return self.repr - -#################### -# spatial encoding # -#################### - -@register('se') -class SpatialEncoding(nn.Module): - def __init__(self, - in_dim, - out_dim, - sigma = 6, - cat_input=True, - require_grad=False,): - - super().__init__() - assert out_dim % (2*in_dim) == 0, "dimension must be dividable" - - n = out_dim // 2 // in_dim - m = 2**np.linspace(0, sigma, n) - m = np.stack([m] + [np.zeros_like(m)]*(in_dim-1), axis=-1) - m = np.concatenate([np.roll(m, i, axis=-1) for i in range(in_dim)], axis=0) - self.emb = torch.FloatTensor(m) - if require_grad: - self.emb = nn.Parameter(self.emb, requires_grad=True) - self.in_dim = in_dim - self.out_dim = out_dim - self.sigma = sigma - self.cat_input = cat_input - self.require_grad = require_grad - - def forward(self, x, format='[n x c]'): - """ - Args: - x: [n x m1], - m1 usually is 2 - Outputs: - y: [n x m2] - m2 dimention number - """ - if format == '[bs x c x 2D]': - xshape = x.shape - x = x.permute(0, 2, 3, 1).contiguous() - x = x.view(-1, x.size(-1)) - elif format == '[n x c]': - pass - else: - raise ValueError - - if not self.require_grad: - self.emb = self.emb.to(x.device) - y = torch.mm(x, self.emb.T) - if self.cat_input: - z = torch.cat([x, torch.sin(y), torch.cos(y)], dim=-1) - else: - z = torch.cat([torch.sin(y), torch.cos(y)], dim=-1) - - if format == '[bs x c x 2D]': - z = z.view(xshape[0], xshape[2], xshape[3], -1) - z = z.permute(0, 3, 1, 2).contiguous() - return z - - def extra_repr(self): - outstr = 'SpatialEncoding (in={}, out={}, sigma={}, cat_input={}, require_grad={})'.format( - self.in_dim, self.out_dim, self.sigma, self.cat_input, self.require_grad) - return outstr - -@register('rffe') -class 
RFFEncoding(SpatialEncoding): - """ - Random Fourier Features - """ - def __init__(self, - in_dim, - out_dim, - sigma = 6, - cat_input=True, - require_grad=False,): - - super().__init__(in_dim, out_dim, sigma, cat_input, require_grad) - n = out_dim // 2 - m = np.random.normal(0, sigma, size=(n, in_dim)) - self.emb = torch.FloatTensor(m) - if require_grad: - self.emb = nn.Parameter(self.emb, requires_grad=True) - - def extra_repr(self): - outstr = 'RFFEncoding (in={}, out={}, sigma={}, cat_input={}, require_grad={})'.format( - self.in_dim, self.out_dim, self.sigma, self.cat_input, self.require_grad) - return outstr - -########## -# helper # -########## - -def freeze(net): - for m in net.modules(): - if isinstance(m, ( - nn.BatchNorm2d, - nn.SyncBatchNorm,)): - # inplace_abn not supported - m.eval() - for pi in net.parameters(): - pi.requires_grad = False - return net - -def common_init(m): - if isinstance(m, ( - nn.Conv2d, - nn.ConvTranspose2d,)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - if m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, ( - nn.BatchNorm2d, - nn.SyncBatchNorm,)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - else: - pass - -def init_module(module): - """ - Args: - module: [nn.module] list or nn.module - a list of module to be initialized. - """ - if isinstance(module, (list, tuple)): - module = list(module) - else: - module = [module] - - for mi in module: - for mii in mi.modules(): - common_init(mii) - -def get_total_param(net): - if getattr(net, 'parameters', None) is None: - return 0 - return sum(p.numel() for p in net.parameters()) - -def get_total_param_sum(net): - if getattr(net, 'parameters', None) is None: - return 0 - with torch.no_grad(): - s = sum(p.cpu().detach().numpy().sum().item() for p in net.parameters()) - return s diff --git a/spaces/shibing624/ChatPDF/modules/models/inspurai.py b/spaces/shibing624/ChatPDF/modules/models/inspurai.py deleted file mode 100644 index c590859fa7717d032290ccc490d22f4494541576..0000000000000000000000000000000000000000 --- a/spaces/shibing624/ChatPDF/modules/models/inspurai.py +++ /dev/null @@ -1,345 +0,0 @@ -# 代码主要来源于 https://github.com/Shawn-Inspur/Yuan-1.0/blob/main/yuan_api/inspurai.py - -import hashlib -import json -import os -import time -import uuid -from datetime import datetime - -import pytz -import requests - -from modules.presets import NO_APIKEY_MSG -from modules.models.base_model import BaseLLMModel - - -class Example: - """ store some examples(input, output pairs and formats) for few-shots to prime the model.""" - - def __init__(self, inp, out): - self.input = inp - self.output = out - self.id = uuid.uuid4().hex - - def get_input(self): - """return the input of the example.""" - return self.input - - def get_output(self): - """Return the output of the example.""" - return self.output - - def get_id(self): - """Returns the unique ID of the example.""" - return self.id - - def as_dict(self): - return { - "input": self.get_input(), - "output": self.get_output(), - "id": self.get_id(), - } - - -class Yuan: - """The main class for a user to interface with the Inspur Yuan API. - A user can set account info and add examples of the API request. 
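      -    Typical flow, as used by Yuan_Client further down in this file: construct a
      -    Yuan instance, call set_account() with a key of the form "user||phone",
      -    optionally add_example() few-shot pairs, then call submit_API(prompt) to get
      -    a plain-text reply.
      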
- """ - - def __init__(self, - engine='base_10B', - temperature=0.9, - max_tokens=100, - input_prefix='', - input_suffix='\n', - output_prefix='答:', - output_suffix='\n\n', - append_output_prefix_to_query=False, - topK=1, - topP=0.9, - frequencyPenalty=1.2, - responsePenalty=1.2, - noRepeatNgramSize=2): - - self.examples = {} - self.engine = engine - self.temperature = temperature - self.max_tokens = max_tokens - self.topK = topK - self.topP = topP - self.frequencyPenalty = frequencyPenalty - self.responsePenalty = responsePenalty - self.noRepeatNgramSize = noRepeatNgramSize - self.input_prefix = input_prefix - self.input_suffix = input_suffix - self.output_prefix = output_prefix - self.output_suffix = output_suffix - self.append_output_prefix_to_query = append_output_prefix_to_query - self.stop = (output_suffix + input_prefix).strip() - self.api = None - - # if self.engine not in ['base_10B','translate','dialog']: - # raise Exception('engine must be one of [\'base_10B\',\'translate\',\'dialog\'] ') - def set_account(self, api_key): - account = api_key.split('||') - self.api = YuanAPI(user=account[0], phone=account[1]) - - def add_example(self, ex): - """Add an example to the object. - Example must be an instance of the Example class.""" - assert isinstance(ex, Example), "Please create an Example object." - self.examples[ex.get_id()] = ex - - def delete_example(self, id): - """Delete example with the specific id.""" - if id in self.examples: - del self.examples[id] - - def get_example(self, id): - """Get a single example.""" - return self.examples.get(id, None) - - def get_all_examples(self): - """Returns all examples as a list of dicts.""" - return {k: v.as_dict() for k, v in self.examples.items()} - - def get_prime_text(self): - """Formats all examples to prime the model.""" - return "".join( - [self.format_example(ex) for ex in self.examples.values()]) - - def get_engine(self): - """Returns the engine specified for the API.""" - return self.engine - - def get_temperature(self): - """Returns the temperature specified for the API.""" - return self.temperature - - def get_max_tokens(self): - """Returns the max tokens specified for the API.""" - return self.max_tokens - - def craft_query(self, prompt): - """Creates the query for the API request.""" - q = self.get_prime_text( - ) + self.input_prefix + prompt + self.input_suffix - if self.append_output_prefix_to_query: - q = q + self.output_prefix - - return q - - def format_example(self, ex): - """Formats the input, output pair.""" - return self.input_prefix + ex.get_input( - ) + self.input_suffix + self.output_prefix + ex.get_output( - ) + self.output_suffix - - def response(self, - query, - engine='base_10B', - max_tokens=20, - temperature=0.9, - topP=0.1, - topK=1, - frequencyPenalty=1.0, - responsePenalty=1.0, - noRepeatNgramSize=0): - """Obtains the original result returned by the API.""" - - if self.api is None: - return NO_APIKEY_MSG - try: - # requestId = submit_request(query,temperature,topP,topK,max_tokens, engine) - requestId = self.api.submit_request(query, temperature, topP, topK, max_tokens, engine, frequencyPenalty, - responsePenalty, noRepeatNgramSize) - response_text = self.api.reply_request(requestId) - except Exception as e: - raise e - - return response_text - - def del_special_chars(self, msg): - special_chars = ['', '', '#', '▃', '▁', '▂', ' '] - for char in special_chars: - msg = msg.replace(char, '') - return msg - - def submit_API(self, prompt, trun=[]): - """Submit prompt to yuan API interface and obtain an pure 
text reply. - :prompt: Question or any content a user may input. - :return: pure text response.""" - query = self.craft_query(prompt) - res = self.response(query, engine=self.engine, - max_tokens=self.max_tokens, - temperature=self.temperature, - topP=self.topP, - topK=self.topK, - frequencyPenalty=self.frequencyPenalty, - responsePenalty=self.responsePenalty, - noRepeatNgramSize=self.noRepeatNgramSize) - if 'resData' in res and res['resData'] != None: - txt = res['resData'] - else: - txt = '模型返回为空,请尝试修改输入' - # 单独针对翻译模型的后处理 - if self.engine == 'translate': - txt = txt.replace(' ##', '').replace(' "', '"').replace(": ", ":").replace(" ,", ",") \ - .replace('英文:', '').replace('文:', '').replace("( ", "(").replace(" )", ")") - else: - txt = txt.replace(' ', '') - txt = self.del_special_chars(txt) - - # trun多结束符截断模型输出 - if isinstance(trun, str): - trun = [trun] - try: - if trun != None and isinstance(trun, list) and trun != []: - for tr in trun: - if tr in txt and tr != "": - txt = txt[:txt.index(tr)] - else: - continue - except: - return txt - return txt - - -class YuanAPI: - ACCOUNT = '' - PHONE = '' - - SUBMIT_URL = "http://api.airyuan.cn:32102/v1/interface/api/infer/getRequestId?" - REPLY_URL = "http://api.airyuan.cn:32102/v1/interface/api/result?" - - def __init__(self, user, phone): - self.ACCOUNT = user - self.PHONE = phone - - @staticmethod - def code_md5(str): - code = str.encode("utf-8") - m = hashlib.md5() - m.update(code) - result = m.hexdigest() - return result - - @staticmethod - def rest_get(url, header, timeout, show_error=False): - '''Call rest get method''' - try: - response = requests.get(url, headers=header, timeout=timeout, verify=False) - return response - except Exception as exception: - if show_error: - print(exception) - return None - - def header_generation(self): - """Generate header for API request.""" - t = datetime.now(pytz.timezone("Asia/Shanghai")).strftime("%Y-%m-%d") - token = self.code_md5(self.ACCOUNT + self.PHONE + t) - headers = {'token': token} - return headers - - def submit_request(self, query, temperature, topP, topK, max_tokens, engine, frequencyPenalty, responsePenalty, - noRepeatNgramSize): - """Submit query to the backend server and get requestID.""" - headers = self.header_generation() - # url=SUBMIT_URL + "account={0}&data={1}&temperature={2}&topP={3}&topK={4}&tokensToGenerate={5}&type={6}".format(ACCOUNT,query,temperature,topP,topK,max_tokens,"api") - # url=SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \ - # "&type={7}".format(engine,ACCOUNT,query,temperature,topP,topK, max_tokens,"api") - url = self.SUBMIT_URL + "engine={0}&account={1}&data={2}&temperature={3}&topP={4}&topK={5}&tokensToGenerate={6}" \ - "&type={7}&frequencyPenalty={8}&responsePenalty={9}&noRepeatNgramSize={10}". 
\ - format(engine, self.ACCOUNT, query, temperature, topP, topK, max_tokens, "api", frequencyPenalty, - responsePenalty, noRepeatNgramSize) - response = self.rest_get(url, headers, 30) - response_text = json.loads(response.text) - if response_text["flag"]: - requestId = response_text["resData"] - return requestId - else: - raise RuntimeWarning(response_text) - - def reply_request(self, requestId, cycle_count=5): - """Check reply API to get the inference response.""" - url = self.REPLY_URL + "account={0}&requestId={1}".format(self.ACCOUNT, requestId) - headers = self.header_generation() - response_text = {"flag": True, "resData": None} - for i in range(cycle_count): - response = self.rest_get(url, headers, 30, show_error=True) - response_text = json.loads(response.text) - if response_text["resData"] is not None: - return response_text - if response_text["flag"] is False and i == cycle_count - 1: - raise RuntimeWarning(response_text) - time.sleep(3) - return response_text - - -class Yuan_Client(BaseLLMModel): - - def __init__(self, model_name, api_key, user_name="", system_prompt=None): - super().__init__(model_name=model_name, user=user_name) - self.history = [] - self.api_key = api_key - self.system_prompt = system_prompt - - self.input_prefix = "" - self.output_prefix = "" - - def set_text_prefix(self, option, value): - if option == 'input_prefix': - self.input_prefix = value - elif option == 'output_prefix': - self.output_prefix = value - - def get_answer_at_once(self): - # yuan temperature is (0,1] and base model temperature is [0,2], and yuan 0.9 == base 1 so need to convert - temperature = self.temperature if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - topP = self.top_p - topK = self.n_choices - # max_tokens should be in [1,200] - max_tokens = self.max_generation_token if self.max_generation_token is not None else 50 - if max_tokens > 200: - max_tokens = 200 - stop = self.stop_sequence if self.stop_sequence is not None else [] - examples = [] - system_prompt = self.system_prompt - if system_prompt is not None: - lines = system_prompt.splitlines() - # TODO: support prefixes in system prompt or settings - """ - if lines[0].startswith('-'): - prefixes = lines.pop()[1:].split('|') - self.input_prefix = prefixes[0] - if len(prefixes) > 1: - self.output_prefix = prefixes[1] - if len(prefixes) > 2: - stop = prefixes[2].split(',') - """ - for i in range(0, len(lines), 2): - in_line = lines[i] - out_line = lines[i + 1] if i + 1 < len(lines) else "" - examples.append((in_line, out_line)) - yuan = Yuan(engine=self.model_name.replace('yuanai-1.0-', ''), - temperature=temperature, - max_tokens=max_tokens, - topK=topK, - topP=topP, - input_prefix=self.input_prefix, - input_suffix="", - output_prefix=self.output_prefix, - output_suffix="".join(stop), - ) - if not self.api_key: - return NO_APIKEY_MSG, 0 - yuan.set_account(self.api_key) - - for in_line, out_line in examples: - yuan.add_example(Example(inp=in_line, out=out_line)) - - prompt = self.history[-1]["content"] - answer = yuan.submit_API(prompt, trun=stop) - return answer, len(answer) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BLACKPINKs Pink Venom MV and Enjoy the Stunning Visuals.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BLACKPINKs Pink Venom MV and Enjoy the Stunning Visuals.md deleted file mode 100644 index fd3c14bdc91e7724542dfd6bfa938e4382e171cb..0000000000000000000000000000000000000000 --- 
a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download BLACKPINKs Pink Venom MV and Enjoy the Stunning Visuals.md +++ /dev/null @@ -1,132 +0,0 @@ - -

      How to Download Video MV Blackpink Pink Venom for Free

      -

      Blackpink is one of the most popular K-pop girl groups in the world, and their music videos are always stunning and catchy. One of their latest hits is Pink Venom, a fierce and powerful song that showcases their charisma and talent. The music video has over 150 million views on YouTube, and many fans want to download it and enjoy it offline.

      -

      But how can you download video mv blackpink pink venom for free? There are many websites and tools that claim to help you do that, but not all of them are reliable or safe. Some may contain viruses, malware, or annoying ads. Some may have low quality or limited options. Some may even violate the copyright laws and get you in trouble.

      -

      download video mv blackpink pink venom


      Download Zip »»» https://ssurll.com/2uNX3y



      -

      That's why we have done the research for you and found the best solutions to download video mv blackpink pink venom for free. In this article, we will introduce three methods that are easy, fast, and effective. You can choose the one that suits your needs and preferences. Let's get started!

      -

      Online Video Downloader - Download Video from Any Website

      -

      One of the simplest ways to download video mv blackpink pink venom for free is to use an online video downloader. This is a web-based tool that allows you to download videos from any website, including YouTube, Facebook, Instagram, Vimeo, TikTok, and more. You don't need to install any software or extension on your device. All you need is a stable internet connection and a browser.

      -

      One of the best online video downloaders we recommend is Online Video Downloader by ACETHINKER. This tool has many features that make it stand out from other similar tools. Here are some of them:

      -
        -
      • It supports downloading videos in various formats and resolutions, such as MP4, WEBM, 1080p, 720p, 480p, etc.
      • It has a user-friendly interface that makes it easy to use. You just need to copy and paste the URL of the video you want to download and click the Download button.
      • It has no ads, watermarks, or limitations. You can download as many videos as you want for free.
      • It is safe and secure. It does not collect any personal information or data from your device.
      
      -

      To use this online video downloader to download video mv blackpink pink venom for free, follow these steps:

      -
        -
      1. Go to YouTube and find the video mv blackpink pink venom. Copy its link from the address bar.
      2. Go to Online Video Downloader by ACETHINKER and paste the link in the blank field.
      3. Select the quality of the video you want to download and click the Download button.
      4. Save the video to your device and enjoy it offline.
      
      -

      SmallSEOTools - Download Any Video for Free

      -

      Another online tool that can help you download video mv blackpink pink venom for free is SmallSEOTools. This is a website that offers various SEO and digital marketing tools, such as plagiarism checker, grammar checker, keyword research, image compressor, and more. One of its tools is the video downloader, which can download any video from any website for free.

      -

      Some of the features of this video downloader are:

      -
        -
      • It supports downloading videos from over 1000 websites, including YouTube, Facebook, Instagram, Twitter, Dailymotion, and more.
      • It allows you to download videos in different formats and qualities, such as MP4, 3GP, WEBM, HD, SD, etc.
      • It has a simple and intuitive interface that makes it easy to use. You just need to enter the URL of the video you want to download and click the Download button.
      • It is fast and reliable. It can download videos in a matter of seconds without compromising the quality.
      • It is safe and secure. It does not store any of your data or files on its servers.
      -

      To use this video downloader to download video mv blackpink pink venom for free, follow these steps:

      -

      How to download video mv blackpink pink venom for free
      -Download video mv blackpink pink venom lyrics and translation
      -Best video downloader for blackpink pink venom music video
      -Watch blackpink pink venom mv online without downloading
      -Download video mv blackpink pink venom in 4k resolution
      -Blackpink pink venom mv reaction and review videos
      -Download video mv blackpink pink venom dance practice
      -Blackpink pink venom mv behind the scenes and making of
      -Download video mv blackpink pink venom teaser and trailer
      -Blackpink pink venom mv analysis and breakdown
      -Download video mv blackpink pink venom instrumental and karaoke
      -Blackpink pink venom mv outfits and fashion inspiration
      -Download video mv blackpink pink venom remix and mashup
      -Blackpink pink venom mv awards and achievements
      -Download video mv blackpink pink venom cover and parody
      -Blackpink pink venom mv fan art and edits
      -Download video mv blackpink pink venom live performance
      -Blackpink pink venom mv meaning and message
      -Download video mv blackpink pink venom audio and mp3
      -Blackpink pink venom mv trivia and facts
      -Download video mv blackpink pink venom with subtitles and captions
      -Blackpink pink venom mv choreography and tutorial
      -Download video mv blackpink pink venom on different platforms and devices
      -Blackpink pink venom mv comparison and contrast with other songs
      -Download video mv blackpink pink venom in different languages and versions

      -
        -
      1. Go to YouTube and find the video mv blackpink pink venom. Copy its link from the address bar.
      2. Go to SmallSEOTools and click on the Video Downloader tool.
      3. Paste the link in the input box and click the Download button.
      4. Select the format and quality of the video you want to download and click the Download button again.
      5. Save the video to your device and enjoy it offline.
      -

      WonderFox HD Video Converter Factory Pro - The Best Music Video Downloader

      -

      If you are looking for a more professional and powerful tool to download video mv blackpink pink venom for free, you may want to try WonderFox HD Video Converter Factory Pro. This is a desktop software that can download, convert, edit, and enhance any video and audio file. It is compatible with Windows 10/8/7/Vista/XP and supports over 500 formats and devices.

      -

      Some of the features of this software are:

      -
        -
      • It can download videos from over 300 websites, including YouTube, Vimeo, Vevo, Hulu, Netflix, and more.
      • It can download videos in 4K UHD, 1080p HD, 720p HD, SD, and other resolutions.
      • It can convert videos to MP4, AVI, MKV, MOV, WMV, MPG, FLV, and other formats.
      • It can extract audio from videos and convert it to MP3, WAV, FLAC, AAC, M4A, and other formats.
      • It can edit videos by trimming, cropping, rotating, merging, adding subtitles, applying effects, etc.
      • It can enhance videos by adjusting brightness, contrast, saturation, hue, etc.
      • It has a batch mode that can process multiple files at once.
      • It has a fast speed that can save your time and bandwidth.
      -

      To use this software to download video mv blackpink pink venom for free, follow these steps:

      -
        -
      1. Download and install WonderFox HD Video Converter Factory Pro on your computer.
      2. Launch the software and click on the Downloader button.
      3. Click on the New Download button and paste the link of the video mv blackpink pink venom in the pop-up window.
      4. Click on the Analyze button and wait for the software to detect the available formats and qualities of the video.
      5. Select the format and quality of the video you want to download and click on the OK button.
      6. Click on the Download All button and choose a destination folder for the video.
      7. Wait for the software to finish downloading the video and find it in the output folder.
      -

      Comparison Table of the Three Methods

      -

      To help you choose the best method to download video mv blackpink pink venom for free, we have made a comparison table of the three methods we introduced above. You can check their features, pros and cons, and decide which one suits you best.

      | Method | Features | Pros | Cons |
      | --- | --- | --- | --- |
      | Online Video Downloader by ACETHINKER | Download videos from any website; support for various formats and resolutions; no ads, watermarks, or limitations; safe and secure | Easy to use; fast and reliable; free | Needs an internet connection; may not work for some websites |
      | SmallSEOTools | Download videos from over 1000 websites; support for different formats and qualities; simple and intuitive interface; fast and reliable; safe and secure | Easy to use; free | Needs an internet connection; may have ads or pop-ups |
      | WonderFox HD Video Converter Factory Pro | Download videos from over 300 websites; support for 4K UHD, 1080p HD, 720p HD, SD, and other resolutions; convert videos to various formats; extract audio from videos; edit and enhance videos; batch mode | Professional and powerful; fast and high-quality; versatile and flexible | Needs software installation; not free (but has a free trial) |
      -

      Conclusion

      -

      In this article, we have shown you how to download video mv blackpink pink venom for free using three different methods. You can choose the one that works best for you according to your needs and preferences. Whether you use an online tool or a desktop software, you can enjoy watching this amazing music video offline anytime and anywhere.

      -

      We hope you found this article helpful and informative. If you have any questions or suggestions, please feel free to leave a comment below. We would love to hear from you. And if you are a fan of Blackpink, don't forget to check out their other music videos and songs. They are all awesome!

      -

      FAQs

      -

      Q1: Is it legal to download video mv blackpink pink venom?

      -

      A1: It depends on your purpose and intention. If you download it for personal use only, such as watching it offline or making a backup copy, it is usually legal. However, if you download it for commercial use or distribution, such as selling it or uploading it to other platforms, it is illegal. You may violate the copyright laws and the terms of service of the website you download from. Therefore, we advise you to respect the rights of the original creators and use the downloaded videos responsibly and ethically.

      -

      Q2: How can I download video mv blackpink pink venom on my phone?

      -

      A2: You can use the same methods we mentioned above to download video mv blackpink pink venom on your phone. However, some online tools may not work well on mobile browsers, so you may need to use a desktop browser or a mobile app instead. For example, you can use the ACETHINKER Video Downloader App or the SmallSEOTools App to download videos on your phone. Alternatively, you can transfer the downloaded videos from your computer to your phone using a USB cable or a cloud service.

      -

      Q3: How can I convert video mv blackpink pink venom to MP3 or other formats?

      -

      A3: If you want to convert video mv blackpink pink venom to MP3 or other formats, you can use the WonderFox HD Video Converter Factory Pro software we introduced above. It can convert any video and audio file to any format you want. You just need to import the video file, select the output format, and click on the Run button. You can also adjust the parameters of the output file, such as bitrate, sample rate, channel, etc.

      -

      Q4: How can I watch video mv blackpink pink venom offline without downloading?

      -

      A4: If you don't want to download video mv blackpink pink venom, but still want to watch it offline, you can use a streaming service that allows offline viewing. For example, you can use YouTube Premium, which lets you download videos and watch them offline within the app. You can also use Netflix, which has some Blackpink documentaries and shows that you can download and watch offline.

      -

      Q5: Where can I find more videos of Blackpink?

      -

      A5: If you are looking for more videos of Blackpink, you can visit their official YouTube channel, where they upload their music videos, live performances, behind-the-scenes clips, and more. You can also follow them on their social media accounts, such as Instagram, Twitter, Facebook, and TikTok. You can also check out their official website for more information and updates.

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ebira Music Discover the Amazing Talent of 2Rich and Download His Songs in Mp3.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ebira Music Discover the Amazing Talent of 2Rich and Download His Songs in Mp3.md deleted file mode 100644 index 393dd737b37c4397d24ff81309aa3a20604c68b2..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Ebira Music Discover the Amazing Talent of 2Rich and Download His Songs in Mp3.md +++ /dev/null @@ -1,156 +0,0 @@ - -

      How to Download 2rich Ebira Songs MP3 for Free

      -

      If you are a fan of 2rich Ebira songs, you might be wondering how you can download them as MP3 files for free. 2rich is a talented singer and songwriter who specializes in Ebira music, a genre that originates from the Ebira people of central Nigeria. His songs are catchy, lively, and meaningful, reflecting the rich culture and values of the Ebira people. He has released several albums and videos that have gained popularity among his fans and beyond.

      -

      Downloading MP3 files for free has many advantages. You can enjoy your favorite songs offline, without worrying about internet connection or data usage. You can also transfer them to your devices, such as your phone, tablet, or laptop, and listen to them anytime and anywhere. You can also create your own playlists and share them with others.

      -

      download 2rich ebira songs mp3


      Download ->>->>->> https://ssurll.com/2uNWEs



      -

      However, finding free and legal MP3 downloads can be challenging. Many websites that offer free music downloads are illegal, unsafe, or low-quality. They may contain viruses, malware, or spyware that can harm your devices or compromise your privacy. They may also violate the copyrights or licenses of the artists and their work.

      -

      Fortunately, there are some websites that offer free music downloads that are legal and safe. These websites allow you to download songs from artists who have given their permission or who have released their work under Creative Commons licenses. These websites also provide high-quality MP3 files that you can enjoy without any hassle.

      -

      In this article, we will show you how to download 2rich Ebira songs MP3 for free from three of the best free music download sites. We will also give you some tips and tricks on how to enjoy 2rich Ebira songs MP3 more fully.

      -

      The Best Free Music Download Sites for 2rich Ebira Songs

      -

      Here are three of the best free music download sites that you can use to download 2rich Ebira songs MP3 for free:

      -

      SoundCloud

      -

      SoundCloud is one of the most popular online platforms for streaming and downloading music. It has a large collection of songs from various genres, artists, and countries. You can find many 2rich Ebira songs on SoundCloud, as well as other Ebira music and Nigerian music.

      -

      To search for 2rich Ebira songs on SoundCloud, you can use the search bar on the top of the website or the app. You can type in keywords such as "2rich", "Ebira", or the titles of his songs. You can also filter the results by tracks, playlists, albums, or artists.

      -

      To download 2rich Ebira songs from SoundCloud, you need to look for the download button below the track. Not all tracks have this option, as it depends on whether the artist has enabled it or not. If you see the download button, you can click on it and save the MP3 file to your device. If you don't see the download button, you can try using a third-party tool or extension that allows you to download SoundCloud tracks.

      -

      Last.fm

      -

      Last.fm is another popular online platform for streaming and downloading music. It has a feature called Scrobbling, which tracks the music you listen to and recommends you more songs based on your preferences. You can also discover new music from different genres, artists, and countries on Last.fm. You can find some 2rich Ebira songs on Last.fm, as well as other Ebira music and Nigerian music.

      -

      Download 2rich ebira music anwe ovivi ft dan ozizi mp3
      -Download 2rich ebira campaign song ozoza ka natasha mp3
      -Download 2rich ebira old school song akaka anehe enavomihe mp3
      -Download 2rich ebira highlife and afropop music mp3
      -Download 2rich ebira latest song for barrister natasha akpti iduaghan mp3
      -Download 2rich ebira music from ebira online media mp3
      -Download 2rich ebira music featuring ebira legend dan ozizi mp3
      -Download 2rich ebira music red oil (palm oil) anwe ovivi mp3
      -Download 2rich ebira music for kogi central senatorial election mp3
      -Download 2rich ebira music by yakubu binuyaminu mp3
      -Download 2rich ebira music from youtube channel mp3
      -Download 2rich ebira music from new scientist website mp3
      -Download 2rich ebira music from the sun website mp3
      -Download 2rich ebira music from yahoo news website mp3
      -Download 2rich ebira music from wikipedia website mp3
      -Download 2rich ebira music from montana website mp3
      -Download 2rich ebira music from cornell university website mp3
      -Download 2rich ebira music from nasa website mp3
      -Download 2rich ebira music with lyrics and translation mp3
      -Download 2rich ebira music with video and audio quality mp3
      -Download 2rich ebira music with reviews and ratings mp3
      -Download 2rich ebira music with free and fast download link mp3
      -Download 2rich ebira music with no ads and pop-ups mp3
      -Download 2rich ebira music with bonus tracks and remixes mp3
      -Download 2rich ebira music with playlist and album art mp3
      -How to download 2rich ebira songs mp3 on your device
      -Where to download 2rich ebira songs mp3 online
      -Why you should download 2rich ebira songs mp3 for your enjoyment
      -What are the benefits of downloading 2rich ebira songs mp3 for your culture
      -Who are the fans of downloading 2rich ebira songs mp3 in the world
      -Best sites to download 2rich ebira songs mp3 in 2021
      -Top tips to download 2rich ebira songs mp3 safely and legally
      -Best practices to download 2rich ebira songs mp3 without viruses and malware
      -Common mistakes to avoid when downloading 2rich ebira songs mp3 on the internet
      -Frequently asked questions about downloading 2rich ebira songs mp3 answered by experts

      -

      To search for 2rich Ebira songs on Last.fm, you can use the search bar on the top of the website or the app. You can type in keywords such as "2rich", "Ebira", or the titles of his songs. You can also filter the results by tracks, albums, artists, or tags.

      -

      To download 2rich Ebira songs from Last.fm, you need to look for the download link below the track. Not all tracks have this option, as it depends on whether the artist has provided it or not. If you see the download link, you can click on it and save the MP3 file to your device. If you don't see the download link, you can try using a third-party tool or extension that allows you to download Last.fm tracks.

      -

      NoiseTrade

      -

      NoiseTrade is a unique online platform for streaming and downloading music. It allows artists to share their music with fans for free in exchange for their email addresses and postal codes. This way, artists can build their fan base and connect with them directly. Fans can also tip the artists if they want to support them financially. You can find some 2rich Ebira songs on NoiseTrade, as well as other Ebira music and Nigerian music.

      -

      To search for 2rich Ebira songs on NoiseTrade, you can use the search bar on the top of the website or the app. You can type in keywords such as "2rich", "Ebira", or the titles of his songs. You can also browse by genres, moods, new releases, or editors' picks.

      -

      To download 2rich Ebira songs from NoiseTrade, you need to click on the album or track that you want to download. Then, you need to enter your email address and postal code and click on the download button. You will receive an email with a link to download the MP3 file to your device. You can also choose to tip the artist if you want to show your appreciation.

      Tips and Tricks for Enjoying 2rich Ebira Songs MP3

      -

      Now that you have downloaded some 2rich Ebira songs MP3 for free, you might want to know how to enjoy them more fully. Here are some tips and tricks that you can try:

      -

      Learn more about the Ebira language and culture

      -

      One of the best ways to appreciate 2rich Ebira songs MP3 is to learn more about the Ebira language and culture. The Ebira people are one of the ethnic groups in Nigeria, mainly living in Kogi State and some parts of Edo, Niger, and Nasarawa States. They have a rich history, traditions, and values that are reflected in their music and art.

      -

      To find resources and information about the Ebira people and their culture, you can use the internet, books, or documentaries. You can also talk to Ebira people or visit their communities if you have the opportunity. You can learn about their history, religion, festivals, cuisine, clothing, crafts, and more.

      -

      To appreciate the unique features and expressions of the Ebira language and music, you can also try to learn some basic words and phrases in Ebira. You can use online dictionaries, apps, or videos to help you with that. You can also listen to the lyrics of 2rich Ebira songs MP3 and try to understand their meanings and messages.

      -

      Create your own playlists and share them with others

      -

      Another way to enjoy 2rich Ebira songs MP3 is to create your own playlists and share them with others. You can organize your downloaded MP3 files into playlists according to your mood, preference, or occasion. For example, you can create a playlist for relaxing, working out, studying, partying, or traveling.

      -

      To create your own playlists, you can use any music player or app that allows you to do so. You can also use online tools or websites that let you create and customize your playlists. You can add titles, descriptions, images, or tags to your playlists.

      -

      To share your playlists with others, you can use social media or streaming platforms that allow you to do so. You can post your playlists on Facebook, Twitter, Instagram, YouTube, Spotify, or SoundCloud. You can also send your playlists to your friends, family, or online communities via email or messaging apps. You can also invite others to collaborate on your playlists or join your listening parties.

      -

      Support the artists and their work

      -

      The last but not least way to enjoy 2rich Ebira songs MP3 is to support the artists and their work. 2rich is a talented and hardworking artist who deserves recognition and appreciation for his music. He has been making music since he was a teenager and has been influenced by various genres such as hip hop, reggae, afrobeat, highlife, and gospel.

      -

      To follow 2rich on his social media accounts or websites, you can use the links below. You can also show your appreciation and feedback by liking, commenting, or sharing his posts or videos.

      -
        -
      • Facebook: https://www.facebook.com/2richofficial
      • Twitter: https://twitter.com/2richofficial
      • Instagram: https://www.instagram.com/2richofficial
      • YouTube: https://www.youtube.com/channel/UCY9lZ8qf0w8y1yQ4xTfZ7Lg
      • Website: https://www.2richofficial.com
      -

      To buy his albums, merchandise, or tickets if you want to support him financially or see him live, you can use the links below. You can also donate to his PayPal account if you want to show your generosity.

      -
        -
      • Albums: https://www.2richofficial.com/music
      • Merchandise: https://www.2richofficial.com/shop
      • Tickets: https://www.2richofficial.com/events
      • PayPal: https://www.paypal.me/2richofficial
      -

      Conclusion

      -

      In conclusion, downloading 2rich Ebira songs MP3 for free is a great way to enjoy his music and learn more about the Ebira culture. You can use the free music download sites that we have recommended in this article to download his songs legally and safely. You can also use the tips and tricks that we have shared in this article to enjoy his songs more fully.

      -

      We hope that this article has helped you with your request and that you have found it informative and engaging. If you have any questions or feedback, please feel free to contact us. We would love to hear from you.

      FAQs -

      Here are some of the frequently asked questions that you might have about 2rich Ebira songs MP3:

      -

      Who is 2rich?

      -

      2rich is a Nigerian singer and songwriter who specializes in Ebira music. His real name is Richard Omojola, and he was born on June 12, 1990, in Okene, Kogi State. He started his musical career in 2007 and has since released several albums and videos. Some of his most popular songs are "Oyoyo", "Ozi Ozi", "Omo Ebira", and "Ebira Gyration".

      -

      What are some of the most popular 2rich Ebira songs?

      -

      Some of the most popular 2rich Ebira songs are:

      • "Oyoyo"
      • "Ozi Ozi"
      • "Omo Ebira"
      • "Ebira Gyration"

      What are some of the other genres of Ebira music?

      -

      Some of the other genres of Ebira music are:

      -
        -
      • Eche-Ozi: A traditional genre that uses drums, flutes, rattles, and chants to tell stories and proverbs.
      • Ogene: A modern genre that uses metal gongs, guitars, keyboards, and vocals to create upbeat and danceable tunes.
      • Afro-Ebira: A fusion genre that combines elements of Ebira music with other African or Western genres such as afrobeat, highlife, reggae, hip hop, or gospel.
      • Ebira Gospel: A religious genre that uses the Ebira language and music to praise God and spread the Christian faith.
      • Ebira Rap: A contemporary genre that uses the Ebira language and music to express social issues, personal experiences, or opinions.

        What are some of the other sources of free music downloads?

        -

        Some of the other sources of free music downloads are:

        -
          -
      • Jamendo: A website that offers free music downloads from independent artists who have released their work under Creative Commons licenses.
      • DatPiff: A website that offers free music downloads from hip hop artists who have released their work as mixtapes or albums.
      • Free Music Archive: A website that offers free music downloads from various genres and artists who have released their work under Creative Commons licenses or into the public domain.
      • Bandcamp: A website that offers free music downloads from various genres and artists who have set their own prices or allowed fans to name their own prices.
      • Audiomack: A website that offers free music downloads from various genres and artists who have uploaded their work for streaming or downloading.

          What are some of the legal issues of downloading music for free?

          -

          Some of the legal issues of downloading music for free are:

          -
            -
      • Piracy: Downloading music for free from websites that do not have permission or a license from the artists or their labels is considered piracy, which is illegal and punishable by law.
      • Infringement: Downloading music for free from websites that do not respect the copyrights or licenses of the artists or their work is considered infringement, which is illegal and can result in lawsuits or fines.
      • Moral: Downloading music for free from websites that do not support the artists or their work is considered immoral, as it deprives them of their rightful income and recognition.
            -
            -
            \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/pretrain_taiyi_clip/test.sh b/spaces/skf15963/summary/fengshen/examples/pretrain_taiyi_clip/test.sh deleted file mode 100644 index 729fa870407ec42b5cd48872c6acb9f5a4c8bf4f..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/pretrain_taiyi_clip/test.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=finetune_taiyi # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=8 # number of tasks to run per node -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:8 # number of gpus per node -#SBATCH -o %x-%j.log # output and error log file names (%x for job id) -#SBATCH -x dgx050 - -# pwd=Fengshenbang-LM/fengshen/examples/pretrain_erlangshen - -NNODES=1 -GPUS_PER_NODE=1 - -MICRO_BATCH_SIZE=64 - -DATA_ARGS="\ - --test_batchsize $MICRO_BATCH_SIZE \ - --datasets_name flickr30k-CNA \ - " - -MODEL_ARGS="\ - --model_path /cognitive_comp/gaoxinyu/github/Fengshenbang-LM/fengshen/workspace/taiyi-clip-huge-v2/hf_out_0_661 \ - " - -TRAINER_ARGS="\ - --gpus $GPUS_PER_NODE \ - --num_nodes $NNODES \ - --strategy ddp \ - --log_every_n_steps 0 \ - --default_root_dir . \ - --precision 32 \ - " -# num_sanity_val_steps, limit_val_batches 通过这俩参数把validation关了 - -export options=" \ - $DATA_ARGS \ - $MODEL_ARGS \ - $TRAINER_ARGS \ - " - -CUDA_VISIBLE_DEVICES=0 python3 test.py $options -#srun -N $NNODES --gres=gpu:$GPUS_PER_NODE --ntasks-per-node=$GPUS_PER_NODE --cpus-per-task=20 python3 pretrain.py $options diff --git a/spaces/smajumdar/nemo_multilingual_language_id/speech_to_text_buffered_infer_ctc.py b/spaces/smajumdar/nemo_multilingual_language_id/speech_to_text_buffered_infer_ctc.py deleted file mode 100644 index cfb776bf62ba047f69f0c851d6f2489e9607556f..0000000000000000000000000000000000000000 --- a/spaces/smajumdar/nemo_multilingual_language_id/speech_to_text_buffered_infer_ctc.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This script serves three goals: - (1) Demonstrate how to use NeMo Models outside of PytorchLightning - (2) Shows example of batch ASR inference - (3) Serves as CI test for pre-trained checkpoint - -python speech_to_text_buffered_infer_ctc.py \ - model_path=null \ - pretrained_name=null \ - audio_dir="" \ - dataset_manifest="" \ - output_filename="" \ - total_buffer_in_secs=4.0 \ - chunk_len_in_secs=1.6 \ - model_stride=4 \ - batch_size=32 - -# NOTE: - You can use `DEBUG=1 python speech_to_text_buffered_infer_ctc.py ...` to print out the - predictions of the model, and ground-truth text if presents in manifest. 
-""" -import contextlib -import copy -import glob -import math -import os -from dataclasses import dataclass, is_dataclass -from typing import Optional - -import torch -from omegaconf import OmegaConf - -from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchASR -from nemo.collections.asr.parts.utils.transcribe_utils import ( - compute_output_filename, - get_buffered_pred_feat, - setup_model, - write_transcription, -) -from nemo.core.config import hydra_runner -from nemo.utils import logging - -can_gpu = torch.cuda.is_available() - - -@dataclass -class TranscriptionConfig: - # Required configs - model_path: Optional[str] = None # Path to a .nemo file - pretrained_name: Optional[str] = None # Name of a pretrained model - audio_dir: Optional[str] = None # Path to a directory which contains audio files - dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest - - # General configs - output_filename: Optional[str] = None - batch_size: int = 32 - num_workers: int = 0 - append_pred: bool = False # Sets mode of work, if True it will add new field transcriptions. - pred_name_postfix: Optional[str] = None # If you need to use another model name, rather than standard one. - - # Chunked configs - chunk_len_in_secs: float = 1.6 # Chunk length in seconds - total_buffer_in_secs: float = 4.0 # Length of buffer (chunk + left and right padding) in seconds - model_stride: int = 8 # Model downsampling factor, 8 for Citrinet models and 4 for Conformer models", - - # Set `cuda` to int to define CUDA device. If 'None', will look for CUDA - # device anyway, and do inference on CPU only if CUDA device is not found. - # If `cuda` is a negative number, inference will be on CPU only. - cuda: Optional[int] = None - amp: bool = False - audio_type: str = "wav" - - # Recompute model transcription, even if the output folder exists with scores. 
- overwrite_transcripts: bool = True - - -@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig) -def main(cfg: TranscriptionConfig) -> TranscriptionConfig: - logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}') - torch.set_grad_enabled(False) - - if is_dataclass(cfg): - cfg = OmegaConf.structured(cfg) - - if cfg.model_path is None and cfg.pretrained_name is None: - raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!") - if cfg.audio_dir is None and cfg.dataset_manifest is None: - raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!") - - filepaths = None - manifest = cfg.dataset_manifest - if cfg.audio_dir is not None: - filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True)) - manifest = None # ignore dataset_manifest if audio_dir and dataset_manifest both presents - - # setup GPU - if cfg.cuda is None: - if torch.cuda.is_available(): - device = [0] # use 0th CUDA device - accelerator = 'gpu' - else: - device = 1 - accelerator = 'cpu' - else: - device = [cfg.cuda] - accelerator = 'gpu' - map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu') - logging.info(f"Inference will be done on device : {device}") - - asr_model, model_name = setup_model(cfg, map_location) - - model_cfg = copy.deepcopy(asr_model._cfg) - OmegaConf.set_struct(model_cfg.preprocessor, False) - # some changes for streaming scenario - model_cfg.preprocessor.dither = 0.0 - model_cfg.preprocessor.pad_to = 0 - - if model_cfg.preprocessor.normalize != "per_feature": - logging.error("Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently") - - # Disable config overwriting - OmegaConf.set_struct(model_cfg.preprocessor, True) - - # setup AMP (optional) - if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'): - logging.info("AMP enabled!\n") - autocast = torch.cuda.amp.autocast - else: - - @contextlib.contextmanager - def autocast(): - yield - - # Compute output filename - cfg = compute_output_filename(cfg, model_name) - - # if transcripts should not be overwritten, and already exists, skip re-transcription step and return - if not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename): - logging.info( - f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`" - f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text." 
- ) - return cfg - - asr_model.eval() - asr_model = asr_model.to(asr_model.device) - - feature_stride = model_cfg.preprocessor['window_stride'] - model_stride_in_secs = feature_stride * cfg.model_stride - total_buffer = cfg.total_buffer_in_secs - chunk_len = float(cfg.chunk_len_in_secs) - - tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs) - mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs) - logging.info(f"tokens_per_chunk is {tokens_per_chunk}, mid_delay is {mid_delay}") - - frame_asr = FrameBatchASR( - asr_model=asr_model, frame_len=chunk_len, total_buffer=cfg.total_buffer_in_secs, batch_size=cfg.batch_size, - ) - - hyps = get_buffered_pred_feat( - frame_asr, - chunk_len, - tokens_per_chunk, - mid_delay, - model_cfg.preprocessor, - model_stride_in_secs, - asr_model.device, - manifest, - filepaths, - ) - output_filename = write_transcription(hyps, cfg, model_name, filepaths=filepaths, compute_langs=False) - logging.info(f"Finished writing predictions to {output_filename}!") - - return cfg - - -if __name__ == '__main__': - main() # noqa pylint: disable=no-value-for-parameter diff --git a/spaces/smdcn/stabilityai-stable-diffusion-2-1-base/app.py b/spaces/smdcn/stabilityai-stable-diffusion-2-1-base/app.py deleted file mode 100644 index c511027b85081d84ef61d53954ed6dabfb2a3577..0000000000000000000000000000000000000000 --- a/spaces/smdcn/stabilityai-stable-diffusion-2-1-base/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1-base").launch() \ No newline at end of file diff --git a/spaces/spacy/healthsea-pipeline/README.md b/spaces/spacy/healthsea-pipeline/README.md deleted file mode 100644 index 132a7f9243b2b916b516ebc11f0288abb5b42472..0000000000000000000000000000000000000000 --- a/spaces/spacy/healthsea-pipeline/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Healthsea Pipeline -emoji: ⚙️ -colorFrom: blue -colorTo: pink -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
- diff --git a/spaces/speeddemonau/OpenAssistant-stablelm-7b-sft-v7-epoch-3/README.md b/spaces/speeddemonau/OpenAssistant-stablelm-7b-sft-v7-epoch-3/README.md deleted file mode 100644 index 987dc69e0bca77119d39dd6524328b82c6bcc59c..0000000000000000000000000000000000000000 --- a/spaces/speeddemonau/OpenAssistant-stablelm-7b-sft-v7-epoch-3/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: OpenAssistant Stablelm 7b Sft V7 Epoch 3 -emoji: 📈 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sqc1729/bingi/src/components/chat-image.tsx b/spaces/sqc1729/bingi/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/sqc1729/bingi/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? 
'' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
            -
            panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
            -
            -
            -
            -

            添加图像

            -
            -
            - -
            - e.stopPropagation()} - /> - -
            -
            - - -
            -
            - {panel === 'camera-mode' &&
            -
            -
            -
            -
            -
            -
            -
            } -
            -
            - ) -} diff --git a/spaces/srijitpanja/aip/README.md b/spaces/srijitpanja/aip/README.md deleted file mode 100644 index a69dfc38688bc931fb63df0fdc9d624479b488c3..0000000000000000000000000000000000000000 --- a/spaces/srijitpanja/aip/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AIPoet - click scenery, get poems -emoji: 🏃 -colorFrom: yellow -colorTo: indigo -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false -license: llama2 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/base_decoder.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/base_decoder.py deleted file mode 100644 index a097969b3c0650cf8ea2ab5f8e96bbc68ea9b97f..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/new/decoders/base_decoder.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import itertools as it -from typing import Any, Dict, List - -import torch -from fairseq.data.dictionary import Dictionary -from fairseq.models.fairseq_model import FairseqModel - - -class BaseDecoder: - def __init__(self, tgt_dict: Dictionary) -> None: - self.tgt_dict = tgt_dict - self.vocab_size = len(tgt_dict) - - self.blank = ( - tgt_dict.index("") - if "" in tgt_dict.indices - else tgt_dict.bos() - ) - if "" in tgt_dict.indices: - self.silence = tgt_dict.index("") - elif "|" in tgt_dict.indices: - self.silence = tgt_dict.index("|") - else: - self.silence = tgt_dict.eos() - - def generate( - self, models: List[FairseqModel], sample: Dict[str, Any], **unused - ) -> List[List[Dict[str, torch.LongTensor]]]: - encoder_input = { - k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" - } - emissions = self.get_emissions(models, encoder_input) - return self.decode(emissions) - - def get_emissions( - self, - models: List[FairseqModel], - encoder_input: Dict[str, Any], - ) -> torch.FloatTensor: - model = models[0] - encoder_out = model(**encoder_input) - if hasattr(model, "get_logits"): - emissions = model.get_logits(encoder_out) - else: - emissions = model.get_normalized_probs(encoder_out, log_probs=True) - return emissions.transpose(0, 1).float().cpu().contiguous() - - def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor: - idxs = (g[0] for g in it.groupby(idxs)) - idxs = filter(lambda x: x != self.blank, idxs) - return torch.LongTensor(list(idxs)) - - def decode( - self, - emissions: torch.FloatTensor, - ) -> List[List[Dict[str, torch.LongTensor]]]: - raise NotImplementedError diff --git a/spaces/srush/minichain/selfask.py b/spaces/srush/minichain/selfask.py deleted file mode 100644 index 3bfdb92af2576c98ba5af6c7f1a70fb3e18f527c..0000000000000000000000000000000000000000 --- a/spaces/srush/minichain/selfask.py +++ /dev/null @@ -1,71 +0,0 @@ - -desc = """ -### Self-Ask - - Notebook implementation of the self-ask + Google tool use prompt. 
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/selfask.ipynb) - - (Adapted from [Self-Ask repo](https://github.com/ofirpress/self-ask)) -""" - -# $ - -from dataclasses import dataclass, replace -from typing import Optional -from minichain import prompt, show, OpenAI, Google, transform - - -@dataclass -class State: - question: str - history: str = "" - next_query: Optional[str] = None - final_answer: Optional[str] = None - - -@prompt(OpenAI(stop="\nIntermediate answer:"), - template_file = "selfask.pmpt.tpl") -def self_ask(model, state): - return model(state) - -@transform() -def next_step(ask): - res = ask.split(":", 1)[1] - if out.startswith("Follow up:"): - return replace(state, next_query=res) - elif out.startswith("So the final answer is:"): - return replace(state, final_answer=res) - -@prompt(Google()) -def google(model, state): - if state.next_query is None: - return "" - - return model(state.next_query) - -@transform() -def update(state, result): - if not result: - return state - return State(state.question, - state.history + "\nIntermediate answer: " + result + "\n") - -def selfask(question): - state = State(question) - for i in range(3): - state = next_step(self_ask(state)) - state = update(google(state)) - return state - -# $ - -gradio = show(selfask, - examples=["What is the zip code of the city where George Washington was born?"], - subprompts=[self_ask, google] * 3, - description=desc, - code=open("selfask.py", "r").read().split("$")[1].strip().strip("#").strip(), - out_type="json" - ) -if __name__ == "__main__": - gradio.queue().launch() - - diff --git a/spaces/starlit7/USPoliticsTTS/app.py b/spaces/starlit7/USPoliticsTTS/app.py deleted file mode 100644 index c3079195450a10138cd1e00ac6f05b4510006018..0000000000000000000000000000000000000000 --- a/spaces/starlit7/USPoliticsTTS/app.py +++ /dev/null @@ -1,173 +0,0 @@ -import json -import os -import re - -import librosa -import numpy as np -import torch -from torch import no_grad, LongTensor -import commons -import utils -import gradio as gr -from models import SynthesizerTrn -from text import text_to_sequence, _clean_text -from mel_processing import spectrogram_torch - -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - - -def get_text(text, hps, is_phoneme): - text_norm = text_to_sequence(text, hps.symbols, [] if is_phoneme else hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm - - -def create_tts_fn(model, hps, speaker_ids): - def tts_fn(text, speaker, speed, is_phoneme): - if limitation: - text_len = len(text) - max_len = 300 - if is_phoneme: - max_len *= 3 - else: - if len(hps.data.text_cleaners) > 0 and hps.data.text_cleaners[0] == "zh_ja_mixture_cleaners": - text_len = len(re.sub("(\[ZH\]|\[JA\])", "", text)) - if text_len > max_len: - return "Error: Text is too long", None - - speaker_id = speaker_ids[speaker] - stn_tst = get_text(text, hps, is_phoneme) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - sid = LongTensor([speaker_id]) - - audio = model.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8, - length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy() - del stn_tst, x_tst, x_tst_lengths, sid - return "Success", (hps.data.sampling_rate, audio) - - return tts_fn - - -def 
create_to_phoneme_fn(hps): - def to_phoneme_fn(text): - return _clean_text(text, hps.data.text_cleaners) if text != "" else "" - - return to_phoneme_fn - - -css = """ - #advanced-btn { - color: white; - border-color: black; - background: black; - font-size: .7rem !important; - line-height: 19px; - margin-top: 24px; - margin-bottom: 12px; - padding: 2px 8px; - border-radius: 14px !important; - } - #advanced-options { - display: none; - margin-bottom: 20px; - } -""" - -if __name__ == '__main__': - models_tts = [] - models_vc = [] - models_soft_vc = [] - name = 'USPresidentTTS' - lang = 'English' - example = '[EN]Fellow my citizen.[EN]' - config_path = f"saved_model/0/config.json" - model_path = f"saved_model/0/model.pth" - cover_path = f"saved_model/0/cover.png" - hps = utils.get_hparams_from_file(config_path) - model = SynthesizerTrn( - len(hps.symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model) - utils.load_checkpoint(model_path, model, None) - model.eval() - speaker_ids = [sid for sid, name in enumerate(hps.speakers) if name != "None"] - speakers = [name for sid, name in enumerate(hps.speakers) if name != "None"] - - t = 'vits' - models_tts.append((name, cover_path, speakers, lang, example, - hps.symbols, create_tts_fn(model, hps, speaker_ids), - create_to_phoneme_fn(hps))) - - - app = gr.Blocks(css=css) - - with app: - gr.Markdown("# USPresident TTS Using VITS Model\n\n" - "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ORI-Muchim.USPresidentTTS)\n\n" - "[USPoliticsTTS 제작자 유튜브 주소]" - "(https://www.youtube.com/@litlit/featured)" - ) - - with gr.Tabs(): - with gr.TabItem("TTS"): - with gr.Tabs(): - for i, (name, cover_path, speakers, lang, example, symbols, tts_fn, - to_phoneme_fn) in enumerate(models_tts): - with gr.TabItem(f"Politican"): - with gr.Column(): - gr.Markdown(f"## {name}\n\n" - f"![cover](file/{cover_path})\n\n" - f"lang: {lang}") - tts_input1 = gr.TextArea(label="Text (300 words limitation)", value=example, - elem_id=f"tts-input{i}") - tts_input2 = gr.Dropdown(label="Speaker", choices=speakers, - type="index", value=speakers[0]) - tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1) - with gr.Accordion(label="Advanced Options", open=False): - phoneme_input = gr.Checkbox(value=False, label="Phoneme input") - to_phoneme_btn = gr.Button("Covert text to phoneme") - phoneme_list = gr.Dataset(label="Phoneme list", components=[tts_input1], - samples=[[x] for x in symbols], - elem_id=f"phoneme-list{i}") - phoneme_list_json = gr.Json(value=symbols, visible=False) - tts_submit = gr.Button("Generate", variant="primary") - tts_output1 = gr.Textbox(label="Output Message") - tts_output2 = gr.Audio(label="Output Audio") - tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3, phoneme_input], - [tts_output1, tts_output2]) - to_phoneme_btn.click(to_phoneme_fn, [tts_input1], [tts_input1]) - phoneme_list.click(None, [phoneme_list, phoneme_list_json], [], - _js=f""" - (i,phonemes) => {{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let text_input = root.querySelector("#tts-input{i}").querySelector("textarea"); - let startPos = text_input.selectionStart; - let endPos = text_input.selectionEnd; - let oldTxt = text_input.value; - let result = oldTxt.substring(0, startPos) + phonemes[i] + oldTxt.substring(endPos); - text_input.value = result; - let x = window.scrollX, y = window.scrollY; - 
text_input.focus(); - text_input.selectionStart = startPos + phonemes[i].length; - text_input.selectionEnd = startPos + phonemes[i].length; - text_input.blur(); - window.scrollTo(x, y); - return []; - }}""") - - - gr.Markdown( - "Reference \n\n" - "- [https://huggingface.co/spaces/skytnt/moe-tts](https://huggingface.co/spaces/skytnt/moe-tts)" - - ) - app.queue(concurrency_count=3).launch(show_api=False) - diff --git a/spaces/stevengrove/GPT4Tools/app.py b/spaces/stevengrove/GPT4Tools/app.py deleted file mode 100644 index 78f2c3b37d595ad7b4a98dc8d22fbe63c73673d9..0000000000000000000000000000000000000000 --- a/spaces/stevengrove/GPT4Tools/app.py +++ /dev/null @@ -1,160 +0,0 @@ -import os -import re -import pickle -import base64 -import requests -import argparse -import numpy as np -import gradio as gr - -from functools import partial -from PIL import Image - -SERVER_URL = os.getenv('SERVER_URL') - - -def get_images(state): - history = '' - for i in range(len(state)): - for j in range(len(state[i])): - history += state[i][j] + '\n' - for image_path in re.findall('image/[0-9,a-z]+\.png', history): - if os.path.exists(image_path): - continue - data = {'method': 'get_image', 'args': [image_path], 'kwargs': {}} - data = base64.b64encode(pickle.dumps(data)).decode('utf-8') - response = requests.post(SERVER_URL, json=data) - image = pickle.loads(base64.b64decode(response.json().encode('utf-8'))) - image.save(image_path) - - -def bot_request(method, *args, **kwargs): - data = {'method': method, 'args': args, 'kwargs': kwargs} - data = base64.b64encode(pickle.dumps(data)).decode('utf-8') - response = requests.post(SERVER_URL, json=data) - response = pickle.loads(base64.b64decode(response.json().encode('utf-8'))) - if response is not None: - state = response[0] - get_images(state) - return response - - -def run_image(image, *args, **kwargs): - if image is not None: - width, height = image.size - ratio = min(512 / width, 512 / height) - width_new, height_new = (round(width * ratio), round(height * ratio)) - width_new = int(np.round(width_new / 64.0)) * 64 - height_new = int(np.round(height_new / 64.0)) * 64 - image = image.resize((width_new, height_new)) - image = image.convert('RGB') - return bot_request('run_image', image, *args, **kwargs) - - -def predict_example(temperature, top_p, max_new_token, keep_last_n_paragraphs, image, text): - state = [] - buffer = '' - chatbot, state, text, buffer = run_image(image, state, text, buffer) - chatbot, state, text, buffer = bot_request( - 'run_text', text, state, temperature, top_p, - max_new_token, keep_last_n_paragraphs, buffer) - return chatbot, state, text, None, buffer - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--temperature', type=float, default=0.0, help='temperature for the llm model') - parser.add_argument('--max_new_tokens', type=int, default=256, help='max number of new tokens to generate') - parser.add_argument('--top_p', type=float, default=1.0, help='top_p for the llm model') - parser.add_argument('--top_k', type=int, default=40, help='top_k for the llm model') - parser.add_argument('--keep_last_n_paragraphs', type=int, default=0, help='keep last n paragraphs in the memory') - args = parser.parse_args() - - examples = [ - ['images/example-1.jpg', 'What is unusual about this image?'], - ['images/example-2.jpg', 'Make the image look like a cartoon.'], - ['images/example-3.jpg', 'Segment the tie in the image.'], - ['images/example-4.jpg', 'Generate a man watching a sea based on the pose of the 
woman.'], - ['images/example-5.jpg', 'Replace the dog with a monkey.'], - ] - - if not os.path.exists('image'): - os.makedirs('image') - - with gr.Blocks() as demo: - state = gr.Chatbot([], visible=False) - buffer = gr.Textbox('', visible=False) - - with gr.Row(): - with gr.Column(scale=0.3): - with gr.Row(): - image = gr.Image(type='pil', label='input image') - with gr.Row(): - txt = gr.Textbox(lines=7, show_label=False, elem_id='textbox', - placeholder='Enter text and press submit, or upload an image').style(container=False) - with gr.Row(): - submit = gr.Button('Submit') - with gr.Row(): - clear = gr.Button('Clear') - with gr.Row(): - llm_name = gr.Radio( - ["Vicuna-13B"], - label="LLM Backend", - value="Vicuna-13B", - interactive=True) - keep_last_n_paragraphs = gr.Slider( - minimum=0, - maximum=3, - value=args.keep_last_n_paragraphs, - step=1, - interactive=True, - label='Remember Last N Paragraphs') - max_new_token = gr.Slider( - minimum=64, - maximum=512, - value=args.max_new_tokens, - step=1, - interactive=True, - label='Max New Tokens') - temperature = gr.Slider( - minimum=0.0, - maximum=1.0, - value=args.temperature, - step=0.1, - interactive=True, - visible=False, - label='Temperature') - top_p = gr.Slider( - minimum=0.0, - maximum=1.0, - value=args.top_p, - step=0.1, - interactive=True, - visible=False, - label='Top P') - with gr.Column(scale=0.7): - chatbot = gr.Chatbot(elem_id='chatbot', label='🦙 GPT4Tools').style(height=690) - - image.upload(lambda: '', None, txt) - submit.click(run_image, - [image, state, txt, buffer], - [chatbot, state, txt, buffer]).then( - partial(bot_request, 'run_text'), - [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs, buffer], - [chatbot, state, txt, buffer]).then( - lambda: None, None, image) - clear.click(partial(bot_request, 'clear')) - clear.click(lambda: [[], [], '', ''], None, [chatbot, state, txt, buffer]) - - with gr.Row(): - gr.Examples( - examples=examples, - fn=partial(predict_example, args.temperature, args.top_p, - args.max_new_tokens, args.keep_last_n_paragraphs), - inputs=[image, txt], - outputs=[chatbot, state, txt, image, buffer], - cache_examples=True, - ) - - demo.queue(concurrency_count=6) - demo.launch() diff --git a/spaces/stomexserde/gpt4-ui/Examples/Badal Telugu Movie Hindi Dubbed HOT! Free Download.md b/spaces/stomexserde/gpt4-ui/Examples/Badal Telugu Movie Hindi Dubbed HOT! Free Download.md deleted file mode 100644 index 04383e6165e15d0b1f2884bd8194ff563ef2325e..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Badal Telugu Movie Hindi Dubbed HOT! Free Download.md +++ /dev/null @@ -1,18 +0,0 @@ - -

            Badal: A Telugu Action Thriller Dubbed in Hindi

            -

            Badal is a 2007 Telugu movie starring Ravi Teja, Meera Jasmine, Pradeep Rawat and Prakash Raj. The movie is directed by Boyapati Srinu and produced by Dil Raju. The movie was dubbed in Hindi and released as Badla in 2010.

            -

The movie revolves around Bhadra (Ravi Teja), a young man who falls in love with his friend Raja's sister Anu (Meera Jasmine). However, Raja and his family are killed by a ruthless gangster, Veerayya (Pradeep Rawat), who wants to marry Anu against her will. Bhadra rescues Anu and vows to take revenge on Veerayya and his henchmen. He is helped by a police officer, Siva (Prakash Raj), who has a personal grudge against Veerayya.

            -

            Badal Telugu Movie Hindi Dubbed Free Download


            Download Ziphttps://urlgoal.com/2uIaof



            -

            Badal is a fast-paced action thriller with Ravi Teja's trademark comedy and romance. The movie has some high-octane fight sequences, catchy songs and a gripping plot. The movie was a hit at the box office and received positive reviews from critics and audiences alike.

            -

            If you are looking for a masala entertainer with a dose of action, comedy and romance, then Badal is the movie for you. You can watch the full movie in Hindi dubbed version on YouTube for free. Just click on the link below and enjoy the movie.

            -

            Watch Badal Full Movie in Hindi Dubbed

            - -

Badal is not the only film to go by this name. Badla, a 2019 Hindi thriller with a different plot and cast, starred Amitabh Bachchan, Taapsee Pannu, Amrita Singh and Tony Luke. The movie is directed by Sujoy Ghosh and produced by Red Chillies Entertainment and Azure Entertainment, and is a remake of the 2017 Spanish movie The Invisible Guest.

            -

            The movie revolves around Naina Sethi (Taapsee Pannu), a successful businesswoman who is accused of murdering her lover Arjun (Tony Luke). She hires a renowned lawyer Badal Gupta (Amitabh Bachchan) to defend her case. However, Badal soon realizes that there is more to the story than meets the eye. He uncovers a web of lies, deceit and betrayal that involves Naina's husband, her best friend and a mysterious stranger.

            -

Badla is a suspenseful thriller with twists and turns that keep the audience on the edge of their seats. The movie has brilliant performances by the lead actors, especially Amitabh Bachchan and Taapsee Pannu, who share great chemistry on screen. The movie was a critical and commercial success and received several awards and nominations.

            -

If you are looking for a smart and engaging thriller with a stellar cast, then Badla is the movie for you. You can watch the full movie in Hindi on Netflix or Amazon Prime Video. Just click on the link below and enjoy the movie.

            -

            -

Watch Badla Full Movie in Hindi on Netflix

            -

Watch Badla Full Movie in Hindi on Amazon Prime Video

            7b8c122e87
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Brokeback Mountain 2005 Bluray 720p X264 Yify English 272.md b/spaces/stomexserde/gpt4-ui/Examples/Brokeback Mountain 2005 Bluray 720p X264 Yify English 272.md deleted file mode 100644 index 74230f43f07aee95c38e95fa34fd5c91c2ec8888..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Brokeback Mountain 2005 Bluray 720p X264 Yify English 272.md +++ /dev/null @@ -1,16 +0,0 @@ -

            Brokeback Mountain: A Timeless Love Story on Blu-ray

            -

            Brokeback Mountain is a 2005 romantic drama film directed by Ang Lee and starring Heath Ledger and Jake Gyllenhaal as two cowboys who fall in love in the 1960s. The film is based on a short story by Annie Proulx and won three Academy Awards, including Best Director, Best Adapted Screenplay and Best Original Score.

            -

            If you are a fan of this movie, you might be interested in getting the Blu-ray version, which offers a stunning 720p resolution and a high-quality audio track. The Blu-ray also includes subtitles in English and other languages, as well as some special features such as interviews, behind-the-scenes footage and a documentary about the making of the film.

            -

            Brokeback Mountain 2005 Bluray 720p X264 Yify English 272


            Download Filehttps://urlgoal.com/2uI9uZ



            -

            One of the best sources for downloading Brokeback Mountain on Blu-ray is YIFY, a popular torrent site that provides high-quality movies at small file sizes. You can find the movie by searching for the keyword "Brokeback Mountain 2005 Bluray 720p X264 Yify English 272" on YIFY's website or any other torrent client. The file size is about 1 GB and the download speed is fast and reliable.

            -

            Brokeback Mountain is a movie that will touch your heart and make you appreciate the power of love. Whether you watch it for the first time or revisit it after many years, you will be moved by its beautiful cinematography, its poignant story and its brilliant performances. Don't miss this opportunity to enjoy Brokeback Mountain on Blu-ray with YIFY.


            Brokeback Mountain is not only a love story, but also a powerful exploration of the social and personal pressures that shape the lives of its characters. The film depicts the changing times and attitudes of America from the 1960s to the 1980s, as well as the personal struggles and sacrifices of Ennis and Jack as they try to balance their desires and their duties. The film does not shy away from showing the harsh realities and consequences of their choices, but also celebrates the beauty and intensity of their bond.

            -

The film has received critical acclaim and numerous awards, including four Golden Globes, three BAFTAs and three Oscars. It is widely considered one of the best films of the 21st century and one of the most important films in LGBTQ+ cinema. Roger Ebert gave it four stars and praised it as "a great love story" that "is observant as work by Bergman"[^1^]. Rotten Tomatoes gave it an 88% rating and called it "a beautiful, epic Western" that "is imbued with heartbreaking universality"[^2^]. Many critics and audiences have also lauded the performances of Ledger and Gyllenhaal, who deliver nuanced and authentic portrayals of their complex characters.

            -

            -

            If you are looking for a movie that will make you feel, think and reflect, Brokeback Mountain is a perfect choice. It is a film that transcends genres and labels, and speaks to the human condition in a profound and universal way. You can download it on Blu-ray with YIFY and enjoy it in high definition with excellent sound quality. Don't miss this chance to watch Brokeback Mountain on Blu-ray with YIFY.

            7196e7f11a
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Jolly Pirates Donuts Nutrition.md b/spaces/stomexserde/gpt4-ui/Examples/Jolly Pirates Donuts Nutrition.md deleted file mode 100644 index 10bd1ae2003ea7f4e2eb618dc6176147e13c519d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Jolly Pirates Donuts Nutrition.md +++ /dev/null @@ -1,34 +0,0 @@ - -

            How Many Calories Are in Jolly Pirates Donuts?

            -

            If you love donuts, you might be curious about how many calories are in your favorite treats from Jolly Pirates. Donuts are delicious, but they can also be high in fat, sugar and carbs. Here are some nutrition facts for some of the most popular donuts from Jolly Pirates, based on data from MyFitnessPal and FatSecret.

            -

            Jolly Pirates Donuts Nutrition


            Download Ziphttps://urlgoal.com/2uI6as



            -

            Chocolate-Iced Custard Donut

            -

            This donut has a creamy custard filling and a chocolate glaze on top. It has 310 calories, 17 grams of fat (4.5 grams of saturated fat and 6 grams of trans fat), 37 grams of carbs (1 gram of fiber and 20 grams of sugar) and 3 grams of protein[^1^]. It also has 150 milligrams of sodium, 5 milligrams of cholesterol, 8% of the daily value (DV) of calcium and 8% of the DV of iron.

            -

            Bismark

            -

            This donut is similar to a jelly donut, but with a different shape and filling. It has 520 calories, 21 grams of fat (15 grams of saturated fat), 21 grams of sugar and no protein[^2^]. It does not have any information on carbs, fiber, sodium, cholesterol, vitamins or minerals.

            -

            Jelly Donut

            -

            This donut has a soft dough and a jelly filling, usually raspberry or strawberry. It has 221 calories, 11.7 grams of fat (4.9 grams of saturated fat), 24.7 grams of carbs (0.7 gram of fiber and 10.5 grams of sugar) and 3.6 grams of protein[^3^]. It also has 212 milligrams of sodium, 16 milligrams of cholesterol, 2% of the DV of calcium and 6% of the DV of iron.

            -

            Cream Filled Long John

            -

            This donut is a long bar with a cream filling and a chocolate or vanilla icing on top. It has about 135 calories, 6.1 grams of fat (1.9 grams of saturated fat), 20 grams of carbs (0.4 gram of fiber and 9.8 grams of sugar) and 0.9 gram of protein per ounce[^4^]. A large sandwich size has about 451 calories, 20.4 grams of fat (6.3 grams of saturated fat), 66.7 grams of carbs (1.3 grams of fiber and 32.7 grams of sugar) and 2.8 grams of protein.

            -

            Conclusion

            -

            As you can see, Jolly Pirates donuts vary in their calorie and nutrient content, depending on the type, size and flavor. Some donuts are higher in fat, sugar and carbs than others, while some have more protein, fiber and minerals. If you want to enjoy donuts without going overboard on calories, you can choose smaller portions, opt for plain or glazed donuts over filled or frosted ones, or share your donuts with someone else. You can also balance your donut intake with other healthy foods, such as fruits, vegetables, whole grains, lean proteins and low-fat dairy products.

            - -

            How to Make Your Own Donuts at Home

            -

            If you love donuts, but want to have more control over the ingredients and nutrition, you can try making your own donuts at home. Homemade donuts can be baked or fried, and you can customize them with your favorite fillings, toppings and flavors. Here are some tips and recipes for making your own donuts at home.

            -

            Baked Donuts

            -

            Baked donuts are a healthier alternative to fried donuts, as they have less fat and calories. They are also easier to make, as you don't need a deep fryer or a lot of oil. You can use a donut pan or a muffin tin to shape your donuts, and a piping bag or a ziplock bag to fill them with batter. You can also use a cake mix or a biscuit mix as a shortcut for the dough. Here are some recipes for baked donuts:

            -

            -
              -
            • Baked Doughnuts: This recipe uses a cake mix, eggs, butter and milk to make the dough, and a simple glaze of powdered sugar and water to coat the donuts. You can add food coloring, sprinkles or nuts to the glaze for extra fun.
            • -
            • Baked Cinnamon Sugar Donuts: This recipe uses flour, sugar, baking powder, salt, eggs, milk, butter and vanilla to make the dough, and cinnamon and sugar to coat the donuts. You can also add nutmeg or cardamom to the dough for more flavor.
            • -
            • Baked Chocolate Donuts: This recipe uses flour, cocoa powder, baking powder, salt, sugar, eggs, milk, butter and vanilla to make the dough, and chocolate chips and heavy cream to make the ganache topping. You can also add sprinkles or chopped nuts to the ganache for extra crunch.
            • -
            -

            Fried Donuts

            -

            Fried donuts are the classic way to make donuts, as they have a crispy exterior and a fluffy interior. They are also more versatile, as you can fill them with jam, cream, custard or anything else you like. You will need a deep fryer or a large pot of oil to fry your donuts, and a thermometer to monitor the temperature. You will also need yeast or baking powder to make the dough rise. Here are some recipes for fried donuts:

            -
              -
            • Yeast Doughnuts: This recipe uses flour, sugar, salt, yeast, milk, butter, eggs and vanilla to make the dough, and oil for frying. You can also use a pastry bag or a spoon to fill the donuts with jam or cream after frying.
            • -
            • Old-Fashioned Cake Doughnuts: This recipe uses flour, sugar, baking powder, salt, nutmeg, eggs, sour cream and butter to make the dough, and oil for frying. You can also dip the donuts in powdered sugar or cinnamon sugar after frying.
            • -
            • Glazed Doughnut Holes: This recipe uses biscuit mix, milk and oil to make the dough, and oil for frying. You can also use a simple glaze of powdered sugar and milk to coat the donut holes after frying.
            • -

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/document_store/base_store.py b/spaces/sub314xxl/MetaGPT/metagpt/document_store/base_store.py deleted file mode 100644 index 3dc96c0d6dc6cb19dfe779d891f20b0892a2d07a..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/document_store/base_store.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/28 00:01 -@Author : alexanderwu -@File : base_store.py -""" -from abc import ABC, abstractmethod -from pathlib import Path - -from metagpt.config import Config - - -class BaseStore(ABC): - """FIXME: consider add_index, set_index and think 颗粒度""" - - @abstractmethod - def search(self, *args, **kwargs): - raise NotImplementedError - - @abstractmethod - def write(self, *args, **kwargs): - raise NotImplementedError - - @abstractmethod - def add(self, *args, **kwargs): - raise NotImplementedError - - -class LocalStore(BaseStore, ABC): - def __init__(self, raw_data: Path, cache_dir: Path = None): - if not raw_data: - raise FileNotFoundError - self.config = Config() - self.raw_data = raw_data - if not cache_dir: - cache_dir = raw_data.parent - self.cache_dir = cache_dir - self.store = self._load() - if not self.store: - self.store = self.write() - - def _get_index_and_store_fname(self): - fname = self.raw_data.name.split('.')[0] - index_file = self.cache_dir / f"{fname}.index" - store_file = self.cache_dir / f"{fname}.pkl" - return index_file, store_file - - @abstractmethod - def _load(self): - raise NotImplementedError - - @abstractmethod - def _write(self, docs, metadatas): - raise NotImplementedError diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/utils/test_custom_aio_session.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/utils/test_custom_aio_session.py deleted file mode 100644 index 3a8a7bf7ed4f52c5e6a106fb1ea636b483bc6153..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/utils/test_custom_aio_session.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/7 17:23 -@Author : alexanderwu -@File : test_custom_aio_session.py -""" -from metagpt.logs import logger -from metagpt.provider.openai_api import OpenAIGPTAPI - - -async def try_hello(api): - batch = [[{'role': 'user', 'content': 'hello'}]] - results = await api.acompletion_batch_text(batch) - return results - - -async def aask_batch(api: OpenAIGPTAPI): - results = await api.aask_batch(['hi', 'write python hello world.']) - logger.info(results) - return results diff --git a/spaces/sub314xxl/MusicGen/tests/models/test_musicgen.py b/spaces/sub314xxl/MusicGen/tests/models/test_musicgen.py deleted file mode 100644 index d43cf73763f6c690ab0b277227ac225b286fa143..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MusicGen/tests/models/test_musicgen.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0, extend_stride=2.) 
- return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - def test_generate_long(self): - mg = self.get_musicgen() - mg.max_duration = 3. - mg.set_generation_params(duration=4., extend_stride=2.) - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 32000 * 4] diff --git a/spaces/sukiru/rvc-Blue-archives/README.md b/spaces/sukiru/rvc-Blue-archives/README.md deleted file mode 100644 index 5243511755780e0a6ab8ce5e55538be791529369..0000000000000000000000000000000000000000 --- a/spaces/sukiru/rvc-Blue-archives/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: rvc-Blue-archives -emoji: ':🎤' -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -license: mit -duplicated_from: Faridmaruf/rvc-Blue-archives ---- diff --git a/spaces/sunshineatnoon/TextureScraping/libs/vq_functions.py b/spaces/sunshineatnoon/TextureScraping/libs/vq_functions.py deleted file mode 100644 index 54d14cdecc404633a110a6305908e9a04b029099..0000000000000000000000000000000000000000 --- a/spaces/sunshineatnoon/TextureScraping/libs/vq_functions.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Module functions for the VQVAE - - Adopted from https://github.com/ritheshkumar95/pytorch-vqvae -""" -import torch -from torch.autograd import Function - -class VectorQuantization(Function): - @staticmethod - def forward(ctx, inputs, codebook): - with torch.no_grad(): - embedding_size = codebook.size(1) - inputs_size = inputs.size() - inputs_flatten = inputs.view(-1, embedding_size) - - codebook_sqr = torch.sum(codebook ** 2, dim=1) - inputs_sqr = torch.sum(inputs_flatten ** 2, dim=1, keepdim=True) - - # Compute the distances to the codebook - distances = torch.addmm(codebook_sqr + inputs_sqr, - inputs_flatten, codebook.t(), alpha=-2.0, beta=1.0) - - _, indices_flatten = torch.min(distances, dim=1) - indices = indices_flatten.view(*inputs_size[:-1]) - ctx.mark_non_differentiable(indices) - - return indices - - @staticmethod - def backward(ctx, grad_output): - raise RuntimeError('Trying to call `.grad()` on graph containing ' - '`VectorQuantization`. The function `VectorQuantization` ' - 'is not differentiable. 
Use `VectorQuantizationStraightThrough` ' - 'if you want a straight-through estimator of the gradient.') - -class VectorQuantizationStraightThrough(Function): - @staticmethod - def forward(ctx, inputs, codebook): - indices = vq(inputs, codebook) - indices_flatten = indices.view(-1) - ctx.save_for_backward(indices_flatten, codebook) - ctx.mark_non_differentiable(indices_flatten) - - codes_flatten = torch.index_select(codebook, dim=0, - index=indices_flatten) - codes = codes_flatten.view_as(inputs) - - return (codes, indices_flatten) - - @staticmethod - def backward(ctx, grad_output, grad_indices): - grad_inputs, grad_codebook = None, None - - if ctx.needs_input_grad[0]: - # Straight-through estimator - grad_inputs = grad_output.clone() - if ctx.needs_input_grad[1]: - # Gradient wrt. the codebook - indices, codebook = ctx.saved_tensors - embedding_size = codebook.size(1) - - grad_output_flatten = (grad_output.contiguous() - .view(-1, embedding_size)) - grad_codebook = torch.zeros_like(codebook) - grad_codebook.index_add_(0, indices, grad_output_flatten) - - return (grad_inputs, grad_codebook) - -vq = VectorQuantization.apply -vq_st = VectorQuantizationStraightThrough.apply -__all__ = [vq, vq_st] \ No newline at end of file diff --git a/spaces/supertori/files/stable-diffusion-webui/javascript/extensions.js b/spaces/supertori/files/stable-diffusion-webui/javascript/extensions.js deleted file mode 100644 index 8a0580f706a9511e3391b9170e6684c2655b893a..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/javascript/extensions.js +++ /dev/null @@ -1,49 +0,0 @@ - -function extensions_apply(_, _){ - var disable = [] - var update = [] - - gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){ - if(x.name.startsWith("enable_") && ! x.checked) - disable.push(x.name.substr(7)) - - if(x.name.startsWith("update_") && x.checked) - update.push(x.name.substr(7)) - }) - - restart_reload() - - return [JSON.stringify(disable), JSON.stringify(update)] -} - -function extensions_check(){ - var disable = [] - - gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){ - if(x.name.startsWith("enable_") && ! x.checked) - disable.push(x.name.substr(7)) - }) - - gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){ - x.innerHTML = "Loading..." - }) - - - var id = randomId() - requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function(){ - - }) - - return [id, JSON.stringify(disable)] -} - -function install_extension_from_index(button, url){ - button.disabled = "disabled" - button.value = "Installing..." 
- - textarea = gradioApp().querySelector('#extension_to_install textarea') - textarea.value = url - updateInput(textarea) - - gradioApp().querySelector('#install_extension_button').click() -} diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/README.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/README.md deleted file mode 100644 index 27d5e3c619e31c32ef7e4a3bddd6f490b1027074..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Text To Image Stable Diffusion V1 5 -emoji: 😻 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: Uchenna/text-to-image-stable-diffusion-v1-5 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aneki My Sweet Elder Sister Episode 2 Uncensored English Subbed.11 TOP.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aneki My Sweet Elder Sister Episode 2 Uncensored English Subbed.11 TOP.md deleted file mode 100644 index afca2c3f35dadd7b26ac58bdfbc7cd074a30bc35..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aneki My Sweet Elder Sister Episode 2 Uncensored English Subbed.11 TOP.md +++ /dev/null @@ -1,10 +0,0 @@ - -

            Welcome to the new Hentai Haven the best anime hentai video page, leaving hentaihaven.org in. Hentai Kanojo The Animation Episode 1. Aneki My Sweet Elder Sister 60fps Ep 4. Garden The Animation Episode 2 60FPS.

            -

            aneki my sweet elder sister episode 2 uncensored english subbed.11


            Download Zip ★★★ https://cinurl.com/2uEXQL



            -

            Watch online found 64533 porn videos 'aneki my sweet elder sister the animation ep4' in hight. Mizugi Kanojo The Animation Ep 1 ENG SUB. Hentai Kanojo The Animation Episode 1 SUB ENG Uncensored. 202 results. Mizugi Kanojo The Animation Ep 1 ENG SUB. Aneki My Sweet Elder Sister 60fps Ep 4. Garden The Animation Episode 2 60FPS. Show more. Mizugi Kanojo The Animation Ep 1 ENG SUB. Hentai Kanojo The Animation Episode 1 SUB ENG Uncensored. Hentai Girl - Episode 2 Uncensored. Homura Masshigura - Episode 1 Aneki MY Sweet Elder Sister in HD. katawa josei nippon de neeke nee ke nihon de neeke nee ka hentai. hkvx1 Hentai Kanojo The Animation episode 1 Eng Sub.

            -

            MOM gets submissive with her neighbors - Hentai Uncensored. Episode 1 English Subbed Uncensored. Hentai Kanojo The Animation Episode 1. Aneki My Sweet Elder Sister 60fps Ep 4. Garden The Animation Episode 2 60FPS.

            -

            Hentai Nippon de neeke nee neke ka hentai. katawa josei nippon de neeke nee ke nihon de neeke nee ka hentai. Hentai Girl - Episode 2 Uncensored. Homura Masshigura - Episode 1 Aneki MY Sweet Elder Sister in HD. Hentai Kanojo The Animation episode 1 Eng Sub. Show more. Mizugi Kanojo The Animation Ep 1 ENG SUB. Hentai Kanojo The Animation Episode 1 SUB ENG Uncensored. Garden The Animation Episode 2 60FPS.

            -

            -

            Anime, Manga and more! I love My Sweet Elder Sister Episode 2. You will find incest, strong sex and pantyhose here! To watch the full video, click ''Play''. If you'd like to watch it in a streaming video, click ''Watch Online''. Aniki. My Sweet Elder Sister; Episode 2 uncensored in english subs.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/suvash/food-101-resnet50/README.md b/spaces/suvash/food-101-resnet50/README.md deleted file mode 100644 index 8f509fd21471c534408eedcf45a7306599f46777..0000000000000000000000000000000000000000 --- a/spaces/suvash/food-101-resnet50/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Food Image Classifier (Food-101|ResNet50|fast.ai) -emoji: 🥟 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/sxela/ArcaneGAN-video/app.py b/spaces/sxela/ArcaneGAN-video/app.py deleted file mode 100644 index b66a1d360590542e6bd4d988bcc1a43237315651..0000000000000000000000000000000000000000 --- a/spaces/sxela/ArcaneGAN-video/app.py +++ /dev/null @@ -1,180 +0,0 @@ -""" -Thanks to nateraw for making this scape happen! 6 -This code has been mostly taken from https://huggingface.co/spaces/nateraw/animegan-v2-for-videos/tree/main -""" -import os -os.system("wget https://github.com/Sxela/ArcaneGAN/releases/download/v0.4/ArcaneGANv0.4.jit") - -import sys -from subprocess import call -def run_cmd(command): - try: - print(command) - call(command, shell=True) - except KeyboardInterrupt: - print("Process interrupted") - sys.exit(1) - -print("⬇️ Installing latest gradio==2.4.7b9") -run_cmd("pip install --upgrade pip") -run_cmd('pip install gradio==2.4.7b9') - -import gc -import math - - -import gradio as gr -import numpy as np -import torch -from encoded_video import EncodedVideo, write_video -from PIL import Image -from torchvision.transforms.functional import center_crop, to_tensor - - - - -print("🧠 Loading Model...") -#model = torch.jit.load('./ArcaneGANv0.3.jit').cuda().eval().half() -model = torch.jit.load('./ArcaneGANv0.4.jit').cuda().eval().half() - -# This function is taken from pytorchvideo! -def uniform_temporal_subsample(x: torch.Tensor, num_samples: int, temporal_dim: int = -3) -> torch.Tensor: - """ - Uniformly subsamples num_samples indices from the temporal dimension of the video. - When num_samples is larger than the size of temporal dimension of the video, it - will sample frames based on nearest neighbor interpolation. - Args: - x (torch.Tensor): A video tensor with dimension larger than one with torch - tensor type includes int, long, float, complex, etc. - num_samples (int): The number of equispaced samples to be selected - temporal_dim (int): dimension of temporal to perform temporal subsample. - Returns: - An x-like Tensor with subsampled temporal dimension. - """ - t = x.shape[temporal_dim] - assert num_samples > 0 and t > 0 - # Sample by nearest neighbor interpolation if num_samples > t. - indices = torch.linspace(0, t - 1, num_samples) - indices = torch.clamp(indices, 0, t - 1).long() - return torch.index_select(x, temporal_dim, indices) - - -# This function is taken from pytorchvideo! -def short_side_scale( - x: torch.Tensor, - size: int, - interpolation: str = "bilinear", -) -> torch.Tensor: - """ - Determines the shorter spatial dim of the video (i.e. width or height) and scales - it to the given size. To maintain aspect ratio, the longer side is then scaled - accordingly. - Args: - x (torch.Tensor): A video tensor of shape (C, T, H, W) and type torch.float32. - size (int): The size the shorter side is scaled to. 
- interpolation (str): Algorithm used for upsampling, - options: nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear' | 'area' - Returns: - An x-like Tensor with scaled spatial dims. - """ - assert len(x.shape) == 4 - assert x.dtype == torch.float32 - c, t, h, w = x.shape - if w < h: - new_h = int(math.floor((float(h) / w) * size)) - new_w = size - else: - new_h = size - new_w = int(math.floor((float(w) / h) * size)) - - return torch.nn.functional.interpolate(x, size=(new_h, new_w), mode=interpolation, align_corners=False) - -means = [0.485, 0.456, 0.406] -stds = [0.229, 0.224, 0.225] - -from torchvision import transforms -norm = transforms.Normalize(means,stds) - -norms = torch.tensor(means)[None,:,None,None].cuda() -stds = torch.tensor(stds)[None,:,None,None].cuda() - -def inference_step(vid, start_sec, duration, out_fps, interpolate): - clip = vid.get_clip(start_sec, start_sec + duration) - video_arr = torch.from_numpy(clip['video']).permute(3, 0, 1, 2) - audio_arr = np.expand_dims(clip['audio'], 0) - audio_fps = None if not vid._has_audio else vid._container.streams.audio[0].sample_rate - - x = uniform_temporal_subsample(video_arr, duration * out_fps) - x = center_crop(short_side_scale(x, 512), 512) - x /= 255. - x = x.permute(1, 0, 2, 3) - x = norm(x) - - with torch.no_grad(): - output = model(x.to('cuda').half()) - output = (output * stds + norms).clip(0, 1) * 255. - - output_video = output.permute(0, 2, 3, 1).half().detach().cpu().numpy() - if interpolate == 'Yes': output_video[1:] = output_video[1:]*(0.5) + output_video[:-1]*(0.5) - - return output_video, audio_arr, out_fps, audio_fps - - -def predict_fn(filepath, start_sec, duration, out_fps, interpolate): - # out_fps=12 - gc.collect() - vid = EncodedVideo.from_path(filepath) - for i in range(duration): - video, audio, fps, audio_fps = inference_step( - vid = vid, - start_sec = i + start_sec, - duration = 1, - out_fps = out_fps, - interpolate = interpolate - ) - gc.collect() - if i == 0: - #video_all = video - video_all = np.zeros((duration*out_fps, *video.shape[1:])).astype('uint8') - video_all[i*out_fps:(i+1)*out_fps,...] = video.astype('uint8') - audio_all = audio - else: - #video_all = np.concatenate((video_all, video)) - video_all[i*out_fps:(i+1)*out_fps,...] = video.astype('uint8') - audio_all = np.hstack((audio_all, audio)) - - write_video( - 'out.mp4', - video_all, - fps=fps, - audio_array=audio_all, - audio_fps=audio_fps, - audio_codec='aac' - ) - - del video_all - del audio_all - del vid - gc.collect() - - return 'out.mp4' - - -title = "ArcaneGAN" -description = "Gradio demo for ArcaneGAN, video to Arcane style. To use it, simply upload your video, or click on an example below. Follow me on twitter for more info and updates." -article = "
            ArcaneGan by Alex Spirin | Github Repo |
            visitor badge
            " - - -gr.Interface( - predict_fn, - inputs=[gr.inputs.Video(), gr.inputs.Slider(minimum=0, maximum=300, step=1, default=0), gr.inputs.Slider(minimum=1, maximum=10, step=1, default=2), gr.inputs.Slider(minimum=12, maximum=30, step=6, default=24), gr.inputs.Radio(choices=['Yes','No'], type="value", default='Yes', label='Remove flickering')], - outputs=gr.outputs.Video(), - title='ArcaneGAN On Videos', - description = description, - article = article, - enable_queue=True, - examples=[ - ['obama.webm', 23, 6, 12], - ], - allow_flagging=False -).launch() diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/convert_albert_from_huggingface_to_tencentpretrain.py b/spaces/szukevin/VISOR-GPT/train/scripts/convert_albert_from_huggingface_to_tencentpretrain.py deleted file mode 100644 index a203f5ab0af56dc3af304ae76d9ebee5a0cc7294..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/convert_albert_from_huggingface_to_tencentpretrain.py +++ /dev/null @@ -1,78 +0,0 @@ -import argparse -import collections -import torch - - -parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) -parser.add_argument("--input_model_path", type=str, default="models/input_model.bin", - help=".") -parser.add_argument("--output_model_path", type=str, default="models/output_model.bin", - help=".") - -args = parser.parse_args() - -input_model = torch.load(args.input_model_path, map_location="cpu") - -output_model = collections.OrderedDict() - -output_model["embedding.word.embedding.weight"] = \ - input_model["albert.embeddings.word_embeddings.weight"] -output_model["embedding.pos.embedding.weight"] = \ - input_model["albert.embeddings.position_embeddings.weight"] -output_model["embedding.seg.embedding.weight"] = \ - torch.cat((torch.Tensor([[0]*input_model["albert.embeddings.token_type_embeddings.weight"].size()[1]]), - input_model["albert.embeddings.token_type_embeddings.weight"]), dim=0) -output_model["embedding.layer_norm.gamma"] = \ - input_model["albert.embeddings.LayerNorm.weight"] -output_model["embedding.layer_norm.beta"] = \ - input_model["albert.embeddings.LayerNorm.bias"] - -output_model["encoder.linear.weight"] = \ - input_model["albert.encoder.embedding_hidden_mapping_in.weight"] -output_model["encoder.linear.bias"] = \ - input_model["albert.encoder.embedding_hidden_mapping_in.bias"] -output_model["encoder.transformer.layer_norm_2.gamma"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.full_layer_layer_norm.weight"] -output_model["encoder.transformer.layer_norm_2.beta"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.full_layer_layer_norm.bias"] -output_model["encoder.transformer.self_attn.linear_layers.0.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.query.weight"] -output_model["encoder.transformer.self_attn.linear_layers.0.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.query.bias"] -output_model["encoder.transformer.self_attn.linear_layers.1.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.key.weight"] -output_model["encoder.transformer.self_attn.linear_layers.1.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.key.bias"] -output_model["encoder.transformer.self_attn.linear_layers.2.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.value.weight"] 
-output_model["encoder.transformer.self_attn.linear_layers.2.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.value.bias"] -output_model["encoder.transformer.self_attn.final_linear.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.dense.weight"] -output_model["encoder.transformer.self_attn.final_linear.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.dense.bias"] -output_model["encoder.transformer.layer_norm_1.gamma"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.LayerNorm.weight"] -output_model["encoder.transformer.layer_norm_1.beta"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.attention.LayerNorm.bias"] -output_model["encoder.transformer.feed_forward.linear_1.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.ffn.weight"] -output_model["encoder.transformer.feed_forward.linear_1.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.ffn.bias"] -output_model["encoder.transformer.feed_forward.linear_2.weight"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.ffn_output.weight"] -output_model["encoder.transformer.feed_forward.linear_2.bias"] = \ - input_model["albert.encoder.albert_layer_groups.0.albert_layers.0.ffn_output.bias"] - -output_model["target.sp.linear_1.weight"] = input_model["albert.pooler.weight"] -output_model["target.sp.linear_1.bias"] = input_model["albert.pooler.bias"] -output_model["target.sp.linear_2.weight"] = input_model["sop_classifier.classifier.weight"] -output_model["target.sp.linear_2.bias"] = input_model["sop_classifier.classifier.bias"] -output_model["target.mlm.linear_1.weight"] = input_model["predictions.dense.weight"] -output_model["target.mlm.linear_1.bias"] = input_model["predictions.dense.bias"] -output_model["target.mlm.linear_2.weight"] = input_model["predictions.decoder.weight"] -output_model["target.mlm.linear_2.bias"] = input_model["predictions.bias"] -output_model["target.mlm.layer_norm.gamma"] = input_model["predictions.LayerNorm.weight"] -output_model["target.mlm.layer_norm.beta"] = input_model["predictions.LayerNorm.bias"] - -torch.save(output_model, args.output_model_path) diff --git a/spaces/team-zero-shot-nli/zero-shot-nli/hf_model.py b/spaces/team-zero-shot-nli/zero-shot-nli/hf_model.py deleted file mode 100644 index 6c345e3e82dfec864a6f0cc39684c0140c0ed99a..0000000000000000000000000000000000000000 --- a/spaces/team-zero-shot-nli/zero-shot-nli/hf_model.py +++ /dev/null @@ -1,14 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForSequenceClassification,pipeline -import torch - -def load_model(): - model_name = "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli" - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained(model_name) - classifier = pipeline(task='zero-shot-classification', model=model, tokenizer=tokenizer, framework='pt') - return classifier - -def classifier_zero(classifier,sequence:str,labels:list,multi_class:bool): - outputs=classifier(sequence, labels,multi_label=multi_class) - return outputs['labels'], outputs['scores'] - diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version BETTER.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version BETTER.md deleted file mode 100644 index 
bfb5bbfae53b3653f924874c97d8ddbeafe1f68c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack Full Version BETTER.md +++ /dev/null @@ -1,7 +0,0 @@ - -

Users can freely use any editing capabilities in this tool. It is also one of the best applications used by editors to increase the volume of any video. At this point, users do not need any other software to edit their videos; it gives them the freedom to keep video editing simple. Users may download Evercam 2012 release 20.0.10 crack.

            -

As we have been getting closer to the release of Premiere Pro for 2021, I started to wonder if it was possible to use Adobe Media Encoder in a standalone fashion. After playing around with some clip presets, I noticed that after exporting from Media Encoder, I could actually play the clips in the timeline as though they were native files. It was amazing: I was able to export a 1080p file, preview it in the timeline at over 90% of the original quality, and upload it straight to the cloud. I was very excited, but I was not ready to release it just yet. I wanted to make sure the process worked with both Premiere Pro and Photoshop.

            -

            Adobe Premiere Pro CC 2019 13.0.0 (x64) Crack full version


            Download Ziphttps://bytlly.com/2uGljK



            -

We now have the ability to select the Adobe Media Encoder file type on the media library panel in Adobe Media Encoder. This can be used with some of the products in the Adobe Creative Cloud. I then created a web-based version of Adobe Media Encoder with no license keys. My goal was to have a small client that I could run from my local system, or on a server, that would automatically download Adobe Media Encoder from the Creative Cloud. This way I could test the entire process on a local system without having to launch the cloud server or have another license on the system.

            899543212b
            -
            -
            \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/CRACK AutoCAD 2010 X64 (64bit) Product Key UPD.md b/spaces/terfces0erbo/CollegeProjectV2/CRACK AutoCAD 2010 X64 (64bit) Product Key UPD.md deleted file mode 100644 index 9c80631bd59975334a336a2ceec603c4bc86cf67..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/CRACK AutoCAD 2010 X64 (64bit) Product Key UPD.md +++ /dev/null @@ -1,6 +0,0 @@ -

            CRACK AutoCAD 2010 x64 (64bit) Product key


            DOWNLOADhttps://bytlly.com/2uGjQw



            -
-August 7, 2013 - Product: AutoCAD 2010. Serial number: 667-98989898. Request code: VUKF PJLP UTW8 SDUH 1KXG 57XY QJVN Y03Z. WHAT IS THE ACTIVATION CODE? An ACTIVATION CODE is a series of characters (numbers and/or letters) that can be used to identify a product. During the product registration process, you enter your activation code and the system automatically checks whether it is registered; if it is, a message about successful activation is displayed. The activation code can be changed by the user, but after that the system will not be able to verify its authenticity. 8a78ff9644
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Kevin Macleod Merry Go Mp3 Download.md b/spaces/terfces0erbo/CollegeProjectV2/Kevin Macleod Merry Go Mp3 Download.md deleted file mode 100644 index 58038e0fff1afb30bad169bd3e85fe5874ff9d6f..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Kevin Macleod Merry Go Mp3 Download.md +++ /dev/null @@ -1,21 +0,0 @@ -
            -

            How to Download Kevin MacLeod's Merry Go MP3 for Free

            -

            If you are looking for a comical and playful piano tune for your video, podcast, or game, you might want to check out Merry Go by Kevin MacLeod. This rag-time ditty has a strong melody and is heavy in the bass chords. It is perfect for creating a nostalgic and festive mood.

            -

            kevin macleod merry go mp3 download


            DOWNLOAD ->>> https://bytlly.com/2uGlZV



            -

            Kevin MacLeod is a famous composer who creates royalty-free music for various genres and moods. He has released his music under the Creative Commons Attribution 3.0 Unported license, which means you can use it for any purpose as long as you credit him properly.

            -

            But how can you download his Merry Go MP3 for free? Here are some easy steps to follow:

            -
              -
            1. Go to filmmusic.io, a website that hosts Kevin MacLeod's music. You can also find other songs by him there.
            2. -
            3. Type "Merry Go" in the search bar and click on the result. You will see a page with the song details, such as genre, mood, license, and release date.
            4. -
            5. Click on the download button on the right side of the page. You will be redirected to another page where you can choose the file format and quality.
            6. -
            7. Select MP3 as the file format and click on the download button again. You will see a pop-up window asking you to share the song on social media or donate to support Kevin MacLeod. You can do either of these options or skip them.
            8. -
            9. The download will start automatically. You can find the file in your downloads folder or wherever you saved it.
            10. -
            -

            Congratulations! You have successfully downloaded Kevin MacLeod's Merry Go MP3 for free. Now you can use it for your creative projects and enjoy its energetic and cheerful vibe. Just remember to credit Kevin MacLeod as the composer and include a link to his website or the song page.

            - -

            If you want to learn more about Kevin MacLeod and his music, you can visit his website incompetech.com. There you can find his biography, his blog, and his catalog of over 2,000 songs. You can also support him by donating or purchasing his albums.

            -

            -

            Kevin MacLeod is a talented and generous musician who has contributed a lot to the creative community. His music has been used in thousands of videos, games, podcasts, and other projects. He has also received many awards and recognition for his work.

            -

            We hope you enjoyed this article and found it helpful. If you did, please share it with your friends and leave a comment below. And don't forget to check out Merry Go by Kevin MacLeod and use it for your next project.

            d5da3c52bf
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Active Skills For Reading Book 1 Pdf Download ((INSTALL)).md b/spaces/tialenAdioni/chat-gpt-api/logs/Active Skills For Reading Book 1 Pdf Download ((INSTALL)).md deleted file mode 100644 index ac242d613fc017629752c2806b8a02870589641d..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Active Skills For Reading Book 1 Pdf Download ((INSTALL)).md +++ /dev/null @@ -1,48 +0,0 @@ -
            -

            Active Skills For Reading Book 1 Pdf Download: How to Improve Your Reading Comprehension

            -

            Reading is one of the most important skills for learning and communication. However, many students struggle with reading comprehension, especially when they encounter unfamiliar texts or complex topics. How can you improve your reading comprehension and enjoy reading more?

            -

            One of the best ways to improve your reading comprehension is to use active reading strategies. Active reading means engaging with the text, asking questions, making connections, and applying what you learn. Active reading helps you to understand the main idea, the details, the author's purpose, and the implications of the text.

            -

            Active Skills For Reading Book 1 Pdf Download


            Download ····· https://urlcod.com/2uK8sE



            -

            One of the best resources for learning active reading strategies is Active Skills For Reading Book 1. This book is designed for beginner to intermediate level readers who want to improve their reading skills and confidence. In this article, we will review some of the features and benefits of Active Skills For Reading Book 1, and show you how to download a free PDF copy of the book.

            -

            What is Active Skills For Reading Book 1?

            -

            Active Skills For Reading Book 1 is a textbook that teaches active reading strategies through engaging and relevant texts. The book is divided into 12 units, each focusing on a different topic and skill. Some of the topics include:

            -
              -
            • Culture and Customs
            • -
            • Health and Happiness
            • -
            • Technology and Society
            • -
            • Education and Learning
            • -
            • Travel and Adventure
            • -
            • Environment and Nature
            • -
            -

            Some of the skills include:

            -
              -
            • Finding the main idea
            • -
            • Making inferences
            • -
            • Identifying supporting details
            • -
            • Distinguishing facts from opinions
            • -
            • Summarizing information
            • -
            • Evaluating arguments
            • -
            -

            The book also includes vocabulary exercises, grammar reviews, writing tasks, and discussion questions to help you practice and apply what you learn.

            -

            Why should you use Active Skills For Reading Book 1?

            -

            Active Skills For Reading Book 1 is a great resource for improving your reading comprehension for several reasons:

            -
              -
            1. The book uses authentic texts from various sources, such as newspapers, magazines, websites, blogs, and books. This helps you to develop your reading skills in different contexts and genres.
            2. -
            3. The book covers a wide range of topics that are interesting and relevant to your life. This helps you to engage with the texts and learn new information and perspectives.
            4. -
            5. The book teaches you how to use active reading strategies that can help you understand any text better. These strategies include previewing, predicting, skimming, scanning, questioning, annotating, summarizing, and synthesizing. These strategies help you to interact with the text and deepen your comprehension.
            6. -
            7. The book provides you with plenty of practice and feedback to help you improve your reading skills. Each unit includes pre-reading activities, comprehension checks, vocabulary exercises, grammar reviews, writing tasks, and discussion questions. You can also check your answers at the end of each unit or online.
            8. -
            -

            How can you download Active Skills For Reading Book 1 Pdf?

            -

            If you want to download a free PDF copy of Active Skills For Reading Book 1, you can follow these simple steps:

            -

            -
              -
            1. Go to this link.
            2. -
            3. Click on the green "Download" button.
            4. -
            5. Wait for a few seconds until the download starts.
            6. -
            7. Save the file on your device or cloud storage.
            8. -
            9. Enjoy reading!
            10. -
            - -

Active Skills For Reading Book 1 is a valuable resource for anyone who wants to improve their reading comprehension and enjoy reading more. By using this book, you will learn how to use active reading strategies that can help you understand any text better. You will also expand your vocabulary, grammar, writing, and discussion skills.

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Android Termodinamica Solucionario De Jose Angel Manrique Valadez 32.md b/spaces/tialenAdioni/chat-gpt-api/logs/Android Termodinamica Solucionario De Jose Angel Manrique Valadez 32.md deleted file mode 100644 index 9b864a9a0e9f0f41ef7e381199aaeca7885d93f2..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Android Termodinamica Solucionario De Jose Angel Manrique Valadez 32.md +++ /dev/null @@ -1,70 +0,0 @@ - -

            Android Termodinamica: A New App for Solving Thermodynamics Problems

            -

Are you a student or a professional who needs to solve thermodynamics problems quickly and accurately? Do you want to learn from the best experts in the field? If so, you might be interested in Android Termodinamica, a new app that provides the solucionario (solutions manual) for the book Termodinamica by Jose Angel Manrique Valadez.

            -

            Android termodinamica solucionario de jose angel manrique valadez 32


            DOWNLOAD ⚙⚙⚙ https://urlcod.com/2uK7bZ



            -

            Termodinamica is a classic textbook that covers the fundamentals of thermodynamics, as well as its applications to engineering and science. It has been used by thousands of students and teachers in Latin America and Spain for more than three decades. The book contains hundreds of problems and exercises, ranging from basic concepts to advanced topics.

            -

            Android Termodinamica is an app that allows you to access the solucionario of Termodinamica on your smartphone or tablet. You can search for any problem by its number, chapter, or topic. You can also browse through the solucionario by swiping left or right. The app shows you the detailed steps and explanations for each problem, as well as the final answer. You can zoom in or out, copy, share, or print any solution. You can also bookmark your favorite problems for later review.

            -

            The app is designed to be user-friendly and intuitive. It has a simple and elegant interface that adapts to any screen size and orientation. It works offline, so you don't need an internet connection to use it. It also supports multiple languages, including Spanish, English, Portuguese, and French.

            -

            -

            Android Termodinamica is more than just a solucionario app. It is also a learning tool that helps you master thermodynamics concepts and skills. By using the app, you can check your answers, correct your mistakes, improve your understanding, and enhance your confidence. You can also learn from the author's insights and tips on how to solve thermodynamics problems effectively.

            -

            If you are looking for a reliable and convenient solucionario app for Termodinamica by Jose Angel Manrique Valadez, look no further than Android Termodinamica. It is the perfect companion for your thermodynamics studies and practice. Download it today from Google Play Store and enjoy its benefits!

            - -

            Android Termodinamica is not the only app that can help you with thermodynamics, but it is one of the best rated and most downloaded ones on Google Play Store. Users have praised its accuracy, clarity, completeness, and ease of use. They have also appreciated its offline functionality and language support. Some of the reviews are:

            -
              -
            • "Excellent app. It has helped me a lot with my thermodynamics course. The solutions are very well explained and easy to follow. I recommend it 100%."
            • -
            • "Very good app. It has all the problems of the book and their solutions. It works without internet and it has different languages. It is very useful for studying and reviewing."
            • -
            • "The best app for thermodynamics. It has everything you need to learn and practice this subject. The author is a great teacher and explains everything very clearly. Thank you for this app!"
            • -
            -

            If you want to join these satisfied users and enjoy the benefits of Android Termodinamica, don't hesitate to download it now from Google Play Store. It is free to install and use, but you can also upgrade to the premium version for a small fee and get rid of the ads. You can also contact the developer if you have any questions, suggestions, or feedback.

            -

            Android Termodinamica is the ultimate solucionario app for Termodinamica by Jose Angel Manrique Valadez. It is a must-have for anyone who wants to master thermodynamics and ace their exams. Download it today and discover why it is one of the best Android apps for 2023!

            e753bf7129
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Bosch Pst 53a Manual A Step-by-Step Instruction for Your Jig Saw.md b/spaces/tialenAdioni/chat-gpt-api/logs/Bosch Pst 53a Manual A Step-by-Step Instruction for Your Jig Saw.md deleted file mode 100644 index 4e536df503c2d4c07f5cfee7c75df1cea5c49fb0..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Bosch Pst 53a Manual A Step-by-Step Instruction for Your Jig Saw.md +++ /dev/null @@ -1,101 +0,0 @@ -
            -

            Bosch Pst 53a Manual: How to Use and Maintain Your Jigsaw

            -

            If you are looking for a reliable and versatile jigsaw, the Bosch Pst 53a is a great choice. This tool can cut through wood, metal, plastic, and other materials with ease and precision. But to get the most out of your jigsaw, you need to know how to use and maintain it properly. That's why we have prepared this Bosch Pst 53a manual for you.

            -

            How to Use Your Bosch Pst 53a Jigsaw

            -

            Before you start using your jigsaw, make sure you have read the safety instructions and warnings in the original manual. Also, wear appropriate protective gear, such as gloves, goggles, and ear plugs. Here are the basic steps to use your jigsaw:

            -

            Bosch Pst 53a Manual


            Download File ☆☆☆☆☆ https://urlcod.com/2uK7eY



            -
              -
            1. Plug the jigsaw into a power outlet and insert the blade into the blade holder. Make sure the blade is locked securely and matches the material you want to cut.
            2. -
            3. Adjust the speed dial on the top of the jigsaw according to the material and thickness you are cutting. The higher the number, the faster the speed.
            4. -
            5. Adjust the orbital action lever on the side of the jigsaw according to the type of cut you want to make. The higher the number, the more aggressive the cut.
            6. -
            7. Mark the cutting line on the material with a pencil or a tape. Clamp the material securely on a workbench or a table.
            8. -
            9. Place the base plate of the jigsaw flat on the material and align the blade with the cutting line. Make sure there is enough clearance for the blade to move up and down.
            10. -
            11. Press the trigger switch to start the jigsaw and guide it along the cutting line. Do not force or twist the jigsaw. Let the blade do the work.
            12. -
            13. Release the trigger switch to stop the jigsaw and wait for the blade to stop completely before lifting it from the material.
            14. -
            -

            How to Maintain Your Bosch Pst 53a Jigsaw

            -

            To keep your jigsaw in good condition and extend its lifespan, you need to perform some regular maintenance tasks. Here are some tips to follow:

            -
              -
            • Clean the jigsaw after each use with a soft cloth or a brush. Remove any dust or debris from the air vents, the blade holder, and the base plate.
            • -
            • Lubricate the blade holder and the orbital action mechanism with a few drops of oil every few months.
            • -
            • Check the power cord and plug for any damage or wear. If you notice any signs of deterioration, replace them immediately.
            • -
            • Store your jigsaw in a dry and cool place. Avoid exposing it to extreme temperatures or humidity.
            • -
            -

            Conclusion

            -

            The Bosch Pst 53a is a powerful and versatile jigsaw that can handle various cutting tasks. By following this Bosch Pst 53a manual, you can use and maintain your jigsaw safely and efficiently. If you have any questions or problems with your jigsaw, contact Bosch customer service or visit their website for more information.

            - -

            How to Change the Blade on Your Bosch Pst 53a Jigsaw

            -

            One of the advantages of the Bosch Pst 53a jigsaw is that it has a tool-free blade change system. This means you can easily switch between different blades without using any tools. Here are the steps to change the blade on your jigsaw:

            -

            How to use Bosch Pst 53a jigsaw
            -Bosch Pst 53a spare parts
            -Bosch Pst 53a blade change
            -Bosch Pst 53a manual pdf download
            -Bosch Pst 53a troubleshooting guide
            -Bosch Pst 53a review and rating
            -Bosch Pst 53a price and availability
            -Bosch Pst 53a warranty and service
            -Bosch Pst 53a accessories and attachments
            -Bosch Pst 53a safety and maintenance tips
            -Bosch Pst 53a vs other jigsaw models
            -Bosch Pst 53a user manual online
            -Bosch Pst 53a instruction booklet
            -Bosch Pst 53a specifications and features
            -Bosch Pst 53a replacement blades
            -Bosch Pst 53a cutting capacity and speed
            -Bosch Pst 53a dust extraction system
            -Bosch Pst 53a corded or cordless option
            -Bosch Pst 53a best practices and techniques
            -Bosch Pst 53a customer support and feedback
            -Bosch Pst 53a video tutorial and demonstration
            -Bosch Pst 53a product description and overview
            -Bosch Pst 53a compatibility and suitability
            -Bosch Pst 53a pros and cons
            -Bosch Pst 53a history and development
            -Bosch Pst 53a comparison and contrast
            -Bosch Pst 53a benefits and advantages
            -Bosch Pst 53a drawbacks and limitations
            -Bosch Pst 53a performance and quality
            -Bosch Pst 53a durability and reliability
            -Bosch Pst 53a design and appearance
            -Bosch Pst 53a ergonomics and comfort
            -Bosch Pst 53a noise level and vibration
            -Bosch Pst 53a power consumption and efficiency
            -Bosch Pst 53a weight and dimensions
            -Bosch Pst 53a material and construction
            -Bosch Pst 53a application and usage scenarios
            -Bosch Pst 53a testimonials and case studies
            -Bosch Pst 53a FAQs and answers
            -Bosch Pst 53a tips and tricks
            -Bosch Pst 53a common problems and solutions
            -Bosch Pst 53a user feedback and suggestions
            -Bosch Pst 53a latest updates and news
            -Bosch Pst 53a discount and coupon codes
            -Bosch Pst 53a affiliate program and commission rates
            -Bosch Pst 53a online course and certification program
            -Bosch Pst 53a ebook and audiobook
            -Bosch Pst 53a podcast and webinar
            -Bosch Pst 53a blog post and article

            -
              -
            1. Unplug the jigsaw from the power outlet and wait for the blade to cool down.
            2. -
            3. Press the blade release lever on the front of the jigsaw and pull out the old blade from the blade holder.
            4. -
            5. Insert the new blade into the blade holder until it clicks into place. Make sure the teeth of the blade face forward.
            6. -
            7. Release the blade release lever and check if the blade is locked securely.
            8. -
            -

            You can choose from a variety of blades for your jigsaw, depending on the material and shape you want to cut. Bosch offers blades for wood, metal, plastic, laminate, ceramic, and more. You can also find blades with different lengths, widths, and tooth configurations. For more information on how to select the right blade for your jigsaw, refer to the original manual or visit Bosch website.

            -

            How to Troubleshoot Your Bosch Pst 53a Jigsaw

            -

            If you encounter any problems with your jigsaw, do not panic. There are some common issues that you can easily fix by yourself. Here are some troubleshooting tips for your jigsaw:

            -
              -
            • If your jigsaw does not start or stops working, check if the power cord and plug are connected properly and if there is any damage or loose connection. Also, check if the fuse or circuit breaker in your house is working.
            • -
            • If your jigsaw vibrates excessively or makes loud noises, check if the blade is inserted correctly and if it is suitable for the material you are cutting. Also, check if there is any dirt or debris in the blade holder or the orbital action mechanism.
            • -
            • If your jigsaw cuts poorly or inaccurately, check if the speed and orbital action settings are appropriate for the material and thickness you are cutting. Also, check if the blade is sharp and clean and if it matches the cutting line.
            • -
            • If your jigsaw overheats or smokes, stop using it immediately and unplug it from the power outlet. Wait for it to cool down and contact Bosch customer service or a qualified technician for repair.
            • -
            -

            If you cannot solve the problem by yourself or if you need professional assistance, do not hesitate to contact Bosch customer service or a qualified technician. Do not attempt to open or repair your jigsaw by yourself as this may void your warranty and cause more damage or injury.

            - -

            e753bf7129
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download Microsoft 365 for Business and Boost Your Productivity with Cloud-Based Apps and Services.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download Microsoft 365 for Business and Boost Your Productivity with Cloud-Based Apps and Services.md deleted file mode 100644 index 723effc98fc483b2ae75b4900a53cf0a99fb8ba6..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Download Microsoft 365 for Business and Boost Your Productivity with Cloud-Based Apps and Services.md +++ /dev/null @@ -1,27 +0,0 @@ - -

            How to Download Microsoft 365 for Business

            -

            Microsoft 365 is a comprehensive solution that makes it easy to grow your business, build your brand, and scale securely. It includes a range of cloud-based apps and services that help you collaborate with your team, connect with your customers, and manage your business efficiently. Whether you need web and mobile versions of Word, Excel, and PowerPoint, or advanced tools for email, chat, calling, meeting, project management, database, and data visualization, Microsoft 365 has a plan that suits your needs and budget.

            -

            download microsoft 365 for business


            Download Zip ✑ ✑ ✑ https://urlcod.com/2uK6Ln



            -

            In this article, we will show you how to download and install Microsoft 365 for business on your PC or Mac. Before you begin, make sure your device meets the system requirements for Microsoft 365. You will also need a Microsoft account or a work or school account that is associated with a Microsoft 365 license. If you don't have an account or a license yet, you can sign up for a free trial or purchase a subscription from the Microsoft website.

            -

            Step 1: Sign in to download Microsoft 365

            -

            Go to www.office.com and if you're not already signed in, select Sign in.

            -

            Note: If you're using Microsoft 365 operated by 21 Vianet, sign in with your work or school account to login.partner.microsoftonline.cn. If you're using Microsoft 365 Germany, sign in with your work or school account to portal.office.de.

            -

            Sign in with the account you associated with this version of Microsoft 365. This account can be a Microsoft account, or a work or school account.

            -

            Step 2: Choose your installation options

            -

            After signing in, follow the steps that match the type of account you signed in with.

            -

            -
              -
            • If you signed in with a Microsoft account: From the Microsoft 365 home page select Install Office. Select Install (or depending on your version, Install Office).
            • -
            • If you signed in with a work or school account: From the home page select Install Office (If you set a different start page, go to aka.ms/office-install). Notes: For Microsoft 365 operated by 21 Vianet go to login.partner.microsoftonline.cn/account. For Microsoft 365 Germany go to portal.office.de/account. Select Office 365 apps to begin the installation.
            • -
            -

            The 64-bit version is installed by default unless Microsoft 365 detects you already have a 32-bit version of Microsoft 365 (or a stand-alone Microsoft 365 app such as Project or Visio) installed. In this case, the 32-bit version of Microsoft 365 will be installed instead.

            -

            To change from a 32-bit version to a 64-bit version or vice versa, you need to uninstall Microsoft 365 first (including any stand-alone Microsoft 365 apps you have, such as Project or Visio). Once the uninstall is complete, sign in again to www.office.com and select Other install options, choose the language and version you want (64 or 32-bit), and then select Install. (See Install Visio or Install Project if you need to reinstall those stand-alone apps.)

            -

            Step 3: Run the installation file

            -

            Depending on your browser, select Run (in Edge or Internet Explorer), Setup (in Chrome), or Save File (in Firefox).

            -

            If you see the User Account Control prompt that says Do you want to allow this app to make changes to your device? select Yes.

            -

            The installation begins.

            -

            Your install is finished when you see the phrase You're all set! Office is installed now and an animation plays to show you where to find Office applications on your computer. Select Close.

            -

            Step 4: Activate Microsoft 365

            -

            To start using your new apps and services, launch any Office app such as Word or Excel. In most cases, Office is activated as soon as you start an application.

            ddb901b051
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/FULL Clone XB Version 2.3 Beta The Latest Update on the Most Advanced Cloning Tool for Xbox.md b/spaces/tialenAdioni/chat-gpt-api/logs/FULL Clone XB Version 2.3 Beta The Latest Update on the Most Advanced Cloning Tool for Xbox.md deleted file mode 100644 index e25a520ccbe79a720bda7483c73df20e2fbc829a..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/FULL Clone XB Version 2.3 Beta The Latest Update on the Most Advanced Cloning Tool for Xbox.md +++ /dev/null @@ -1,37 +0,0 @@ -
            -

            FULL Clone XB Version 2.3 Beta: How to Copy Xbox Games Easily

            -

            If you are looking for a way to copy Xbox games without any hassle, you might want to try FULL Clone XB Version 2.3 Beta. This is a tool that enables you to clone Xbox DVD discs to a GDF ISO image, which you can use to create duplicate discs or play on your PC with an emulator.

            -

            FULL Clone XB Version 2.3 Beta


            Download ✑ ✑ ✑ https://urlcod.com/2uK8Z5



            -

            In this article, we will show you how to use FULL Clone XB Version 2.3 Beta to copy Xbox games in a few simple steps. But first, let's see what are the features and benefits of this software.

            -

            What is FULL Clone XB Version 2.3 Beta?

            -

            FULL Clone XB Version 2.3 Beta is a software that was released in 2020 by Rodrigo Zamorano, a developer who claims to have cracked the Xbox encryption and compression algorithms. The software allows you to copy any Xbox game disc to your PC as a GDF ISO image, which is a format that preserves the original structure and quality of the game.

            -

            Some of the advantages of using FULL Clone XB Version 2.3 Beta are:

            -
              -
            • It is fast and easy to use. You only need a DVD drive and a blank disc or a hard drive space to store the ISO image.
            • -
            • It works with any Xbox game, regardless of the region or protection.
            • -
            • It supports 64-bit systems and Windows 10.
            • -
            • It does not require any additional software or hardware.
            • -
            • It is free to download and use.
            • -
            -

            How to use FULL Clone XB Version 2.3 Beta?

            -

            To use FULL Clone XB Version 2.3 Beta, you need to follow these steps:

            -
              -
            1. Download the software from this link. The file size is about 1.74 MB.
            2. -
            3. Extract the ZIP file and run the setup.exe file. Follow the instructions to install the software on your PC.
            4. -
            5. Launch the software and enter the username and password as "xbox".
            6. -
            7. Insert the Xbox game disc that you want to copy into your DVD drive.
            8. -
            9. Select the source drive and the destination folder where you want to save the ISO image.
            10. -
            11. Click on "Start" and wait for the process to complete. Depending on the size of the game, it may take from a few minutes to an hour.
            12. -
            13. Once done, you will have a GDF ISO image of your Xbox game in your destination folder.
            14. -
            -

            How to use the GDF ISO image?

            -

            There are two ways you can use the GDF ISO image that you created with FULL Clone XB Version 2.3 Beta:

            -
              -
            • You can burn it to a blank DVD disc using any burning software that supports ISO files. Then you can play it on your Xbox console as usual.
            • -
            • You can mount it on your PC using a virtual drive software like Daemon Tools or WinCDEmu. Then you can play it on your PC using an Xbox emulator like CXBX-Reloaded or XQEMU.
            • -
            -

            Conclusion

            -

            FULL Clone XB Version 2.3 Beta is a powerful tool that lets you copy Xbox games easily and quickly. It works with any game disc and creates a GDF ISO image that preserves the original quality and structure of the game. You can use this image to create duplicate discs or play on your PC with an emulator.

            -

            If you want to try FULL Clone XB Version 2.3 Beta, you can download it from https://urlcod.com/2uK6it



            ddb901b051
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Football Manager 2013 Patch Update 13.1.1 Crack !!LINK!!.md b/spaces/tialenAdioni/chat-gpt-api/logs/Football Manager 2013 Patch Update 13.1.1 Crack !!LINK!!.md deleted file mode 100644 index a311b99a6a69fd2a2e7ae6dda916374215474585..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Football Manager 2013 Patch Update 13.1.1 Crack !!LINK!!.md +++ /dev/null @@ -1,31 +0,0 @@ -
            -

            How to Update Football Manager 2013 to Version 13.1.1

            -

            Football Manager 2013 is a popular simulation game that lets you manage your own football club and compete with other players around the world. However, to enjoy the game fully, you need to keep it updated with the latest patches and data packs that fix bugs, improve performance, and add new features.

            -

            One of the most important updates for Football Manager 2013 is version 13.1.1, which was released on November 2, 2012. This update includes several improvements and fixes, such as:

            -

            Football Manager 2013 Patch Update 13.1.1 Crack


            Download File >>>>> https://urlcod.com/2uK3rw



            -
              -
            • Improved match engine
            • -
            • Fixed crash issues
            • -
            • Fixed transfer and contract issues
            • -
            • Fixed competition and rule issues
            • -
            • Updated database with latest transfers and player data
            • -
            -

            To update your game to version 13.1.1, you need to follow these steps:

            -
              -
            1. Close the game and launch Steam.
            2. -
            3. Go to your Library and right-click on Football Manager 2013.
            4. -
            5. Select Properties and then go to the Updates tab.
            6. -
            7. Make sure that Automatic Updates are enabled and that the game is up to date.
            8. -
            9. If the game is not up to date, Steam will download and install the update automatically.
            10. -
            11. Once the update is done, you can launch the game and enjoy the new features.
            12. -
            -

            If you have any problems with updating or playing the game, you can visit the official forums[^2^] or contact the support team[^3^].

            - -

            Version 13.1.1 is not the only update available for Football Manager 2013. There are also other patches and data packs that you can download and install to enhance your gaming experience. For example, you can download the FM 2013 Data Updates by pr0, which contains over 103,660 changes up to October 1, 2013 that the game does not include. This data pack updates the transfers, contracts, injuries, retirements, and other details of players and staff from all over the world.

            -

            Another example is the FMF Season Update, which updates the real results and standings of various leagues and competitions up to September 23, 2013. This update also allows you to start a new game on May 20, 2013, which is the end of the 2012/13 season. This way, you can play with the current squads and budgets of your favorite clubs.

            -

            To install these updates, you need to download them from the links provided and extract them to your Football Manager 2013 editor data folder. The default location of this folder is:

            -

            C:\Users\<username>\Documents\Sports Interactive\Football Manager 2013\editor data

            If the folder does not exist, you need to create it. Then, you need to start a new game and select the updates you want to use from the database options. You cannot apply these updates to an existing save game.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution Zip Enhance Your Skills and Knowledge in Kinematics and Dynamics.md b/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution Zip Enhance Your Skills and Knowledge in Kinematics and Dynamics.md deleted file mode 100644 index dbf6f1da1482151642a371eccc96d124663dd249..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Free Download of Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution Zip Enhance Your Skills and Knowledge in Kinematics and Dynamics.md +++ /dev/null @@ -1,129 +0,0 @@ - -

            Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution Free Download Zip

            - -

            Kinematics, dynamics, and design of machinery are important topics for mechanical engineering students and professionals. They deal with the analysis and synthesis of mechanisms and machines, which are systems of rigid bodies that transmit motion and forces. Understanding these topics can help you design and optimize various machines, such as robots, vehicles, cranes, pumps, turbines, etc.
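
            To give you a concrete taste of what this kind of analysis looks like, here is the standard Kutzbach-Gruebler mobility criterion for planar mechanisms, together with a small worked example (this is a general illustration, not an excerpt from the book or its solution manual):

```latex
% Kutzbach-Gruebler mobility criterion for a planar mechanism:
%   M   = mobility (degrees of freedom)
%   n   = number of links, counting the fixed frame
%   j_1 = number of one-degree-of-freedom (lower-pair) joints
%   j_2 = number of two-degree-of-freedom (higher-pair) joints
M = 3(n - 1) - 2 j_1 - j_2

% Worked example: a four-bar linkage has n = 4 links, j_1 = 4 revolute joints,
% and no higher pairs (j_2 = 0), so
M = 3(4 - 1) - 2 \cdot 4 - 0 = 1
```

            A mobility of 1 means that a single input, such as one rotating crank, is enough to drive the whole linkage, which is exactly the kind of question kinematic analysis is designed to answer.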

            -

            Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution Free Download Zip


            Download - https://urlcod.com/2uK8WE



            - -

            One of the best books on kinematics, dynamics, and design of machinery is the second edition of the book by Waldron and Kinzel. This book is a comprehensive and modern treatment of the subject, covering topics such as planar linkages, rolling and sliding contacts, instant centers of velocity, graphical and analytical methods, synthesis of function generators, path generators, and motion generators, cams, gears, and gear trains, balancing of rotating and reciprocating systems, flywheels and governors, and force analysis.

            - -

            The book also provides many problems and exercises that help you apply the concepts and methods learned in the book. However, solving these problems can be challenging and time-consuming. That is why you might want to get the manual solution for the book. The manual solution provides detailed answers and explanations for all the problems and exercises in the book. It can help you check your understanding and application of the subject. It can also help you prepare for exams and assignments.

            - -

            How to get the manual solution for Kinematics Dynamics And Design Of Machinery Second Edition?

            - -

            One way to get the manual solution for Kinematics Dynamics And Design Of Machinery Second Edition is to download it as a zip file from the internet. There are several websites that offer this file for free download, such as:

            - - - -

            To download the file, you just need to click on the link and follow the instructions on the website. You might need to register or sign in to access the file. You might also need to verify that you are not a robot or complete a captcha. After downloading the file, you need to unzip it using a software such as WinZip or 7-Zip. You will then get a PDF file that contains the manual solution.

            - -

            What are the benefits of getting the manual solution for Kinematics Dynamics And Design Of Machinery Second Edition?

            - -

            There are many benefits of getting the manual solution for Kinematics Dynamics And Design Of Machinery Second Edition, such as:

            - -
              -
            • You can learn from the experts. The manual solution was written by Waldron and Kinzel themselves

              -

              What are the advantages of Kinematics Dynamics And Design Of Machinery Second Edition?

              - -

              Kinematics Dynamics And Design Of Machinery Second Edition is a book that has many advantages for mechanical engineering students and professionals. Some of these advantages are:

              - -
                -
              • It is comprehensive and modern. The book covers all the main topics of kinematics, dynamics, and design of machinery, with updated examples and applications. It also incorporates the latest developments and trends in the field, such as computer-aided design and analysis, optimization techniques, and robotics.
              • -
              • It is clear and concise. The book explains the concepts and methods of kinematics, dynamics, and design of machinery in a clear and simple way. It uses diagrams, figures, tables, and charts to illustrate the topics. It also provides summaries, key points, and review questions at the end of each chapter.
              • -
              • It is practical and relevant. The book provides many problems and exercises that help you apply the concepts and methods of kinematics, dynamics, and design of machinery to real-world situations. It also provides case studies and design projects that show you how to design and optimize various mechanisms and machines.
              • -
              - -

              Kinematics Dynamics And Design Of Machinery Second Edition is a book that has many advantages for mechanical engineering students and professionals. It is comprehensive and modern, clear and concise, practical and relevant. It is a valuable resource for learning and mastering the subject of kinematics, dynamics, and design of machinery.

              -

              What are the testimonials of Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution?

              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution has received many positive testimonials from users who have used it. Here are some of the comments that users have left on various websites:

              -

              Kinematics Dynamics And Design Of Machinery Waldron Kinzel Solutions PDF
              -Kinematics Dynamics And Design Of Machinery 2nd Edition Solution Manual
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel Solutions
              -Kinematics Dynamics And Design Of Machinery Second Edition PDF Download
              -Kinematics Dynamics And Design Of Machinery Solution Manual Zip File
              -Kinematics Dynamics And Design Of Machinery 2nd Edition PDF Free Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel PDF
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel PDF
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Manual
              -Kinematics Dynamics And Design Of Machinery Solution Manual Free Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel 2nd Edition Solutions
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel 2nd Edition Solutions
              -Kinematics Dynamics And Design Of Machinery Second Edition Zip Download
              -Kinematics Dynamics And Design Of Machinery Solution Manual PDF Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel Solutions Zip
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel Solutions Zip
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions PDF
              -Kinematics Dynamics And Design Of Machinery Solution Manual PDF Free Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel 2nd Edition PDF
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel 2nd Edition PDF
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Zip
              -Kinematics Dynamics And Design Of Machinery Solution Manual Zip Free Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel PDF Download
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel PDF Download
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Manual PDF
              -Kinematics Dynamics And Design Of Machinery Solution Manual PDF Zip
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel Solutions PDF Download
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel Solutions PDF Download
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Manual Zip
              -Kinematics Dynamics And Design Of Machinery Solution Manual Zip Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel 2nd Edition Solutions PDF
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel 2nd Edition Solutions PDF
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Manual Free Download
              -Kinematics Dynamics And Design Of Machinery Solution Manual Free Zip Download
              -Kinematics Dynamics And Design Of Machinery Waldron Kinzel 2nd Edition Solutions Zip
              -Kinematics Dynamics And Design Of Machinery K. J. Waldron And G. L. Kinzel 2nd Edition Solutions Zip
              -Kinematics Dynamics And Design Of Machinery Second Edition Solutions Manual Download

              - -
              -

              "This manual solution is very helpful and comprehensive. It provides clear and detailed solutions for all the problems and exercises in the book. It also explains the concepts and methods behind the solutions. It has helped me a lot to understand and apply kinematics, dynamics, and design of machinery."

              -- Sixto Gerardo, idoc.pub -
              - -
              -

              "This manual solution is very useful and informative. It provides accurate and complete solutions for all the problems and exercises in the book. It also illustrates the solutions with diagrams, figures, tables, and charts. It has helped me a lot to learn and master kinematics, dynamics, and design of machinery."

              -- Ace Dovan Macaraeg, academia.edu -
              - -
              -

              "This manual solution is very beneficial and practical. It provides simple and concise solutions for all the problems and exercises in the book. It also provides summaries, key points, and review questions at the end of each solution. It has helped me a lot to review and practice kinematics, dynamics, and design of machinery."

              -- masmid, scribd.com -
              - -

              As you can see, Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution has received many positive feedbacks from users who have benefited from it. If you want to join them and improve your skills and knowledge on kinematics, dynamics, and design of machinery, you should get Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution today.

              -

              How does Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution compare to other books and solutions?

              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution is a book and solution that stands out from other books and solutions on kinematics, dynamics, and design of machinery. Some of the reasons why this book and solution are superior to others are:

              - -
                -
              • It is written by the authors of the book. The manual solution was written by Waldron and Kinzel themselves, who are experts and authorities on the subject. They have extensive experience and knowledge on kinematics, dynamics, and design of machinery, as well as teaching and research. They know exactly what the students and instructors need and want from a book and solution.
              • -
              • It is comprehensive and modern. The book and solution cover all the main topics of kinematics, dynamics, and design of machinery, with updated examples and applications. They also incorporate the latest developments and trends in the field, such as computer-aided design and analysis, optimization techniques, and robotics. They are relevant and current for the mechanical engineering students and professionals of today.
              • -
              • It is clear and concise. The book and solution explain the concepts and methods of kinematics, dynamics, and design of machinery in a clear and simple way. They use diagrams, figures, tables, and charts to illustrate the topics. They also provide summaries, key points, and review questions at the end of each chapter. They are easy to understand and follow for the students and instructors.
              • -
              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution is a book and solution that stands out from other books and solutions on kinematics, dynamics, and design of machinery. It is written by the authors of the book, it is comprehensive and modern, it is clear and concise. It is a superior resource for learning and mastering the subject of kinematics, dynamics, and design of machinery.

              -

              What are some tips for using Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution?

              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution is a book and solution that can help you learn and master kinematics, dynamics, and design of machinery. However, to get the most out of this book and solution, you might want to follow some tips, such as:

              - -
                -
              • Read the book before using the solution. The book provides the concepts and methods of kinematics, dynamics, and design of machinery in a clear and concise way. It also provides examples and applications that illustrate the topics. You should read the book carefully and understand the topics before using the solution.
              • -
              • Try to solve the problems and exercises by yourself first. The book provides many problems and exercises that help you apply the concepts and methods of kinematics, dynamics, and design of machinery to real-world situations. You should try to solve these problems and exercises by yourself first, using the book as a reference. This will help you develop your skills and confidence.
              • -
              • Use the solution as a guide and feedback. The solution provides detailed answers and explanations for all the problems and exercises in the book. You should use the solution as a guide and feedback after you have tried to solve the problems and exercises by yourself. You can compare your solutions with the ones provided by the solution and check your understanding and application of the subject.
              • -
              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution is a book and solution that can help you learn and master kinematics, dynamics, and design of machinery. However, to get the most out of this book and solution, you might want to follow some tips, such as: read the book before using the solution, try to solve the problems and exercises by yourself first, use the solution as a guide and feedback.

              -

              Conclusion

              - -

              Kinematics, dynamics, and design of machinery are important topics for mechanical engineering students and professionals. They deal with the analysis and synthesis of mechanisms and machines, which are systems of rigid bodies that transmit motion and forces. Understanding these topics can help you design and optimize various machines, such as robots, vehicles, cranes, pumps, turbines, etc.

              - -

              One of the best resources for learning and mastering these topics is the second edition of the book and solution by Waldron and Kinzel. This book and solution are comprehensive and modern, clear and concise, practical and relevant. They cover all the main topics of kinematics, dynamics, and design of machinery, with updated examples and applications. They also provide many problems and exercises that help you apply the concepts and methods to real-world situations. They also provide detailed answers and explanations for all the problems and exercises in the book.

              - -

              You can get the book and solution from various sources, such as online websites or bookstores. You can also download the solution as a zip file from the internet for free. However, to get the most out of this book and solution, you should follow some tips, such as read the book before using the solution, try to solve the problems and exercises by yourself first, use the solution as a guide and feedback.

              - -

              Kinematics Dynamics And Design Of Machinery Second Edition Manual Solution is a book and solution that can help you learn and master kinematics, dynamics, and design of machinery. It is a valuable resource for mechanical engineering students and professionals. If you are interested in these topics, you should get this book and solution today.

              679dcb208e
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Animated Wallpaper Maker Full Version for Free and Create Stunning Desktop Backgrounds.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Animated Wallpaper Maker Full Version for Free and Create Stunning Desktop Backgrounds.md deleted file mode 100644 index db8bc241c9d1d2fe445b9f4858631eab3bc7dba7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Download Animated Wallpaper Maker Full Version for Free and Create Stunning Desktop Backgrounds.md +++ /dev/null @@ -1,45 +0,0 @@ - -

              How to Download Animated Wallpaper Maker Full Version for Free

              -

              If you are looking for a way to spice up your desktop with some stunning animations, you might want to try Animated Wallpaper Maker. This software allows you to create your own animated wallpapers from any image or video file. You can also customize the animation effects, speed, and quality. And the best part is, you can download Animated Wallpaper Maker full version for free!

              -

              download animated wallpaper maker full crack


              DOWNLOADhttps://urlcod.com/2uK5QI



              -

              What is Animated Wallpaper Maker?

              -

              Animated Wallpaper Maker is a powerful and easy-to-use tool that lets you turn any static image into a live wallpaper. You can use any popular image format, such as JPG, PNG, BMP, or GIF. You can also use video files, such as AVI, MP4, WMV, or MKV. Animated Wallpaper Maker supports all screen resolutions and aspect ratios.

              -

              With Animated Wallpaper Maker, you can apply various animation effects to your images or videos. You can choose from a wide range of presets, such as fire, water, snow, rain, bubbles, smoke, and more. You can also adjust the parameters of each effect, such as color, intensity, direction, and duration. You can even combine multiple effects to create your own unique animations.

              -

              Animated Wallpaper Maker also allows you to save your animated wallpapers as standalone EXE files. This means you can share them with your friends or family without requiring them to install the software. You can also set your animated wallpapers as screensavers or export them as video files.

              -

              -

              How to Download Animated Wallpaper Maker Full Version for Free?

              -

              If you want to download Animated Wallpaper Maker full version for free, you can follow these simple steps:

              -
                -
              1. Go to the official website of Animated Wallpaper Maker: https://www.animated-wallpaper-maker.com/
              2. -
              3. Click on the "Download" button and save the setup file on your computer.
              4. -
              5. Run the setup file and follow the installation instructions.
              6. -
              7. After the installation is complete, launch the software and enter the following license key: AWM-XXXX-XXXX-XXXX-XXXX
              8. -
              9. Enjoy creating and using your own animated wallpapers!
              10. -
              -

              Note: The license key is valid for one year and can be used on up to three computers. You can also renew your license for free after one year.

              -

              Conclusion

              -

              Animated Wallpaper Maker is a great software that lets you create amazing animated wallpapers from any image or video file. You can also customize the animation effects and save your wallpapers as EXE files. And the best part is, you can download Animated Wallpaper Maker full version for free by using the license key provided above. So what are you waiting for? Download Animated Wallpaper Maker today and transform your desktop into a lively and beautiful scene!

              - -

              How to Use Animated Wallpaper Maker?

              -

              Using Animated Wallpaper Maker is very easy and fun. You can create your own animated wallpapers in just a few minutes. Here are the basic steps to use Animated Wallpaper Maker:

              -
                -
              1. Launch the software and click on the "New Project" button.
              2. -
              3. Select an image or a video file from your computer or browse the online gallery of free images and videos.
              4. -
              5. Click on the "Add Animation" button and choose an animation effect from the list. You can also drag and drop the effect on the image or video.
              6. -
              7. Adjust the settings of the animation effect, such as color, intensity, direction, and duration. You can also add more effects or delete them as you wish.
              8. -
              9. Preview your animated wallpaper and click on the "Apply" button to set it as your desktop background.
              10. -
              11. Save your project as an EXE file, a screensaver, or a video file.
              12. -
              -

              You can also edit your existing projects by opening them from the "Open Project" button. You can also import projects from other users or export your projects to share them with others.

              -

              Why Choose Animated Wallpaper Maker?

              -

              Animated Wallpaper Maker is a unique and innovative software that offers many benefits over other similar tools. Here are some of the reasons why you should choose Animated Wallpaper Maker:

              -
                -
              • It is free to download and use. You don't have to pay anything to create and use your own animated wallpapers.
              • -
              • It is easy to use. You don't need any technical skills or experience to use Animated Wallpaper Maker. You can create stunning animations with just a few clicks.
              • -
              • It is versatile. You can use any image or video file as your source material. You can also choose from a wide range of animation effects and customize them to your liking.
              • -
              • It is lightweight and fast. Animated Wallpaper Maker does not consume much system resources or affect your computer's performance. It also runs smoothly and does not cause any lag or glitches.
              • -
              • It is fun and creative. Animated Wallpaper Maker allows you to express your personality and mood through your desktop background. You can also experiment with different effects and combinations to create unique and original animations.
              • -
              -

              Animated Wallpaper Maker is a software that you will love to use and enjoy. It will make your desktop more lively and attractive. It will also impress your friends and family with your creativity and style.

              ddb901b051
              -
              -
              \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ChessBase Online APK Learn from the Masters with Annotated Games.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ChessBase Online APK Learn from the Masters with Annotated Games.md deleted file mode 100644 index 99b07f55545db9049d2ea9c625de745600ba29b1..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ChessBase Online APK Learn from the Masters with Annotated Games.md +++ /dev/null @@ -1,142 +0,0 @@ - -

              ChessBase Online APK: A Powerful Tool for Chess Enthusiasts

              -

            If you are a chess lover who wants to improve your skills, learn from the masters, and stay on top of the latest trends in chess, then you might want to check out ChessBase Online APK. This is an Android app that gives you access to a super-fast database server with five million chess games, updated weekly with the latest developments in chess theory. You can also use this app to prepare against specific opponents, analyze your own games, create dynamic books with custom filters, replay annotated master games from 1857 to 2021, and much more. In this article, we will review the features, benefits, drawbacks, and installation process of ChessBase Online APK.

              -

              chessbase online apk


              DOWNLOAD ►►► https://bltlly.com/2uOmZT



              -

              What is ChessBase Online APK?

              -

            ChessBase Online APK is an Android app developed by ChessBase GmbH, the leading company in chess software and databases. It is a mobile version of the popular ChessBase program that runs on Windows computers, and it lets you access a huge database of chess games on your Android device without requiring an internet connection. You can also use this app to improve your opening skills, prepare against specific opponents, create dynamic books with custom filters, replay annotated master games from 1857 to 2021, load, save and edit PGN files, choose from various board and piece sets, send games by e-mail, and store your games in the cloud.

              -

              Features of ChessBase Online APK

              -

              ChessBase Online APK has many features that make it a powerful tool for chess enthusiasts. Here are some of the main features of this app:

              -

              Access to a huge database of chess games

              -

              ChessBase Online APK gives you instant access to a super fast database server with five million chess games. You can search for games by player name, opening name, ECO code, year, result, or any combination of these criteria. You can also filter the games by rating range, time control, tournament name, or source. You can view the games in full screen mode or split screen mode with board and notation. You can also play through the moves with the arrow keys or by tapping on the board.


              Weekly updates on chess theory


              ChessBase Online APK keeps you updated on the latest developments in chess theory with a database server that is updated weekly. You can see which openings are popular among top players, which variations are trendy or refuted, and which novelties are being played. You can also compare your own repertoire with the current state of chess theory and find out where you need to improve.

Statistics and analysis of chess openings

ChessBase Online APK also shows statistics and analysis for the openings you study, with the board and move statistics in one view. For each position you can see how often a move has been played, what percentage of wins it has scored for each side, and in which rating range it has been played, which makes it easy to judge which variations are worth adding to your repertoire.

              Preparation against specific opponents


              ChessBase Online APK helps you prepare against specific opponents by showing you their games, statistics, and preferences. You can see which openings they play, how they handle different positions, what mistakes they make, and what strengths they have. You can also see how they perform against different types of players, such as attackers, defenders, positional players, or tactical players. You can use this information to tailor your strategy and tactics against them and gain an edge over them.

Dynamic books with custom filters

              ChessBase Online APK allows you to create dynamic books with custom filters that suit your needs and preferences. You can choose which games to include or exclude from your book, based on criteria such as rating range, time control, tournament name, source, or result. You can also adjust the depth and width of your book, depending on how much detail and variation you want. You can save your books and access them anytime, anywhere.

Replay of annotated master games

              ChessBase Online APK enables you to replay annotated master games from 1857 to 2021. You can learn from the best players in history, such as Fischer, Kasparov, Carlsen, and many others. You can see their comments, explanations, evaluations, and suggestions for each move. You can also compare their moves with the engine analysis and see where they deviated from the optimal play. You can also play guess-the-move games and test your skills against the masters.

Load, save and edit PGN files

              ChessBase Online APK allows you to load, save and edit PGN files on your Android device. PGN stands for Portable Game Notation, which is a standard format for storing and exchanging chess games. You can import PGN files from your device storage, e-mail attachments, or cloud services. You can also export PGN files to your device storage, e-mail recipients, or cloud services. You can edit the game information, such as player names, ratings, dates, events, etc. You can also add or delete moves, comments, variations, symbols, or evaluations.
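
If you have never looked inside a PGN file, here is a minimal example of what the format looks like; the players, date, and moves are made up for illustration, and the bracketed lines are the standard PGN header tags:

[Event "Casual game"]
[Site "?"]
[Date "2021.01.01"]
[Round "1"]
[White "Player A"]
[Black "Player B"]
[Result "1-0"]

1. e4 e5 2. Nf3 Nc6 3. Bb5 a6 {a comment in curly braces} 4. Ba4 Nf6 1-0

Because PGN is plain text, the same file can be opened by ChessBase Online APK, by desktop chess programs, or by an ordinary text editor.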

Various board and piece sets

ChessBase Online APK lets you choose from various board and piece sets to customize your chess experience. You can select from different colors, styles, sizes, and shapes of boards and pieces, adjust the brightness and contrast of the board, and rotate or flip the board horizontally or vertically.

E-mail and cloud support

              ChessBase Online APK supports e-mail and cloud services for sending and receiving chess games. You can send games by e-mail to yourself or others with a single tap. You can also receive games by e-mail attachments and open them with ChessBase Online APK. You can also store your games in the cloud using services such as Dropbox or Google Drive. You can access your games from any device with an internet connection.

How to download and install ChessBase Online APK?

              If you are interested in downloading and installing ChessBase Online APK on your Android device, here are the requirements and steps you need to follow:

Requirements and compatibility

              To download and install ChessBase Online APK on your Android device, you need to have:

• An Android device running Android 4.0 or higher
• A stable internet connection
• At least 100 MB of free storage space
• A file manager app (such as ES File Explorer) to locate and install the APK file
• A permission to install apps from unknown sources (you can enable this in your device settings)

              Steps to download and install ChessBase Online APK


              To download and install ChessBase Online APK on your Android device, you need to follow these steps:

1. Go to the official website of ChessBase Online APK (https://en.chessbase.com/pages/download/android) and tap the download button.
2. Wait for the download to complete and locate the APK file in your device storage using a file manager app.
3. Tap on the APK file and confirm the installation by tapping "Install".
4. Wait for the installation to finish and launch the app by tapping "Open".
5. Enjoy using ChessBase Online APK on your Android device.

              How to use ChessBase Online APK?

Once installed, ChessBase Online APK lets you browse a huge database of chess games, improve your opening skills, prepare against specific opponents, create dynamic books with custom filters, replay annotated master games, load, save and edit PGN files, and more. Here are some tips on how to use ChessBase Online APK effectively:

Searching for games and players

              To search for games and players in ChessBase Online APK, you can use the search function in the main menu. You can enter the name of a player, an opening, an ECO code, a year, a result, or any combination of these criteria. You can also use advanced filters to narrow down your search results by rating range, time control, tournament name, or source. You can sort the games by date, rating, or relevance. You can also bookmark your favorite games or players for easy access later.
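
The search described above runs inside the app against ChessBase's database server, so there is no public API to call here. Purely as an illustration of what a player-name search does, the following sketch filters a local PGN file with the third-party python-chess library; the file name and the player name are hypothetical placeholders:

import chess.pgn

# Collect every game in a local PGN file in which a given player took part.
# "my_database.pgn" and "Carlsen" are placeholder values for this sketch.
matches = []
with open("my_database.pgn") as pgn:
    while True:
        game = chess.pgn.read_game(pgn)   # returns None at end of file
        if game is None:
            break
        players = (game.headers.get("White", ""), game.headers.get("Black", ""))
        if any("Carlsen" in name for name in players):
            matches.append(game)

print(f"{len(matches)} games found")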

Viewing and analyzing games

              To view and analyze games in ChessBase Online APK, you can tap on any game in the search results or in your bookmarks. You can see the game information, such as player names, ratings, dates, events, etc. You can also see the board and notation in full screen mode or split screen mode. You can play through the moves with the arrow keys or by tapping on the board. You can also see the statistics and analysis of each move by tapping on the "i" icon. You can see how often each move has been played, what percentage of wins it has scored for each side, what rating range it has been played in, and what source it has come from. You can also see the engine evaluation and suggestions for each move by tapping on the "e" icon. You can adjust the engine strength and depth in the settings menu.
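
The engine evaluation shown by the app is built in, but if you want to reproduce the same kind of score for a position outside the app, a rough sketch using the third-party python-chess library and any UCI engine looks like this; it assumes a Stockfish binary named "stockfish" is installed and on your PATH:

import chess
import chess.engine

# Set up a position by playing a few moves from the starting position.
board = chess.Board()
for san in ["e4", "e5", "Nf3", "Nc6"]:
    board.push_san(san)

# Ask a UCI engine to evaluate the current position to a fixed depth.
engine = chess.engine.SimpleEngine.popen_uci("stockfish")   # assumed binary name
info = engine.analyse(board, chess.engine.Limit(depth=15))
print("Evaluation:", info["score"].white())                 # score from White's point of view
engine.quit()

Deeper searches give more reliable scores but take longer, which is why the app lets you adjust engine strength and depth in its settings.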

Creating and editing PGN files

              To create and edit PGN files in ChessBase Online APK, you can use the editor function in the main menu. You can create a new PGN file by tapping on the "+" icon. You can enter the game information, such as player names, ratings, dates, events, etc. You can also enter the moves manually or by tapping on the board. You can add comments, variations, symbols, or evaluations to each move by tapping on the "c" icon. You can save your PGN file to your device storage or to your cloud service by tapping on the "s" icon. You can also load an existing PGN file from your device storage or from your cloud service by tapping on the "l" icon. You can edit your PGN file by adding or deleting moves, comments, variations, symbols, or evaluations.
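
The editor described above lives inside the app. As a rough outside-the-app equivalent, the sketch below builds a small game with the third-party python-chess library and writes it out as a PGN file; all header values and the file name are placeholders:

import chess
import chess.pgn

# Create a new game and fill in the header tags (the "game information").
game = chess.pgn.Game()
game.headers["Event"] = "Training game"    # placeholder values
game.headers["White"] = "Player A"
game.headers["Black"] = "Player B"
game.headers["Result"] = "*"

# Append moves one by one; each add_variation() call returns the new node.
node = game.add_variation(chess.Move.from_uci("e2e4"))
node = node.add_variation(chess.Move.from_uci("e7e5"))
node.comment = "A symmetrical open game."  # comments appear in braces in the PGN

# Write the game to disk; printing a Game object produces valid PGN text.
with open("training_game.pgn", "w") as f:
    print(game, file=f)

The resulting file can then be imported into ChessBase Online APK like any other PGN file.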

Using dynamic books and filters

              To use dynamic books and filters in ChessBase Online APK, you can use the book function in the main menu. You can create a new book by tapping on the "+" icon. You can choose which games to include or exclude from your book by using filters such as rating range, time control, tournament name, source, or result. You can also adjust the depth and width of your book by using sliders. You can save your book to your device storage or to your cloud service by tapping on the "s" icon. You can also load an existing book from your device storage or from your cloud service by tapping on the "l" icon. You can view your book by tapping on any opening move on the board. You can see how often each move has been played, what percentage of wins it has scored for each side, what rating range it has been played in, and what source it has come from.
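
The statistics behind a dynamic book come from ChessBase's server, but the underlying idea of "how often a move was played and how it scored" is easy to reproduce on a local PGN file. The sketch below, again using the third-party python-chess library, tallies the first move of every game; the file name is a placeholder and only White's wins are counted, to keep the example short:

import chess.pgn
from collections import Counter

plays, white_wins = Counter(), Counter()
with open("my_database.pgn") as pgn:
    while True:
        game = chess.pgn.read_game(pgn)
        if game is None:
            break
        if not game.variations:               # skip games with no moves
            continue
        first = game.variations[0]
        san = game.board().san(first.move)    # first move in algebraic notation
        plays[san] += 1
        if game.headers.get("Result") == "1-0":
            white_wins[san] += 1

for move, count in plays.most_common():
    print(f"{move}: played {count} times, White won {100 * white_wins[move] / count:.0f}%")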

Pros and cons of ChessBase Online APK

              ChessBase Online APK is a powerful tool for chess enthusiasts that offers many benefits and features. However, it also has some drawbacks and limitations that you should be aware of before downloading and installing it. Here are some of the pros and cons of ChessBase Online APK:


              Pros of ChessBase Online APK

• It gives you access to a huge database of chess games that is updated weekly with the latest developments in chess theory.
• It helps you improve your opening skills by checking variations with board and move statistics in one view.
• It helps you prepare against specific opponents by showing you their games, statistics, and preferences.
• It allows you to create dynamic books with custom filters that suit your needs and preferences.
• It enables you to replay annotated master games from 1857 to 2021 and learn from the best players in history.
• It allows you to load, save and edit PGN files on your Android device.
• It supports e-mail and cloud services for sending and receiving chess games.

              Cons of ChessBase Online APK

• It requires an Android device running Android 4.0 or higher, which may not be compatible with some older devices.
• It requires a stable internet connection to download and update the database server, which may not be available in some areas or situations.
• It requires at least 100 MB of free storage space, which may not be enough for some users who have many apps or files on their device.
• It requires a permission to install apps from unknown sources, which may pose a security risk for some users who are not careful about what they download and install.
• It costs $9.99 to download and install, which may be too expensive for some users who are on a tight budget or prefer free apps.

              Conclusion


ChessBase Online APK is a powerful tool for chess enthusiasts that gives you access to a huge database of chess games, updated weekly with the latest developments in chess theory. You can also use this app to improve your opening skills, prepare against specific opponents, create dynamic books with custom filters, replay annotated master games, load, save and edit PGN files, and more. However, the app also has some drawbacks and limitations that you should be aware of before downloading and installing it: you need an Android device running Android 4.0 or higher, a stable internet connection, at least 100 MB of free storage space, permission to install apps from unknown sources, and $9.99 to purchase it. If you are interested in installing ChessBase Online APK on your Android device, you can follow the steps provided in this article. We hope you enjoy ChessBase Online APK and improve your chess skills with it.

FAQs

              Here are some frequently asked questions about ChessBase Online APK:

1. Q: Is ChessBase Online APK safe to download and install?
   A: ChessBase Online APK is safe to download and install as long as you get it from the official website (https://en.chessbase.com/pages/download/android) or from a trusted source. However, you should always be careful about what you download and install on your device and check the permissions and reviews before installing any app.

2. Q: Is ChessBase Online APK free to use?
   A: ChessBase Online APK is not free to use. You need to pay $9.99 to download and install it on your Android device. However, once you have purchased it, you can use it without any additional fees or subscriptions.

3. Q: How often is ChessBase Online APK updated?
   A: ChessBase Online APK is updated weekly with the latest developments in chess theory. You can see the date of the last update on the main menu of the app. You can also check for updates manually by tapping on the "u" icon on the main menu.

4. Q: Can I use ChessBase Online APK offline?
   A: ChessBase Online APK can be used offline once you have downloaded and installed it on your Android device. You can access the database of chess games without requiring an internet connection. However, you need an internet connection to download and update the database server, send and receive games by e-mail or cloud services, or access external links from the app.

5. Q: Can I use ChessBase Online APK on other devices?
   A: ChessBase Online APK is designed for Android devices only. You cannot use it on other devices such as iOS devices, Windows computers, or Mac computers. However, you can use other products from ChessBase GmbH that are compatible with other devices, such as ChessBase for Windows computers or Playchess.com for iOS devices.

              \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py deleted file mode 100644 index 92261487c7af50ede7204c4b65299f2ed333bed1..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/pyparsing/unicode.py +++ /dev/null @@ -1,332 +0,0 @@ -# unicode.py - -import sys -from itertools import filterfalse -from typing import List, Tuple, Union - - -class _lazyclassproperty: - def __init__(self, fn): - self.fn = fn - self.__doc__ = fn.__doc__ - self.__name__ = fn.__name__ - - def __get__(self, obj, cls): - if cls is None: - cls = type(obj) - if not hasattr(cls, "_intern") or any( - cls._intern is getattr(superclass, "_intern", []) - for superclass in cls.__mro__[1:] - ): - cls._intern = {} - attrname = self.fn.__name__ - if attrname not in cls._intern: - cls._intern[attrname] = self.fn(cls) - return cls._intern[attrname] - - -UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] - - -class unicode_set: - """ - A set of Unicode characters, for language-specific strings for - ``alphas``, ``nums``, ``alphanums``, and ``printables``. - A unicode_set is defined by a list of ranges in the Unicode character - set, in a class attribute ``_ranges``. Ranges can be specified using - 2-tuples or a 1-tuple, such as:: - - _ranges = [ - (0x0020, 0x007e), - (0x00a0, 0x00ff), - (0x0100,), - ] - - Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). - - A unicode set can also be defined using multiple inheritance of other unicode sets:: - - class CJK(Chinese, Japanese, Korean): - pass - """ - - _ranges: UnicodeRangeList = [] - - @_lazyclassproperty - def _chars_for_ranges(cls): - ret = [] - for cc in cls.__mro__: - if cc is unicode_set: - break - for rr in getattr(cc, "_ranges", ()): - ret.extend(range(rr[0], rr[-1] + 1)) - return [chr(c) for c in sorted(set(ret))] - - @_lazyclassproperty - def printables(cls): - "all non-whitespace characters in this range" - return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphas(cls): - "all alphabetic characters in this range" - return "".join(filter(str.isalpha, cls._chars_for_ranges)) - - @_lazyclassproperty - def nums(cls): - "all numeric digit characters in this range" - return "".join(filter(str.isdigit, cls._chars_for_ranges)) - - @_lazyclassproperty - def alphanums(cls): - "all alphanumeric characters in this range" - return cls.alphas + cls.nums - - @_lazyclassproperty - def identchars(cls): - "all characters in this range that are valid identifier characters, plus underscore '_'" - return "".join( - sorted( - set( - "".join(filter(str.isidentifier, cls._chars_for_ranges)) - + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" - + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" - + "_" - ) - ) - ) - - @_lazyclassproperty - def identbodychars(cls): - """ - all characters in this range that are valid identifier body characters, - plus the digits 0-9 - """ - return "".join( - sorted( - set( - cls.identchars - + "0123456789" - + "".join( - [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] - ) - ) - ) - ) - - -class pyparsing_unicode(unicode_set): - """ - A namespace class for defining common language unicode_sets. 
- """ - - _ranges: UnicodeRangeList = [(32, sys.maxunicode)] - - class Latin1(unicode_set): - "Unicode set for Latin-1 Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0020, 0x007E), - (0x00A0, 0x00FF), - ] - - class LatinA(unicode_set): - "Unicode set for Latin-A Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0100, 0x017F), - ] - - class LatinB(unicode_set): - "Unicode set for Latin-B Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0180, 0x024F), - ] - - class Greek(unicode_set): - "Unicode set for Greek Unicode Character Ranges" - _ranges: UnicodeRangeList = [ - (0x0342, 0x0345), - (0x0370, 0x0377), - (0x037A, 0x037F), - (0x0384, 0x038A), - (0x038C,), - (0x038E, 0x03A1), - (0x03A3, 0x03E1), - (0x03F0, 0x03FF), - (0x1D26, 0x1D2A), - (0x1D5E,), - (0x1D60,), - (0x1D66, 0x1D6A), - (0x1F00, 0x1F15), - (0x1F18, 0x1F1D), - (0x1F20, 0x1F45), - (0x1F48, 0x1F4D), - (0x1F50, 0x1F57), - (0x1F59,), - (0x1F5B,), - (0x1F5D,), - (0x1F5F, 0x1F7D), - (0x1F80, 0x1FB4), - (0x1FB6, 0x1FC4), - (0x1FC6, 0x1FD3), - (0x1FD6, 0x1FDB), - (0x1FDD, 0x1FEF), - (0x1FF2, 0x1FF4), - (0x1FF6, 0x1FFE), - (0x2129,), - (0x2719, 0x271A), - (0xAB65,), - (0x10140, 0x1018D), - (0x101A0,), - (0x1D200, 0x1D245), - (0x1F7A1, 0x1F7A7), - ] - - class Cyrillic(unicode_set): - "Unicode set for Cyrillic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0400, 0x052F), - (0x1C80, 0x1C88), - (0x1D2B,), - (0x1D78,), - (0x2DE0, 0x2DFF), - (0xA640, 0xA672), - (0xA674, 0xA69F), - (0xFE2E, 0xFE2F), - ] - - class Chinese(unicode_set): - "Unicode set for Chinese Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x2E80, 0x2E99), - (0x2E9B, 0x2EF3), - (0x31C0, 0x31E3), - (0x3400, 0x4DB5), - (0x4E00, 0x9FEF), - (0xA700, 0xA707), - (0xF900, 0xFA6D), - (0xFA70, 0xFAD9), - (0x16FE2, 0x16FE3), - (0x1F210, 0x1F212), - (0x1F214, 0x1F23B), - (0x1F240, 0x1F248), - (0x20000, 0x2A6D6), - (0x2A700, 0x2B734), - (0x2B740, 0x2B81D), - (0x2B820, 0x2CEA1), - (0x2CEB0, 0x2EBE0), - (0x2F800, 0x2FA1D), - ] - - class Japanese(unicode_set): - "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" - _ranges: UnicodeRangeList = [] - - class Kanji(unicode_set): - "Unicode set for Kanji Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x4E00, 0x9FBF), - (0x3000, 0x303F), - ] - - class Hiragana(unicode_set): - "Unicode set for Hiragana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3041, 0x3096), - (0x3099, 0x30A0), - (0x30FC,), - (0xFF70,), - (0x1B001,), - (0x1B150, 0x1B152), - (0x1F200,), - ] - - class Katakana(unicode_set): - "Unicode set for Katakana Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x3099, 0x309C), - (0x30A0, 0x30FF), - (0x31F0, 0x31FF), - (0x32D0, 0x32FE), - (0xFF65, 0xFF9F), - (0x1B000,), - (0x1B164, 0x1B167), - (0x1F201, 0x1F202), - (0x1F213,), - ] - - class Hangul(unicode_set): - "Unicode set for Hangul (Korean) Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x1100, 0x11FF), - (0x302E, 0x302F), - (0x3131, 0x318E), - (0x3200, 0x321C), - (0x3260, 0x327B), - (0x327E,), - (0xA960, 0xA97C), - (0xAC00, 0xD7A3), - (0xD7B0, 0xD7C6), - (0xD7CB, 0xD7FB), - (0xFFA0, 0xFFBE), - (0xFFC2, 0xFFC7), - (0xFFCA, 0xFFCF), - (0xFFD2, 0xFFD7), - (0xFFDA, 0xFFDC), - ] - - Korean = Hangul - - class CJK(Chinese, Japanese, Hangul): - "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" - pass - - class Thai(unicode_set): - "Unicode set for Thai Unicode Character Range" - _ranges: 
UnicodeRangeList = [(0x0E01, 0x0E3A), (0x0E3F, 0x0E5B)] - - class Arabic(unicode_set): - "Unicode set for Arabic Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0600, 0x061B), - (0x061E, 0x06FF), - (0x0700, 0x077F), - ] - - class Hebrew(unicode_set): - "Unicode set for Hebrew Unicode Character Range" - _ranges: UnicodeRangeList = [ - (0x0591, 0x05C7), - (0x05D0, 0x05EA), - (0x05EF, 0x05F4), - (0xFB1D, 0xFB36), - (0xFB38, 0xFB3C), - (0xFB3E,), - (0xFB40, 0xFB41), - (0xFB43, 0xFB44), - (0xFB46, 0xFB4F), - ] - - class Devanagari(unicode_set): - "Unicode set for Devanagari Unicode Character Range" - _ranges: UnicodeRangeList = [(0x0900, 0x097F), (0xA8E0, 0xA8FF)] - - -pyparsing_unicode.Japanese._ranges = ( - pyparsing_unicode.Japanese.Kanji._ranges - + pyparsing_unicode.Japanese.Hiragana._ranges - + pyparsing_unicode.Japanese.Katakana._ranges -) - -# define ranges in language character sets -pyparsing_unicode.العربية = pyparsing_unicode.Arabic -pyparsing_unicode.中文 = pyparsing_unicode.Chinese -pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic -pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek -pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew -pyparsing_unicode.日本語 = pyparsing_unicode.Japanese -pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji -pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana -pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana -pyparsing_unicode.한국어 = pyparsing_unicode.Korean -pyparsing_unicode.ไทย = pyparsing_unicode.Thai -pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/spaces/tomofi/MMOCR/docs/en/code_of_conduct.md b/spaces/tomofi/MMOCR/docs/en/code_of_conduct.md deleted file mode 100644 index efd4305798630a5cd7b17d7cf893b9a811d5501f..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/code_of_conduct.md +++ /dev/null @@ -1,76 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at chenkaidev@gmail.com. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/totaltext/e2e/script.py b/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/totaltext/e2e/script.py deleted file mode 100644 index 5663255701b0d7bc0815f29b75f1d6765a99e244..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/evaluation/totaltext/e2e/script.py +++ /dev/null @@ -1,452 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# encoding=utf8 -from collections import namedtuple -import rrc_evaluation_funcs_total_text as rrc_evaluation_funcs -import importlib -from prepare_results import prepare_results_for_evaluation - -def evaluation_imports(): - """ - evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation. - """ - return { - 'Polygon':'plg', - 'numpy':'np' - } - -def default_evaluation_params(): - """ - default_evaluation_params: Default parameters to use for the validation and evaluation. - """ - return { - 'IOU_CONSTRAINT' :0.5, - 'AREA_PRECISION_CONSTRAINT' :0.5, - 'WORD_SPOTTING' :False, - 'MIN_LENGTH_CARE_WORD' :3, - 'GT_SAMPLE_NAME_2_ID':'gt_img_([0-9]+).txt', - 'DET_SAMPLE_NAME_2_ID':'res_img_([0-9]+).txt', - 'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4) - 'CRLF':False, # Lines are delimited by Windows CRLF format - 'CONFIDENCES':False, #Detections must include confidence value. 
MAP and MAR will be calculated, - 'SPECIAL_CHARACTERS':'!?.:,*"()·[]/\'', - 'ONLY_REMOVE_FIRST_LAST_CHARACTER' : True - } - -def validate_data(gtFilePath, submFilePath, evaluationParams): - """ - Method validate_data: validates that all files in the results folder are correct (have the correct name contents). - Validates also that there are no missing files in the folder. - If some error detected, the method raises the error - """ - gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID']) - - subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True) - - #Validate format of GroundTruth - for k in gt: - rrc_evaluation_funcs.validate_lines_in_file(k,gt[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True) - - #Validate format of results - for k in subm: - if (k in gt) == False : - raise Exception("The sample %s not present in GT" %k) - - rrc_evaluation_funcs.validate_lines_in_file(k,subm[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) - - -def evaluate_method(gtFilePath, submFilePath, evaluationParams): - """ - Method evaluate_method: evaluate method and returns the results - Results. Dictionary with the following values: - - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 } - - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 } - """ - for module,alias in evaluation_imports().items(): - globals()[alias] = importlib.import_module(module) - - def polygon_from_points(points,correctOffset=False): - """ - Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4 - """ - resBoxes=np.empty([1,len(points)],dtype='int32') - for i in range(int(len(points) / 2)): - resBoxes[0, i] = int(points[2*i]) - resBoxes[0, int(len(points) / 2) + i] = int(points[2*i+1]) - - pointMat = resBoxes[0].reshape([2,-1]).T - return plg.Polygon( pointMat) - - def rectangle_to_polygon(rect): - resBoxes=np.empty([1,8],dtype='int32') - resBoxes[0,0]=int(rect.xmin) - resBoxes[0,4]=int(rect.ymax) - resBoxes[0,1]=int(rect.xmin) - resBoxes[0,5]=int(rect.ymin) - resBoxes[0,2]=int(rect.xmax) - resBoxes[0,6]=int(rect.ymin) - resBoxes[0,3]=int(rect.xmax) - resBoxes[0,7]=int(rect.ymax) - - pointMat = resBoxes[0].reshape([2,4]).T - - return plg.Polygon( pointMat) - - def rectangle_to_points(rect): - points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)] - return points - - def get_union(pD,pG): - areaA = pD.area(); - areaB = pG.area(); - return areaA + areaB - get_intersection(pD, pG); - - def get_intersection_over_union(pD,pG): - try: - return get_intersection(pD, pG) / get_union(pD, pG); - except: - return 0 - - def get_intersection(pD,pG): - pInt = pD & pG - if len(pInt) == 0: - return 0 - return pInt.area() - - def compute_ap(confList, matchList,numGtCare): - correct = 0 - AP = 0 - if len(confList)>0: - confList = np.array(confList) - matchList = np.array(matchList) - sorted_ind = np.argsort(-confList) - confList = confList[sorted_ind] - matchList = matchList[sorted_ind] - for n in range(len(confList)): - match = matchList[n] - if match: - correct += 1 - AP += float(correct)/(n + 1) - - if numGtCare>0: - AP /= numGtCare - - return AP - - def transcription_match(transGt,transDet,specialCharacters='!?.:,*"()·[]/\'',onlyRemoveFirstLastCharacterGT=True): - - if 
onlyRemoveFirstLastCharacterGT: - #special characters in GT are allowed only at initial or final position - if (transGt==transDet): - return True - - if specialCharacters.find(transGt[0])>-1: - if transGt[1:]==transDet: - return True - - if specialCharacters.find(transGt[-1])>-1: - if transGt[0:len(transGt)-1]==transDet: - return True - - if specialCharacters.find(transGt[0])>-1 and specialCharacters.find(transGt[-1])>-1: - if transGt[1:len(transGt)-1]==transDet: - return True - return False - else: - #Special characters are removed from the begining and the end of both Detection and GroundTruth - while len(transGt)>0 and specialCharacters.find(transGt[0])>-1: - transGt = transGt[1:] - - while len(transDet)>0 and specialCharacters.find(transDet[0])>-1: - transDet = transDet[1:] - - while len(transGt)>0 and specialCharacters.find(transGt[-1])>-1 : - transGt = transGt[0:len(transGt)-1] - - while len(transDet)>0 and specialCharacters.find(transDet[-1])>-1: - transDet = transDet[0:len(transDet)-1] - - return transGt == transDet - - - def include_in_dictionary(transcription): - """ - Function used in Word Spotting that finds if the Ground Truth transcription meets the rules to enter into the dictionary. If not, the transcription will be cared as don't care - """ - #special case 's at final - if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": - transcription = transcription[0:len(transcription)-2] - - #hypens at init or final of the word - transcription = transcription.strip('-'); - - specialCharacters = "'!?.:,*\"()·[]/"; - for character in specialCharacters: - transcription = transcription.replace(character,' ') - - transcription = transcription.strip() - - if len(transcription) != len(transcription.replace(" ","")) : - return False; - - if len(transcription) < evaluationParams['MIN_LENGTH_CARE_WORD']: - return False; - - notAllowed = "×÷·"; - - range1 = [ ord(u'a'), ord(u'z') ] - range2 = [ ord(u'A'), ord(u'Z') ] - range3 = [ ord(u'À'), ord(u'ƿ') ] - range4 = [ ord(u'DŽ'), ord(u'ɿ') ] - range5 = [ ord(u'Ά'), ord(u'Ͽ') ] - range6 = [ ord(u'-'), ord(u'-') ] - - for char in transcription : - charCode = ord(char) - if(notAllowed.find(char) != -1): - return False - - valid = ( charCode>=range1[0] and charCode<=range1[1] ) or ( charCode>=range2[0] and charCode<=range2[1] ) or ( charCode>=range3[0] and charCode<=range3[1] ) or ( charCode>=range4[0] and charCode<=range4[1] ) or ( charCode>=range5[0] and charCode<=range5[1] ) or ( charCode>=range6[0] and charCode<=range6[1] ) - if valid == False: - return False - - return True - - def include_in_dictionary_transcription(transcription): - """ - Function applied to the Ground Truth transcriptions used in Word Spotting. 
It removes special characters or terminations - """ - #special case 's at final - if transcription[len(transcription)-2:]=="'s" or transcription[len(transcription)-2:]=="'S": - transcription = transcription[0:len(transcription)-2] - - #hypens at init or final of the word - transcription = transcription.strip('-'); - - specialCharacters = "'!?.:,*\"()·[]/"; - for character in specialCharacters: - transcription = transcription.replace(character,' ') - - transcription = transcription.strip() - - return transcription - - perSampleMetrics = {} - - matchedSum = 0 - - Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax') - - gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID']) - subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True) - - numGlobalCareGt = 0; - numGlobalCareDet = 0; - - arrGlobalConfidences = []; - arrGlobalMatches = []; - - for resFile in gt: - - gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile]) - if (gtFile is None) : - raise Exception("The file %s is not UTF-8" %resFile) - - recall = 0 - precision = 0 - hmean = 0 - detCorrect = 0 - iouMat = np.empty([1,1]) - gtPols = [] - detPols = [] - gtTrans = [] - detTrans = [] - gtPolPoints = [] - detPolPoints = [] - gtDontCarePolsNum = [] #Array of Ground Truth Polygons' keys marked as don't Care - detDontCarePolsNum = [] #Array of Detected Polygons' matched with a don't Care GT - detMatchedNums = [] - pairs = [] - - arrSampleConfidences = []; - arrSampleMatch = []; - sampleAP = 0; - - evaluationLog = "" - - pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False) - for n in range(len(pointsList)): - points = pointsList[n] - transcription = transcriptionsList[n] - dontCare = transcription == "###" - if evaluationParams['LTRB']: - gtRect = Rectangle(*points) - gtPol = rectangle_to_polygon(gtRect) - else: - gtPol = polygon_from_points(points) - gtPols.append(gtPol) - gtPolPoints.append(points) - - #On word spotting we will filter some transcriptions with special characters - if evaluationParams['WORD_SPOTTING'] : - if dontCare == False : - if include_in_dictionary(transcription) == False : - dontCare = True - else: - transcription = include_in_dictionary_transcription(transcription) - - gtTrans.append(transcription) - if dontCare: - gtDontCarePolsNum.append( len(gtPols)-1 ) - - evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum)>0 else "\n") - - if resFile in subm: - - detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile]) - - pointsList,confidencesList,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,evaluationParams['CONFIDENCES']) - - for n in range(len(pointsList)): - points = pointsList[n] - transcription = transcriptionsList[n] - - if evaluationParams['LTRB']: - detRect = Rectangle(*points) - detPol = rectangle_to_polygon(detRect) - else: - detPol = polygon_from_points(points) - detPols.append(detPol) - detPolPoints.append(points) - detTrans.append(transcription) - - if len(gtDontCarePolsNum)>0 : - for dontCarePol in gtDontCarePolsNum: - dontCarePol = gtPols[dontCarePol] - intersected_area = get_intersection(dontCarePol,detPol) - pdDimensions = detPol.area() - precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions - if (precision > 
evaluationParams['AREA_PRECISION_CONSTRAINT'] ): - detDontCarePolsNum.append( len(detPols)-1 ) - break - - evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum)>0 else "\n") - - if len(gtPols)>0 and len(detPols)>0: - #Calculate IoU and precision matrixs - outputShape=[len(gtPols),len(detPols)] - iouMat = np.empty(outputShape) - gtRectMat = np.zeros(len(gtPols),np.int8) - detRectMat = np.zeros(len(detPols),np.int8) - for gtNum in range(len(gtPols)): - for detNum in range(len(detPols)): - pG = gtPols[gtNum] - pD = detPols[detNum] - iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG) - - for gtNum in range(len(gtPols)): - for detNum in range(len(detPols)): - if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum : - if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']: - gtRectMat[gtNum] = 1 - detRectMat[detNum] = 1 - #detection matched only if transcription is equal - if evaluationParams['WORD_SPOTTING']: - correct = gtTrans[gtNum].upper() == detTrans[detNum].upper() - else: - correct = transcription_match(gtTrans[gtNum].upper(),detTrans[detNum].upper(),evaluationParams['SPECIAL_CHARACTERS'],evaluationParams['ONLY_REMOVE_FIRST_LAST_CHARACTER'])==True - detCorrect += (1 if correct else 0) - if correct: - detMatchedNums.append(detNum) - pairs.append({'gt':gtNum,'det':detNum,'correct':correct}) - evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + " trans. correct: " + str(correct) + "\n" - - if evaluationParams['CONFIDENCES']: - for detNum in range(len(detPols)): - if detNum not in detDontCarePolsNum : - #we exclude the don't care detections - match = detNum in detMatchedNums - - arrSampleConfidences.append(confidencesList[detNum]) - arrSampleMatch.append(match) - - arrGlobalConfidences.append(confidencesList[detNum]); - arrGlobalMatches.append(match); - - numGtCare = (len(gtPols) - len(gtDontCarePolsNum)) - numDetCare = (len(detPols) - len(detDontCarePolsNum)) - if numGtCare == 0: - recall = float(1) - precision = float(0) if numDetCare >0 else float(1) - sampleAP = precision - else: - recall = float(detCorrect) / numGtCare - precision = 0 if numDetCare==0 else float(detCorrect) / numDetCare - if evaluationParams['CONFIDENCES']: - sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare ) - - hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall) - - matchedSum += detCorrect - numGlobalCareGt += numGtCare - numGlobalCareDet += numDetCare - - perSampleMetrics[resFile] = { - 'precision':precision, - 'recall':recall, - 'hmean':hmean, - 'pairs':pairs, - 'AP':sampleAP, - 'iouMat':[] if len(detPols)>100 else iouMat.tolist(), - 'gtPolPoints':gtPolPoints, - 'detPolPoints':detPolPoints, - 'gtTrans':gtTrans, - 'detTrans':detTrans, - 'gtDontCare':gtDontCarePolsNum, - 'detDontCare':detDontCarePolsNum, - 'evaluationParams': evaluationParams, - 'evaluationLog': evaluationLog - } - - # Compute AP - AP = 0 - if evaluationParams['CONFIDENCES']: - AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt) - - methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt - methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet - methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision) - - methodMetrics = {'precision':methodPrecision, 
'recall':methodRecall,'hmean': methodHmean, 'AP': AP } - - resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics} - - - return resDict; - - - -if __name__=='__main__': - ''' - results_dir: result directory - score_det: score of detection bounding box - score_rec: score of the mask recognition branch - score_rec_seq: score of the sequence recognition branch - lexicon_type: 1 for generic; 2 for weak; 3 for strong - ''' - results_dir = '../../../output/mixtrain/inference/total_text_test/model_0250000_1000_results/' - score_det = 0.05 - score_rec = 0.5 - use_lexicon = False - score_rec_seq = 0.9 - # use_lexicon = True - # score_rec_seq = 0.8 - evaluate_result_path = prepare_results_for_evaluation(results_dir, - use_lexicon=use_lexicon, cache_dir='./cache_files', - score_det=score_det, score_rec=score_rec, score_rec_seq=score_rec_seq) - p = { - 'g': "../gt.zip", - 'o': "./cache_files", - 's': evaluate_result_path - } - rrc_evaluation_funcs.main_evaluation(p,default_evaluation_params,validate_data,evaluate_method) \ No newline at end of file diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/__init__.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/__init__.py deleted file mode 100644 index 2ba1e52473f97615cc41f82aef279fff4d194527..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -from .build import make_data_loader diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index b0add92c398b62aa8fd2141f595cf0941f55d421..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - 
center_ratio=0.2, - ignore_ratio=0.5), - rpn_proposal=dict(nms_post=1000, max_per_img=300), - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(type='RandomSampler', num=256))), - test_cfg=dict( - rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/samplers/random_sampler.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/samplers/random_sampler.py deleted file mode 100644 index c23a7a1f04ae6e09a4122c2fb4e1b037c238f387..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/samplers/random_sampler.py +++ /dev/null @@ -1,81 +0,0 @@ -import torch - -from ..builder import BBOX_SAMPLERS -from .base_sampler import BaseSampler - - -@BBOX_SAMPLERS.register_module() -class RandomSampler(BaseSampler): - """Random sampler. - - Args: - num (int): Number of samples - pos_fraction (float): Fraction of positive samples - neg_pos_up (int, optional): Upper bound number of negative and - positive samples. Defaults to -1. - add_gt_as_proposals (bool, optional): Whether to add ground truth - boxes as proposals. Defaults to True. - """ - - def __init__(self, - num, - pos_fraction, - neg_pos_ub=-1, - add_gt_as_proposals=True, - **kwargs): - from mmdet.core.bbox import demodata - super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, - add_gt_as_proposals) - self.rng = demodata.ensure_rng(kwargs.get('rng', None)) - - def random_choice(self, gallery, num): - """Random select some elements from the gallery. - - If `gallery` is a Tensor, the returned indices will be a Tensor; - If `gallery` is a ndarray or list, the returned indices will be a - ndarray. - - Args: - gallery (Tensor | ndarray | list): indices pool. - num (int): expected sample num. - - Returns: - Tensor or ndarray: sampled indices. - """ - assert len(gallery) >= num - - is_tensor = isinstance(gallery, torch.Tensor) - if not is_tensor: - if torch.cuda.is_available(): - device = torch.cuda.current_device() - else: - device = 'cpu' - gallery = torch.tensor(gallery, dtype=torch.long, device=device) - # This is a temporary fix. We can revert the following code - # when PyTorch fixes the abnormal return of torch.randperm. 
- # See: https://github.com/open-mmlab/mmdetection/pull/5014 - perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) - rand_inds = gallery[perm] - if not is_tensor: - rand_inds = rand_inds.cpu().numpy() - return rand_inds - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Randomly sample some positive samples.""" - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - return self.random_choice(pos_inds, num_expected) - - def _sample_neg(self, assign_result, num_expected, **kwargs): - """Randomly sample some negative samples.""" - neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) - if neg_inds.numel() != 0: - neg_inds = neg_inds.squeeze(1) - if len(neg_inds) <= num_expected: - return neg_inds - else: - return self.random_choice(neg_inds, num_expected) diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/attention.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/attention.py deleted file mode 100644 index 96f8689eddbd3ecf251051a52386e6b82422eeb2..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/attention.py +++ /dev/null @@ -1,275 +0,0 @@ -# File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion). -# See more details in LICENSE. - -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldm.modules.diffusionmodules.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - self.prompt_to_prompt = False - - def forward(self, x, context=None, mask=None): - is_self_attn = context is None - - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if self.prompt_to_prompt and is_self_attn: - # Unlike the original Prompt-to-Prompt which uses cross-attention layers, we copy attention maps for self-attention layers. - # There must be 4 elements in the batch: {conditional, unconditional} x {prompt 1, prompt 2} - assert x.size(0) == 4 - sims = sim.chunk(4) - sim = torch.cat((sims[0], sims[0], sims[2], sims[2])) - - if exists(mask): - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. - Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) - x = self.proj_out(x) - return x + x_in diff --git a/spaces/ttt246/brain/Extension/src/pages/Panel/index.css b/spaces/ttt246/brain/Extension/src/pages/Panel/index.css deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/clip_loss.py b/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/clip_loss.py deleted file mode 100644 index 18176ee8eb0d992d69d5b951d7f36e2efa92a37b..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/models/StyleCLIP/criteria/clip_loss.py +++ /dev/null @@ -1,17 +0,0 @@ - -import torch -import clip - - -class CLIPLoss(torch.nn.Module): - - def __init__(self, opts): - super(CLIPLoss, self).__init__() - self.model, self.preprocess = clip.load("ViT-B/32", device="cuda") - self.upsample = torch.nn.Upsample(scale_factor=7) - self.avg_pool = torch.nn.AvgPool2d(kernel_size=opts.stylegan_size // 32) - - def forward(self, image, text): - image = self.avg_pool(self.upsample(image)) - similarity = 1 - self.model(image, 
text)[0] / 100 - return similarity \ No newline at end of file diff --git a/spaces/unclesamjo/GTalkGPTV01/app.py b/spaces/unclesamjo/GTalkGPTV01/app.py deleted file mode 100644 index 1030de6b355299b6b69ecaabf3dab09a7a954641..0000000000000000000000000000000000000000 --- a/spaces/unclesamjo/GTalkGPTV01/app.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import gradio as gr -import openai -from langdetect import detect - -openai.api_key = os.getenv("OPENAI_API_KEY") # read the key from the environment; never hard-code a secret key in source - -# Global variable to hold the chat history, initialise with system role -conversation = [{"role": "system", "content": "You are an intelligent professor."}] - -# transcribe function to handle the recorded audio input -def transcribe(audio): - if audio is None: - return "No audio input detected." - - # Whisper API - audio_file = open(audio, "rb") - transcript = openai.Audio.transcribe("whisper-1", audio_file) - - # Detect the language of the transcript - #lang = detect(transcript["text"]) - - #if lang != 'en': - - #return 'Non-English input detected. Please speak in English.' - - # Limit the user's input to fit within the model's maximum context length - max_input_tokens = 4097 - len(conversation) * 2 # rough token budget; the slice below truncates by characters - user_input = transcript["text"][:max_input_tokens] - - # Append user's input to conversation - conversation.append({"role": "user", "content": user_input}) - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=conversation - ) - - # System_message is the response from ChatGPT API - system_message = response["choices"][0]["message"]["content"] - - # Append ChatGPT response (assistant role) back to conversation - conversation.append({"role": "assistant", "content": system_message}) - - return system_message - -# Gradio output -bot = gr.Interface(fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text") -bot.launch() diff --git a/spaces/vialibre/edia_lmodels_en/modules/module_logsManager.py b/spaces/vialibre/edia_lmodels_en/modules/module_logsManager.py deleted file mode 100644 index ccd73be6fdd582fd18e470d1745e7f4b20fd8b99..0000000000000000000000000000000000000000 --- a/spaces/vialibre/edia_lmodels_en/modules/module_logsManager.py +++ /dev/null @@ -1,186 +0,0 @@ -from gradio.flagging import FlaggingCallback, _get_dataset_features_info -from gradio.components import IOComponent -from gradio import utils -from typing import Any, List, Optional -from dotenv import load_dotenv -from datetime import datetime -import csv, os, pytz - - -# --- Load environment vars --- -load_dotenv() - - -# --- Classes declaration --- -class DateLogs: - def __init__( - self, - zone: str="America/Argentina/Cordoba" - ) -> None: - - self.time_zone = pytz.timezone(zone) - - def full( - self - ) -> str: - - now = datetime.now(self.time_zone) - return now.strftime("%H:%M:%S %d-%m-%Y") - - def day( - self - ) -> str: - - now = datetime.now(self.time_zone) - return now.strftime("%d-%m-%Y") - -class HuggingFaceDatasetSaver(FlaggingCallback): - """ - A callback that saves each flagged sample (both the input and output data) - to a HuggingFace dataset.
- Example: - import gradio as gr - hf_writer = gr.HuggingFaceDatasetSaver(HF_API_TOKEN, "image-classification-mistakes") - def image_classifier(inp): - return {'cat': 0.3, 'dog': 0.7} - demo = gr.Interface(fn=image_classifier, inputs="image", outputs="label", - allow_flagging="manual", flagging_callback=hf_writer) - Guides: using_flagging - """ - - def __init__( - self, - dataset_name: str=None, - hf_token: str=os.getenv('HF_TOKEN'), - organization: Optional[str]=os.getenv('ORG_NAME'), - private: bool=True, - available_logs: bool=False - ) -> None: - """ - Parameters: - hf_token: The HuggingFace token to use to create (and write the flagged sample to) the HuggingFace dataset. - dataset_name: The name of the dataset to save the data to, e.g. "image-classifier-1" - organization: The organization to save the dataset under. The hf_token must provide write access to this organization. If not provided, saved under the name of the user corresponding to the hf_token. - private: Whether the dataset should be private (defaults to True). - available_logs: Whether flagged samples are actually pushed to the Hub; if False, pushes are skipped (defaults to False). - """ - assert(dataset_name is not None), "Error: Parameter 'dataset_name' cannot be empty!" - - self.hf_token = hf_token - self.dataset_name = dataset_name - self.organization_name = organization - self.dataset_private = private - self.datetime = DateLogs() - self.available_logs = available_logs - - if not available_logs: - print("Push: logs DISABLED!...") - - - def setup( - self, - components: List[IOComponent], - flagging_dir: str - ) -> None: - """ - Params: - flagging_dir (str): local directory where the dataset is cloned, - updated, and pushed from. - """ - if self.available_logs: - - try: - import huggingface_hub - except (ImportError, ModuleNotFoundError): - raise ImportError( - "Package `huggingface_hub` is required for " - "HuggingFaceDatasetSaver but was not found. Try 'pip install huggingface_hub'." - ) - - path_to_dataset_repo = huggingface_hub.create_repo( - repo_id=os.path.join(self.organization_name, self.dataset_name), - token=self.hf_token, - private=self.dataset_private, - repo_type="dataset", - exist_ok=True, - ) - - self.path_to_dataset_repo = path_to_dataset_repo - self.components = components - self.flagging_dir = flagging_dir - self.dataset_dir = self.dataset_name - - self.repo = huggingface_hub.Repository( - local_dir=self.dataset_dir, - clone_from=path_to_dataset_repo, - use_auth_token=self.hf_token, - ) - - self.repo.git_pull(lfs=True) - - # Should filename be user-specified?
- # log_file_name = self.datetime.day()+"_"+self.flagging_dir+".csv" - self.log_file = os.path.join(self.dataset_dir, self.flagging_dir+".csv") - - def flag( - self, - flag_data: List[Any], - flag_option: Optional[str]=None, - flag_index: Optional[int]=None, - username: Optional[str]=None, - ) -> int: - - if self.available_logs: - self.repo.git_pull(lfs=True) - - is_new = not os.path.exists(self.log_file) - - with open(self.log_file, "a", newline="", encoding="utf-8") as csvfile: - writer = csv.writer(csvfile) - - # File previews for certain input and output types - infos, file_preview_types, headers = _get_dataset_features_info( - is_new, self.components - ) - - # Generate the headers and dataset_infos - if is_new: - headers = [ - component.label or f"component {idx}" - for idx, component in enumerate(self.components) - ] + [ - "flag", - "username", - "timestamp", - ] - writer.writerow(utils.sanitize_list_for_csv(headers)) - - # Generate the row corresponding to the flagged sample - csv_data = [] - for component, sample in zip(self.components, flag_data): - save_dir = os.path.join( - self.dataset_dir, - utils.strip_invalid_filename_characters(component.label), - ) - filepath = component.deserialize(sample, save_dir, None) - csv_data.append(filepath) - if isinstance(component, tuple(file_preview_types)): - csv_data.append( - "{}/resolve/main/{}".format(self.path_to_dataset_repo, filepath) - ) - - csv_data.append(flag_option if flag_option is not None else "") - csv_data.append(username if username is not None else "") - csv_data.append(self.datetime.full()) - writer.writerow(utils.sanitize_list_for_csv(csv_data)) - - - with open(self.log_file, "r", encoding="utf-8") as csvfile: - line_count = len([None for row in csv.reader(csvfile)]) - 1 - - self.repo.push_to_hub(commit_message="Flagged sample #{}".format(line_count)) - - else: - line_count = 0 - print("Logs: Virtual push...") - - return line_count \ No newline at end of file diff --git a/spaces/vinid/fashion-clip-app/app.py b/spaces/vinid/fashion-clip-app/app.py deleted file mode 100644 index 528af50723738ad20570b373ed433188036839dd..0000000000000000000000000000000000000000 --- a/spaces/vinid/fashion-clip-app/app.py +++ /dev/null @@ -1,85 +0,0 @@ -from fashion_clip.fashion_clip import FashionCLIP -import pickle -import subprocess -import streamlit as st -import numpy as np -from PIL import Image -import os - -st.sidebar.write("# FashionCLIP Resources") -st.sidebar.write("We have several resources related to FashionCLIP.") -st.sidebar.write("## Documentation") -st.sidebar.write("* 📚 [Blog Post](https://towardsdatascience.com/teaching-clip-some-fashion-3005ac3fdcc3)") -st.sidebar.write("* 📚 [Paper](https://www.nature.com/articles/s41598-022-23052-9)") - -st.sidebar.write("## Code") -st.sidebar.write("* 📚 [Repo](https://github.com/patrickjohncyh/fashion-clip)") -st.sidebar.write("* 📚 [Colab](https://colab.research.google.com/drive/1Z1hAxBnWjF76bEi9KQ6CMBBEmI_FVDrW#scrollTo=FzUQGwS1lhGS)") -st.sidebar.write("* 📚 [HuggingFace Weights](https://huggingface.co/patrickjohncyh/fashion-clip)") - - -st.write("# FashionCLIP. 
A Foundation Model for Fashion.") - -st.write("[![Youtube Video](https://img.shields.io/badge/youtube-video-red)](https://www.youtube.com/watch?v=uqRSc-KSA1Y) [![HuggingFace Model](https://img.shields.io/badge/HF%20Model-Weights-yellow)](https://huggingface.co/patrickjohncyh/fashion-clip) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Z1hAxBnWjF76bEi9KQ6CMBBEmI_FVDrW?usp=sharing) [![Medium Blog Post](https://raw.githubusercontent.com/aleen42/badges/master/src/medium.svg)](https://towardsdatascience.com/teaching-clip-some-fashion-3005ac3fdcc3) [![Open in Streamlit](https://static.streamlit.io/badges/streamlit_badge_black_white.svg)](https://huggingface.co/spaces/vinid/fashion-clip-app)") - - -st.write("This web app uses FashionCLIP, our new foundation model for fashion, to find clothing items based on a text description of the item you want to find.") - -st.write("The model will find the item most similar to your query, given a list of 5000 items released by Alexey Grigorev [here](https://github.com/alexeygrigorev/clothing-dataset).") -st.write("Note that some queries might not return anything useful. This could be due either to the model's limitations or to the fact that the item you are looking for is missing from the collection.") -st.write("You can find out more about FashionCLIP in the [repo](https://github.com/patrickjohncyh/fashion-clip) or in our [paper](https://www.nature.com/articles/s41598-022-23052-9).") - -@st.cache_resource -def load_embedding_file(): - with open("embeddings_and_paths.pkl", "rb") as filino: - data = pickle.load(filino) - - images = data["images_path"] - embeddings = data["embeddings"] - return images, embeddings - -fclip = FashionCLIP('fashion-clip') - -if not os.path.exists("clothing-dataset"): - subprocess.run("git clone https://github.com/alexeygrigorev/clothing-dataset", shell=True) - -st.write("## Simple FashionCLIP search engine") -query = st.text_input("Enter a description of the clothing item you want to find", "a red dress") - -images, image_embeddings = load_embedding_file() - -text_embedding = fclip.encode_text([query], 32)[0] - -id_of_matched_object = np.argmax(text_embedding.dot(image_embeddings.T)) - -image = Image.open(images[id_of_matched_object]) - -st.image(image) - - -st.write("If you use FashionCLIP in your work, please cite our paper:") -st.write(""" -``` -@Article{Chia2022, - title="Contrastive language and vision learning of general fashion concepts", - author="Chia, Patrick John - and Attanasio, Giuseppe - and Bianchi, Federico - and Terragni, Silvia - and Magalh{\~a}es, Ana Rita - and Goncalves, Diogo - and Greco, Ciro - and Tagliabue, Jacopo", - journal="Scientific Reports", - year="2022", - month="Nov", - day="08", - volume="12", - number="1", - pages="18958", - issn="2045-2322", - doi="10.1038/s41598-022-23052-9", - url="https://doi.org/10.1038/s41598-022-23052-9" -} -``` -""") - diff --git a/spaces/vinthony/SadTalker/src/face3d/models/losses.py b/spaces/vinthony/SadTalker/src/face3d/models/losses.py deleted file mode 100644 index 09d6a85870af1ef2b857e4a3fdd4b2f7fc991317..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/face3d/models/losses.py +++ /dev/null @@ -1,113 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from kornia.geometry import warp_affine -import torch.nn.functional as F - -def resize_n_crop(image, M, dsize=112): - # image: (b, c, h, w) - # M : (b, 2, 3) - return warp_affine(image, M, dsize=(dsize, dsize),
align_corners=True) - -### perceptual level loss -class PerceptualLoss(nn.Module): - def __init__(self, recog_net, input_size=112): - super(PerceptualLoss, self).__init__() - self.recog_net = recog_net - self.preprocess = lambda x: 2 * x - 1 - self.input_size=input_size - def forward(self, imageA, imageB, M): - """ - 1 - cosine distance - Parameters: - imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order - imageB --same as imageA - """ - - imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size)) - imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size)) - - # freeze bn - self.recog_net.eval() - - id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2) - id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2) - cosine_d = torch.sum(id_featureA * id_featureB, dim=-1) - # assert torch.sum((cosine_d > 1).float()) == 0 - return torch.sum(1 - cosine_d) / cosine_d.shape[0] - -def perceptual_loss(id_featureA, id_featureB): - cosine_d = torch.sum(id_featureA * id_featureB, dim=-1) - # assert torch.sum((cosine_d > 1).float()) == 0 - return torch.sum(1 - cosine_d) / cosine_d.shape[0] - -### image level loss -def photo_loss(imageA, imageB, mask, eps=1e-6): - """ - l2 norm (with sqrt, to ensure backward stability, use eps, otherwise NaN may occur) - Parameters: - imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order - imageB --same as imageA - """ - loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask - loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device)) - return loss - -def landmark_loss(predict_lm, gt_lm, weight=None): - """ - weighted mse loss - Parameters: - predict_lm --torch.tensor (B, 68, 2) - gt_lm --torch.tensor (B, 68, 2) - weight --numpy.array (1, 68) - """ - if weight is None: - weight = np.ones([68]) - weight[28:31] = 20 - weight[-8:] = 20 - weight = np.expand_dims(weight, 0) - weight = torch.tensor(weight).to(predict_lm.device) - loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight - loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1]) - return loss - - -### regularization -def reg_loss(coeffs_dict, opt=None): - """ - l2 norm without the sqrt, from yu's implementation (mse) - tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss - Parameters: - coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans - - """ - # coefficient regularization to ensure plausible 3d faces - if opt: - w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex - else: - w_id, w_exp, w_tex = 1, 1, 1 - creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \ - w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \ - w_tex * torch.sum(coeffs_dict['tex'] ** 2) - creg_loss = creg_loss / coeffs_dict['id'].shape[0] - - # gamma regularization to ensure a nearly-monochromatic light - gamma = coeffs_dict['gamma'].reshape([-1, 3, 9]) - gamma_mean = torch.mean(gamma, dim=1, keepdims=True) - gamma_loss = torch.mean((gamma - gamma_mean) ** 2) - - return creg_loss, gamma_loss - -def reflectance_loss(texture, mask): - """ - minimize texture variance (mse), albedo regularization to ensure a uniform skin albedo - Parameters: - texture --torch.tensor, (B, N, 3) - mask --torch.tensor, (N), 1 or 0 - - """ - mask = mask.reshape([1, mask.shape[0], 1]) - texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask) - loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask)) - return loss - - diff --git
a/spaces/vumichien/Generate_human_motion/pyrender/pyrender/version.py b/spaces/vumichien/Generate_human_motion/pyrender/pyrender/version.py deleted file mode 100644 index a33fc87f61f528780e3319a5160769cc84512b1b..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/pyrender/pyrender/version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = '0.1.45' diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/datasets/drive.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/datasets/drive.py deleted file mode 100644 index 06e8ff606e0d2a4514ec8b7d2c6c436a32efcbf4..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/configs/_base_/datasets/drive.py +++ /dev/null @@ -1,59 +0,0 @@ -# dataset settings -dataset_type = 'DRIVEDataset' -data_root = 'data/DRIVE' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -img_scale = (584, 565) -crop_size = (64, 64) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=img_scale, - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type='RepeatDataset', - times=40000, - dataset=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/training', - ann_dir='annotations/training', - pipeline=train_pipeline)), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='images/validation', - ann_dir='annotations/validation', - pipeline=test_pipeline)) diff --git a/spaces/vumichien/canvas_controlnet/cldm/hack.py b/spaces/vumichien/canvas_controlnet/cldm/hack.py deleted file mode 100644 index 454361e9d036cd1a6a79122c2fd16b489e4767b1..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/cldm/hack.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import einops - -import ldm.modules.encoders.modules -import ldm.modules.attention - -from transformers import logging -from ldm.modules.attention import default - - -def disable_verbosity(): - logging.set_verbosity_error() - print('logging improved.') - return - - -def enable_sliced_attention(): - ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward - print('Enabled sliced_attention.') - return - - -def hack_everything(clip_skip=0): - disable_verbosity() - ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward - ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip - print('Enabled clip hacks.') - return - - -# Written by Lvmin -def _hacked_clip_forward(self, text): 
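-    # Summary of the logic below: the prompt is tokenized without special tokens or truncation, split into -    # three 75-token chunks (anything beyond 225 tokens is dropped), each chunk is wrapped with BOS/EOS and -    # padded to 77 tokens, the chunks are encoded in one batch (using hidden_states[-clip_skip] plus the final -    # layer norm when clip_skip > 1), and the three encodings are concatenated along the sequence axis.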
- PAD = self.tokenizer.pad_token_id - EOS = self.tokenizer.eos_token_id - BOS = self.tokenizer.bos_token_id - - def tokenize(t): - return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] - - def transformer_encode(t): - if self.clip_skip > 1: - rt = self.transformer(input_ids=t, output_hidden_states=True) - return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) - else: - return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state - - def split(x): - return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] - - def pad(x, p, i): - return x[:i] if len(x) >= i else x + [p] * (i - len(x)) - - raw_tokens_list = tokenize(text) - tokens_list = [] - - for raw_tokens in raw_tokens_list: - raw_tokens_123 = split(raw_tokens) - raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] - raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] - tokens_list.append(raw_tokens_123) - - tokens_list = torch.IntTensor(tokens_list).to(self.device) - - feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') - y = transformer_encode(feed) - z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) - - return z - - -# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py -def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - limit = k.shape[0] - att_step = 1 - q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) - k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0)) - v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) - - q_chunks.reverse() - k_chunks.reverse() - v_chunks.reverse() - sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - del k, q, v - for i in range(0, limit, att_step): - q_buffer = q_chunks.pop() - k_buffer = k_chunks.pop() - v_buffer = v_chunks.pop() - sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale - - del k_buffer, q_buffer - # attention, what we cannot get enough of, by chunks - - sim_buffer = sim_buffer.softmax(dim=-1) - - sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) - del v_buffer - sim[i:i + att_step, :, :] = sim_buffer - - del sim_buffer - sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) - return self.to_out(sim) diff --git a/spaces/wahyupermana10/churn_prediction/README.md b/spaces/wahyupermana10/churn_prediction/README.md deleted file mode 100644 index ac5798f95e8b81b433ca477dc5200da1eba702c1..0000000000000000000000000000000000000000 --- a/spaces/wahyupermana10/churn_prediction/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Churn Prediction -emoji: 👁 -colorFrom: indigo -colorTo: blue -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/whitead/paper-qa/app.py b/spaces/whitead/paper-qa/app.py deleted file mode 100644 index 5d862060931aa5ab8cc570467e788067926e359a..0000000000000000000000000000000000000000 --- a/spaces/whitead/paper-qa/app.py +++ /dev/null @@ -1,207 +0,0 @@ -import gradio as gr -import paperqa -import pickle -import pandas as pd -from pathlib import Path -import requests 
-import zipfile -import io -import tempfile -import os - - -css_style = """ - -.gradio-container { - font-family: "IBM Plex Mono"; -} -""" - - -def request_pathname(files, data, openai_api_key): - if files is None: - return [[]] - for file in files: - # make sure we're not duplicating things in the dataset - if file.name in [x[0] for x in data]: - continue - data.append([file.name, None, None]) - return [[len(data), 0]], data, data, validate_dataset(pd.DataFrame(data), openai_api_key) - - -def validate_dataset(dataset, openapi): - docs_ready = dataset.iloc[-1, 0] != "" - if docs_ready and type(openapi) is str and len(openapi) > 0: - return "✨Ready✨" - elif docs_ready: - return "⚠️Waiting for key⚠️" - elif type(openapi) is str and len(openapi) > 0: - return "⚠️Waiting for documents⚠️" - else: - return "⚠️Waiting for documents and key⚠️" - - -def make_stats(docs): - return [[len(docs.doc_previews), sum([x[0] for x in docs.doc_previews])]] - - -# , progress=gr.Progress()): -def do_ask(question, button, openapi, dataset, length, do_marg, k, max_sources, docs): - passages = "" - docs_ready = dataset.iloc[-1, 0] != "" - if button == "✨Ready✨" and type(openapi) is str and len(openapi) > 0 and docs_ready: - os.environ['OPENAI_API_KEY'] = openapi.strip() - if docs is None: - docs = paperqa.Docs() - # dataset is pandas dataframe - for _, row in dataset.iterrows(): - try: - docs.add(row['filepath'], row['citation string'], - key=row['key'], disable_check=True) - yield "", "", "", docs, make_stats(docs) - except Exception as e: - pass - else: - yield "", "", "", docs, [[0, 0]] - #progress(0, "Building Index...") - docs._build_faiss_index() - #progress(0.25, "Querying...") - for i, result in enumerate(docs.query_gen(question, - length_prompt=f'use {length:d} words', - marginal_relevance=do_marg, - k=k, max_sources=max_sources)): - #progress(0.25 + 0.1 * i, "Generating Context" + str(i)) - yield result.formatted_answer, result.context, passages, docs, make_stats(docs) - #progress(1.0, "Done!") - # format the passages - for i, (key, passage) in enumerate(result.passages.items()): - passages += f'Disabled for now' - yield result.formatted_answer, result.context, passages, docs, make_stats(docs) - - -def download_repo(gh_repo, data, openai_api_key, pbar=gr.Progress()): - # download zipped version of repo - r = requests.get(f'https://api.github.com/repos/{gh_repo}/zipball') - if r.status_code == 200: - pbar(1, 'Downloaded') - - # iterate through files in zip - with zipfile.ZipFile(io.BytesIO(r.content)) as z: - for i, f in enumerate(z.namelist()): - # skip directories - if f.endswith('/'): - continue - # try to read as plaintext (skip binary files) - try: - text = z.read(f).decode('utf-8') - except UnicodeDecodeError: - continue - # check if it's bigger than 100kb or smaller than 10 bytes - if len(text) > 1e5 or len(text) < 10: - continue - # have to save to temporary file so we have a path - with tempfile.NamedTemporaryFile(delete=False) as tmp: - tmp.write(text.encode('utf-8')) - tmp.flush() - path = tmp.name - # strip off the first directory of f - rel_path = '/'.join(f.split('/')[1:]) - key = os.path.basename(f) - citation = f'[{rel_path}](https://github.com/{gh_repo}/tree/main/{rel_path})' - if path in [x[0] for x in data]: - continue - data.append([path, citation, key]) - yield [[len(data), 0]], data, data, validate_dataset(pd.DataFrame(data), openai_api_key) - pbar(int((i+1)/len(z.namelist()) * 99), - f'Added {f}') - pbar(100, 'Done') - else: - raise ValueError('Unknown Github Repo') - return data - 
- -with gr.Blocks(css=css_style) as demo: - - docs = gr.State(None) - data = gr.State([]) - openai_api_key = gr.State('') - - gr.Markdown(f""" - # Document Question and Answer (v{paperqa.__version__}) - - *By Andrew White ([@andrewwhite01](https://twitter.com/andrewwhite01))* - - This tool will enable asking questions of your uploaded text, PDF documents, - or scrape github repos. - It uses OpenAI's GPT models and thus you must enter your API key below. This - tool is under active development and currently uses many tokens - up to 10,000 - for a single query. That is $0.10-0.20 per query, so please be careful! - - * [PaperQA](https://github.com/whitead/paper-qa) is the code used to build this tool. - * [langchain](https://github.com/hwchase17/langchain) is the main library this tool utilizes. - - 1. Enter API Key ([What is that?](https://platform.openai.com/account/api-keys)) - 2. Upload your documents - 3. Ask a questions - """) - openai_api_key = gr.Textbox( - label="OpenAI API Key", placeholder="sk-...", type="password") - with gr.Tab('File Upload'): - uploaded_files = gr.File( - label="Your Documents Upload (PDF or txt)", file_count="multiple", ) - with gr.Tab('Github Repo'): - gh_repo = gr.Textbox( - label="Github Repo", placeholder="whitead/paper-qa") - download = gr.Button("Download Repo") - - with gr.Accordion("See Docs:", open=False): - dataset = gr.Dataframe( - headers=["filepath", "citation string", "key"], - datatype=["str", "str", "str"], - col_count=(3, "fixed"), - interactive=False, - label="Documents and Citations", - overflow_row_behaviour='paginate', - max_rows=5 - ) - buildb = gr.Textbox("⚠️Waiting for documents and key...", - label="Status", interactive=False, show_label=True, - max_lines=1) - stats = gr.Dataframe(headers=['Docs', 'Chunks'], - datatype=['number', 'number'], - col_count=(2, "fixed"), - interactive=False, - label="Doc Stats") - openai_api_key.change(validate_dataset, inputs=[ - dataset, openai_api_key], outputs=[buildb]) - dataset.change(validate_dataset, inputs=[ - dataset, openai_api_key], outputs=[buildb]) - uploaded_files.change(request_pathname, inputs=[ - uploaded_files, data, openai_api_key], outputs=[stats, data, dataset, buildb]) - download.click(fn=download_repo, inputs=[ - gh_repo, data, openai_api_key], outputs=[stats, data, dataset, buildb]) - query = gr.Textbox( - placeholder="Enter your question here...", label="Question") - with gr.Row(): - length = gr.Slider(25, 200, value=100, step=5, - label='Words in answer') - marg = gr.Checkbox(True, label='Max marginal relevance') - k = gr.Slider(1, 20, value=10, step=1, - label='Chunks to examine') - sources = gr.Slider(1, 10, value=5, step=1, - label='Contexts to include') - - ask = gr.Button("Ask Question") - answer = gr.Markdown(label="Answer") - with gr.Accordion("Context", open=True): - context = gr.Markdown(label="Context") - - with gr.Accordion("Raw Text", open=False): - passages = gr.Markdown(label="Passages") - ask.click(fn=do_ask, inputs=[query, buildb, - openai_api_key, dataset, - length, marg, k, sources, - docs], outputs=[answer, context, passages, docs, stats]) - -demo.queue(concurrency_count=20) -demo.launch(show_error=True) diff --git a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_graph.py b/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_graph.py deleted file mode 100644 index c14928f6ba2c1b2804f38ddec96699bb2c0fd11a..0000000000000000000000000000000000000000 --- 
a/spaces/williamcfrancis/Deep-Blind-Motion-Deblurring/sidekick/plot/plot_graph.py +++ /dev/null @@ -1,17 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np - -def plot_graph(epochs, H, save=False): - plt.style.use('ggplot') - plt.figure() - plt.plot(np.arange(0, epochs, 1), H.history['loss'], label='train_loss') - plt.plot(np.arange(0, epochs, 1), H.history['val_loss'], label='val_loss') - plt.plot(np.arange(0, epochs, 1), H.history['accuracy'], label='train_acc') - plt.plot(np.arange(0, epochs, 1), H.history['val_accuracy'], label='val_acc') - plt.title('Training Loss & Accuracy') - plt.xlabel('# Epochs') - plt.ylabel('Metric Values') - plt.legend() - if save==True: - plt.savefig(fname= "./train_plot.jpg") - plt.show() \ No newline at end of file diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/utils/__init__.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/xfys/yolov5_tracking/yolov5/utils/flask_rest_api/README.md b/spaces/xfys/yolov5_tracking/yolov5/utils/flask_rest_api/README.md deleted file mode 100644 index a726acbd92043458311dd949cc09c0195cd35400..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/yolov5/utils/flask_rest_api/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Flask REST API - -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are -commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API -created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). - -## Requirements - -[Flask](https://palletsprojects.com/p/flask/) is required. 
Install with: - -```shell -$ pip install Flask -``` - -## Run - -After Flask installation run: - -```shell -$ python3 restapi.py --port 5000 -``` - -Then use [curl](https://curl.se/) to perform a request: - -```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' -``` - -The model inference results are returned as a JSON response: - -```json -[ - { - "class": 0, - "confidence": 0.8900438547, - "height": 0.9318675399, - "name": "person", - "width": 0.3264600933, - "xcenter": 0.7438579798, - "ycenter": 0.5207948685 - }, - { - "class": 0, - "confidence": 0.8440024257, - "height": 0.7155083418, - "name": "person", - "width": 0.6546785235, - "xcenter": 0.427829951, - "ycenter": 0.6334488392 - }, - { - "class": 27, - "confidence": 0.3771208823, - "height": 0.3902671337, - "name": "tie", - "width": 0.0696444362, - "xcenter": 0.3675483763, - "ycenter": 0.7991207838 - }, - { - "class": 27, - "confidence": 0.3527112305, - "height": 0.1540903747, - "name": "tie", - "width": 0.0336618312, - "xcenter": 0.7814827561, - "ycenter": 0.5065554976 - } -] -``` - -An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given -in `example_request.py` diff --git "a/spaces/xwsm/gpt/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/spaces/xwsm/gpt/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" deleted file mode 100644 index 7c6a7ffb5cb2c42e6543c75d6ad9dd643f412cd9..0000000000000000000000000000000000000000 --- "a/spaces/xwsm/gpt/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" +++ /dev/null @@ -1,29 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - for i in range(5): - currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month - currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day - i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? 
< PUT_YOUR_QUERY_HERE >)。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 diff --git a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/xxbb/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/tflib/tfutil.py b/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/tflib/tfutil.py deleted file mode 100644 index a431a4d4d18a32c9cd44a14ce89f35e038dc312c..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/tflib/tfutil.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
- -"""Miscellaneous helper utils for Tensorflow.""" - -import os -import numpy as np -import tensorflow as tf - -from typing import Any, Iterable, List, Union - -TfExpression = Union[tf.Tensor, tf.Variable, tf.Operation] -"""A type that represents a valid Tensorflow expression.""" - -TfExpressionEx = Union[TfExpression, int, float, np.ndarray] -"""A type that can be converted to a valid Tensorflow expression.""" - - -def run(*args, **kwargs) -> Any: - """Run the specified ops in the default session.""" - assert_tf_initialized() - return tf.get_default_session().run(*args, **kwargs) - - -def is_tf_expression(x: Any) -> bool: - """Check whether the input is a valid Tensorflow expression, i.e., Tensorflow Tensor, Variable, or Operation.""" - return isinstance(x, (tf.Tensor, tf.Variable, tf.Operation)) - - -def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]: - """Convert a Tensorflow shape to a list of ints.""" - return [dim.value for dim in shape] - - -def flatten(x: TfExpressionEx) -> TfExpression: - """Shortcut function for flattening a tensor.""" - with tf.name_scope("Flatten"): - return tf.reshape(x, [-1]) - - -def log2(x: TfExpressionEx) -> TfExpression: - """Logarithm in base 2.""" - with tf.name_scope("Log2"): - return tf.log(x) * np.float32(1.0 / np.log(2.0)) - - -def exp2(x: TfExpressionEx) -> TfExpression: - """Exponent in base 2.""" - with tf.name_scope("Exp2"): - return tf.exp(x * np.float32(np.log(2.0))) - - -def lerp(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpressionEx: - """Linear interpolation.""" - with tf.name_scope("Lerp"): - return a + (b - a) * t - - -def lerp_clip(a: TfExpressionEx, b: TfExpressionEx, t: TfExpressionEx) -> TfExpression: - """Linear interpolation with clip.""" - with tf.name_scope("LerpClip"): - return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0) - - -def absolute_name_scope(scope: str) -> tf.name_scope: - """Forcefully enter the specified name scope, ignoring any surrounding scopes.""" - return tf.name_scope(scope + "/") - - -def absolute_variable_scope(scope: str, **kwargs) -> tf.variable_scope: - """Forcefully enter the specified variable scope, ignoring any surrounding scopes.""" - return tf.variable_scope(tf.VariableScope(name=scope, **kwargs), auxiliary_name_scope=False) - - -def _sanitize_tf_config(config_dict: dict = None) -> dict: - # Defaults. - cfg = dict() - cfg["rnd.np_random_seed"] = None # Random seed for NumPy. None = keep as is. - cfg["rnd.tf_random_seed"] = "auto" # Random seed for TensorFlow. 'auto' = derive from NumPy random state. None = keep as is. - cfg["env.TF_CPP_MIN_LOG_LEVEL"] = "1" # 0 = Print all available debug info from TensorFlow. 1 = Print warnings and errors, but disable debug info. - cfg["graph_options.place_pruned_graph"] = True # False = Check that all ops are available on the designated device. True = Skip the check for ops that are not used. - cfg["gpu_options.allow_growth"] = True # False = Allocate all GPU memory at the beginning. True = Allocate only as much GPU memory as needed. - - # User overrides. - if config_dict is not None: - cfg.update(config_dict) - return cfg - - -def init_tf(config_dict: dict = None) -> None: - """Initialize TensorFlow session using good default settings.""" - # Skip if already initialized. - if tf.get_default_session() is not None: - return - - # Setup config dict and random seeds. 
- cfg = _sanitize_tf_config(config_dict) - np_random_seed = cfg["rnd.np_random_seed"] - if np_random_seed is not None: - np.random.seed(np_random_seed) - tf_random_seed = cfg["rnd.tf_random_seed"] - if tf_random_seed == "auto": - tf_random_seed = np.random.randint(1 << 31) - if tf_random_seed is not None: - tf.set_random_seed(tf_random_seed) - - # Setup environment variables. - for key, value in list(cfg.items()): - fields = key.split(".") - if fields[0] == "env": - assert len(fields) == 2 - os.environ[fields[1]] = str(value) - - # Create default TensorFlow session. - create_session(cfg, force_as_default=True) - - -def assert_tf_initialized(): - """Check that TensorFlow session has been initialized.""" - if tf.get_default_session() is None: - raise RuntimeError("No default TensorFlow session found. Please call dnnlib.tflib.init_tf().") - - -def create_session(config_dict: dict = None, force_as_default: bool = False) -> tf.Session: - """Create tf.Session based on config dict.""" - # Setup TensorFlow config proto. - cfg = _sanitize_tf_config(config_dict) - config_proto = tf.ConfigProto() - for key, value in cfg.items(): - fields = key.split(".") - if fields[0] not in ["rnd", "env"]: - obj = config_proto - for field in fields[:-1]: - obj = getattr(obj, field) - setattr(obj, fields[-1], value) - - # Create session. - session = tf.Session(config=config_proto) - if force_as_default: - # pylint: disable=protected-access - session._default_session = session.as_default() - session._default_session.enforce_nesting = False - session._default_session.__enter__() # pylint: disable=no-member - - return session - - -def init_uninitialized_vars(target_vars: List[tf.Variable] = None) -> None: - """Initialize all tf.Variables that have not already been initialized. - - Equivalent to the following, but more efficient and does not bloat the tf graph: - tf.variables_initializer(tf.report_uninitialized_variables()).run() - """ - assert_tf_initialized() - if target_vars is None: - target_vars = tf.global_variables() - - test_vars = [] - test_ops = [] - - with tf.control_dependencies(None): # ignore surrounding control_dependencies - for var in target_vars: - assert is_tf_expression(var) - - try: - tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/IsVariableInitialized:0")) - except KeyError: - # Op does not exist => variable may be uninitialized. - test_vars.append(var) - - with absolute_name_scope(var.name.split(":")[0]): - test_ops.append(tf.is_variable_initialized(var)) - - init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited] - run([var.initializer for var in init_vars]) - - -def set_vars(var_to_value_dict: dict) -> None: - """Set the values of given tf.Variables. 
- - Equivalent to the following, but more efficient and does not bloat the tf graph: - tflib.run([tf.assign(var, value) for var, value in var_to_value_dict.items()] - """ - assert_tf_initialized() - ops = [] - feed_dict = {} - - for var, value in var_to_value_dict.items(): - assert is_tf_expression(var) - - try: - setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(":0", "/setter:0")) # look for existing op - except KeyError: - with absolute_name_scope(var.name.split(":")[0]): - with tf.control_dependencies(None): # ignore surrounding control_dependencies - setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, "new_value"), name="setter") # create new setter - - ops.append(setter) - feed_dict[setter.op.inputs[1]] = value - - run(ops, feed_dict) - - -def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs): - """Create tf.Variable with large initial value without bloating the tf graph.""" - assert_tf_initialized() - assert isinstance(initial_value, np.ndarray) - zeros = tf.zeros(initial_value.shape, initial_value.dtype) - var = tf.Variable(zeros, *args, **kwargs) - set_vars({var: initial_value}) - return var - - -def convert_images_from_uint8(images, drange=[-1,1], nhwc_to_nchw=False): - """Convert a minibatch of images from uint8 to float32 with configurable dynamic range. - Can be used as an input transformation for Network.run(). - """ - images = tf.cast(images, tf.float32) - if nhwc_to_nchw: - images = tf.transpose(images, [0, 3, 1, 2]) - return (images - drange[0]) * ((drange[1] - drange[0]) / 255) - - -def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False, shrink=1): - """Convert a minibatch of images from float32 to uint8 with configurable dynamic range. - Can be used as an output transformation for Network.run(). 
- """ - images = tf.cast(images, tf.float32) - if shrink > 1: - ksize = [1, 1, shrink, shrink] - images = tf.nn.avg_pool(images, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") - if nchw_to_nhwc: - images = tf.transpose(images, [0, 2, 3, 1]) - scale = 255 / (drange[1] - drange[0]) - images = images * scale + (0.5 - drange[0] * scale) - return tf.saturate_cast(images, tf.uint8) diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/player/Player.ts b/spaces/yderre-aubay/midi-player-demo/src/common/player/Player.ts deleted file mode 100644 index ff15488243b373601678a3c33f62ef17f6e480fa..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/common/player/Player.ts +++ /dev/null @@ -1,319 +0,0 @@ -import range from "lodash/range" -import throttle from "lodash/throttle" -import { AnyEvent, MIDIControlEvents } from "midifile-ts" -import { computed, makeObservable, observable } from "mobx" -import { SendableEvent, SynthOutput } from "../../main/services/SynthOutput" -import { SongStore } from "../../main/stores/SongStore" -import { filterEventsWithRange } from "../helpers/filterEvents" -import { Beat, createBeatsInRange } from "../helpers/mapBeats" -import { - controllerMidiEvent, - noteOffMidiEvent, - noteOnMidiEvent, -} from "../midi/MidiEvent" -import { getStatusEvents } from "../track/selector" -import { ITrackMute } from "../trackMute/ITrackMute" -import { DistributiveOmit } from "../types" -import EventScheduler from "./EventScheduler" -import { convertTrackEvents, PlayerEvent } from "./PlayerEvent" - -export interface LoopSetting { - begin: number - end: number - enabled: boolean -} - -const TIMER_INTERVAL = 50 -const LOOK_AHEAD_TIME = 50 -const METRONOME_TRACK_ID = 99999 -export const DEFAULT_TEMPO = 120 - -export default class Player { - private _currentTempo = DEFAULT_TEMPO - private _scheduler: EventScheduler | null = null - private _songStore: SongStore - private _output: SynthOutput - private _metronomeOutput: SynthOutput - private _trackMute: ITrackMute - private _interval: number | null = null - private _currentTick = 0 - private _isPlaying = false - - disableSeek: boolean = false - isMetronomeEnabled: boolean = false - - loop: LoopSetting | null = null - - constructor( - output: SynthOutput, - metronomeOutput: SynthOutput, - trackMute: ITrackMute, - songStore: SongStore, - ) { - makeObservable(this, { - _currentTick: observable, - _isPlaying: observable, - loop: observable, - isMetronomeEnabled: observable, - position: computed, - isPlaying: computed, - }) - - this._output = output - this._metronomeOutput = metronomeOutput - this._trackMute = trackMute - this._songStore = songStore - } - - private get song() { - return this._songStore.song - } - - private get timebase() { - return this.song.timebase - } - - play() { - if (this.isPlaying) { - console.warn("called play() while playing. 
aborted.") - return - } - this._scheduler = new EventScheduler( - (startTick, endTick) => - filterEventsWithRange(this.song.allEvents, startTick, endTick).concat( - filterEventsWithRange( - createBeatsInRange( - this.song.measures, - this.song.timebase, - startTick, - endTick, - ).flatMap((b) => this.beatToEvents(b)), - startTick, - endTick, - ), - ), - () => this.allNotesOffEvents(), - this._currentTick, - this.timebase, - TIMER_INTERVAL + LOOK_AHEAD_TIME, - ) - this._isPlaying = true - this._output.activate() - this._interval = window.setInterval(() => this._onTimer(), TIMER_INTERVAL) - this._output.activate() - } - - set position(tick: number) { - if (!Number.isInteger(tick)) { - console.warn("Player.tick should be an integer", tick) - } - if (this.disableSeek) { - return - } - tick = Math.min(Math.max(Math.floor(tick), 0), this.song.endOfSong) - if (this._scheduler) { - this._scheduler.seek(tick) - } - this._currentTick = tick - - if (this.isPlaying) { - this.allSoundsOff() - } - - this.sendCurrentStateEvents() - } - - get position() { - return this._currentTick - } - - get isPlaying() { - return this._isPlaying - } - - get numberOfChannels() { - return 0xf - } - - allSoundsOffChannel(ch: number) { - this.sendEvent( - controllerMidiEvent(0, ch, MIDIControlEvents.ALL_SOUNDS_OFF, 0), - ) - } - - allSoundsOff() { - for (const ch of range(0, this.numberOfChannels)) { - this.allSoundsOffChannel(ch) - } - } - - allSoundsOffExclude(channel: number) { - for (const ch of range(0, this.numberOfChannels)) { - if (ch !== channel) { - this.allSoundsOffChannel(ch) - } - } - } - - private allNotesOffEvents(): DistributiveOmit[] { - return range(0, this.numberOfChannels).map((ch) => ({ - ...controllerMidiEvent(0, ch, MIDIControlEvents.ALL_NOTES_OFF, 0), - trackId: -1, // do not mute - })) - } - - private resetControllers() { - for (const ch of range(0, this.numberOfChannels)) { - this.sendEvent( - controllerMidiEvent(0, ch, MIDIControlEvents.RESET_CONTROLLERS, 0x7f), - ) - } - } - - private beatToEvents(beat: Beat): PlayerEvent[] { - const velocity = beat.beat === 0 ? 100 : 70 - const noteNumber = beat.beat === 0 ? 76 : 77 - return [ - { - ...noteOnMidiEvent(0, 9, noteNumber, velocity), - tick: beat.tick, - trackId: METRONOME_TRACK_ID, - }, - ] - } - - stop() { - this._scheduler = null - this.allSoundsOff() - this._isPlaying = false - - if (this._interval !== null) { - clearInterval(this._interval) - this._interval = null - } - } - - reset() { - this.resetControllers() - this.stop() - this._currentTick = 0 - } - - /* - to restore synthesizer state (e.g. 
pitch bend) - collect all previous state events - and send them to the synthesizer - */ - sendCurrentStateEvents() { - this.song.tracks - .flatMap((t, i) => { - const statusEvents = getStatusEvents(t.events, this._currentTick) - statusEvents.forEach((e) => this.applyPlayerEvent(e)) - return convertTrackEvents(statusEvents, t.channel, i) - }) - .forEach((e) => this.sendEvent(e)) - } - - get currentTempo() { - return this._currentTempo - } - - set currentTempo(value: number) { - this._currentTempo = value - } - - startNote( - { - channel, - noteNumber, - velocity, - }: { - noteNumber: number - velocity: number - channel: number - }, - delayTime = 0, - ) { - this._output.activate() - this.sendEvent(noteOnMidiEvent(0, channel, noteNumber, velocity), delayTime) - } - - stopNote( - { - channel, - noteNumber, - }: { - noteNumber: number - channel: number - }, - delayTime = 0, - ) { - this.sendEvent(noteOffMidiEvent(0, channel, noteNumber, 0), delayTime) - } - - // delayTime: seconds, timestampNow: milliseconds - sendEvent( - event: SendableEvent, - delayTime: number = 0, - timestampNow: number = performance.now(), - ) { - this._output.sendEvent(event, delayTime, timestampNow) - } - - private syncPosition = throttle(() => { - if (this._scheduler !== null) { - this._currentTick = this._scheduler.scheduledTick - } - }, 50) - - private applyPlayerEvent( - e: DistributiveOmit, - ) { - if (e.type !== "channel" && "subtype" in e) { - switch (e.subtype) { - case "setTempo": - this._currentTempo = 60000000 / e.microsecondsPerBeat - break - default: - break - } - } - } - - private _onTimer() { - if (this._scheduler === null) { - return - } - - const timestamp = performance.now() - - this._scheduler.loop = - this.loop !== null && this.loop.enabled ? this.loop : null - const events = this._scheduler.readNextEvents(this._currentTempo, timestamp) - - events.forEach(({ event: e, timestamp: time }) => { - if (e.type === "channel") { - const delayTime = (time - timestamp) / 1000 - if (e.trackId === METRONOME_TRACK_ID) { - if (this.isMetronomeEnabled) { - this._metronomeOutput.sendEvent(e, delayTime, timestamp) - } - } else if (this._trackMute.shouldPlayTrack(e.trackId)) { - // channel イベントを MIDI Output に送信 - // Send Channel Event to MIDI OUTPUT - this.sendEvent(e, delayTime, timestamp) - } - } else { - // channel イベント以外を実行 - // Run other than Channel Event - this.applyPlayerEvent(e) - } - }) - - if (this._scheduler.scheduledTick >= this.song.endOfSong) { - this.stop() - } - - this.syncPosition() - } -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/helpers/matrix.ts b/spaces/yderre-aubay/midi-player-demo/src/main/helpers/matrix.ts deleted file mode 100644 index ad19552ce5602426b64e28dfcc81b7b65f71d959..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/helpers/matrix.ts +++ /dev/null @@ -1,7 +0,0 @@ -import { mat4, vec3 } from "gl-matrix" - -export const matrixFromTranslation = (x: number, y: number) => { - const newMat = mat4.create() - mat4.fromTranslation(newMat, vec3.fromValues(x, y, 0)) - return newMat -} diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/hooks/useContextMenu.ts b/spaces/yderre-aubay/midi-player-demo/src/main/hooks/useContextMenu.ts deleted file mode 100644 index f51a933d7f7de68d665818f397b6a2d012488391..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/hooks/useContextMenu.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { useCallback, useState } from "react" - -export interface 
AbstractMouseEvent { - preventDefault: () => void - clientX: number - clientY: number -} - -export const useContextMenu = () => { - const [state, setState] = useState({ - mouseX: 0, - mouseY: 0, - isOpen: false, - }) - - const onContextMenu = useCallback((e: AbstractMouseEvent) => { - e.preventDefault() - setState({ - mouseX: e.clientX - 2, - mouseY: e.clientY - 4, - isOpen: true, - }) - }, []) - - const handleClose = () => { - setState({ ...state, isOpen: false }) - } - - return { - onContextMenu, - menuProps: { - handleClose, - isOpen: state.isOpen, - position: { x: state.mouseX, y: state.mouseY }, - }, - } -} diff --git a/spaces/yefengzi/vits-models/README.md b/spaces/yefengzi/vits-models/README.md deleted file mode 100644 index 2e44ec5507a21c84647346865c876ce2b48db560..0000000000000000000000000000000000000000 --- a/spaces/yefengzi/vits-models/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Vits Models -emoji: 🏃 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: sayashi/vits-models ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/optimizers/__init__.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/optimizers/__init__.py deleted file mode 100644 index a0e0c5932838281e912079e5784d84d43444a61a..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/optimizers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from torch.optim import * # NOQA -from .radam import * # NOQA diff --git a/spaces/yerfor/SyntaSpeech/utils/metrics/pitch_distance.py b/spaces/yerfor/SyntaSpeech/utils/metrics/pitch_distance.py deleted file mode 100644 index 3bc11424a9f75270fc7eb5ef98731129e25ff715..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/utils/metrics/pitch_distance.py +++ /dev/null @@ -1,102 +0,0 @@ -import numpy as np -import matplotlib.pyplot as plt -from numba import jit - -import torch - - -@jit -def time_warp(costs): - dtw = np.zeros_like(costs) - dtw[0, 1:] = np.inf - dtw[1:, 0] = np.inf - eps = 1e-4 - for i in range(1, costs.shape[0]): - for j in range(1, costs.shape[1]): - dtw[i, j] = costs[i, j] + min(dtw[i - 1, j], dtw[i, j - 1], dtw[i - 1, j - 1]) - return dtw - - -def align_from_distances(distance_matrix, debug=False, return_mindist=False): - # for each position in spectrum 1, returns best match position in spectrum2 - # using monotonic alignment - dtw = time_warp(distance_matrix) - - i = distance_matrix.shape[0] - 1 - j = distance_matrix.shape[1] - 1 - results = [0] * distance_matrix.shape[0] - while i > 0 and j > 0: - results[i] = j - i, j = min([(i - 1, j), (i, j - 1), (i - 1, j - 1)], key=lambda x: dtw[x[0], x[1]]) - - if debug: - visual = np.zeros_like(dtw) - visual[range(len(results)), results] = 1 - plt.matshow(visual) - plt.show() - if return_mindist: - return results, dtw[-1, -1] - return results - - -def get_local_context(input_f, max_window=32, scale_factor=1.): - # input_f: [S, 1], support numpy array or torch tensor - # return hist: [S, max_window * 2], list of list - T = input_f.shape[0] - # max_window = int(max_window * scale_factor) - derivative = [[0 for _ in range(max_window * 2)] for _ in range(T)] - - for t in range(T): # travel the time series - for feat_idx in range(-max_window, max_window): - if t + feat_idx < 0 or t + feat_idx >= T: - value = 0 - else: - value = input_f[t + 
feat_idx] - derivative[t][feat_idx + max_window] = value - return derivative - - -def cal_localnorm_dist(src, tgt, src_len, tgt_len): - local_src = torch.tensor(get_local_context(src)) - local_tgt = torch.tensor(get_local_context(tgt, scale_factor=tgt_len / src_len)) - - local_norm_src = (local_src - local_src.mean(-1).unsqueeze(-1)) # / local_src.std(-1).unsqueeze(-1) # [T1, 32] - local_norm_tgt = (local_tgt - local_tgt.mean(-1).unsqueeze(-1)) # / local_tgt.std(-1).unsqueeze(-1) # [T2, 32] - - dists = torch.cdist(local_norm_src[None, :, :], local_norm_tgt[None, :, :]) # [1, T1, T2] - return dists - - -## here is API for one sample -def LoNDTWDistance(src, tgt): - # src: [S] - # tgt: [T] - dists = cal_localnorm_dist(src, tgt, src.shape[0], tgt.shape[0]) # [1, S, T] - costs = dists.squeeze(0) # [S, T] - alignment, min_distance = align_from_distances(costs.T.cpu().detach().numpy(), return_mindist=True) # [T] - return alignment, min_distance - -# if __name__ == '__main__': -# # utils from ns -# from utils.pitch_utils import denorm_f0 -# from tasks.singing.fsinging import FastSingingDataset -# from utils.hparams import hparams, set_hparams -# -# set_hparams() -# -# train_ds = FastSingingDataset('test') -# -# # Test One sample case -# sample = train_ds[0] -# amateur_f0 = sample['f0'] -# prof_f0 = sample['prof_f0'] -# -# amateur_uv = sample['uv'] -# amateur_padding = sample['mel2ph'] == 0 -# prof_uv = sample['prof_uv'] -# prof_padding = sample['prof_mel2ph'] == 0 -# amateur_f0_denorm = denorm_f0(amateur_f0, amateur_uv, hparams, pitch_padding=amateur_padding) -# prof_f0_denorm = denorm_f0(prof_f0, prof_uv, hparams, pitch_padding=prof_padding) -# alignment, min_distance = LoNDTWDistance(amateur_f0_denorm, prof_f0_denorm) -# print(min_distance) -# python utils/pitch_distance.py --config egs/datasets/audio/molar/svc_ppg.yaml diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/__init__.py deleted file mode 100644 index dbc0a57e8324f3025c96fad65f18fc59de6fa56c..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/chinese_clip/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
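
The `utils/metrics/pitch_distance.py` module removed above computes a locally normalized DTW distance between two f0 contours: `time_warp` accumulates the cost matrix, `align_from_distances` backtracks a monotonic alignment, and `LoNDTWDistance` wraps both as a per-sample API. For orientation only, here is a minimal, self-contained NumPy sketch of the same dynamic-time-warping idea; `dtw_align` and the sample contours are hypothetical names and not part of the deleted implementation.

```python
# Illustrative sketch of DTW alignment between two 1-D pitch (f0) contours,
# in the spirit of time_warp/align_from_distances above. Not the deleted code.
import numpy as np


def dtw_align(src: np.ndarray, tgt: np.ndarray):
    """Return (path, total_cost) for a monotonic alignment of src onto tgt."""
    costs = np.abs(src[:, None] - tgt[None, :])  # [S, T] local distances
    acc = np.full_like(costs, np.inf)
    acc[0, 0] = costs[0, 0]
    for i in range(costs.shape[0]):
        for j in range(costs.shape[1]):
            if i == 0 and j == 0:
                continue
            best_prev = min(
                acc[i - 1, j] if i > 0 else np.inf,                      # step in src
                acc[i, j - 1] if j > 0 else np.inf,                      # step in tgt
                acc[i - 1, j - 1] if i > 0 and j > 0 else np.inf,        # diagonal match
            )
            acc[i, j] = costs[i, j] + best_prev
    # Backtrack from the bottom-right corner to recover the warping path.
    i, j = costs.shape[0] - 1, costs.shape[1] - 1
    path = [(i, j)]
    while i > 0 or j > 0:
        candidates = []
        if i > 0 and j > 0:
            candidates.append((acc[i - 1, j - 1], (i - 1, j - 1)))
        if i > 0:
            candidates.append((acc[i - 1, j], (i - 1, j)))
        if j > 0:
            candidates.append((acc[i, j - 1], (i, j - 1)))
        _, (i, j) = min(candidates, key=lambda c: c[0])
        path.append((i, j))
    return path[::-1], acc[-1, -1]


if __name__ == "__main__":
    f0_a = np.array([220.0, 230.0, 240.0, 250.0])  # hypothetical source contour
    f0_b = np.array([220.0, 235.0, 250.0])         # hypothetical target contour
    path, cost = dtw_align(f0_a, f0_b)
    print(path, cost)
```

The deleted module additionally normalizes each frame by a local context window before computing the distance matrix (`get_local_context` / `cal_localnorm_dist`); the sketch above omits that step and only shows the alignment itself.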
-from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available - - -_import_structure = { - "configuration_chinese_clip": [ - "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", - "ChineseCLIPConfig", - "ChineseCLIPOnnxConfig", - "ChineseCLIPTextConfig", - "ChineseCLIPVisionConfig", - ], - "processing_chinese_clip": ["ChineseCLIPProcessor"], -} - -try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"] - _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_chinese_clip"] = [ - "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", - "ChineseCLIPModel", - "ChineseCLIPPreTrainedModel", - "ChineseCLIPTextModel", - "ChineseCLIPVisionModel", - ] - -if TYPE_CHECKING: - from .configuration_chinese_clip import ( - CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, - ChineseCLIPConfig, - ChineseCLIPOnnxConfig, - ChineseCLIPTextConfig, - ChineseCLIPVisionConfig, - ) - from .processing_chinese_clip import ChineseCLIPProcessor - - try: - if not is_vision_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_chinese_clip import ( - CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, - ChineseCLIPModel, - ChineseCLIPPreTrainedModel, - ChineseCLIPTextModel, - ChineseCLIPVisionModel, - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta/modeling_tf_deberta.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta/modeling_tf_deberta.py deleted file mode 100644 index 29c5a256d305996a22235c747e2795093209c25e..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deberta/modeling_tf_deberta.py +++ /dev/null @@ -1,1432 +0,0 @@ -# coding=utf-8 -# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
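
The `chinese_clip/__init__.py` removed just above follows the lazy-import pattern used throughout `transformers`: an `_import_structure` dict maps submodules to their public names, optional dependencies are probed in `try`/`except` blocks, and at runtime the package module is swapped for a lazy proxy so heavy submodules are only imported on attribute access. As a rough sketch of that pattern under stated assumptions (the `LazyModule` class and example structure below are hypothetical, not the library's `_LazyModule`):

```python
# Illustrative only: a stripped-down lazy-import module, loosely mirroring the
# _import_structure / sys.modules replacement pattern in the deleted __init__.py.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, item: str):
        if item not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        # Import the defining submodule only now, on first access.
        submodule = importlib.import_module("." + self._name_to_module[item], self.__name__)
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache so the import happens only once
        return value


# Hypothetical usage inside a package __init__.py:
# import sys
# _import_structure = {"configuration_chinese_clip": ["ChineseCLIPConfig"]}
# sys.modules[__name__] = LazyModule(__name__, _import_structure)
```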
-""" TF 2.0 DeBERTa model.""" - - -from __future__ import annotations - -import math -from typing import Dict, Optional, Sequence, Tuple, Union - -import numpy as np -import tensorflow as tf - -from ...activations_tf import get_tf_activation -from ...modeling_tf_outputs import ( - TFBaseModelOutput, - TFMaskedLMOutput, - TFQuestionAnsweringModelOutput, - TFSequenceClassifierOutput, - TFTokenClassifierOutput, -) -from ...modeling_tf_utils import ( - TFMaskedLanguageModelingLoss, - TFModelInputType, - TFPreTrainedModel, - TFQuestionAnsweringLoss, - TFSequenceClassificationLoss, - TFTokenClassificationLoss, - get_initializer, - unpack_inputs, -) -from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax -from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging -from .configuration_deberta import DebertaConfig - - -logger = logging.get_logger(__name__) - - -_CONFIG_FOR_DOC = "DebertaConfig" -_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base" - -TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "kamalkraj/deberta-base", - # See all DeBERTa models at https://huggingface.co/models?filter=DeBERTa -] - - -class TFDebertaContextPooler(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense(config.pooler_hidden_size, name="dense") - self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout") - self.config = config - - def call(self, hidden_states, training: bool = False): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - context_token = hidden_states[:, 0] - context_token = self.dropout(context_token, training=training) - pooled_output = self.dense(context_token) - pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output) - return pooled_output - - @property - def output_dim(self) -> int: - return self.config.hidden_size - - -class TFDebertaXSoftmax(tf.keras.layers.Layer): - """ - Masked Softmax which is optimized for saving memory - - Args: - input (`tf.Tensor`): The input tensor that will apply softmax. - mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation. - dim (int): The dimension that will apply softmax - """ - - def __init__(self, axis=-1, **kwargs): - super().__init__(**kwargs) - self.axis = axis - - def call(self, inputs: tf.Tensor, mask: tf.Tensor): - rmask = tf.logical_not(tf.cast(mask, tf.bool)) - output = tf.where(rmask, float("-inf"), inputs) - output = stable_softmax(output, self.axis) - output = tf.where(rmask, 0.0, output) - return output - - -class TFDebertaStableDropout(tf.keras.layers.Layer): - """ - Optimized dropout module for stabilizing the training - - Args: - drop_prob (float): the dropout probabilities - """ - - def __init__(self, drop_prob, **kwargs): - super().__init__(**kwargs) - self.drop_prob = drop_prob - - @tf.custom_gradient - def xdropout(self, inputs): - """ - Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. 
- """ - mask = tf.cast( - 1 - - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), - tf.bool, - ) - scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) - if self.drop_prob > 0: - inputs = tf.where(mask, 0.0, inputs) * scale - - def grad(upstream): - if self.drop_prob > 0: - return tf.where(mask, 0.0, upstream) * scale - else: - return upstream - - return inputs, grad - - def call(self, inputs: tf.Tensor, training: tf.Tensor = False): - if training: - return self.xdropout(inputs) - return inputs - - -class TFDebertaLayerNorm(tf.keras.layers.Layer): - """LayerNorm module in the TF style (epsilon inside the square root).""" - - def __init__(self, size, eps=1e-12, **kwargs): - super().__init__(**kwargs) - self.size = size - self.eps = eps - - def build(self, input_shape): - self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight") - self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias") - return super().build(input_shape) - - def call(self, x: tf.Tensor) -> tf.Tensor: - mean = tf.reduce_mean(x, axis=[-1], keepdims=True) - variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True) - std = tf.math.sqrt(variance + self.eps) - return self.gamma * (x - mean) / std + self.beta - - -class TFDebertaSelfOutput(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense") - self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") - - def call(self, hidden_states, input_tensor, training: bool = False): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class TFDebertaAttention(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - self.self = TFDebertaDisentangledSelfAttention(config, name="self") - self.dense_output = TFDebertaSelfOutput(config, name="output") - self.config = config - - def call( - self, - input_tensor: tf.Tensor, - attention_mask: tf.Tensor, - query_states: tf.Tensor = None, - relative_pos: tf.Tensor = None, - rel_embeddings: tf.Tensor = None, - output_attentions: bool = False, - training: bool = False, - ) -> Tuple[tf.Tensor]: - self_outputs = self.self( - hidden_states=input_tensor, - attention_mask=attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - training=training, - ) - if query_states is None: - query_states = input_tensor - attention_output = self.dense_output( - hidden_states=self_outputs[0], input_tensor=query_states, training=training - ) - - output = (attention_output,) + self_outputs[1:] - - return output - - -class TFDebertaIntermediate(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.dense = tf.keras.layers.Dense( - units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" - ) - - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = get_tf_activation(config.hidden_act) - else: - self.intermediate_act_fn = config.hidden_act - - def 
call(self, hidden_states: tf.Tensor) -> tf.Tensor: - hidden_states = self.dense(inputs=hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - - return hidden_states - - -class TFDebertaOutput(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.dense = tf.keras.layers.Dense( - units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" - ) - self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") - - def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: - hidden_states = self.dense(inputs=hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - - return hidden_states - - -class TFDebertaLayer(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.attention = TFDebertaAttention(config, name="attention") - self.intermediate = TFDebertaIntermediate(config, name="intermediate") - self.bert_output = TFDebertaOutput(config, name="output") - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: tf.Tensor, - query_states: tf.Tensor = None, - relative_pos: tf.Tensor = None, - rel_embeddings: tf.Tensor = None, - output_attentions: bool = False, - training: bool = False, - ) -> Tuple[tf.Tensor]: - attention_outputs = self.attention( - input_tensor=hidden_states, - attention_mask=attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - training=training, - ) - attention_output = attention_outputs[0] - intermediate_output = self.intermediate(hidden_states=attention_output) - layer_output = self.bert_output( - hidden_states=intermediate_output, input_tensor=attention_output, training=training - ) - outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them - - return outputs - - -class TFDebertaEncoder(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] - self.relative_attention = getattr(config, "relative_attention", False) - self.config = config - if self.relative_attention: - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - - def build(self, input_shape): - if self.relative_attention: - self.rel_embeddings = self.add_weight( - name="rel_embeddings.weight", - shape=[self.max_relative_positions * 2, self.config.hidden_size], - initializer=get_initializer(self.config.initializer_range), - ) - return super().build(input_shape) - - def get_rel_embedding(self): - rel_embeddings = self.rel_embeddings if self.relative_attention else None - return rel_embeddings - - def get_attention_mask(self, attention_mask): - if len(shape_list(attention_mask)) <= 2: - extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2) - attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1) - attention_mask = tf.cast(attention_mask, tf.uint8) - elif len(shape_list(attention_mask)) == 3: - attention_mask = 
tf.expand_dims(attention_mask, 1) - - return attention_mask - - def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): - if self.relative_attention and relative_pos is None: - q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2] - relative_pos = build_relative_position(q, shape_list(hidden_states)[-2]) - return relative_pos - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: tf.Tensor, - query_states: tf.Tensor = None, - relative_pos: tf.Tensor = None, - output_attentions: bool = False, - output_hidden_states: bool = False, - return_dict: bool = True, - training: bool = False, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - all_hidden_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - attention_mask = self.get_attention_mask(attention_mask) - relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) - - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[0] - else: - next_kv = hidden_states - - rel_embeddings = self.get_rel_embedding() - - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_outputs = layer_module( - hidden_states=next_kv, - attention_mask=attention_mask, - query_states=query_states, - relative_pos=relative_pos, - rel_embeddings=rel_embeddings, - output_attentions=output_attentions, - training=training, - ) - hidden_states = layer_outputs[0] - - if query_states is not None: - query_states = hidden_states - if isinstance(hidden_states, Sequence): - next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None - else: - next_kv = hidden_states - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - # Add last layer - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) - - return TFBaseModelOutput( - last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions - ) - - -def build_relative_position(query_size, key_size): - """ - Build relative position according to the query and key - - We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key - \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q - - P_k\\) - - Args: - query_size (int): the length of query - key_size (int): the length of key - - Return: - `tf.Tensor`: A tensor with shape [1, query_size, key_size] - - """ - q_ids = tf.range(query_size, dtype=tf.int32) - k_ids = tf.range(key_size, dtype=tf.int32) - rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1]) - rel_pos_ids = rel_pos_ids[:query_size, :] - rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0) - return tf.cast(rel_pos_ids, tf.int64) - - -def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): - shapes = [ - shape_list(query_layer)[0], - shape_list(query_layer)[1], - shape_list(query_layer)[2], - shape_list(relative_pos)[-1], - ] - return tf.broadcast_to(c2p_pos, shapes) - - -def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): - shapes = [ - shape_list(query_layer)[0], - shape_list(query_layer)[1], - shape_list(key_layer)[-2], - shape_list(key_layer)[-2], - ] - return tf.broadcast_to(c2p_pos, shapes) - - -def pos_dynamic_expand(pos_index, 
p2c_att, key_layer): - shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]] - return tf.broadcast_to(pos_index, shapes) - - -def torch_gather(x, indices, gather_axis): - if gather_axis < 0: - gather_axis = tf.rank(x) + gather_axis - - if gather_axis != tf.rank(x) - 1: - pre_roll = tf.rank(x) - 1 - gather_axis - permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0) - x = tf.transpose(x, perm=permutation) - indices = tf.transpose(indices, perm=permutation) - else: - pre_roll = 0 - - flat_x = tf.reshape(x, (-1, tf.shape(x)[-1])) - flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1])) - gathered = tf.gather(flat_x, flat_indices, batch_dims=1) - gathered = tf.reshape(gathered, tf.shape(indices)) - - if pre_roll != 0: - permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0) - gathered = tf.transpose(gathered, perm=permutation) - - return gathered - - -class TFDebertaDisentangledSelfAttention(tf.keras.layers.Layer): - """ - Disentangled self-attention module - - Parameters: - config (`str`): - A model config class instance with the configuration to build a new model. The schema is similar to - *BertConfig*, for more details, please refer [`DebertaConfig`] - - """ - - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - if config.hidden_size % config.num_attention_heads != 0: - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.in_proj = tf.keras.layers.Dense( - self.all_head_size * 3, - kernel_initializer=get_initializer(config.initializer_range), - name="in_proj", - use_bias=False, - ) - self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] - - self.relative_attention = getattr(config, "relative_attention", False) - self.talking_head = getattr(config, "talking_head", False) - - if self.talking_head: - self.head_logits_proj = tf.keras.layers.Dense( - self.num_attention_heads, - kernel_initializer=get_initializer(config.initializer_range), - name="head_logits_proj", - use_bias=False, - ) - self.head_weights_proj = tf.keras.layers.Dense( - self.num_attention_heads, - kernel_initializer=get_initializer(config.initializer_range), - name="head_weights_proj", - use_bias=False, - ) - - self.softmax = TFDebertaXSoftmax(axis=-1) - - if self.relative_attention: - self.max_relative_positions = getattr(config, "max_relative_positions", -1) - if self.max_relative_positions < 1: - self.max_relative_positions = config.max_position_embeddings - self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout") - if "c2p" in self.pos_att_type: - self.pos_proj = tf.keras.layers.Dense( - self.all_head_size, - kernel_initializer=get_initializer(config.initializer_range), - name="pos_proj", - use_bias=False, - ) - if "p2c" in self.pos_att_type: - self.pos_q_proj = tf.keras.layers.Dense( - self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj" - ) - - self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout") - - def build(self, input_shape): - self.q_bias = self.add_weight( - name="q_bias", shape=(self.all_head_size), initializer=tf.keras.initializers.Zeros() - ) - self.v_bias = 
self.add_weight( - name="v_bias", shape=(self.all_head_size), initializer=tf.keras.initializers.Zeros() - ) - return super().build(input_shape) - - def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor: - shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1] - # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] - tensor = tf.reshape(tensor=tensor, shape=shape) - - # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] - return tf.transpose(tensor, perm=[0, 2, 1, 3]) - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: tf.Tensor, - query_states: tf.Tensor = None, - relative_pos: tf.Tensor = None, - rel_embeddings: tf.Tensor = None, - output_attentions: bool = False, - training: bool = False, - ) -> Tuple[tf.Tensor]: - """ - Call the module - - Args: - hidden_states (`tf.Tensor`): - Input states to the module usually the output from previous layer, it will be the Q,K and V in - *Attention(Q,K,V)* - - attention_mask (`tf.Tensor`): - An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum - sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j* - th token. - - return_att (`bool`, optional): - Whether return the attention matrix. - - query_states (`tf.Tensor`, optional): - The *Q* state in *Attention(Q,K,V)*. - - relative_pos (`tf.Tensor`): - The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with - values ranging in [*-max_relative_positions*, *max_relative_positions*]. - - rel_embeddings (`tf.Tensor`): - The embedding of relative distances. It's a tensor of shape [\\(2 \\times - \\text{max_relative_positions}\\), *hidden_size*]. - - - """ - if query_states is None: - qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) - query_layer, key_layer, value_layer = tf.split( - self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1 - ) - else: - - def linear(w, b, x): - out = tf.matmul(x, w, transpose_b=True) - if b is not None: - out += tf.transpose(b) - return out - - ws = tf.split( - tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0 - ) - qkvw = tf.TensorArray(dtype=tf.float32, size=3) - for k in tf.range(3): - qkvw_inside = tf.TensorArray(dtype=tf.float32, size=self.num_attention_heads) - for i in tf.range(self.num_attention_heads): - qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k]) - qkvw = qkvw.write(k, qkvw_inside.concat()) - qkvb = [None] * 3 - - q = linear(qkvw[0], qkvb[0], query_states) - k = linear(qkvw[1], qkvb[1], hidden_states) - v = linear(qkvw[2], qkvb[2], hidden_states) - query_layer = self.transpose_for_scores(q) - key_layer = self.transpose_for_scores(k) - value_layer = self.transpose_for_scores(v) - - query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) - value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) - - rel_att = None - # Take the dot product between "query" and "key" to get the raw attention scores. 
- scale_factor = 1 + len(self.pos_att_type) - scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor) - query_layer = query_layer / scale - - attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2])) - if self.relative_attention: - rel_embeddings = self.pos_dropout(rel_embeddings, training=training) - rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) - - if rel_att is not None: - attention_scores = attention_scores + rel_att - - if self.talking_head: - attention_scores = tf.transpose( - self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2] - ) - - attention_probs = self.softmax(attention_scores, attention_mask) - attention_probs = self.dropout(attention_probs, training=training) - if self.talking_head: - attention_probs = tf.transpose( - self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2] - ) - - context_layer = tf.matmul(attention_probs, value_layer) - context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) - context_layer_shape = shape_list(context_layer) - # Set the final dimension here explicitly. - # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing - # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput - # requires final input dimension to be defined - new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]] - context_layer = tf.reshape(context_layer, new_context_layer_shape) - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - return outputs - - def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): - if relative_pos is None: - q = shape_list(query_layer)[-2] - relative_pos = build_relative_position(q, shape_list(key_layer)[-2]) - shape_list_pos = shape_list(relative_pos) - if len(shape_list_pos) == 2: - relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0) - elif len(shape_list_pos) == 3: - relative_pos = tf.expand_dims(relative_pos, 1) - # bxhxqxk - elif len(shape_list_pos) != 4: - raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{len(shape_list_pos)}") - - att_span = tf.cast( - tf.minimum( - tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions - ), - tf.int64, - ) - rel_embeddings = tf.expand_dims( - rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0 - ) - - score = 0 - - # content->position - if "c2p" in self.pos_att_type: - pos_key_layer = self.pos_proj(rel_embeddings) - pos_key_layer = self.transpose_for_scores(pos_key_layer) - c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2])) - c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1) - c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1) - score += c2p_att - - # position->content - if "p2c" in self.pos_att_type: - pos_query_layer = self.pos_q_proj(rel_embeddings) - pos_query_layer = self.transpose_for_scores(pos_query_layer) - pos_query_layer /= tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=tf.float32)) - if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]: - r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2]) - else: - r_pos = relative_pos - p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1) - p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2])) - p2c_att = tf.transpose( - torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2] - ) - if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]: - pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1) - p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2) - score += p2c_att - - return score - - -class TFDebertaEmbeddings(tf.keras.layers.Layer): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config, **kwargs): - super().__init__(**kwargs) - - self.config = config - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - self.hidden_size = config.hidden_size - self.max_position_embeddings = config.max_position_embeddings - self.position_biased_input = getattr(config, "position_biased_input", True) - self.initializer_range = config.initializer_range - if self.embedding_size != config.hidden_size: - self.embed_proj = tf.keras.layers.Dense( - config.hidden_size, - kernel_initializer=get_initializer(config.initializer_range), - name="embed_proj", - use_bias=False, - ) - self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout") - - def build(self, input_shape: tf.TensorShape): - with tf.name_scope("word_embeddings"): - self.weight = self.add_weight( - name="weight", - shape=[self.config.vocab_size, self.embedding_size], - initializer=get_initializer(self.initializer_range), - ) - - with tf.name_scope("token_type_embeddings"): - if self.config.type_vocab_size > 0: - self.token_type_embeddings = self.add_weight( - name="embeddings", - shape=[self.config.type_vocab_size, self.embedding_size], - initializer=get_initializer(self.initializer_range), - ) - else: - self.token_type_embeddings = None - - with tf.name_scope("position_embeddings"): - if self.position_biased_input: - self.position_embeddings = self.add_weight( - name="embeddings", - shape=[self.max_position_embeddings, self.hidden_size], - initializer=get_initializer(self.initializer_range), - ) - else: - 
self.position_embeddings = None - - super().build(input_shape) - - def call( - self, - input_ids: tf.Tensor = None, - position_ids: tf.Tensor = None, - token_type_ids: tf.Tensor = None, - inputs_embeds: tf.Tensor = None, - mask: tf.Tensor = None, - training: bool = False, - ) -> tf.Tensor: - """ - Applies embedding based on inputs tensor. - - Returns: - final_embeddings (`tf.Tensor`): output embedding tensor. - """ - if input_ids is None and inputs_embeds is None: - raise ValueError("Need to provide either `input_ids` or `input_embeds`.") - - if input_ids is not None: - check_embeddings_within_bounds(input_ids, self.config.vocab_size) - inputs_embeds = tf.gather(params=self.weight, indices=input_ids) - - input_shape = shape_list(inputs_embeds)[:-1] - - if token_type_ids is None: - token_type_ids = tf.fill(dims=input_shape, value=0) - - if position_ids is None: - position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) - - final_embeddings = inputs_embeds - if self.position_biased_input: - position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) - final_embeddings += position_embeds - if self.config.type_vocab_size > 0: - token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) - final_embeddings += token_type_embeds - - if self.embedding_size != self.hidden_size: - final_embeddings = self.embed_proj(final_embeddings) - - final_embeddings = self.LayerNorm(final_embeddings) - - if mask is not None: - if len(shape_list(mask)) != len(shape_list(final_embeddings)): - if len(shape_list(mask)) == 4: - mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1) - mask = tf.cast(tf.expand_dims(mask, axis=2), tf.float32) - - final_embeddings = final_embeddings * mask - - final_embeddings = self.dropout(final_embeddings, training=training) - - return final_embeddings - - -class TFDebertaPredictionHeadTransform(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - - self.dense = tf.keras.layers.Dense( - units=self.embedding_size, - kernel_initializer=get_initializer(config.initializer_range), - name="dense", - ) - - if isinstance(config.hidden_act, str): - self.transform_act_fn = get_tf_activation(config.hidden_act) - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") - - def call(self, hidden_states: tf.Tensor) -> tf.Tensor: - hidden_states = self.dense(inputs=hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - - return hidden_states - - -class TFDebertaLMPredictionHead(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): - super().__init__(**kwargs) - - self.config = config - self.embedding_size = getattr(config, "embedding_size", config.hidden_size) - - self.transform = TFDebertaPredictionHeadTransform(config, name="transform") - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. 
- self.input_embeddings = input_embeddings - - def build(self, input_shape: tf.TensorShape): - self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") - - super().build(input_shape) - - def get_output_embeddings(self) -> tf.keras.layers.Layer: - return self.input_embeddings - - def set_output_embeddings(self, value: tf.Variable): - self.input_embeddings.weight = value - self.input_embeddings.vocab_size = shape_list(value)[0] - - def get_bias(self) -> Dict[str, tf.Variable]: - return {"bias": self.bias} - - def set_bias(self, value: tf.Variable): - self.bias = value["bias"] - self.config.vocab_size = shape_list(value["bias"])[0] - - def call(self, hidden_states: tf.Tensor) -> tf.Tensor: - hidden_states = self.transform(hidden_states=hidden_states) - seq_length = shape_list(hidden_states)[1] - hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) - hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) - hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) - hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) - - return hidden_states - - -class TFDebertaOnlyMLMHead(tf.keras.layers.Layer): - def __init__(self, config: DebertaConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): - super().__init__(**kwargs) - self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions") - - def call(self, sequence_output: tf.Tensor) -> tf.Tensor: - prediction_scores = self.predictions(hidden_states=sequence_output) - - return prediction_scores - - -# @keras_serializable -class TFDebertaMainLayer(tf.keras.layers.Layer): - config_class = DebertaConfig - - def __init__(self, config: DebertaConfig, **kwargs): - super().__init__(**kwargs) - - self.config = config - - self.embeddings = TFDebertaEmbeddings(config, name="embeddings") - self.encoder = TFDebertaEncoder(config, name="encoder") - - def get_input_embeddings(self) -> tf.keras.layers.Layer: - return self.embeddings - - def set_input_embeddings(self, value: tf.Variable): - self.embeddings.weight = value - self.embeddings.vocab_size = shape_list(value)[0] - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - raise NotImplementedError - - @unpack_inputs - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: bool = False, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if attention_mask is None: - attention_mask = tf.fill(dims=input_shape, value=1) - - if token_type_ids is None: - token_type_ids = tf.fill(dims=input_shape, value=0) - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - mask=attention_mask, - training=training, - ) - - encoder_outputs = self.encoder( - hidden_states=embedding_output, - attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - sequence_output = encoder_outputs[0] - - if not return_dict: - return (sequence_output,) + encoder_outputs[1:] - - return TFBaseModelOutput( - last_hidden_state=sequence_output, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -class TFDebertaPreTrainedModel(TFPreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = DebertaConfig - base_model_prefix = "deberta" - - -DEBERTA_START_DOCSTRING = r""" - The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled - Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build - on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two - improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data. - - This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it - as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and - behavior. - - - - TensorFlow models and layers in `transformers` accept two formats as input: - - - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional argument. - - The reason the second format is supported is that Keras methods prefer this format when passing inputs to models - and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just - pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second - format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with - the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first - positional argument: - - - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` - - Note that when creating models and layers with - [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry - about any of this, as you can just pass inputs like you would to any other Python function! - - - - Parameters: - config ([`DebertaConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -DEBERTA_INPUTS_DOCSTRING = r""" - Args: - input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): - Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, - 1]`: - - - 0 corresponds to a *sentence A* token, - - 1 corresponds to a *sentence B* token. - - [What are token type IDs?](../glossary#token-type-ids) - position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert *input_ids* indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple. 
-""" - - -@add_start_docstrings( - "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", - DEBERTA_START_DOCSTRING, -) -class TFDebertaModel(TFDebertaPreTrainedModel): - def __init__(self, config: DebertaConfig, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.deberta = TFDebertaMainLayer(config, name="deberta") - - @unpack_inputs - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFBaseModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - outputs = self.deberta( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - return outputs - - -@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING) -class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss): - def __init__(self, config: DebertaConfig, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - if config.is_decoder: - logger.warning( - "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for " - "bi-directional self-attention." - ) - - self.deberta = TFDebertaMainLayer(config, name="deberta") - self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls") - - def get_lm_head(self) -> tf.keras.layers.Layer: - return self.mlm.predictions - - @unpack_inputs - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFMaskedLMOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., - config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the - loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` - """ - outputs = self.deberta( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = outputs[0] - prediction_scores = self.mlm(sequence_output=sequence_output, training=training) - loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return TFMaskedLMOutput( - loss=loss, - logits=prediction_scores, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the - pooled output) e.g. for GLUE tasks. - """, - DEBERTA_START_DOCSTRING, -) -class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss): - def __init__(self, config: DebertaConfig, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.num_labels = config.num_labels - - self.deberta = TFDebertaMainLayer(config, name="deberta") - self.pooler = TFDebertaContextPooler(config, name="pooler") - - drop_out = getattr(config, "cls_dropout", None) - drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out - self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout") - self.classifier = tf.keras.layers.Dense( - units=config.num_labels, - kernel_initializer=get_initializer(config.initializer_range), - name="classifier", - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFSequenceClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If - `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- """ - outputs = self.deberta( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = outputs[0] - pooled_output = self.pooler(sequence_output, training=training) - pooled_output = self.dropout(pooled_output, training=training) - logits = self.classifier(pooled_output) - loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) - - if not return_dict: - output = (logits,) + outputs[1:] - - return ((loss,) + output) if loss is not None else output - - return TFSequenceClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for - Named-Entity-Recognition (NER) tasks. - """, - DEBERTA_START_DOCSTRING, -) -class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss): - def __init__(self, config: DebertaConfig, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.num_labels = config.num_labels - - self.deberta = TFDebertaMainLayer(config, name="deberta") - self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) - self.classifier = tf.keras.layers.Dense( - units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFTokenClassifierOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
- """ - outputs = self.deberta( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = outputs[0] - sequence_output = self.dropout(sequence_output, training=training) - logits = self.classifier(inputs=sequence_output) - loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) - - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFTokenClassifierOutput( - loss=loss, - logits=logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - -@add_start_docstrings( - """ - DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear - layers on top of the hidden-states output to compute `span start logits` and `span end logits`). - """, - DEBERTA_START_DOCSTRING, -) -class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss): - def __init__(self, config: DebertaConfig, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.num_labels = config.num_labels - - self.deberta = TFDebertaMainLayer(config, name="deberta") - self.qa_outputs = tf.keras.layers.Dense( - units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" - ) - - @unpack_inputs - @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFQuestionAnsweringModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - def call( - self, - input_ids: TFModelInputType | None = None, - attention_mask: np.ndarray | tf.Tensor | None = None, - token_type_ids: np.ndarray | tf.Tensor | None = None, - position_ids: np.ndarray | tf.Tensor | None = None, - inputs_embeds: np.ndarray | tf.Tensor | None = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - start_positions: np.ndarray | tf.Tensor | None = None, - end_positions: np.ndarray | tf.Tensor | None = None, - training: Optional[bool] = False, - ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: - r""" - start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the start of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. - end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): - Labels for position (index) of the end of the labelled span for computing the token classification loss. - Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence - are not taken into account for computing the loss. 
- """ - outputs = self.deberta( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - sequence_output = outputs[0] - logits = self.qa_outputs(inputs=sequence_output) - start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) - start_logits = tf.squeeze(input=start_logits, axis=-1) - end_logits = tf.squeeze(input=end_logits, axis=-1) - loss = None - - if start_positions is not None and end_positions is not None: - labels = {"start_position": start_positions} - labels["end_position"] = end_positions - loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) - - if not return_dict: - output = (start_logits, end_logits) + outputs[2:] - return ((loss,) + output) if loss is not None else output - - return TFQuestionAnsweringModelOutput( - loss=loss, - start_logits=start_logits, - end_logits=end_logits, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/efficientnet/convert_efficientnet_to_pytorch.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/efficientnet/convert_efficientnet_to_pytorch.py deleted file mode 100644 index e9988524aca04de2a1d600586ff01d9b9a3ea6c2..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/efficientnet/convert_efficientnet_to_pytorch.py +++ /dev/null @@ -1,339 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Convert EfficientNet checkpoints from the original repository. 
- -URL: https://github.com/keras-team/keras/blob/v2.11.0/keras/applications/efficientnet.py""" - -import argparse -import json -import os - -import numpy as np -import PIL -import requests -import tensorflow.keras.applications.efficientnet as efficientnet -import torch -from huggingface_hub import hf_hub_download -from PIL import Image -from tensorflow.keras.preprocessing import image - -from transformers import ( - EfficientNetConfig, - EfficientNetForImageClassification, - EfficientNetImageProcessor, -) -from transformers.utils import logging - - -logging.set_verbosity_info() -logger = logging.get_logger(__name__) - -model_classes = { - "b0": efficientnet.EfficientNetB0, - "b1": efficientnet.EfficientNetB1, - "b2": efficientnet.EfficientNetB2, - "b3": efficientnet.EfficientNetB3, - "b4": efficientnet.EfficientNetB4, - "b5": efficientnet.EfficientNetB5, - "b6": efficientnet.EfficientNetB6, - "b7": efficientnet.EfficientNetB7, -} - -CONFIG_MAP = { - "b0": { - "hidden_dim": 1280, - "width_coef": 1.0, - "depth_coef": 1.0, - "image_size": 224, - "dropout_rate": 0.2, - "dw_padding": [], - }, - "b1": { - "hidden_dim": 1280, - "width_coef": 1.0, - "depth_coef": 1.1, - "image_size": 240, - "dropout_rate": 0.2, - "dw_padding": [16], - }, - "b2": { - "hidden_dim": 1408, - "width_coef": 1.1, - "depth_coef": 1.2, - "image_size": 260, - "dropout_rate": 0.3, - "dw_padding": [5, 8, 16], - }, - "b3": { - "hidden_dim": 1536, - "width_coef": 1.2, - "depth_coef": 1.4, - "image_size": 300, - "dropout_rate": 0.3, - "dw_padding": [5, 18], - }, - "b4": { - "hidden_dim": 1792, - "width_coef": 1.4, - "depth_coef": 1.8, - "image_size": 380, - "dropout_rate": 0.4, - "dw_padding": [6], - }, - "b5": { - "hidden_dim": 2048, - "width_coef": 1.6, - "depth_coef": 2.2, - "image_size": 456, - "dropout_rate": 0.4, - "dw_padding": [13, 27], - }, - "b6": { - "hidden_dim": 2304, - "width_coef": 1.8, - "depth_coef": 2.6, - "image_size": 528, - "dropout_rate": 0.5, - "dw_padding": [31], - }, - "b7": { - "hidden_dim": 2560, - "width_coef": 2.0, - "depth_coef": 3.1, - "image_size": 600, - "dropout_rate": 0.5, - "dw_padding": [18], - }, -} - - -def get_efficientnet_config(model_name): - config = EfficientNetConfig() - config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"] - config.width_coefficient = CONFIG_MAP[model_name]["width_coef"] - config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"] - config.image_size = CONFIG_MAP[model_name]["image_size"] - config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"] - config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"] - - repo_id = "huggingface/label-files" - filename = "imagenet-1k-id2label.json" - config.num_labels = 1000 - id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) - id2label = {int(k): v for k, v in id2label.items()} - - config.id2label = id2label - config.label2id = {v: k for k, v in id2label.items()} - return config - - -# We will verify our results on an image of cute cats -def prepare_img(): - url = "http://images.cocodataset.org/val2017/000000039769.jpg" - im = Image.open(requests.get(url, stream=True).raw) - return im - - -def convert_image_processor(model_name): - size = CONFIG_MAP[model_name]["image_size"] - preprocessor = EfficientNetImageProcessor( - size={"height": size, "width": size}, - image_mean=[0.485, 0.456, 0.406], - image_std=[0.47853944, 0.4732864, 0.47434163], - do_center_crop=False, - ) - return preprocessor - - -# here we list all keys to be renamed (original name on the left, our name on 
the right) -def rename_keys(original_param_names): - block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")] - block_names = sorted(set(block_names)) - num_blocks = len(block_names) - block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))} - - rename_keys = [] - rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight")) - rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight")) - rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias")) - rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean")) - rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var")) - - for b in block_names: - hf_b = block_name_mapping[b] - rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight")) - rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight")) - rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias")) - rename_keys.append( - (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") - ) - rename_keys.append( - (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") - ) - rename_keys.append( - (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") - ) - rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight")) - rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias")) - rename_keys.append( - (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") - ) - rename_keys.append( - (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") - ) - - rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight")) - rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias")) - rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight")) - rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias")) - rename_keys.append( - (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight") - ) - rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight")) - rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias")) - rename_keys.append( - (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean") - ) - rename_keys.append( - (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var") - ) - - rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight")) - rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight")) - rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias")) - rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean")) - rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var")) - - key_mapping = {} - for item in rename_keys: - if item[0] in original_param_names: - key_mapping[item[0]] = "efficientnet." 
+ item[1] - - key_mapping["predictions/kernel:0"] = "classifier.weight" - key_mapping["predictions/bias:0"] = "classifier.bias" - return key_mapping - - -def replace_params(hf_params, tf_params, key_mapping): - for key, value in tf_params.items(): - if "normalization" in key: - continue - - hf_key = key_mapping[key] - if "_conv" in key and "kernel" in key: - new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1) - elif "depthwise_kernel" in key: - new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1) - elif "kernel" in key: - new_hf_value = torch.from_numpy(np.transpose(value)) - else: - new_hf_value = torch.from_numpy(value) - - # Replace HF parameters with original TF model parameters - assert hf_params[hf_key].shape == new_hf_value.shape - hf_params[hf_key].copy_(new_hf_value) - - -@torch.no_grad() -def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub): - """ - Copy/paste/tweak model's weights to our EfficientNet structure. - """ - # Load original model - original_model = model_classes[model_name]( - include_top=True, - weights="imagenet", - input_tensor=None, - input_shape=None, - pooling=None, - classes=1000, - classifier_activation="softmax", - ) - - tf_params = original_model.trainable_variables - tf_non_train_params = original_model.non_trainable_variables - tf_params = {param.name: param.numpy() for param in tf_params} - for param in tf_non_train_params: - tf_params[param.name] = param.numpy() - tf_param_names = list(tf_params.keys()) - - # Load HuggingFace model - config = get_efficientnet_config(model_name) - hf_model = EfficientNetForImageClassification(config).eval() - hf_params = hf_model.state_dict() - - # Create src-to-dst parameter name mapping dictionary - print("Converting parameters...") - key_mapping = rename_keys(tf_param_names) - replace_params(hf_params, tf_params, key_mapping) - - # Initialize preprocessor and preprocess input image - preprocessor = convert_image_processor(model_name) - inputs = preprocessor(images=prepare_img(), return_tensors="pt") - - # HF model inference - hf_model.eval() - with torch.no_grad(): - outputs = hf_model(**inputs) - hf_logits = outputs.logits.detach().numpy() - - # Original model inference - original_model.trainable = False - image_size = CONFIG_MAP[model_name]["image_size"] - img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST) - x = image.img_to_array(img) - x = np.expand_dims(x, axis=0) - original_logits = original_model.predict(x) - - # Check whether original and HF model outputs match -> np.allclose - assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same." 
- print("Model outputs match!") - - if save_model: - # Create folder to save model - if not os.path.isdir(pytorch_dump_folder_path): - os.mkdir(pytorch_dump_folder_path) - # Save converted model and image processor - hf_model.save_pretrained(pytorch_dump_folder_path) - preprocessor.save_pretrained(pytorch_dump_folder_path) - - if push_to_hub: - # Push model and image processor to hub - print(f"Pushing converted {model_name} to the hub...") - model_name = f"efficientnet-{model_name}" - preprocessor.push_to_hub(model_name) - hf_model.push_to_hub(model_name) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - # Required parameters - parser.add_argument( - "--model_name", - default="b0", - type=str, - help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].", - ) - parser.add_argument( - "--pytorch_dump_folder_path", - default="hf_model", - type=str, - help="Path to the output PyTorch model directory.", - ) - parser.add_argument("--save_model", action="store_true", help="Save model to local") - parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") - - args = parser.parse_args() - convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_neo/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_neo/__init__.py deleted file mode 100644 index 02ca0a11949b73ecef0329412d869ce1996d1bc6..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/gpt_neo/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2021 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available - - -_import_structure = { - "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_gpt_neo"] = [ - "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", - "GPTNeoForCausalLM", - "GPTNeoForQuestionAnswering", - "GPTNeoForSequenceClassification", - "GPTNeoForTokenClassification", - "GPTNeoModel", - "GPTNeoPreTrainedModel", - "load_tf_weights_in_gpt_neo", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_gpt_neo"] = [ - "FlaxGPTNeoForCausalLM", - "FlaxGPTNeoModel", - "FlaxGPTNeoPreTrainedModel", - ] - - -if TYPE_CHECKING: - from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_gpt_neo import ( - GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, - GPTNeoForCausalLM, - GPTNeoForQuestionAnswering, - GPTNeoForSequenceClassification, - GPTNeoForTokenClassification, - GPTNeoModel, - GPTNeoPreTrainedModel, - load_tf_weights_in_gpt_neo, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py deleted file mode 100644 index d7bbdd7d00505f1e51154379c99ab621cb648a6d..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnetx_4gf_dds_fpn_1x.py +++ /dev/null @@ -1,34 +0,0 @@ -from ..common.optim import SGD as optimizer -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.train import train - -from detectron2.config import LazyCall as L -from detectron2.modeling.backbone import RegNet -from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock - - -# Replace default ResNet with RegNetX-4GF from the DDS paper. 
Config source: -# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnetx/RegNetX-4.0GF_dds_8gpu.yaml#L4-L9 # noqa -model.backbone.bottom_up = L(RegNet)( - stem_class=SimpleStem, - stem_width=32, - block_class=ResBottleneckBlock, - depth=23, - w_a=38.65, - w_0=96, - w_m=2.43, - group_width=40, - freeze_at=2, - norm="FrozenBN", - out_features=["s1", "s2", "s3", "s4"], -) -model.pixel_std = [57.375, 57.120, 58.395] - -optimizer.weight_decay = 5e-5 -train.init_checkpoint = ( - "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906383/RegNetX-4.0GF_dds_8gpu.pyth" -) -# RegNets benefit from enabling cudnn benchmark mode -train.cudnn_benchmark = True diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h deleted file mode 100644 index 03f4211003f42f601f0cfcf4a690f5da4a0a1f67..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#pragma once -#include - -namespace detectron2 { - -at::Tensor ROIAlignRotated_forward_cpu( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cpu( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); - -#if defined(WITH_CUDA) || defined(WITH_HIP) -at::Tensor ROIAlignRotated_forward_cuda( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cuda( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); -#endif - -// Interface for Python -inline at::Tensor ROIAlignRotated_forward( - const at::Tensor& input, - const at::Tensor& rois, - const double spatial_scale, - const int64_t pooled_height, - const int64_t pooled_width, - const int64_t sampling_ratio) { - if (input.is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return ROIAlignRotated_forward_cuda( - input, - rois, - spatial_scale, - pooled_height, - pooled_width, - sampling_ratio); -#else - AT_ERROR("Detectron2 is not compiled with GPU support!"); -#endif - } - return ROIAlignRotated_forward_cpu( - input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); -} - -inline at::Tensor ROIAlignRotated_backward( - const at::Tensor& grad, - const at::Tensor& rois, - const double spatial_scale, - const int64_t pooled_height, - const int64_t pooled_width, - const int64_t batch_size, - const int64_t channels, - const int64_t height, - const int64_t width, - const int64_t sampling_ratio) { - if (grad.is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return ROIAlignRotated_backward_cuda( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - 
batch_size, - channels, - height, - width, - sampling_ratio); -#else - AT_ERROR("Detectron2 is not compiled with GPU support!"); -#endif - } - return ROIAlignRotated_backward_cpu( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio); -} - -} // namespace detectron2 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/README.md b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/README.md deleted file mode 100644 index 8531cafd4d1aae0267f4fc5e7212f7db5ed90686..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/docs/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Read the docs: - -The latest documentation built from this directory is available at [detectron2.readthedocs.io](https://detectron2.readthedocs.io/). -Documents in this directory are not meant to be read on github. - -# Build the docs: - -1. Install detectron2 according to [INSTALL.md](../INSTALL.md). -2. Install additional libraries required to build docs: - - docutils==0.16 - - Sphinx==3.2.0 - - recommonmark==0.6.0 - - sphinx_rtd_theme - -3. Run `make html` from this directory. diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-spec.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-spec.js deleted file mode 100644 index a077d66066dedb1499ef5c3a4b854e9fb9a58162..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-spec.js +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Return flexbox spec versions by prefix - */ -module.exports = function (prefix) { - let spec - if (prefix === '-webkit- 2009' || prefix === '-moz-') { - spec = 2009 - } else if (prefix === '-ms-') { - spec = 2012 - } else if (prefix === '-webkit-') { - spec = 'final' - } - - if (prefix === '-webkit- 2009') { - prefix = '-webkit-' - } - - return [spec, prefix] -} diff --git a/spaces/ysharma/nougat/README.md b/spaces/ysharma/nougat/README.md deleted file mode 100644 index b092baa13f21944b343f3cccebeb8dc4cce53bd8..0000000000000000000000000000000000000000 --- a/spaces/ysharma/nougat/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Nougat -emoji: 👁📄 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: False -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yueyouxin/runwayml-stable-diffusion-v1-5/app.py b/spaces/yueyouxin/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/yueyouxin/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/yufiofficial/MusicGenQ/audiocraft/models/loaders.py b/spaces/yufiofficial/MusicGenQ/audiocraft/models/loaders.py deleted file mode 100644 index 97c662c3212b7695669cbfc5214ff2f099c3f319..0000000000000000000000000000000000000000 --- a/spaces/yufiofficial/MusicGenQ/audiocraft/models/loaders.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -""" -Utility functions to load from the checkpoints. -Each checkpoint is a torch.saved dict with the following keys: -- 'xp.cfg': the hydra config as dumped during training. This should be used - to rebuild the object using the audiocraft.models.builders functions, -- 'model_best_state': a readily loadable best state for the model, including - the conditioner. The model obtained from `xp.cfg` should be compatible - with this state dict. In the case of a LM, the encodec model would not be - bundled along but instead provided separately. - -Those functions also support loading from a remote location with the Torch Hub API. -They also support overriding some parameters, in particular the device and dtype -of the returned model. -""" - -from pathlib import Path -from huggingface_hub import hf_hub_download -import typing as tp -import os - -from omegaconf import OmegaConf -import torch - -from . import builders - - -HF_MODEL_CHECKPOINTS_MAP = { - "small": "facebook/musicgen-small", - "medium": "facebook/musicgen-medium", - "large": "facebook/musicgen-large", - "melody": "facebook/musicgen-melody", -} - - -def _get_state_dict( - file_or_url_or_id: tp.Union[Path, str], - filename: tp.Optional[str] = None, - device='cpu', - cache_dir: tp.Optional[str] = None, -): - # Return the state dict either from a file or url - file_or_url_or_id = str(file_or_url_or_id) - assert isinstance(file_or_url_or_id, str) - - if os.path.isfile(file_or_url_or_id): - return torch.load(file_or_url_or_id, map_location=device) - - if os.path.isdir(file_or_url_or_id): - file = f"{file_or_url_or_id}/{filename}" - return torch.load(file, map_location=device) - - elif file_or_url_or_id.startswith('https://'): - return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True) - - elif file_or_url_or_id in HF_MODEL_CHECKPOINTS_MAP: - assert filename is not None, "filename needs to be defined if using HF checkpoints" - - repo_id = HF_MODEL_CHECKPOINTS_MAP[file_or_url_or_id] - file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=cache_dir) - return torch.load(file, map_location=device) - - else: - raise ValueError(f"{file_or_url_or_id} is not a valid name, path or link that can be loaded.") - - -def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): - pkg = _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir) - cfg = OmegaConf.create(pkg['xp.cfg']) - cfg.device = str(device) - model = builders.get_compression_model(cfg) - model.load_state_dict(pkg['best_state']) - model.eval() - return model - - -def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): - pkg = _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir) - cfg = OmegaConf.create(pkg['xp.cfg']) - cfg.device = str(device) - if cfg.device == 'cpu': - cfg.dtype = 'float32' - else: - cfg.dtype = 'float16' - model = builders.get_lm_model(cfg) - model.load_state_dict(pkg['best_state']) - model.eval() - model.cfg = cfg - return model diff --git a/spaces/yunfei0710/gpt-academic/docs/test_markdown_format.py b/spaces/yunfei0710/gpt-academic/docs/test_markdown_format.py deleted file mode 100644 index 896f6f130c69f8a94d6f49feadf7091f0f23c2c9..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/docs/test_markdown_format.py +++ /dev/null @@ -1,130 +0,0 @@ -sample = """ -[1]: 
https://baike.baidu.com/item/%E8%B4%A8%E8%83%BD%E6%96%B9%E7%A8%8B/1884527 "质能方程(质能方程式)_百度百科" -[2]: https://www.zhihu.com/question/348249281 "如何理解质能方程 E=mc²? - 知乎" -[3]: https://zhuanlan.zhihu.com/p/32597385 "质能方程的推导与理解 - 知乎 - 知乎专栏" - -你好,这是必应。质能方程是描述质量与能量之间的当量关系的方程[^1^][1]。用tex格式,质能方程可以写成$$E=mc^2$$,其中$E$是能量,$m$是质量,$c$是光速[^2^][2] [^3^][3]。 -""" -import re - -def preprocess_newbing_out(s): - pattern = r'\^(\d+)\^' # 匹配^数字^ - pattern2 = r'\[(\d+)\]' # 匹配^数字^ - sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值 - result = re.sub(pattern, sub, s) # 替换操作 - if '[1]' in result: - result += '


              ' + "
              ".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '
              ' - return result - - -def close_up_code_segment_during_stream(gpt_reply): - """ - 在gpt输出代码的中途(输出了前面的```,但还没输出完后面的```),补上后面的``` - - Args: - gpt_reply (str): GPT模型返回的回复字符串。 - - Returns: - str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。 - - """ - if '```' not in gpt_reply: - return gpt_reply - if gpt_reply.endswith('```'): - return gpt_reply - - # 排除了以上两个情况,我们 - segments = gpt_reply.split('```') - n_mark = len(segments) - 1 - if n_mark % 2 == 1: - # print('输出代码片段中!') - return gpt_reply+'\n```' - else: - return gpt_reply - -import markdown -from latex2mathml.converter import convert as tex2mathml -from functools import wraps, lru_cache -def markdown_convertion(txt): - """ - 将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。 - """ - pre = '
<div class="markdown-body">' - suf = '</div>
              ' - if txt.startswith(pre) and txt.endswith(suf): - # print('警告,输入了已经经过转化的字符串,二次转化可能出问题') - return txt # 已经被转化过,不需要再次转化 - - markdown_extension_configs = { - 'mdx_math': { - 'enable_dollar_delimiter': True, - 'use_gitlab_delimiters': False, - }, - } - find_equation_pattern = r'\n', '') - return content - - - if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识 - # convert everything to html format - split = markdown.markdown(text='---') - convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs) - convert_stage_1 = markdown_bug_hunt(convert_stage_1) - # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s). - # 1. convert to easy-to-copy tex (do not render math) - convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL) - # 2. convert to rendered equation - convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL) - # cat them together - return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf - else: - return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf - - -sample = preprocess_newbing_out(sample) -sample = close_up_code_segment_during_stream(sample) -sample = markdown_convertion(sample) -with open('tmp.html', 'w', encoding='utf8') as f: - f.write(""" - - - My Website - - - - """) - f.write(sample) diff --git a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/README.md b/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/README.md deleted file mode 100644 index 25fd6be516553bb7329fa05adf710360d5536923..0000000000000000000000000000000000000000 --- a/spaces/zdxiaoda/sovits-4.0-V1-anime-character-model/so-vits-svc/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# SoftVC VITS Singing Voice Conversion - -In the field of Singing Voice Conversion, there is not only one project, SoVitsSvc, but also many other projects, which will not be listed here. The project was officially discontinued for maintenance and Archived. -However, there are still other enthusiasts who have created their own branches and continue to maintain the SoVitsSvc project (still unrelated to SvcDevelopTeam and the repository maintainers) and have made some big changes to it for you to find out for yourself. - -#### ✨ A fork with a greatly improved interface: [34j/so-vits-svc-fork](https://github.com/34j/so-vits-svc-fork) - -#### ✨ A client supports real-time conversion: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -#### This project is fundamentally different from Vits. Vits is TTS and this project is SVC. TTS cannot be carried out in this project, and Vits cannot carry out SVC, and the two project models are not universal - -## Disclaimer - -This project is an open source, offline project, and all members of SvcDevelopTeam and all developers and maintainers of this project (hereinafter referred to as contributors) have no control over this project. The contributor of this project has never provided any organization or individual with any form of assistance, including but not limited to data set extraction, data set processing, computing support, training support, infering, etc. 
Contributors to the project do not and cannot know what users are using the project for. Therefore, all AI models and synthesized audio based on the training of this project have nothing to do with the contributors of this project. All problems arising therefrom shall be borne by the user. - -This project is run completely offline and cannot collect any user information or obtain user input data. Therefore, contributors to this project are not aware of all user input and models and therefore are not responsible for any user input. - -This project is only a framework project, which does not have the function of speech synthesis itself, and all the functions require the user to train the model themselves. Meanwhile, there is no model attached to this project, and any secondary distributed project has nothing to do with the contributors of this project - -## 📏 Terms of Use - -# Warning: Please solve the authorization problem of the dataset on your own. You shall be solely responsible for any problems caused by the use of non-authorized datasets for training and all consequences thereof.The repository and its maintainer, svc develop team, have nothing to do with the consequences! - -1. This project is established for academic exchange purposes only and is intended for communication and learning purposes. It is not intended for production environments. -2. Any videos based on sovits that are published on video platforms must clearly indicate in the description that they are used for voice changing and specify the input source of the voice or audio, for example, using videos or audios published by others and separating the vocals as input source for conversion, which must provide clear original video or music links. If your own voice or other synthesized voices from other commercial vocal synthesis software are used as the input source for conversion, you must also explain it in the description. -3. You shall be solely responsible for any infringement problems caused by the input source. When using other commercial vocal synthesis software as input source, please ensure that you comply with the terms of use of the software. Note that many vocal synthesis engines clearly state in their terms of use that they cannot be used for input source conversion. -4. It is forbidden to use the project to engage in illegal activities, religious and political activities. The project developers firmly resist the above activities. If they do not agree with this article, the use of the project is prohibited. -5. Continuing to use this project is deemed as agreeing to the relevant provisions stated in this repository README. This repository README has the obligation to persuade, and is not responsible for any subsequent problems that may arise. -6. If you use this project for any other plan, please contact and inform the author of this repository in advance. Thank you very much. - -## 🆕 Update! - -> Updated the 4.0-v2 model, the entire process is the same as 4.0. Compared to 4.0, there is some improvement in certain scenarios, but there are also some cases where it has regressed. Please refer to the [4.0-v2 branch](https://github.com/svc-develop-team/so-vits-svc/tree/4.0-v2) for more information. 
- -## 📝 4.0 Feature list of branches - -| Branch | Feature | whether compatible with the main branch model | -| :-------------: | :----------: | :------------: | -| 4.0 | main branch | - | -| 4.0v2 | The VISinger2 model is used | incompatibility | -| 4.0-Vec768-Layer12 | The feature input is the Layer 12 Transformer output of the Content Vec | incompatibility | - -## 📝 Model Introduction - -The singing voice conversion model uses SoftVC content encoder to extract source audio speech features, then the vectors are directly fed into VITS instead of converting to a text based intermediate; thus the pitch and intonations are conserved. Additionally, the vocoder is changed to [NSF HiFiGAN](https://github.com/openvpi/DiffSinger/tree/refactor/modules/nsf_hifigan) to solve the problem of sound interruption. - -### 🆕 4.0 Version Update Content - -- Feature input is changed to [Content Vec](https://github.com/auspicious3000/contentvec) -- The sampling rate is unified to use 44100Hz -- Due to the change of hop size and other parameters, as well as the streamlining of some model structures, the required GPU memory for inference is **significantly reduced**. The 44kHz GPU memory usage of version 4.0 is even smaller than the 32kHz usage of version 3.0. -- Some code structures have been adjusted -- The dataset creation and training process are consistent with version 3.0, but the model is completely non-universal, and the data set needs to be fully pre-processed again. -- Added an option 1: automatic pitch prediction for vc mode, which means that you don't need to manually enter the pitch key when converting speech, and the pitch of male and female voices can be automatically converted. However, this mode will cause pitch shift when converting songs. -- Added option 2: reduce timbre leakage through k-means clustering scheme, making the timbre more similar to the target timbre. -- Added option 3: Added [NSF-HIFIGAN Enhancer](https://github.com/yxlllc/DDSP-SVC), which has certain sound quality enhancement effect on some models with few train-sets, but has negative effect on well-trained models, so it is closed by default - -## 💬 About Python Version - -After conducting tests, we believe that the project runs stably on `Python 3.8.9`. - -## 📥 Pre-trained Model Files - -#### **Required** - -- ContentVec: [checkpoint_best_legacy_500.pt](https://ibm.box.com/s/z1wgl1stco8ffooyatzdwsqn2psd9lrr) - - Place it under the `hubert` directory - -```shell -# contentvec -wget -P hubert/ http://obs.cstcloud.cn/share/obs/sankagenkeshi/checkpoint_best_legacy_500.pt -# Alternatively, you can manually download and place it in the hubert directory -``` - -#### **Optional(Strongly recommend)** - -- Pre-trained model files: `G_0.pth` `D_0.pth` - - Place them under the `logs/44k` directory - -Get them from svc-develop-team(TBD) or anywhere else. - -Although the pretrained model generally does not cause any copyright problems, please pay attention to it. For example, ask the author in advance, or the author has indicated the feasible use in the description clearly. - -#### **Optional(Select as Required)** - -If you are using the NSF-HIFIGAN enhancer, you will need to download the pre-trained NSF-HIFIGAN model, or not if you do not need it. 
- -- Pre-trained NSF-HIFIGAN Vocoder: [nsf_hifigan_20221211.zip](https://github.com/openvpi/vocoders/releases/download/nsf-hifigan-v1/nsf_hifigan_20221211.zip) - - Unzip and place the four files under the `pretrain/nsf_hifigan` directory - -```shell -# nsf_hifigan -https://github.com/openvpi/vocoders/releases/download/nsf-hifigan-v1/nsf_hifigan_20221211.zip -# Alternatively, you can manually download and place it in the pretrain/nsf_hifigan directory -# URL:https://github.com/openvpi/vocoders/releases/tag/nsf-hifigan-v1 -``` - -## 📊 Dataset Preparation - -Simply place the dataset in the `dataset_raw` directory with the following file structure. - -``` -dataset_raw -├───speaker0 -│ ├───xxx1-xxx1.wav -│ ├───... -│ └───Lxx-0xx8.wav -└───speaker1 - ├───xx2-0xxx2.wav - ├───... - └───xxx7-xxx007.wav -``` - -You can customize the speaker name. - -``` -dataset_raw -└───suijiSUI - ├───1.wav - ├───... - └───25788785-20221210-200143-856_01_(Vocals)_0_0.wav -``` - -## 🛠️ Preprocessing - -### 0. Slice audio - -Slice to `5s - 15s`, a bit longer is no problem. Too long may lead to `torch.cuda.OutOfMemoryError` during training or even pre-processing. - -By using [audio-slicer-GUI](https://github.com/flutydeer/audio-slicer) or [audio-slicer-CLI](https://github.com/openvpi/audio-slicer) - -In general, only the `Minimum Interval` needs to be adjusted. For statement audio it usually remains default. For singing audio it can be adjusted to `100` or even `50`. - -After slicing, delete audio that is too long and too short. - -### 1. Resample to 44100Hz and mono - -```shell -python resample.py -``` - -### 2. Automatically split the dataset into training and validation sets, and generate configuration files. - -```shell -python preprocess_flist_config.py -``` - -### 3. Generate hubert and f0 - -```shell -python preprocess_hubert_f0.py -``` - -After completing the above steps, the dataset directory will contain the preprocessed data, and the dataset_raw folder can be deleted. - -#### You can modify some parameters in the generated config.json - -* `keep_ckpts`: Keep the last `keep_ckpts` models during training. Set to `0` will keep them all. Default is `3`. - -* `all_in_mem`: Load all dataset to RAM. It can be enabled when the disk IO of some platforms is too low and the system memory is **much larger** than your dataset. - -## 🏋️‍♀️ Training - -```shell -python train.py -c configs/config.json -m 44k -``` - -## 🤖 Inference - -Use [inference_main.py](https://github.com/svc-develop-team/so-vits-svc/blob/4.0/inference_main.py) - -```shell -# Example -python inference_main.py -m "logs/44k/G_30400.pth" -c "configs/config.json" -s "nen" -n "君の知らない物語-src.wav" -t 0 -``` - -Required parameters: -- `-m` | `--model_path`: Path to the model. -- `-c` | `--config_path`: Path to the configuration file. -- `-s` | `--spk_list`: Target speaker name for conversion. -- `-n` | `--clean_names`: A list of wav file names located in the raw folder. -- `-t` | `--trans`: Pitch adjustment, supports positive and negative (semitone) values. - -Optional parameters: see the next section -- `-a` | `--auto_predict_f0`: Automatic pitch prediction for voice conversion. Do not enable this when converting songs as it can cause serious pitch issues. -- `-cl` | `--clip`: Voice forced slicing. Set to 0 to turn off(default), duration in seconds. -- `-lg` | `--linear_gradient`: The cross fade length of two audio slices in seconds. If there is a discontinuous voice after forced slicing, you can adjust this value. Otherwise, it is recommended to use. 
Default 0. -- `-cm` | `--cluster_model_path`: Path to the clustering model. Fill in any value if clustering is not trained. -- `-cr` | `--cluster_infer_ratio`: Proportion of the clustering solution, range 0-1. Fill in 0 if the clustering model is not trained. -- `-fmp` | `--f0_mean_pooling`: Apply a mean filter (pooling) to f0, which may improve some hoarse outputs. Enabling this option will reduce inference speed. -- `-eh` | `--enhance`: Whether to use the NSF_HIFIGAN enhancer. This option can noticeably improve sound quality for some models trained on small datasets, but tends to degrade well-trained models, so it is turned off by default. - -## 🤔 Optional Settings - -If the results from the previous section are satisfactory, or if you don't understand what is being discussed in the following section, you can skip it; it won't affect the model usage. (These optional settings have a relatively small impact: they may help on some specific data, but in most cases the difference may not be noticeable.) - -### Automatic f0 prediction - -During 4.0 model training, an f0 predictor is also trained, which can be used for automatic pitch prediction during voice conversion. However, if the result is not good, manual pitch input can be used instead. Please do not enable this feature when converting singing voice, as it may cause serious pitch shifting! -- Set `auto_predict_f0` to true in inference_main. - -### Cluster-based timbre leakage control - -Introduction: The clustering scheme can reduce timbre leakage and make the trained model sound more like the target's timbre (although this effect is not very obvious), but using clustering alone will lower the model's clarity (the model may sound unclear). Therefore, this model adopts a fusion method to linearly control the proportion of the clustering and non-clustering schemes. In other words, you can manually adjust the ratio between "sounding like the target's timbre" and "being clear and articulate" to find a suitable trade-off point. - -The existing steps before clustering do not need to be changed. All you need to do is train an additional clustering model, which has a relatively low training cost. - -- Training process: - - Train on a machine with good CPU performance. In my experience, it takes about 4 minutes to train each speaker on a Tencent Cloud machine with a 6-core CPU. - - Execute `python cluster/train_cluster.py`. The output model will be saved in `logs/44k/kmeans_10000.pt`. -- Inference process: - - Specify `cluster_model_path` in `inference_main.py`. - - Specify `cluster_infer_ratio` in `inference_main.py`, where `0` means not using clustering at all, `1` means only using clustering, and usually `0.5` is sufficient. - -### F0 mean filtering - -Introduction: Mean filtering (pooling) of the predicted f0 can effectively reduce hoarseness caused by fluctuations in the predicted pitch (hoarseness caused by reverb or harmony cannot be removed this way for now). The filter brings a clear improvement on some songs, but it can also make some songs go out of tune. If the output sounds muffled after inference, consider enabling it.
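For orientation only, here is a rough sketch of how the optional flags documented above can be combined on top of the basic inference example from the Inference section. The checkpoint, config, speaker, and input file names are the same placeholder values used earlier in this README, and the clustering checkpoint path is the default `logs/44k/kmeans_10000.pt`; adjust everything to your own setup. F0 mean pooling itself is toggled as described in the bullet right below.

```shell
# Sketch (not an official recipe): the basic inference example extended with
# the optional clustering flags documented above (fusion ratio 0.5).
python inference_main.py -m "logs/44k/G_30400.pth" -c "configs/config.json" \
  -s "nen" -n "君の知らない物語-src.wav" -t 0 \
  -cm "logs/44k/kmeans_10000.pt" -cr 0.5
```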
- -- Set `f0_mean_pooling` to true in `inference_main.py` - -### [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.0/sovits4_for_colab.ipynb) [sovits4_for_colab.ipynb](https://colab.research.google.com/github/svc-develop-team/so-vits-svc/blob/4.0/sovits4_for_colab.ipynb) - -**[23/03/16] No longer need to download hubert manually** - -**[23/04/14] Support NSF_HIFIGAN enhancer** - -## 📤 Exporting to Onnx - -Use [onnx_export.py](https://github.com/svc-develop-team/so-vits-svc/blob/4.0/onnx_export.py) - -- Create a folder named `checkpoints` and open it -- Create a folder in the `checkpoints` folder as your project folder, naming it after your project, for example `aziplayer` -- Rename your model as `model.pth`, the configuration file as `config.json`, and place them in the `aziplayer` folder you just created -- Modify `"NyaruTaffy"` in `path = "NyaruTaffy"` in [onnx_export.py](https://github.com/svc-develop-team/so-vits-svc/blob/4.0/onnx_export.py) to your project name, `path = "aziplayer"` -- Run [onnx_export.py](https://github.com/svc-develop-team/so-vits-svc/blob/4.0/onnx_export.py) -- Wait for it to finish running. A `model.onnx` will be generated in your project folder, which is the exported model. - -### UI support for Onnx models - -- [MoeSS](https://github.com/NaruseMioShirakana/MoeSS) - - [Hubert4.0](https://huggingface.co/NaruseMioShirakana/MoeSS-SUBModel) - -Note: For Hubert Onnx models, please use the models provided by MoeSS. Currently, they cannot be exported on their own (Hubert in fairseq has many unsupported operators and things involving constants that can cause errors or result in problems with the input/output shape and results when exported.) - -CppDataProcess are some functions to preprocess data used in MoeSS - -## ☀️ Previous contributors - -For some reason the author deleted the original repository. Because of the negligence of the organization members, the contributor list was cleared because all files were directly reuploaded to this repository at the beginning of the reconstruction of this repository. Now add a previous contributor list to README.md. - -*Some members have not listed according to their personal wishes.* - -
- MistEO
- XiaoMiku01
- しぐれ
- TomoGaSukunai
- Plachtaa
- zd小达
- 凍聲響世
      - -## 📚 Some legal provisions for reference - -#### Any country, region, organization, or individual using this project must comply with the following laws. - -#### 《民法典》 - -##### 第一千零一十九条 - -任何组织或者个人不得以丑化、污损,或者利用信息技术手段伪造等方式侵害他人的肖像权。未经肖像权人同意,不得制作、使用、公开肖像权人的肖像,但是法律另有规定的除外。未经肖像权人同意,肖像作品权利人不得以发表、复制、发行、出租、展览等方式使用或者公开肖像权人的肖像。对自然人声音的保护,参照适用肖像权保护的有关规定。 - -##### 第一千零二十四条 - -【名誉权】民事主体享有名誉权。任何组织或者个人不得以侮辱、诽谤等方式侵害他人的名誉权。 - -##### 第一千零二十七条 - -【作品侵害名誉权】行为人发表的文学、艺术作品以真人真事或者特定人为描述对象,含有侮辱、诽谤内容,侵害他人名誉权的,受害人有权依法请求该行为人承担民事责任。行为人发表的文学、艺术作品不以特定人为描述对象,仅其中的情节与该特定人的情况相似的,不承担民事责任。 - -#### 《[中华人民共和国宪法](http://www.gov.cn/guoqing/2018-03/22/content_5276318.htm)》 - -#### 《[中华人民共和国刑法](http://gongbao.court.gov.cn/Details/f8e30d0689b23f57bfc782d21035c3.html?sw=%E4%B8%AD%E5%8D%8E%E4%BA%BA%E6%B0%91%E5%85%B1%E5%92%8C%E5%9B%BD%E5%88%91%E6%B3%95)》 - -#### 《[中华人民共和国民法典](http://gongbao.court.gov.cn/Details/51eb6750b8361f79be8f90d09bc202.html)》 - -## 💪 Thanks to all contributors for their efforts - - - diff --git a/spaces/zhaiqi/qq/devices/device_8958.js b/spaces/zhaiqi/qq/devices/device_8958.js deleted file mode 100644 index 455ddb0108b70276949e6539926481590a98e0d9..0000000000000000000000000000000000000000 --- a/spaces/zhaiqi/qq/devices/device_8958.js +++ /dev/null @@ -1,344 +0,0 @@ -"use strict"; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.getApkInfo = exports.Platform = exports.Device = exports.generateFullDevice = exports.generateShortDevice = void 0; -const crypto_1 = require("crypto"); -const constants_1 = require("./constants"); -const axios_1 = __importDefault(require("axios")); -const algo_1 = require("./algo"); -function generateImei() { - let imei = `86${(0, constants_1.randomString)(12, '0123456789')}`; - function calcSP(imei) { - let sum = 0; - for (let i = 0; i < imei.length; ++i) { - if (i % 2) { - let j = parseInt(imei[i]) * 2; - sum += j % 10 + Math.floor(j / 10); - } - else { - sum += parseInt(imei[i]); - } - } - return (100 - sum) % 10; - } - return imei + calcSP(imei); -} -/** 生成短设备信息 */ -function generateShortDevice() { - const randstr = (length, num = false) => { - const map = num ? 
'0123456789' : '0123456789abcdef'; - return (0, constants_1.randomString)(length, map); - }; - return { - "--begin--": "该设备为随机生成,丢失后不能得到原先配置", - product: `ILPP-${randstr(5).toUpperCase()}`, - device: `${randstr(5).toUpperCase()}`, - board: `${randstr(5).toUpperCase()}`, - brand: `${randstr(4).toUpperCase()}`, - model: `ICQQ ${randstr(4).toUpperCase()}`, - wifi_ssid: `HUAWEI-${randstr(7)}`, - bootloader: `U-boot`, - android_id: `IL.${randstr(7, true)}.${randstr(4, true)}`, - boot_id: `${randstr(8)}-${randstr(4)}-${randstr(4)}-${randstr(4)}-${randstr(12)}`, - proc_version: `Linux version 5.10.101-android12-${randstr(8)}`, - mac_address: `2D:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}`, - ip_address: `192.168.${randstr(2, true)}.${randstr(2, true)}`, - imei: `${generateImei()}`, - incremental: `${randstr(10, true).toUpperCase()}`, - "--end--": "修改后可能需要重新验证设备。" - }; -} -exports.generateShortDevice = generateShortDevice; -/** 生成完整设备信息 */ -function generateFullDevice(apk, d) { - if (!d) - d = generateShortDevice(); - return { - display: d.android_id, - product: d.product, - device: d.device, - board: d.board, - brand: d.brand, - model: d.model, - bootloader: d.bootloader, - fingerprint: `${d.brand}/${d.product}/${d.device}:10/${d.android_id}/${d.incremental}:user/release-keys`, - boot_id: d.boot_id, - proc_version: d.proc_version, - baseband: "", - sim: "T-Mobile", - os_type: "android", - mac_address: d.mac_address, - ip_address: d.ip_address, - wifi_bssid: d.mac_address, - wifi_ssid: d.wifi_ssid, - imei: d.imei, - android_id: (0, constants_1.md5)(d.android_id).toString("hex"), - apn: "wifi", - version: { - incremental: d.incremental, - release: "10", - codename: "REL", - sdk: 29, - }, - imsi: (0, crypto_1.randomBytes)(16), - guid: (0, constants_1.md5)(Buffer.concat([Buffer.from(d.imei), Buffer.from(d.mac_address)])), - }; -} -exports.generateFullDevice = generateFullDevice; -class Device { - constructor(apk, d) { - this.apk = apk; - this.secret = 'ZdJqM15EeO2zWc08'; - this.publicKey = `-----BEGIN PUBLIC KEY----- -MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDEIxgwoutfwoJxcGQeedgP7FG9 -qaIuS0qzfR8gWkrkTZKM2iWHn2ajQpBRZjMSoSf6+KJGvar2ORhBfpDXyVtZCKpq -LQ+FLkpncClKVIrBwv6PHyUvuCb0rIarmgDnzkfQAqVufEtR64iazGDKatvJ9y6B -9NMbHddGSAUmRTCrHQIDAQAB ------END PUBLIC KEY-----`; - if (!d) - d = generateShortDevice(); - Object.assign(this, generateFullDevice(apk, d)); - } - async getQIMEI() { - if (this.apk.app_key === "") { - return; - } - const k = (0, constants_1.randomString)(16); - const key = (0, algo_1.encryptPKCS1)(this.publicKey, k); - const time = Date.now(); - const nonce = (0, constants_1.randomString)(16); - const payload = this.genRandomPayloadByDevice(); - const params = (0, algo_1.aesEncrypt)(JSON.stringify(payload), k).toString('base64'); - try { - const { data } = await axios_1.default.post("https://snowflake.qq.com/ola/android", { - key, - params, - time, nonce, - sign: (0, constants_1.md5)(key + params + time + nonce + this.secret).toString("hex"), - extra: '' - }, { - headers: { - 'User-Agent': `Dalvik/2.1.0 (Linux; U; Android ${this.version.release}; PCRT00 Build/N2G48H)`, - 'Content-Type': "application/json" - } - }); - if (data?.code !== 0) { - return; - } - const { q16, q36 } = JSON.parse((0, algo_1.aesDecrypt)(data.data, k)); - this.qImei16 = q16; - this.qImei36 = q36; - } - catch { - } - } - genRandomPayloadByDevice() { - const fixedRand = (max = 1, min = 0) => { - if (max < min) - [max, min] = 
[min, max]; - const diff = max - min; - return Math.floor(Math.random() * diff) + min; - }; - const reserved = { - "harmony": "0", - "clone": Math.random() > 0.5 ? "1" : "0", - "containe": "", - "oz": "", - "oo": "", - "kelong": Math.random() > 0.5 ? "1" : "0", - "uptimes": (0, constants_1.formatTime)(new Date()), - "multiUser": Math.random() > 0.5 ? "1" : "0", - "bod": this.board, - "brd": this.brand, - "dv": this.device, - "firstLevel": "", - "manufact": this.brand, - "name": this.model, - "host": "se.infra", - "kernel": this.fingerprint - }; - const timestamp = Date.now(); - this.mtime = this.mtime || Date.now(); - const mtime1 = new Date(this.mtime || Date.now()); - const dateFormat = (fmt, time = Date.now()) => (0, constants_1.formatTime)(time, fmt); - const mtimeStr1 = dateFormat("YYYY-mm-ddHHMMSS", mtime1) + "." + this.imei.slice(2, 11); - const mtime2 = new Date(this.mtime - parseInt(this.imei.slice(2, 4))); - const mtimeStr2 = dateFormat("YYYY-mm-ddHHMMSS", mtime2) + "." + this.imei.slice(5, 14); - let beaconIdArr = [ - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr1, - '0000000000000000', - (0, constants_1.md5)(this.android_id + this.imei).toString("hex").slice(0, 16), - ...new Array(4).fill(false).map((_) => fixedRand(10000000, 1000000)), - this.boot_id, - '1', - fixedRand(5, 0), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(50000, 10000), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr2, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((10 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(100, 10), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(5, 0), - ].map((str, idx) => `k${idx + 1}:${str}`); - return { - "androidId": this.android_id, - "platformId": 1, - "appKey": this.apk.app_key, - "appVersion": this.apk.version, - 
"beaconIdSrc": beaconIdArr.join(';'), - "brand": this.brand, - "channelId": "2017", - "cid": "", - "imei": this.imei, - "imsi": this.imsi.toString("hex"), - "mac": this.mac_address, - "model": this.model, - "networkType": "unknown", - "oaid": "", - "osVersion": `Android ${this.version.release},level ${this.version.sdk}`, - "qimei": "", - "qimei36": "", - "sdkVersion": "1.2.13.6", - "targetSdkVersion": "26", - "audit": "", - "userId": "{}", - "packageId": this.apk.id, - "deviceType": this.display, - "sdkName": "", - "reserved": JSON.stringify(reserved), - }; - } -} -exports.Device = Device; -/** 支持的登录设备平台 */ -var Platform; -(function (Platform) { - Platform[Platform["Android"] = 1] = "Android"; - Platform[Platform["aPad"] = 2] = "aPad"; - Platform[Platform["Watch"] = 3] = "Watch"; - Platform[Platform["iMac"] = 4] = "iMac"; - Platform[Platform["iPad"] = 5] = "iPad"; - Platform[Platform["Tim"] = 6] = "Tim"; -})(Platform = exports.Platform || (exports.Platform = {})); -const mobile = { - id: "com.tencent.mobileqq", - app_key: '0S200MNJT807V3GE', - name: "A8.9.58.11175", - version: "8.9.58.11175", - ver: "8.9.58", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1684467300, - appid: 16, - subid: 537163194, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2545", - display: "Android_8.9.58", - qua: 'V1_AND_SQ_8.9.58_4108_YYB_D', - ssover: 20, -}; -const tim = { - id: "com.tencent.tim", - app_key: '0S200MNJT807V3GE', - name: "A3.5.1.3168", - version: "3.5.1.3168", - ver: "3.5.1", - sign: Buffer.from('775e696d09856872fdd8ab4f3f06b1e0', 'hex'), - buildtime: 1630062176, - appid: 16, - subid: 537150355, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2484", - display: "Tim", - qua: "V1_AND_SQ_8.3.9_351_TIM_D", - ssover: 18, -}; -const watch = { - id: "com.tencent.qqlite", - app_key: '0S200MNJT807V3GE', - name: "A2.0.8", - version: "2.0.8", - ver: "2.0.8", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1559564731, - appid: 16, - subid: 537065138, - bitmap: 16252796, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2365", - display: "Watch", - qua: '', - ssover: 5 -}; -const hd = { - id: "com.tencent.minihd.qq", - app_key: '0S200MNJT807V3GE', - name: "A5.9.3.3468", - version: "5.9.3.3468", - ver: "5.9.3", - sign: Buffer.from('AA 39 78 F4 1F D9 6F F9 91 4A 66 9E 18 64 74 C7'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1637427966, - appid: 16, - subid: 537128930, - bitmap: 150470524, - main_sig_map: 1970400, - sub_sig_map: 66560, - sdkver: "6.0.0.2433", - display: "iMac", - qua: '', - ssover: 12 -}; -const apklist = { - [Platform.Android]: mobile, - [Platform.Tim]: tim, - [Platform.aPad]: { - ...mobile, - subid: 537163242, - display: 'aPad_8.9.58' - }, - [Platform.Watch]: watch, - [Platform.iMac]: { ...hd }, - [Platform.iPad]: { - ...mobile, - subid: 537155074, - sign: hd.sign, - name: '8.9.50.611', - ver: '8.9.50', - sdkver: '6.0.0.2535', - qua: 'V1_AND_SQ_8.9.50_3898_YYB_D', - display: 'iPad' - }, -}; -function getApkInfo(p) { - return apklist[p] || apklist[Platform.Android]; -} -exports.getApkInfo = getApkInfo; diff --git a/spaces/zhang-wei-jian/docker/node_modules/anymatch/index.js b/spaces/zhang-wei-jian/docker/node_modules/anymatch/index.js deleted file mode 100644 index 
8eb73e9c9a61d2c642d3c5f44a2fe4cd6cd04615..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/anymatch/index.js +++ /dev/null @@ -1,104 +0,0 @@ -'use strict'; - -Object.defineProperty(exports, "__esModule", { value: true }); - -const picomatch = require('picomatch'); -const normalizePath = require('normalize-path'); - -/** - * @typedef {(testString: string) => boolean} AnymatchFn - * @typedef {string|RegExp|AnymatchFn} AnymatchPattern - * @typedef {AnymatchPattern|AnymatchPattern[]} AnymatchMatcher - */ -const BANG = '!'; -const DEFAULT_OPTIONS = {returnIndex: false}; -const arrify = (item) => Array.isArray(item) ? item : [item]; - -/** - * @param {AnymatchPattern} matcher - * @param {object} options - * @returns {AnymatchFn} - */ -const createPattern = (matcher, options) => { - if (typeof matcher === 'function') { - return matcher; - } - if (typeof matcher === 'string') { - const glob = picomatch(matcher, options); - return (string) => matcher === string || glob(string); - } - if (matcher instanceof RegExp) { - return (string) => matcher.test(string); - } - return (string) => false; -}; - -/** - * @param {Array} patterns - * @param {Array} negPatterns - * @param {String|Array} args - * @param {Boolean} returnIndex - * @returns {boolean|number} - */ -const matchPatterns = (patterns, negPatterns, args, returnIndex) => { - const isList = Array.isArray(args); - const _path = isList ? args[0] : args; - if (!isList && typeof _path !== 'string') { - throw new TypeError('anymatch: second argument must be a string: got ' + - Object.prototype.toString.call(_path)) - } - const path = normalizePath(_path, false); - - for (let index = 0; index < negPatterns.length; index++) { - const nglob = negPatterns[index]; - if (nglob(path)) { - return returnIndex ? -1 : false; - } - } - - const applied = isList && [path].concat(args.slice(1)); - for (let index = 0; index < patterns.length; index++) { - const pattern = patterns[index]; - if (isList ? pattern(...applied) : pattern(path)) { - return returnIndex ? index : true; - } - } - - return returnIndex ? -1 : false; -}; - -/** - * @param {AnymatchMatcher} matchers - * @param {Array|string} testString - * @param {object} options - * @returns {boolean|number|Function} - */ -const anymatch = (matchers, testString, options = DEFAULT_OPTIONS) => { - if (matchers == null) { - throw new TypeError('anymatch: specify first argument'); - } - const opts = typeof options === 'boolean' ? {returnIndex: options} : options; - const returnIndex = opts.returnIndex || false; - - // Early cache for matchers. - const mtchers = arrify(matchers); - const negatedGlobs = mtchers - .filter(item => typeof item === 'string' && item.charAt(0) === BANG) - .map(item => item.slice(1)) - .map(item => picomatch(item, opts)); - const patterns = mtchers - .filter(item => typeof item !== 'string' || (typeof item === 'string' && item.charAt(0) !== BANG)) - .map(matcher => createPattern(matcher, opts)); - - if (testString == null) { - return (testString, ri = false) => { - const returnIndex = typeof ri === 'boolean' ? 
ri : false; - return matchPatterns(patterns, negatedGlobs, testString, returnIndex); - } - } - - return matchPatterns(patterns, negatedGlobs, testString, returnIndex); -}; - -anymatch.default = anymatch; -module.exports = anymatch; diff --git a/spaces/zlc99/M4Singer/tasks/tts/fs2.py b/spaces/zlc99/M4Singer/tasks/tts/fs2.py deleted file mode 100644 index 32fb54f5bda486ece04598673cff367f5d8844fa..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/tasks/tts/fs2.py +++ /dev/null @@ -1,512 +0,0 @@ -import matplotlib - -matplotlib.use('Agg') - -from utils import audio -import matplotlib.pyplot as plt -from data_gen.tts.data_gen_utils import get_pitch -from tasks.tts.fs2_utils import FastSpeechDataset -from utils.cwt import cwt2f0 -from utils.pl_utils import data_loader -import os -from multiprocessing.pool import Pool -from tqdm import tqdm -from modules.fastspeech.tts_modules import mel2ph_to_dur -from utils.hparams import hparams -from utils.plot import spec_to_figure, dur_to_figure, f0_to_figure -from utils.pitch_utils import denorm_f0 -from modules.fastspeech.fs2 import FastSpeech2 -from tasks.tts.tts import TtsTask -import torch -import torch.optim -import torch.utils.data -import torch.nn.functional as F -import utils -import torch.distributions -import numpy as np -from modules.commons.ssim import ssim - -class FastSpeech2Task(TtsTask): - def __init__(self): - super(FastSpeech2Task, self).__init__() - self.dataset_cls = FastSpeechDataset - self.mse_loss_fn = torch.nn.MSELoss() - mel_losses = hparams['mel_loss'].split("|") - self.loss_and_lambda = {} - for i, l in enumerate(mel_losses): - if l == '': - continue - if ':' in l: - l, lbd = l.split(":") - lbd = float(lbd) - else: - lbd = 1.0 - self.loss_and_lambda[l] = lbd - print("| Mel losses:", self.loss_and_lambda) - self.sil_ph = self.phone_encoder.sil_phonemes() - - @data_loader - def train_dataloader(self): - train_dataset = self.dataset_cls(hparams['train_set_name'], shuffle=True) - return self.build_dataloader(train_dataset, True, self.max_tokens, self.max_sentences, - endless=hparams['endless_ds']) - - @data_loader - def val_dataloader(self): - valid_dataset = self.dataset_cls(hparams['valid_set_name'], shuffle=False) - return self.build_dataloader(valid_dataset, False, self.max_eval_tokens, self.max_eval_sentences) - - @data_loader - def test_dataloader(self): - test_dataset = self.dataset_cls(hparams['test_set_name'], shuffle=False) - return self.build_dataloader(test_dataset, False, self.max_eval_tokens, - self.max_eval_sentences, batch_by_size=False) - - def build_tts_model(self): - self.model = FastSpeech2(self.phone_encoder) - - def build_model(self): - self.build_tts_model() - if hparams['load_ckpt'] != '': - self.load_ckpt(hparams['load_ckpt'], strict=True) - utils.print_arch(self.model) - return self.model - - def _training_step(self, sample, batch_idx, _): - loss_output = self.run_model(self.model, sample) - total_loss = sum([v for v in loss_output.values() if isinstance(v, torch.Tensor) and v.requires_grad]) - loss_output['batch_size'] = sample['txt_tokens'].size()[0] - return total_loss, loss_output - - def validation_step(self, sample, batch_idx): - outputs = {} - outputs['losses'] = {} - outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True) - outputs['total_loss'] = sum(outputs['losses'].values()) - outputs['nsamples'] = sample['nsamples'] - mel_out = self.model.out2mel(model_out['mel_out']) - outputs = utils.tensors_to_scalars(outputs) - # if sample['mels'].shape[0] == 
1: - # self.add_laplace_var(mel_out, sample['mels'], outputs) - if batch_idx < hparams['num_valid_plots']: - self.plot_mel(batch_idx, sample['mels'], mel_out) - self.plot_dur(batch_idx, sample, model_out) - if hparams['use_pitch_embed']: - self.plot_pitch(batch_idx, sample, model_out) - return outputs - - def _validation_end(self, outputs): - all_losses_meter = { - 'total_loss': utils.AvgrageMeter(), - } - for output in outputs: - n = output['nsamples'] - for k, v in output['losses'].items(): - if k not in all_losses_meter: - all_losses_meter[k] = utils.AvgrageMeter() - all_losses_meter[k].update(v, n) - all_losses_meter['total_loss'].update(output['total_loss'], n) - return {k: round(v.avg, 4) for k, v in all_losses_meter.items()} - - def run_model(self, model, sample, return_output=False): - txt_tokens = sample['txt_tokens'] # [B, T_t] - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - energy = sample['energy'] - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - sample['f0_cwt'] = f0 = model.cwt2f0_norm(cwt_spec, f0_mean, f0_std, mel2ph) - - output = model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, - ref_mels=target, f0=f0, uv=uv, energy=energy, infer=False) - - losses = {} - self.add_mel_loss(output['mel_out'], target, losses) - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - if hparams['use_energy_embed']: - self.add_energy_loss(output['energy_pred'], energy, losses) - if not return_output: - return losses - else: - return losses, output - - ############ - # losses - ############ - def add_mel_loss(self, mel_out, target, losses, postfix='', mel_mix_loss=None): - if mel_mix_loss is None: - for loss_name, lbd in self.loss_and_lambda.items(): - if 'l1' == loss_name: - l = self.l1_loss(mel_out, target) - elif 'mse' == loss_name: - raise NotImplementedError - elif 'ssim' == loss_name: - l = self.ssim_loss(mel_out, target) - elif 'gdl' == loss_name: - raise NotImplementedError - losses[f'{loss_name}{postfix}'] = l * lbd - else: - raise NotImplementedError - - def l1_loss(self, decoder_output, target): - # decoder_output : B x T x n_mel - # target : B x T x n_mel - l1_loss = F.l1_loss(decoder_output, target, reduction='none') - weights = self.weights_nonzero_speech(target) - l1_loss = (l1_loss * weights).sum() / weights.sum() - return l1_loss - - def ssim_loss(self, decoder_output, target, bias=6.0): - # decoder_output : B x T x n_mel - # target : B x T x n_mel - assert decoder_output.shape == target.shape - weights = self.weights_nonzero_speech(target) - decoder_output = decoder_output[:, None] + bias - target = target[:, None] + bias - ssim_loss = 1 - ssim(decoder_output, target, size_average=False) - ssim_loss = (ssim_loss * weights).sum() / weights.sum() - return ssim_loss - - def add_dur_loss(self, dur_pred, mel2ph, txt_tokens, losses=None): - """ - - :param dur_pred: [B, T], float, log scale - :param mel2ph: [B, T] - :param txt_tokens: [B, T] - :param losses: - :return: - """ - B, T = txt_tokens.shape - nonpadding = (txt_tokens != 0).float() - dur_gt = mel2ph_to_dur(mel2ph, T).float() * nonpadding - is_sil = torch.zeros_like(txt_tokens).bool() - for p in self.sil_ph: - is_sil = is_sil | (txt_tokens == self.phone_encoder.encode(p)[0]) - is_sil = 
is_sil.float() # [B, T_txt] - - # phone duration loss - if hparams['dur_loss'] == 'mse': - losses['pdur'] = F.mse_loss(dur_pred, (dur_gt + 1).log(), reduction='none') - losses['pdur'] = (losses['pdur'] * nonpadding).sum() / nonpadding.sum() - dur_pred = (dur_pred.exp() - 1).clamp(min=0) - elif hparams['dur_loss'] == 'mog': - return NotImplementedError - elif hparams['dur_loss'] == 'crf': - losses['pdur'] = -self.model.dur_predictor.crf( - dur_pred, dur_gt.long().clamp(min=0, max=31), mask=nonpadding > 0, reduction='mean') - losses['pdur'] = losses['pdur'] * hparams['lambda_ph_dur'] - - # use linear scale for sent and word duration - if hparams['lambda_word_dur'] > 0: - word_id = (is_sil.cumsum(-1) * (1 - is_sil)).long() - word_dur_p = dur_pred.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_pred)[:, 1:] - word_dur_g = dur_gt.new_zeros([B, word_id.max() + 1]).scatter_add(1, word_id, dur_gt)[:, 1:] - wdur_loss = F.mse_loss((word_dur_p + 1).log(), (word_dur_g + 1).log(), reduction='none') - word_nonpadding = (word_dur_g > 0).float() - wdur_loss = (wdur_loss * word_nonpadding).sum() / word_nonpadding.sum() - losses['wdur'] = wdur_loss * hparams['lambda_word_dur'] - if hparams['lambda_sent_dur'] > 0: - sent_dur_p = dur_pred.sum(-1) - sent_dur_g = dur_gt.sum(-1) - sdur_loss = F.mse_loss((sent_dur_p + 1).log(), (sent_dur_g + 1).log(), reduction='mean') - losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur'] - - def add_pitch_loss(self, output, sample, losses): - if hparams['pitch_type'] == 'ph': - nonpadding = (sample['txt_tokens'] != 0).float() - pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss - losses['f0'] = (pitch_loss_fn(output['pitch_pred'][:, :, 0], sample['f0'], - reduction='none') * nonpadding).sum() \ - / nonpadding.sum() * hparams['lambda_f0'] - return - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample['f0'] - uv = sample['uv'] - nonpadding = (mel2ph != 0).float() - if hparams['pitch_type'] == 'cwt': - cwt_spec = sample[f'cwt_spec'] - f0_mean = sample['f0_mean'] - f0_std = sample['f0_std'] - cwt_pred = output['cwt'][:, :, :10] - f0_mean_pred = output['f0_mean'] - f0_std_pred = output['f0_std'] - losses['C'] = self.cwt_loss(cwt_pred, cwt_spec) * hparams['lambda_f0'] - if hparams['use_uv']: - assert output['cwt'].shape[-1] == 11 - uv_pred = output['cwt'][:, :, -1] - losses['uv'] = (F.binary_cross_entropy_with_logits(uv_pred, uv, reduction='none') * nonpadding) \ - .sum() / nonpadding.sum() * hparams['lambda_uv'] - losses['f0_mean'] = F.l1_loss(f0_mean_pred, f0_mean) * hparams['lambda_f0'] - losses['f0_std'] = F.l1_loss(f0_std_pred, f0_std) * hparams['lambda_f0'] - if hparams['cwt_add_f0_loss']: - f0_cwt_ = self.model.cwt2f0_norm(cwt_pred, f0_mean_pred, f0_std_pred, mel2ph) - self.add_f0_loss(f0_cwt_[:, :, None], f0, uv, losses, nonpadding=nonpadding) - elif hparams['pitch_type'] == 'frame': - self.add_f0_loss(output['pitch_pred'], f0, uv, losses, nonpadding=nonpadding) - - def add_f0_loss(self, p_pred, f0, uv, losses, nonpadding): - assert p_pred[..., 0].shape == f0.shape - if hparams['use_uv']: - assert p_pred[..., 1].shape == uv.shape - losses['uv'] = (F.binary_cross_entropy_with_logits( - p_pred[:, :, 1], uv, reduction='none') * nonpadding).sum() \ - / nonpadding.sum() * hparams['lambda_uv'] - nonpadding = nonpadding * (uv == 0).float() - - f0_pred = p_pred[:, :, 0] - if hparams['pitch_loss'] in ['l1', 'l2']: - pitch_loss_fn = F.l1_loss if hparams['pitch_loss'] == 'l1' else F.mse_loss - losses['f0'] = (pitch_loss_fn(f0_pred, f0, 
reduction='none') * nonpadding).sum() \ - / nonpadding.sum() * hparams['lambda_f0'] - elif hparams['pitch_loss'] == 'ssim': - return NotImplementedError - - def cwt_loss(self, cwt_p, cwt_g): - if hparams['cwt_loss'] == 'l1': - return F.l1_loss(cwt_p, cwt_g) - if hparams['cwt_loss'] == 'l2': - return F.mse_loss(cwt_p, cwt_g) - if hparams['cwt_loss'] == 'ssim': - return self.ssim_loss(cwt_p, cwt_g, 20) - - def add_energy_loss(self, energy_pred, energy, losses): - nonpadding = (energy != 0).float() - loss = (F.mse_loss(energy_pred, energy, reduction='none') * nonpadding).sum() / nonpadding.sum() - loss = loss * hparams['lambda_energy'] - losses['e'] = loss - - - ############ - # validation plots - ############ - def plot_mel(self, batch_idx, spec, spec_out, name=None): - spec_cat = torch.cat([spec, spec_out], -1) - name = f'mel_{batch_idx}' if name is None else name - vmin = hparams['mel_vmin'] - vmax = hparams['mel_vmax'] - self.logger.experiment.add_figure(name, spec_to_figure(spec_cat[0], vmin, vmax), self.global_step) - - def plot_dur(self, batch_idx, sample, model_out): - T_txt = sample['txt_tokens'].shape[1] - dur_gt = mel2ph_to_dur(sample['mel2ph'], T_txt)[0] - dur_pred = self.model.dur_predictor.out2dur(model_out['dur']).float() - txt = self.phone_encoder.decode(sample['txt_tokens'][0].cpu().numpy()) - txt = txt.split(" ") - self.logger.experiment.add_figure( - f'dur_{batch_idx}', dur_to_figure(dur_gt, dur_pred, txt), self.global_step) - - def plot_pitch(self, batch_idx, sample, model_out): - f0 = sample['f0'] - if hparams['pitch_type'] == 'ph': - mel2ph = sample['mel2ph'] - f0 = self.expand_f0_ph(f0, mel2ph) - f0_pred = self.expand_f0_ph(model_out['pitch_pred'][:, :, 0], mel2ph) - self.logger.experiment.add_figure( - f'f0_{batch_idx}', f0_to_figure(f0[0], None, f0_pred[0]), self.global_step) - return - f0 = denorm_f0(f0, sample['uv'], hparams) - if hparams['pitch_type'] == 'cwt': - # cwt - cwt_out = model_out['cwt'] - cwt_spec = cwt_out[:, :, :10] - cwt = torch.cat([cwt_spec, sample['cwt_spec']], -1) - self.logger.experiment.add_figure(f'cwt_{batch_idx}', spec_to_figure(cwt[0]), self.global_step) - # f0 - f0_pred = cwt2f0(cwt_spec, model_out['f0_mean'], model_out['f0_std'], hparams['cwt_scales']) - if hparams['use_uv']: - assert cwt_out.shape[-1] == 11 - uv_pred = cwt_out[:, :, -1] > 0 - f0_pred[uv_pred > 0] = 0 - f0_cwt = denorm_f0(sample['f0_cwt'], sample['uv'], hparams) - self.logger.experiment.add_figure( - f'f0_{batch_idx}', f0_to_figure(f0[0], f0_cwt[0], f0_pred[0]), self.global_step) - elif hparams['pitch_type'] == 'frame': - # f0 - uv_pred = model_out['pitch_pred'][:, :, 1] > 0 - pitch_pred = denorm_f0(model_out['pitch_pred'][:, :, 0], uv_pred, hparams) - self.logger.experiment.add_figure( - f'f0_{batch_idx}', f0_to_figure(f0[0], None, pitch_pred[0]), self.global_step) - - ############ - # infer - ############ - def test_step(self, sample, batch_idx): - spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids') - txt_tokens = sample['txt_tokens'] - mel2ph, uv, f0 = None, None, None - ref_mels = None - if hparams['profile_infer']: - pass - else: - if hparams['use_gt_dur']: - mel2ph = sample['mel2ph'] - if hparams['use_gt_f0']: - f0 = sample['f0'] - uv = sample['uv'] - print('Here using gt f0!!') - if hparams.get('use_midi') is not None and hparams['use_midi']: - outputs = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True, - pitch_midi=sample['pitch_midi'], midi_dur=sample.get('midi_dur'), 
is_slur=sample.get('is_slur')) - else: - outputs = self.model( - txt_tokens, spk_embed=spk_embed, mel2ph=mel2ph, f0=f0, uv=uv, ref_mels=ref_mels, infer=True) - sample['outputs'] = self.model.out2mel(outputs['mel_out']) - sample['mel2ph_pred'] = outputs['mel2ph'] - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - sample['f0'] = self.pe(sample['mels'])['f0_denorm_pred'] # pe predict from GT mel - sample['f0_pred'] = self.pe(sample['outputs'])['f0_denorm_pred'] # pe predict from Pred mel - else: - sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams) - sample['f0_pred'] = outputs.get('f0_denorm') - - return self.after_infer(sample) - - def after_infer(self, predictions): - if self.saving_result_pool is None and not hparams['profile_infer']: - self.saving_result_pool = Pool(min(int(os.getenv('N_PROC', os.cpu_count())), 16)) - self.saving_results_futures = [] - predictions = utils.unpack_dict_to_list(predictions) - t = tqdm(predictions) - for num_predictions, prediction in enumerate(t): - for k, v in prediction.items(): - if type(v) is torch.Tensor: - prediction[k] = v.cpu().numpy() - - item_name = prediction.get('item_name') - text = prediction.get('text').replace(":", "%3A")[:80] - - # remove paddings - mel_gt = prediction["mels"] - mel_gt_mask = np.abs(mel_gt).sum(-1) > 0 - mel_gt = mel_gt[mel_gt_mask] - mel2ph_gt = prediction.get("mel2ph") - mel2ph_gt = mel2ph_gt[mel_gt_mask] if mel2ph_gt is not None else None - mel_pred = prediction["outputs"] - mel_pred_mask = np.abs(mel_pred).sum(-1) > 0 - mel_pred = mel_pred[mel_pred_mask] - mel_gt = np.clip(mel_gt, hparams['mel_vmin'], hparams['mel_vmax']) - mel_pred = np.clip(mel_pred, hparams['mel_vmin'], hparams['mel_vmax']) - - mel2ph_pred = prediction.get("mel2ph_pred") - if mel2ph_pred is not None: - if len(mel2ph_pred) > len(mel_pred_mask): - mel2ph_pred = mel2ph_pred[:len(mel_pred_mask)] - mel2ph_pred = mel2ph_pred[mel_pred_mask] - - f0_gt = prediction.get("f0") - f0_pred = prediction.get("f0_pred") - if f0_pred is not None: - f0_gt = f0_gt[mel_gt_mask] - if len(f0_pred) > len(mel_pred_mask): - f0_pred = f0_pred[:len(mel_pred_mask)] - f0_pred = f0_pred[mel_pred_mask] - - str_phs = None - if self.phone_encoder is not None and 'txt_tokens' in prediction: - str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True) - gen_dir = os.path.join(hparams['work_dir'], - f'generated_{self.trainer.global_step}_{hparams["gen_dir_name"]}') - wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred) - if not hparams['profile_infer']: - os.makedirs(gen_dir, exist_ok=True) - os.makedirs(f'{gen_dir}/wavs', exist_ok=True) - os.makedirs(f'{gen_dir}/plot', exist_ok=True) - os.makedirs(os.path.join(hparams['work_dir'], 'P_mels_npy'), exist_ok=True) - os.makedirs(os.path.join(hparams['work_dir'], 'G_mels_npy'), exist_ok=True) - self.saving_results_futures.append( - self.saving_result_pool.apply_async(self.save_result, args=[ - wav_pred, mel_pred, 'P', item_name, text, gen_dir, str_phs, mel2ph_pred, f0_gt, f0_pred])) - - if mel_gt is not None and hparams['save_gt']: - wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) - self.saving_results_futures.append( - self.saving_result_pool.apply_async(self.save_result, args=[ - wav_gt, mel_gt, 'G', item_name, text, gen_dir, str_phs, mel2ph_gt, f0_gt, f0_pred])) - if hparams['save_f0']: - import matplotlib.pyplot as plt - # f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams) - f0_pred_ = f0_pred - f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams) - fig = plt.figure() - 
plt.plot(f0_pred_, label=r'$f0_P$') - plt.plot(f0_gt_, label=r'$f0_G$') - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - # f0_midi = prediction.get("f0_midi") - # f0_midi = f0_midi[mel_gt_mask] - # plt.plot(f0_midi, label=r'$f0_M$') - pass - plt.legend() - plt.tight_layout() - plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png') - plt.close(fig) - - t.set_description( - f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}") - else: - if 'gen_wav_time' not in self.stats: - self.stats['gen_wav_time'] = 0 - self.stats['gen_wav_time'] += len(wav_pred) / hparams['audio_sample_rate'] - print('gen_wav_time: ', self.stats['gen_wav_time']) - - return {} - - @staticmethod - def save_result(wav_out, mel, prefix, item_name, text, gen_dir, str_phs=None, mel2ph=None, gt_f0=None, pred_f0=None): - item_name = item_name.replace('/', '-') - base_fn = f'[{item_name}][{prefix}]' - - if text is not None: - base_fn += text - base_fn += ('-' + hparams['exp_name']) - np.save(os.path.join(hparams['work_dir'], f'{prefix}_mels_npy', item_name), mel) - audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'], - norm=hparams['out_wav_norm']) - fig = plt.figure(figsize=(14, 10)) - spec_vmin = hparams['mel_vmin'] - spec_vmax = hparams['mel_vmax'] - heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax) - fig.colorbar(heatmap) - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - gt_f0 = (gt_f0 - 100) / (800 - 100) * 80 * (gt_f0 > 0) - pred_f0 = (pred_f0 - 100) / (800 - 100) * 80 * (pred_f0 > 0) - plt.plot(pred_f0, c='white', linewidth=1, alpha=0.6) - plt.plot(gt_f0, c='red', linewidth=1, alpha=0.6) - else: - f0, _ = get_pitch(wav_out, mel, hparams) - f0 = (f0 - 100) / (800 - 100) * 80 * (f0 > 0) - plt.plot(f0, c='white', linewidth=1, alpha=0.6) - if mel2ph is not None and str_phs is not None: - decoded_txt = str_phs.split(" ") - dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy() - dur = [0] + list(np.cumsum(dur)) - for i in range(len(dur) - 1): - shift = (i % 20) + 1 - plt.text(dur[i], shift, decoded_txt[i]) - plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black') - plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black', - alpha=1, linewidth=1) - plt.tight_layout() - plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png', dpi=1000) - plt.close(fig) - - ############## - # utils - ############## - @staticmethod - def expand_f0_ph(f0, mel2ph): - f0 = denorm_f0(f0, None, hparams) - f0 = F.pad(f0, [1, 0]) - f0 = torch.gather(f0, 1, mel2ph) # [B, T_mel] - return f0 - - -if __name__ == '__main__': - FastSpeech2Task.start() diff --git a/spaces/zzz666/ChuanhuChatGPT/app.py b/spaces/zzz666/ChuanhuChatGPT/app.py deleted file mode 100644 index 65017af139e7ca831068f163ca4003a6a84a5acb..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/app.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding:utf-8 -*- -import os -import logging -import sys - -import gradio as gr - -from modules.utils import * -from modules.presets import * -from modules.overwrites import * -from modules.chat_func import * -from modules.openai_func import get_usage - -logging.basicConfig( - level=logging.DEBUG, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -my_api_key = "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" # 在这里输入你的 API 密钥 - -# if we are running in Docker -if os.environ.get("dockerrun") == "yes": - dockerflag = 
True -else: - dockerflag = False - -authflag = False - -if dockerflag: - my_api_key = os.environ.get("my_api_key") - if my_api_key == "empty": - logging.error("Please give a api key!") - sys.exit(1) - # auth - username = os.environ.get("USERNAME") - password = os.environ.get("PASSWORD") - if not (isinstance(username, type(None)) or isinstance(password, type(None))): - authflag = True -else: - if ( - not my_api_key - and os.path.exists("api_key.txt") - and os.path.getsize("api_key.txt") - ): - with open("api_key.txt", "r") as f: - my_api_key = f.read().strip() - if os.path.exists("auth.json"): - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - username = auth["username"] - password = auth["password"] - if username != "" and password != "": - authflag = True - -gr.Chatbot.postprocess = postprocess -PromptHelper.compact_text_chunks = compact_text_chunks - -with open("assets/custom.css", "r", encoding="utf-8") as f: - customCSS = f.read() - -with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo: - history = gr.State([]) - token_count = gr.State([]) - promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2)) - user_api_key = gr.State(my_api_key) - user_question = gr.State("") - outputing = gr.State(False) - topic = gr.State("未命名对话历史记录") - - with gr.Row(): - with gr.Column(scale=1): - gr.HTML(title) - with gr.Column(scale=4): - gr.HTML('
      Duplicate SpaceDuplicate the Space and run securely with your OpenAI API Key
      ') - with gr.Column(scale=4): - status_display = gr.Markdown(get_geoip(), elem_id="status_display") - - with gr.Row(scale=1).style(equal_height=True): - with gr.Column(scale=5): - with gr.Row(scale=1): - chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%") - with gr.Row(scale=1): - with gr.Column(scale=12): - user_input = gr.Textbox( - show_label=False, placeholder="在这里输入", interactive=True - ).style(container=False) - with gr.Column(min_width=70, scale=1): - submitBtn = gr.Button("发送", variant="primary") - cancelBtn = gr.Button("取消", variant="secondary", visible=False) - with gr.Row(scale=1): - emptyBtn = gr.Button( - "🧹 新的对话", - ) - retryBtn = gr.Button("🔄 重新生成") - delLastBtn = gr.Button("🗑️ 删除一条对话") - reduceTokenBtn = gr.Button("♻️ 总结对话") - - with gr.Column(): - with gr.Column(min_width=50, scale=1): - with gr.Tab(label="ChatGPT"): - keyTxt = gr.Textbox( - show_label=True, - placeholder=f"OpenAI API-key...", - value=hide_middle_chars(my_api_key), - type="password", - visible=not HIDE_MY_KEY, - label="API-Key", - ) - usageTxt = gr.Markdown(get_usage(my_api_key), elem_id="usage_display") - model_select_dropdown = gr.Dropdown( - label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0] - ) - use_streaming_checkbox = gr.Checkbox( - label="实时传输回答", value=True, visible=enable_streaming_option - ) - use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False) - language_select_dropdown = gr.Dropdown( - label="选择回复语言(针对搜索&索引功能)", - choices=REPLY_LANGUAGES, - multiselect=False, - value=REPLY_LANGUAGES[0], - ) - index_files = gr.Files(label="上传索引文件", type="file", multiple=True) - - with gr.Tab(label="Prompt"): - systemPromptTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入System Prompt...", - label="System prompt", - value=initial_prompt, - lines=10, - ).style(container=False) - with gr.Accordion(label="加载Prompt模板", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - templateFileSelectDropdown = gr.Dropdown( - label="选择Prompt模板集合文件", - choices=get_template_names(plain=True), - multiselect=False, - value=get_template_names(plain=True)[0], - ).style(container=False) - with gr.Column(scale=1): - templateRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(): - templateSelectDropdown = gr.Dropdown( - label="从Prompt模板中加载", - choices=load_template( - get_template_names(plain=True)[0], mode=1 - ), - multiselect=False, - value=load_template( - get_template_names(plain=True)[0], mode=1 - )[0], - ).style(container=False) - - with gr.Tab(label="保存/加载"): - with gr.Accordion(label="保存/加载对话历史记录", open=True): - with gr.Column(): - with gr.Row(): - with gr.Column(scale=6): - historyFileSelectDropdown = gr.Dropdown( - label="从列表中加载对话", - choices=get_history_names(plain=True), - multiselect=False, - value=get_history_names(plain=True)[0], - ) - with gr.Column(scale=1): - historyRefreshBtn = gr.Button("🔄 刷新") - with gr.Row(): - with gr.Column(scale=6): - saveFileName = gr.Textbox( - show_label=True, - placeholder=f"设置文件名: 默认为.json,可选为.md", - label="设置保存文件名", - value="对话历史记录", - ).style(container=True) - with gr.Column(scale=1): - saveHistoryBtn = gr.Button("💾 保存对话") - exportMarkdownBtn = gr.Button("📝 导出为Markdown") - gr.Markdown("默认保存于history文件夹") - with gr.Row(): - with gr.Column(): - downloadFile = gr.File(interactive=True) - - with gr.Tab(label="高级"): - gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置") - default_btn = gr.Button("🔙 恢复默认设置") - - with gr.Accordion("参数", open=False): - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - 
value=1.0, - step=0.05, - interactive=True, - label="Top-p", - ) - temperature = gr.Slider( - minimum=-0, - maximum=2.0, - value=1.0, - step=0.1, - interactive=True, - label="Temperature", - ) - - with gr.Accordion("网络设置", open=False): - apiurlTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入API地址...", - label="API地址", - value="https://api.openai.com/v1/chat/completions", - lines=2, - ) - changeAPIURLBtn = gr.Button("🔄 切换API地址") - proxyTxt = gr.Textbox( - show_label=True, - placeholder=f"在这里输入代理地址...", - label="代理地址(示例:http://127.0.0.1:10809)", - value="", - lines=2, - ) - changeProxyBtn = gr.Button("🔄 设置代理地址") - - gr.Markdown(description) - - chatgpt_predict_args = dict( - fn=predict, - inputs=[ - user_api_key, - systemPromptTxt, - history, - user_question, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - use_websearch_checkbox, - index_files, - language_select_dropdown, - ], - outputs=[chatbot, history, status_display, token_count], - show_progress=True, - ) - - start_outputing_args = dict( - fn=start_outputing, - inputs=[], - outputs=[submitBtn, cancelBtn], - show_progress=True, - ) - - end_outputing_args = dict( - fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn] - ) - - reset_textbox_args = dict( - fn=reset_textbox, inputs=[], outputs=[user_input] - ) - - transfer_input_args = dict( - fn=transfer_input, inputs=[user_input], outputs=[user_question, user_input], show_progress=True - ) - - get_usage_args = dict( - fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False - ) - - # Chatbot - cancelBtn.click(cancel_outputing, [], []) - - user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - user_input.submit(**get_usage_args) - - submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args) - submitBtn.click(**get_usage_args) - - emptyBtn.click( - reset_state, - outputs=[chatbot, history, token_count, status_display], - show_progress=True, - ) - emptyBtn.click(**reset_textbox_args) - - retryBtn.click(**reset_textbox_args) - retryBtn.click( - retry, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - use_streaming_checkbox, - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ) - retryBtn.click(**get_usage_args) - - delLastBtn.click( - delete_last_conversation, - [chatbot, history, token_count], - [chatbot, history, token_count, status_display], - show_progress=True, - ) - - reduceTokenBtn.click( - reduce_token_size, - [ - user_api_key, - systemPromptTxt, - history, - chatbot, - token_count, - top_p, - temperature, - gr.State(0), - model_select_dropdown, - language_select_dropdown, - ], - [chatbot, history, status_display, token_count], - show_progress=True, - ) - reduceTokenBtn.click(**get_usage_args) - - # ChatGPT - keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args) - - # Template - templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown]) - templateFileSelectDropdown.change( - load_template, - [templateFileSelectDropdown], - [promptTemplates, templateSelectDropdown], - show_progress=True, - ) - templateSelectDropdown.change( - get_template_content, - [promptTemplates, templateSelectDropdown, systemPromptTxt], - [systemPromptTxt], - show_progress=True, - ) - - # S&L - saveHistoryBtn.click( - save_chat_history, - [saveFileName, 
systemPromptTxt, history, chatbot], - downloadFile, - show_progress=True, - ) - saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown]) - exportMarkdownBtn.click( - export_markdown, - [saveFileName, systemPromptTxt, history, chatbot], - downloadFile, - show_progress=True, - ) - historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown]) - historyFileSelectDropdown.change( - load_chat_history, - [historyFileSelectDropdown, systemPromptTxt, history, chatbot], - [saveFileName, systemPromptTxt, history, chatbot], - show_progress=True, - ) - downloadFile.change( - load_chat_history, - [downloadFile, systemPromptTxt, history, chatbot], - [saveFileName, systemPromptTxt, history, chatbot], - ) - - # Advanced - default_btn.click( - reset_default, [], [apiurlTxt, proxyTxt, status_display], show_progress=True - ) - changeAPIURLBtn.click( - change_api_url, - [apiurlTxt], - [status_display], - show_progress=True, - ) - changeProxyBtn.click( - change_proxy, - [proxyTxt], - [status_display], - show_progress=True, - ) - -logging.info( - colorama.Back.GREEN - + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" - + colorama.Style.RESET_ALL -) -# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接 -demo.title = "川虎ChatGPT 🚀" - -if __name__ == "__main__": - reload_javascript() - # if running in Docker - if dockerflag: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - auth=(username, password), - favicon_path="./assets/favicon.ico", - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - server_name="0.0.0.0", - server_port=7860, - share=False, - favicon_path="./assets/favicon.ico", - ) - # if not running in Docker - else: - if authflag: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, - auth=(username, password), - favicon_path="./assets/favicon.ico", - inbrowser=True, - ) - else: - demo.queue(concurrency_count=CONCURRENT_COUNT).launch( - share=False, favicon_path="./assets/favicon.ico", inbrowser=True - ) # 改为 share=True 可以创建公开分享链接 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码 - # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理