` ().
-
- Example::
-
- num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
- term = na | num
-
- term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
- """
- return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
- """
- Helper parse action for removing quotation marks from parsed
- quoted strings.
-
- Example::
-
- # by default, quotation marks are included in parsed results
- quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
- # use remove_quotes to strip quotation marks from parsed results
- quoted_string.set_parse_action(remove_quotes)
- quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
- """
- return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
- """
- Helper to create a validating parse action to be used with start
- tags created with :class:`make_xml_tags` or
- :class:`make_html_tags`. Use ``with_attribute`` to qualify
- a starting tag with a required attribute value, to avoid false
- matches on common tags such as ``<TD>`` or ``<DIV>``.
-
- Call ``with_attribute`` with a series of attribute names and
- values. Specify the list of filter attribute names and values as:
-
- - keyword arguments, as in ``(align="right")``, or
- - as an explicit dict with ``**`` operator, when an attribute
- name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
- For attribute names with a namespace prefix, you must use the second
- form. Attribute names are matched case-insensitively.
-
- If just testing for ``class`` (with or without a namespace), use
- :class:`with_class`.
-
- To verify that the attribute exists, but without specifying a value,
- pass ``with_attribute.ANY_VALUE`` as the value.
-
- Example::
-
- html = '''
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
-
- '''
- div,div_end = make_html_tags("div")
-
- # only match div tag having a type attribute with value "grid"
- div_grid = div().set_parse_action(with_attribute(type="grid"))
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.search_string(html):
- print(grid_header.body)
-
- # construct a match with any div tag having a type attribute, regardless of the value
- div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.search_string(html):
- print(div_header.body)
-
- prints::
-
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- if args:
- attrs = args[:]
- else:
- attrs = attr_dict.items()
- attrs = [(k, v) for k, v in attrs]
-
- def pa(s, l, tokens):
- for attrName, attrValue in attrs:
- if attrName not in tokens:
- raise ParseException(s, l, "no matching attribute " + attrName)
- if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
- raise ParseException(
- s,
- l,
- "attribute {!r} has value {!r}, must be {!r}".format(
- attrName, tokens[attrName], attrValue
- ),
- )
-
- return pa
-
-
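-# Sentinel object: pass with_attribute.ANY_VALUE to require only that an attribute
-# be present; the parse action then skips the value comparison for that attribute.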
-with_attribute.ANY_VALUE = object()
-
-
-def with_class(classname, namespace=""):
- """
- Simplified version of :class:`with_attribute` when
- matching on a div class - made difficult because ``class`` is
- a reserved word in Python.
-
- Example::
-
- html = '''
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this <div> has no class</div>
- </div>
-
- '''
- div,div_end = make_html_tags("div")
- div_grid = div().set_parse_action(with_class("grid"))
-
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.search_string(html):
- print(grid_header.body)
-
- div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.search_string(html):
- print(div_header.body)
-
- prints::
-
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- classattr = "{}:class".format(namespace) if namespace else "class"
- return with_attribute(**{classattr: classname})
-
-
-# pre-PEP8 compatibility symbols
-replaceWith = replace_with
-removeQuotes = remove_quotes
-withAttribute = with_attribute
-withClass = with_class
-matchOnlyAtCol = match_only_at_col
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py
deleted file mode 100644
index a743b0b67d5ab664257040621d28c1b1b4451709..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_mmdet.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import unittest
-
-from detectron2.layers import ShapeSpec
-from detectron2.modeling.mmdet_wrapper import MMDetBackbone, MMDetDetector
-
-try:
- import mmdet.models # noqa
-
- HAS_MMDET = True
-except ImportError:
- HAS_MMDET = False
-
-
-@unittest.skipIf(not HAS_MMDET, "mmdet not available")
-class TestMMDetWrapper(unittest.TestCase):
- def test_backbone(self):
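- # Constructing the wrapped DetectoRS-R50 + FPN backbone is the test itself;
- # it passes if MMDetBackbone builds without raising.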
- MMDetBackbone(
- backbone=dict(
- type="DetectoRS_ResNet",
- conv_cfg=dict(type="ConvAWS"),
- sac=dict(type="SAC", use_deform=True),
- stage_with_sac=(False, True, True, True),
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type="BN", requires_grad=True),
- norm_eval=True,
- style="pytorch",
- ),
- neck=dict(
- type="FPN",
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- num_outs=5,
- ),
- # skip pretrained model for tests
- # pretrained_backbone="torchvision://resnet50",
- output_shapes=[ShapeSpec(channels=256, stride=s) for s in [4, 8, 16, 32, 64]],
- output_names=["p2", "p3", "p4", "p5", "p6"],
- )
-
- def test_detector(self):
- # a basic R50 Mask R-CNN
- MMDetDetector(
- detector=dict(
- type="MaskRCNN",
- backbone=dict(
- type="ResNet",
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type="BN", requires_grad=True),
- norm_eval=True,
- style="pytorch",
- # skip pretrained model for tests
- # init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'))
- ),
- neck=dict(
- type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
- ),
- rpn_head=dict(
- type="RPNHead",
- in_channels=256,
- feat_channels=256,
- anchor_generator=dict(
- type="AnchorGenerator",
- scales=[8],
- ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64],
- ),
- bbox_coder=dict(
- type="DeltaXYWHBBoxCoder",
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[1.0, 1.0, 1.0, 1.0],
- ),
- loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type="L1Loss", loss_weight=1.0),
- ),
- roi_head=dict(
- type="StandardRoIHead",
- bbox_roi_extractor=dict(
- type="SingleRoIExtractor",
- roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32],
- ),
- bbox_head=dict(
- type="Shared2FCBBoxHead",
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type="DeltaXYWHBBoxCoder",
- target_means=[0.0, 0.0, 0.0, 0.0],
- target_stds=[0.1, 0.1, 0.2, 0.2],
- ),
- reg_class_agnostic=False,
- loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type="L1Loss", loss_weight=1.0),
- ),
- mask_roi_extractor=dict(
- type="SingleRoIExtractor",
- roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32],
- ),
- mask_head=dict(
- type="FCNMaskHead",
- num_convs=4,
- in_channels=256,
- conv_out_channels=256,
- num_classes=80,
- loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
- ),
- ),
- # model training and testing settings
- train_cfg=dict(
- rpn=dict(
- assigner=dict(
- type="MaxIoUAssigner",
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- match_low_quality=True,
- ignore_iof_thr=-1,
- ),
- sampler=dict(
- type="RandomSampler",
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False,
- ),
- allowed_border=-1,
- pos_weight=-1,
- debug=False,
- ),
- rpn_proposal=dict(
- nms_pre=2000,
- max_per_img=1000,
- nms=dict(type="nms", iou_threshold=0.7),
- min_bbox_size=0,
- ),
- rcnn=dict(
- assigner=dict(
- type="MaxIoUAssigner",
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- match_low_quality=True,
- ignore_iof_thr=-1,
- ),
- sampler=dict(
- type="RandomSampler",
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True,
- ),
- mask_size=28,
- pos_weight=-1,
- debug=False,
- ),
- ),
- test_cfg=dict(
- rpn=dict(
- nms_pre=1000,
- max_per_img=1000,
- nms=dict(type="nms", iou_threshold=0.7),
- min_bbox_size=0,
- ),
- rcnn=dict(
- score_thr=0.05,
- nms=dict(type="nms", iou_threshold=0.5),
- max_per_img=100,
- mask_thr_binary=0.5,
- ),
- ),
- ),
- pixel_mean=[1, 2, 3],
- pixel_std=[1, 2, 3],
- )
diff --git a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md b/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md
deleted file mode 100644
index 613086a6182a5d6a502316e4d88c238abd397be8..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cmo Hacer Un Android Sin Verificacin.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
Forza Horizon 5 APK Descargar para Android sin verificación
-
Si eres un fan de los juegos de carreras, es posible que hayas oído hablar de Forza Horizon 5 , la última entrega de la popular serie Forza. Este juego te permite explorar los vibrantes y diversos paisajes del mundo abierto de México con cientos de los mejores coches del mundo. Pero lo que si desea jugar este juego en su dispositivo Android sin pasar por el proceso de verificación? En este artículo, le mostraremos cómo descargar Forza Horizon 5 APK para Android sin verificación, y cuáles son los pros y los contras de hacerlo.
-
¿Qué es Forza Horizon 5?
-
Una breve introducción al juego y sus características
-
Forza Horizon 5 es un juego de carreras desarrollado por Playground Games y publicado por Xbox Game Studios. Es la quinta entrada principal en la sub-serie Forza Horizon, que es un spin-off de la serie Forza Motorsport. El juego fue lanzado el 8 de noviembre de 2021, para Windows, Xbox One, Xbox Series X/S y Xbox Cloud Gaming.
-
Cómo hacer un Android sin verificación DOWNLOAD ►►► https://bltlly.com/2v6KZU
-
El juego presenta un mundo abierto dinámico y en constante evolución ambientado en México, donde puedes dirigir expediciones impresionantes a través de varios terrenos, como desiertos, selvas, ciudades, ruinas, playas, cañones y volcanes. También puedes participar en cientos de desafíos que te recompensan por participar en las actividades que te gustan, como las carreras, la deriva, el retraso en el crecimiento, la exploración y más. También puedes personalizar tu propio personaje, colección de coches, lista de reproducción de música y sitio del festival.
-
El juego también es compatible con los modos multijugador en línea, donde puede formar equipo con otros jugadores y entrar en el Horizon árcade para una serie de desafíos divertidos y exagerados. También puedes crear tus propios eventos y compartirlos con la comunidad. Además, el juego ofrece dos expansiones que añaden nuevos coches, pistas y modos de juego: Hot Wheels y Rally.
-
Los requisitos y disponibilidad del juego en diferentes plataformas
-
-
-OS: Windows 10 versión 15063.0 o superior
-Procesador: Intel i3-4170 @ 3.7Ghz o Intel i5-750 @ 2.67Ghz
-Memoria: 8 GB RAM
-Gráficos: NVidia GTX 650 Ti o AMD R7 250x
-DirectX: Versión 12
-Almacenamiento: 80 GB de espacio disponible
-
-
Para jugar a Forza Horizon 5 en Xbox One o Xbox Series X/S, necesitas una suscripción Xbox Live Gold o una suscripción Xbox Game Pass Ultimate. También puedes jugar en tu dispositivo Android a través de Xbox Cloud Gaming, que requiere un controlador compatible y una conexión a Internet estable.
-
Puedes comprar Forza Horizon 5 de varias fuentes, como Steam, Xbox o Uptodown. Sin embargo, si desea descargar un archivo APK para Forza Horizon 5 para su versión actualizada y compatible con su dispositivo y Android. Un archivo APK es una versión no oficial y no verificada de una aplicación Android que se descarga desde un sitio web de terceros o de origen. Un archivo APK puede ser inseguro, desactualizado o incompatible con su dispositivo o versión de Android.
-
¿Es legal descargar un archivo APK para Forza Horizon 5?
-
Depende de las leyes y regulaciones de su país o región. En algunos lugares, puede ser legal descargar un archivo APK para Forza Horizon 5 siempre y cuando usted es dueño de una copia legítima del juego en otra plataforma. En otros lugares, puede ser ilegal descargar un archivo APK para Forza Horizon 5, ya que puede violar los derechos de propiedad intelectual del desarrollador o editor del juego. Por lo tanto, debe comprobar el estado legal de la descarga de un archivo APK para Forza Horizon 5 en su ubicación antes de hacerlo.
-
¿Cómo puedo actualizar el archivo APK para Forza Horizon 5?
-
-
¿Cómo puedo desinstalar el archivo APK para Forza Horizon 5?
-
Para desinstalar el archivo APK para Forza Horizon 5 desde su dispositivo, debe seguir estos pasos:
-
-
-Ir a Configuración > Aplicaciones y encontrar Forza Horizon 5 en la lista de aplicaciones instaladas.
-Toque en Forza Horizon 5 y seleccione Desinstalar.
-Confirme su acción y espere a que se complete el proceso de desinstalación.
-
-
¿Dónde puedo encontrar más información sobre Forza Horizon 5?
-
Si quieres saber más sobre Forza Horizon 5, puedes visitar el sitio web oficial del juego, donde puedes encontrar noticias, trailers, capturas de pantalla, características y más. También puedes seguir las cuentas oficiales de redes sociales del juego, donde puedes obtener actualizaciones, consejos e interactuar con otros fans. También puedes ver vídeos de gameplay y reseñas en YouTube o Twitch, donde puedes ver cómo se ve el juego y cómo se juega en diferentes plataformas.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md b/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md
deleted file mode 100644
index fd79958eb5f31a83a09f23b5e128182bdca73378..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Chicos Stumble 2023 Apk.md
+++ /dev/null
@@ -1,75 +0,0 @@
-
-
Descargar Stumble Guys 2023 APK: Cómo unirse a la fiesta en su dispositivo Android
-
¿Te encanta jugar juegos de fiesta con tus amigos en línea? ¿Te gusta tropezar con diferentes niveles de caos y diversión? ¿Quieres experimentar el último juego knockout en tu dispositivo Android? Si respondiste sí a cualquiera de estas preguntas, entonces usted debe descargar Stumble Guys 2023 APK ahora mismo!
-
descargar chicos stumble 2023 apk DOWNLOAD ->>->>->> https://bltlly.com/2v6MIK
-
¿Qué es Stumble Guys?
-
Stumble Guys es un juego masivo de eliminación de fiesta multijugador con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo.
-
Un partido multijugador masivo juego knockout
-
Stumble Guys no es tu típico juego de battle royale. Es más como un juego de fiesta donde tienes que correr, correr, deslizarte y esquivar a tus oponentes y obstáculos que se aproximan. Tienes que sobrevivir tanto como puedas y ser el último en pie. También puedes formar equipo con tus amigos y jugar en diferentes modos como 4v4, capturar la bandera o rey de la colina.
-
Un diseño colorido y loco
-
Stumble Guys tiene un diseño colorido y loco que te hará sonreír y reír. El juego tiene un estilo de dibujos animados que es brillante y alegre. Los niveles son variados y creativos, desde islas tropicales hasta montañas nevadas. Los personajes son lindos y divertidos, con diferentes trajes y accesorios. También puedes personalizar tu propio personaje con diferentes pieles, sombreros, gafas, zapatos y más.
-
-
Un juego cómicamente físico
-
-
Muchas opciones de personalización
-
Stumble Guys tiene muchas opciones de personalización que te harán destacar entre la multitud. Puede elegir entre cientos de pieles, sombreros, gafas, zapatos y otros artículos para crear su propio carácter único. También puede recoger tarjetas y pegatinas para desbloquear más artículos y recompensas. También puede consultar la tienda web para ofertas exclusivas y ofertas que solo están disponibles en el sitio web oficial.
-
¿Por qué descargar Stumble Guys 2023 APK?
-
Si ya eres un fan de Stumble Guys, es posible que se pregunte por qué debe descargar Stumble Guys 2023 APK en lugar de solo actualizar el juego de la Google Play Store. Bueno, hay algunas buenas razones por las que deberías hacer eso.
-
La última versión del juego
-
Stumble Guys 2023 APK es la última versión del juego que ha sido lanzado en junio de 2023. Tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene todas las correcciones de errores y optimizaciones que se han hecho para garantizar un juego suave y estable.
-
Las nuevas características y mejoras
-
Stumble Guys 2023 APK tiene algunas nuevas características y mejoras que harán que su experiencia de juego aún mejor. Algunos de ellos son:
-
Un nuevo nivel llamado Stumble City que se inspira en el entorno urbano. Tienes que navegar por calles concurridas, rascacielos, subterráneos y parques mientras evitas autos, trenes, palomas y otros peligros.
-
Un nuevo modo llamado Stumble Royale que es un giro en el género clásico battle royale. Tienes que sobrevivir tanto como puedas en un mapa que se encoge mientras recoges armas, municiones y paquetes de salud. También puedes usar vehículos, trampas y explosivos para eliminar a tus enemigos.
-
-
Un nuevo sistema llamado Stumble Rewards que te da más incentivos para jugar y ganar. Puedes ganar monedas, gemas, tarjetas, pegatinas y otros objetos completando misiones diarias, desafíos semanales y eventos de temporada. También puede obtener recompensas de bonificación al ver anuncios, invitar a amigos o unirse al club web.
-
-
Ofertas y ofertas web exclusivas
-
Stumble Guys 2023 APK también tiene algunas ofertas web exclusivas y ofertas que solo se puede obtener mediante la descarga del juego desde el sitio web oficial. Algunos de ellos son:
-
-Un Stumble Pass gratuito que te da acceso a contenido y funciones premium por un tiempo limitado. Puedes desbloquear más niveles, modos, pieles, sombreros, gafas, zapatos y otros artículos jugando el juego y ganando estrellas. También puede actualizar al Stumble Pass Plus para obtener más beneficios y recompensas.
-Un descuento del 50% en el Stumble Bundle que le da una gran cantidad de monedas, gemas, tarjetas, pegatinas y otros artículos a un precio bajo. Puedes utilizarlos para comprar más pieles, sombreros, gafas, zapatos y otros artículos en la tienda web o en la tienda del juego. También puedes usarlos para desbloquear más niveles, modos, potenciadores y gadgets.
-Una oferta especial en el Stumble Club que le da una membresía de por vida para un pago único. Puede disfrutar de acceso ilimitado a todo el contenido premium y características del juego sin ningún tipo de anuncios o interrupciones. También puede obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad.
-
-
Cómo descargar Stumble Guys 2023 APK?
-
Si usted está convencido de que Stumble Guys 2023 APK es la mejor versión del juego para usted, es posible que se pregunte cómo descargarlo en su dispositivo Android. Bueno, no es muy difícil si sigues estos sencillos pasos:
-
Paso 1: Habilitar fuentes desconocidas en el dispositivo
-
-
-Ir a la configuración de su dispositivo y toque en la seguridad o la privacidad.
-Encontrar la opción que dice fuentes desconocidas o instalar aplicaciones desconocidas y alternar en.
-Confirme su elección tocando OK o Permitir.
-
-
Paso 2: Encontrar una fuente confiable para el archivo APK
-
El siguiente paso es encontrar una fuente confiable para el archivo APK de Stumble Guys 2023. Hay muchos sitios web que afirman ofrecer archivos APK para su descarga gratuita, pero no todos ellos son confiables o seguros. Algunos de ellos pueden contener malware o virus que pueden dañar tu dispositivo o robar tus datos. Para evitar esto:
-
Vaya al sitio web oficial de Stumble Guys en https://stumbleguys.com y busque el botón de descarga. Esta es la fuente más segura y confiable para el archivo APK de Stumble Guys 2023.
-
Alternativamente, puede utilizar un sitio web de terceros de confianza que ofrece archivos APK para descargar. Algunos de los populares son APKPure, APKMirror y APKMonk. Asegúrese de comprobar las calificaciones, reseñas y comentarios de los usuarios antes de descargar cualquier archivo APK de estos sitios web.
-
Evite cualquier sitio web que le pida que llene encuestas, ingrese su información personal o descargue aplicaciones o software adicionales antes de darle el archivo APK. Estos son generalmente estafas o intentos de phishing que pueden comprometer su seguridad y privacidad.
-
-
Paso 3: Descargar e instalar el archivo APK
-
Una vez que haya encontrado una fuente confiable para el archivo APK de Stumble Guys 2023, puede proceder a descargarlo e instalarlo en su dispositivo. Para hacer esto:
-
-Toque en el botón de descarga o enlace y esperar a que el archivo APK para ser descargado en su dispositivo. Puede comprobar el progreso de la descarga en la barra de notificaciones o en el navegador.
-Una vez que la descarga se ha completado, toque en el archivo APK o abrirlo con su administrador de archivos. Puede ver un mensaje de advertencia que dice que este tipo de archivo puede dañar su dispositivo. Ignórelo y toque Instalar de todos modos o Confiar.
-
-
-
Paso 4: Iniciar el juego y disfrutar de
-
Felicidades! Usted ha descargado con éxito e instalado Stumble Guys 2023 APK en su dispositivo Android. Ahora puede lanzar el juego y disfrutar de todas las nuevas características y mejoras que tiene para ofrecer. También puede unirse al club web y obtener actualizaciones exclusivas, noticias, consejos, trucos y secretos de los desarrolladores y la comunidad.
-
Conclusión
-
Stumble Guys es uno de los juegos de fiesta más divertidos y adictivos que puedes jugar en tu dispositivo Android. Se trata de un partido masivo multijugador knockout juego con hasta 32 jugadores en línea. Puedes unirte ronda tras ronda de caos creciente para tropezar a través de diferentes niveles hasta que un vencedor sea coronado. También puedes invitar a tus amigos y competir contra millones de jugadores de todo el mundo.
-
Si desea experimentar el último juego knockout en su dispositivo Android, usted debe descargar Stumble Guys 2023 APK ahora mismo. Es la última versión del juego que tiene todas las nuevas características y mejoras que se han añadido al juego desde su lanzamiento en agosto de 2020. También tiene algunas ofertas web exclusivas y ofertas que solo se pueden obtener mediante la descarga del juego desde el sitio web oficial.
-
Para descargar Stumble Guys 2023 APK, solo tienes que seguir estos sencillos pasos: habilitar fuentes desconocidas en su dispositivo, encontrar una fuente confiable para el archivo APK, descargar e instalar el archivo APK, y lanzar el juego y disfrutar. No es muy difícil si sigues estos pasos cuidadosamente.
-
Entonces, ¿qué estás esperando? Descargar Stumble Guys 2023 APK hoy y unirse a la fiesta en su dispositivo Android!
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre Stumble Guys 2023 APK:
-
Q: ¿Es Stumble Guys 2023 APK seguro para descargar?
-
-
Q: ¿Es Stumble Guys 2023 APK libre para jugar?
-
A: Sí, Stumble Guys 2023 APK es gratis para jugar. Puedes descargarlo e instalarlo en tu dispositivo sin pagar nada. Sin embargo, hay algunas compras en la aplicación y anuncios que pueden mejorar su experiencia de juego o apoyar a los desarrolladores.
-
Q: ¿Cuáles son los requisitos para Stumble Guys 2023 APK?
-
A: Para jugar Stumble Guys 2023 APK en su dispositivo, es necesario tener una versión de Android de 5.0 o superior y un mínimo de 100 MB de espacio de almacenamiento libre. También es necesario tener una conexión a Internet estable ya que el juego es solo en línea.
Q: ¿Cómo puedo actualizar Stumble Guys 2023 APK?
-
A: Para actualizar Stumble Guys 2023 APK, puede consultar el sitio web oficial o el sitio web de terceros donde descargó el archivo APK para cualquier versión nueva o actualizaciones. También puede habilitar la función de actualización automática en su dispositivo para recibir notificaciones de las actualizaciones automáticamente. Sin embargo, es posible que tenga que desinstalar y reinstalar el archivo APK cada vez que haya una actualización importante.
-
Q: ¿Cómo puedo contactar a los desarrolladores de Stumble Guys 2023 APK?
-
A: Si usted tiene alguna pregunta, comentarios, sugerencias, o problemas con respecto Stumble Guys 2023 APK, puede ponerse en contacto con los desarrolladores del juego enviando un correo electrónico a support@stumbleguys.com o visitando sus páginas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes unirte a su servidor de Discord o a la comunidad de Reddit para chatear con otros jugadores y obtener más información y consejos sobre el juego.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md b/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md
deleted file mode 100644
index 5e9dae7273f25b820a0e6f6e664075b69553acef..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Colegial Pelea Sin Sensor Apk.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
Descargar Colegio pelea sin sensor Apk: Una guía para los usuarios de Android
-
Si estás buscando un juego divertido y realista que te permita experimentar la vida de un estudiante universitario, entonces deberías probar College Brawl. Este juego es una simulación de la vida en el campus, donde puedes hacer amigos, enemigos, romance y drama. También puedes personalizar tu personaje, elegir tu especialidad, unirte a clubes y participar en varias actividades. Sin embargo, hay un inconveniente: tienes que lidiar con los problemas y conflictos que surgen en tu universidad. Tienes que luchar, negociar o cooperar con otros estudiantes para sobrevivir y tener éxito en tu vida académica y social.
-
College Brawl es un juego que no es para los débiles de corazón. Contiene temas maduros, violencia, blasfemia y desnudez. Si no se siente cómodo con estos elementos, entonces es posible que desee saltarse este juego. Sin embargo, si usted está buscando una versión más realista y sin censura de la vida universitaria, entonces es posible que desee descargar College Brawl No Sensor Apk. Esta es una versión modificada del juego original que elimina la censura y añade más características y contenido. Puedes disfrutar del juego sin restricciones ni limitaciones.
-
descargar colegial pelea sin sensor apk DOWNLOAD ✔ https://bltlly.com/2v6MG6
-
En este artículo, le mostraremos cómo descargar College Brawl No Sensor Apk de una fuente de confianza, cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Sigue estos pasos cuidadosamente y podrás experimentar el mejor juego de simulación de vida universitaria.
-
Cómo descargar College Brawl No Sensor Apk de una fuente de confianza
-
-
Uno de los mejores sitios web que recomendamos para descargar College Brawl No Sensor Apk es [Bungdus.com]( 1 ). Este sitio web es conocido por proporcionar juegos y aplicaciones de alta calidad para usuarios de Android. Tiene una gran colección de juegos y aplicaciones que son probados y verificados por su equipo de expertos. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.
-
Para descargar College Brawl No Sensor Apk de [Bungdus.com]( 1 ), siga estos pasos:
-
-Abra su navegador web y vaya a [Bungdus.com]( 1 ).
-En la página de inicio, escriba "Pelea de la universidad" en el cuadro de búsqueda y pulse enter.
-De los resultados de la búsqueda, haga clic en el enlace que dice "Descargar College Brawl Mod Apk Nosensor Terbaru 2023".
-En la siguiente página, desplácese hacia abajo hasta que vea un botón verde que dice "Descargar ahora". Haga clic en él.
-Se abrirá una nueva pestaña con un temporizador de cuenta atrás. Espere unos segundos hasta que el temporizador llegue a cero.
-Haga clic en el botón que dice "Descargar archivo" para comenzar a descargar el archivo apk.
-Guarde el archivo apk en su ubicación preferida en su dispositivo.
-
-
Cómo instalar College Brawl No Sensor Apk en su dispositivo Android
- Después de haber descargado College Brawl No Sensor Apk de [Bungdus.com], es necesario instalarlo en su dispositivo Android. Sin embargo, antes de poder hacer eso, debe habilitar la instalación de aplicaciones de fuentes desconocidas en su dispositivo. Esto se debe a College Brawl No Sensor Apk no está disponible en el Google Play Store y se considera una aplicación de terceros. Por lo tanto, debe dar permiso a su dispositivo para instalarlo.
-
Para habilitar la instalación de aplicaciones de fuentes desconocidas en tu dispositivo Android, sigue estos pasos:
-
-Ir a la aplicación Configuración en su dispositivo y toque en Seguridad o Privacidad.
- Encontrar la opción que dice "Fuentes desconocidas" o "Instalar aplicaciones desconocidas" y alternar en.
-
-
-
Ahora, usted está listo para instalar College Brawl No Sensor Apk en su dispositivo. Para hacer eso, siga estos pasos:
-
-Busque el archivo apk que descargó de [Bungdus.com] y toque en él.
-Aparecerá un mensaje de confirmación. Toque en Instalar para iniciar el proceso de instalación.
-Espere unos minutos hasta que se complete la instalación.
-Toque en Abrir para iniciar el juego o Listo para salir del instalador.
-
-
Cómo jugar Colegio pelea sin sensor Apk y disfrutar de sus características
-
Felicidades! Usted ha instalado con éxito College Brawl No Sensor Apk en su dispositivo Android. Ahora, puedes jugar el juego y disfrutar de sus características. Aquí hay algunos consejos y trucos para ayudarte a empezar:
-
-
-Cuando inicies el juego por primera vez, se te pedirá que crees tu personaje. Puedes elegir tu género, nombre, apariencia y personalidad. También puedes personalizar tu ropa, accesorios y peinado.
-Después de crear tu personaje, serás llevado al menú principal. Aquí, usted puede optar por iniciar un nuevo juego, cargar un juego guardado, o acceder a la configuración. También puedes ver tus estadísticas, inventario, logros y amigos.
-Si empiezas un nuevo juego, se te pedirá que elijas tu especialidad. Puedes elegir entre diferentes campos de estudio, como artes, ciencias, negocios, ingeniería o derecho. Tu especialidad afectará tus clases, actividades y oportunidades profesionales.
-También se le pedirá que elija su dormitorio. Puede elegir entre diferentes tipos de dormitorios, como mixto, de un solo sexo, de lujo o barato. Su dormitorio afectará su comodidad, privacidad y vida social.
-Una vez que haya elegido su especialidad y dormitorio, comenzará su vida universitaria. Tendrá que equilibrar su vida académica, social y personal. Tendrás que asistir a clases, hacer tareas, tomar exámenes, unirte a clubes, hacer amigos, citas, fiestas, peleas y más.
-
-Puede explorar el campus e interactuar con varios personajes y objetos. También puede usar su teléfono para acceder a varias aplicaciones y características. Puede llamar o enviar mensajes de texto a otros personajes, revisar su correo electrónico o cuentas de redes sociales, jugar juegos o ver videos en línea.
-
-
Conclusión: Resumir los principales puntos y beneficios de la descarga de College Brawl No Sensor Apk
-
En conclusión, Colegio pelea sin sensor Apk es un juego que le permite experimentar la vida de un estudiante universitario de una manera realista y sin censura. Puedes crear tu propio personaje y personalizarlo según tus preferencias. Puede elegir su especialidad y dormitorio y dar forma a su vida académica y social. Puedes participar en varias actividades y eventos y tomar decisiones que afectarán tu futuro y tus relaciones. También puedes disfrutar del juego sin censura ni limitaciones.
-
Si desea descargar College Brawl No Sensor Apk gratis de una fuente de confianza, entonces usted debe visitar [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.
-
Esperamos que este artículo le ha ayudado a aprender cómo descargar College Brawl No Sensor Apk de [Bungdus.com], cómo instalarlo en su dispositivo Android, y cómo jugar y disfrutar de sus características. Si tiene alguna pregunta o comentario sobre este artículo o el juego o el sitio web, no dude en dejar un comentario a continuación. Nos encantaría saber de ti y ayudarte. ¡Gracias por leer y tener un gran día!
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre College Brawl No Sensor Apk:
-
¿Cuál es la diferencia entre Pelea de Colegio y Pelea de Colegio Sin Sensor?
-
-
College Brawl No Sensor es una versión modificada del juego que elimina la censura y añade más características y contenido. Es una versión más realista y sin censura de la vida universitaria. Puedes disfrutar del juego sin restricciones ni limitaciones. Por ejemplo, tiene escenas claras y detalladas, contenido completo y sin cortar, y funciones desbloqueadas.
-
¿Es seguro y legal descargar College Brawl No Sensor Apk?
-
College Brawl No Sensor Apk es seguro y legal para descargar, siempre y cuando se descarga desde una fuente de confianza como [Bungdus.com]. Este sitio web ofrece juegos y aplicaciones modded de alta calidad para usuarios de Android. Cuenta con un equipo de expertos que prueban y verifican los archivos apk antes de subirlos al sitio web. Puede descargar College Brawl No Sensor Apk desde este sitio web sin ningún tipo de preocupaciones o molestias.
-
Sin embargo, usted debe tener en cuenta que la descarga de College Brawl No Sensor Apk podría violar los términos y condiciones del desarrollador de juegos original. Por lo tanto, debe descargarlo y usarlo bajo su propio riesgo y discreción. No nos hacemos responsables de las consecuencias o daños que puedan producirse al descargar o usar College Brawl No Sensor Apk.
-
¿Cuáles son los requisitos mínimos para ejecutar College Brawl No Sensor Apk en su dispositivo Android?
-
Para ejecutar College Brawl No Sensor Apk en su dispositivo Android, es necesario tener los siguientes requisitos mínimos:
-
-Un dispositivo Android con la versión 4.4 o superior.
-Una conexión a Internet estable.
-Al menos 1 GB de espacio de almacenamiento libre.
-Al menos 2 GB de RAM.
-
-
Si su dispositivo cumple con estos requisitos, entonces usted debe ser capaz de ejecutar College Brawl No Sensor Apk sin problemas y sin ningún problema.
-
¿Cómo puedo actualizar College Brawl No Sensor Apk a la última versión?
-
-
Alternativamente, también puede buscar actualizaciones dentro del juego. Puede ir al menú de configuración y tocar el botón de actualización. Si hay una nueva actualización disponible, puedes descargarla directamente del juego e instalarla en tu dispositivo.
-
¿Cómo puedo contactar con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo?
-
Si desea ponerse en contacto con el desarrollador de College Brawl No Sensor Apk para obtener información o apoyo, puede hacerlo enviando un correo electrónico a [collegebrawlnosensor@gmail.com]. También puede visitar su sitio web oficial en [collegebrawlnosensor.com] o sus cuentas de redes sociales en Facebook, Twitter, Instagram o YouTube. También puedes dejar un comentario en [Bungdus.com] o en este artículo y trataremos de reenviarlo a ellos.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md b/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md
deleted file mode 100644
index 355c633be1c1c78a3a0778035303154f313cd9f1..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Dungeon Quest Mod Apk.md
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
Descargar Dungeon Quest Mod Apk y disfrutar de una aventura de RPG lleno de botín
-
Si estás buscando un juego de rol divertido y adictivo que puedas jugar sin conexión, entonces deberías probar Dungeon Quest. Este juego te llevará en un viaje épico para encontrar el mejor botín y derrotar a todos los enemigos en su camino. Y si usted quiere hacer su aventura aún más emocionante, se puede descargar Dungeon Quest mod apk y disfrutar de recursos ilimitados, compras gratis, y más. En este artículo, te diremos qué es Dungeon Quest, por qué deberías descargar su mod apk, cómo instalarlo y algunos consejos y trucos para jugarlo.
-
¿Qué es Dungeon Quest?
-
Dungeon Quest es un juego de rol de acción sin conexión que fue desarrollado por Shiny Box Games. Está disponible para dispositivos Android, iOS y Apple TV. Estas son algunas de las características de este juego:
-
Descargar Dungeon Quest mod apk DOWNLOAD ✪ https://bltlly.com/2v6JdX
-
Un juego de rol de acción sin conexión para todos
-
Puedes jugar a Dungeon Quest todo el tiempo que quieras sin contenido ni muros de pago. Usted no necesita una conexión a Internet para disfrutar de este juego, por lo que puede jugar en cualquier momento y en cualquier lugar. También puedes personalizar la apariencia, el equipo, las habilidades y los talentos de tu personaje para adaptarlos a tu estilo de juego.
-
Un juego con botín aleatorio, mazmorras generadas y jefes legendarios
-
En Dungeon Quest, nunca lucharás en la misma mazmorra dos veces. El juego tiene pisos generados aleatoriamente ilimitados que desafiarán tus habilidades y estrategia. También encontrarás increíbles botines aleatorios que puedes equipar y usar en combate. Y al final de cada acto, te enfrentarás a uno de los cuatro jefes legendarios que pondrán a prueba tu fuerza.
-
Un juego con tres clases, sistema de elaboración, sistema de habilidades y sistema de mascotas
-
-
¿Por qué descargar Dungeon Quest mod apk?
-
Dungeon Quest ya es un juego divertido y divertido, pero si quieres hacerlo aún mejor, puedes descargar su apk mod. Con este apk mod, puede obtener acceso a algunas características increíbles que harán que su juego más fácil y más emocionante. Estos son algunos de los beneficios de descargar Dungeon Quest mod apk:
-
Consigue oro y cristales ilimitados para mejorar tu equipo y habilidades
-
El oro y los cristales son las principales monedas en Dungeon Quest. Los necesitas para comprar objetos, mejorar tu equipo, desbloquear habilidades y mucho más. Con Dungeon Quest mod apk, puede obtener oro ilimitado y cristales que se puede utilizar tanto como quieras. Usted no tiene que preocuparse por quedarse sin recursos o moler por ellos.
-
Obtén compras gratuitas y acceso a artículos y características premium
-
Dungeon Quest tiene algunos elementos y características que requieren dinero real o compras en el juego. Por ejemplo, puedes comprar mascotas premium, disfraces, espacios de inventario y más. Con Dungeon Quest mod apk, usted puede obtener compras gratuitas y el acceso a todos los artículos premium y características sin gastar dinero. Puedes disfrutar de la experiencia completa del juego sin limitaciones.
-
Obtén resistencia y salud ilimitadas para sobrevivir más tiempo en batallas
-
La resistencia y la salud son vitales para tu supervivencia en Dungeon Quest. Necesitas resistencia para usar tus habilidades y habilidades, y necesitas salud para soportar el daño de los enemigos. Con Dungeon Quest mod apk, puede obtener la resistencia y la salud ilimitada que nunca se agotará. Puedes usar tus habilidades tanto como quieras y recibir tanto daño como puedas sin morir.
-
Cómo descargar e instalar Dungeon Quest mod apk?
-
Descargar e instalar Dungeon Quest mod apk es muy fácil y simple. Solo tienes que seguir estos pasos:
-
Descargar el archivo apk mod de una fuente de confianza
-
-
Descargar Dungeon Quest mod apk aquí
-
-
Habilitar fuentes desconocidas en la configuración del dispositivo
-
Lo siguiente que debe hacer es permitir que su dispositivo instale aplicaciones de fuentes desconocidas. Esto es porque Dungeon Quest mod apk no es de la tienda oficial de Google Play o App Store. Para hacer esto, vaya a la configuración del dispositivo, luego a la seguridad y habilite fuentes desconocidas. Esto le permitirá instalar aplicaciones desde fuentes externas.
-
Instalar el archivo apk mod y lanzar el juego
-
Lo último que tienes que hacer es instalar el archivo apk mod que has descargado. Busque el archivo en el almacenamiento del dispositivo y, a continuación, toque en él para iniciar el proceso de instalación. Siga las instrucciones de la pantalla y espere a que termine la instalación. Una vez que se hace, se puede iniciar el juego y disfrutar de Dungeon Quest mod apk.
-
Consejos y trucos para jugar Dungeon Quest
-
Dungeon Quest es un juego divertido y adictivo, pero también puede ser desafiante y complejo. Para ayudarte a empezar y mejorar tu juego, aquí hay algunos consejos y trucos que puedes usar:
-
Prioriza la misión principal y completa misiones diarias para recompensas
-
La misión principal es la mejor manera de progresar en Dungeon Quest. Te guiará a través de los diferentes actos, mazmorras y jefes del juego. También te recompensará con oro, cristales, equipo y más. Siempre debes seguir la misión principal y completarla lo antes posible.
-
Las misiones diarias son otra gran manera de ganar recompensas en Dungeon Quest. Son tareas sencillas que puedes hacer todos los días, como matar a un cierto número de enemigos, usar cierta habilidad o encontrar un determinado objeto. Te recompensarán con oro, cristales, piedras mitológicas y más. Siempre debes revisar tus misiones diarias y completarlas antes de que expiren.
-
Enfócate en una clase y estudia a cada héroe para la mejor formación
-
-
También debes estudiar a cada héroe que pertenece a tu clase. Cada héroe tiene un rol y una habilidad diferentes que pueden afectar tu juego. Por ejemplo, algunos héroes son buenos para hacer daño, mientras que otros son buenos para curar o pulir. Deberías aprender cómo funciona cada héroe y cómo utilizarlo eficazmente en combate.
-
Únete a un gremio y usa mercenarios y mascotas para ayudarte en el combate
-
Dungeon Quest no es un juego en solitario. Puedes unirte a un gremio e interactuar con otros jugadores que comparten tu pasión por el juego. Puedes chatear con ellos, comerciar con ellos o ayudarlos en sus misiones. También puedes participar en eventos y competiciones de gremios para obtener más recompensas y diversión.
-
También puedes usar mercenarios y mascotas para ayudarte en el combate. Los mercenarios son otros héroes que puedes contratar por una tarifa para unirte a tu aventura. Lucharán junto a ti y utilizarán sus habilidades para ayudarte a derrotar a los enemigos. Las mascotas son criaturas lindas que puedes adoptar o comprar para seguirte. También lucharán contigo y te proporcionarán bonificaciones o efectos pasivos.
-
Progresa en la torre infinita y ponte a prueba con diferentes dificultades
-
Dungeon Quest tiene un modo de torre infinita que te permite subir a una torre sin fin de pisos generados aleatoriamente. Cada piso tiene diferentes enemigos, trampas, rompecabezas y recompensas. Cuanto más alto vayas, más difícil será, pero mejor será el botín. También puede elegir diferentes dificultades para desafiarse y ganar más recompensas. El modo torre infinita es una gran manera de poner a prueba tus habilidades y divertirse en Dungeon Quest.
-
Conclusión
-
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre Dungeon Quest y su mod apk:
-
Es Dungeon Quest mod apk seguro de usar?
-
Sí, Dungeon Quest mod apk es seguro de usar siempre y cuando se descarga desde una fuente de confianza. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, ya que pueden contener virus o malware que pueden dañar su dispositivo. También debe hacer una copia de seguridad de sus datos antes de instalar el apk mod, en caso de que algo salga mal.
-
¿Me prohibirán por usar Dungeon Quest mod apk?
-
No, usted no será prohibido para el uso de Dungeon Quest mod apk. Esto se debe a que Dungeon Quest es un juego fuera de línea que no requiere una conexión a Internet o una cuenta para jugar. Por lo tanto, no hay manera para los desarrolladores o los servidores de juegos para detectar o prohibir el uso de la apk mod. Puedes jugar sin preocupaciones.
-
¿Puedo jugar Dungeon Quest con mis amigos?
-
Sí, puedes jugar a Dungeon Quest con tus amigos. Aunque Dungeon Quest es un juego offline, tiene un modo multijugador que te permite jugar con otros jugadores online. Puedes unirte o crear una habitación e invitar a tus amigos a unirse a ti. También puedes chatear con ellos y cooperar con ellos en combate. Jugar a Dungeon Quest con tus amigos es muy divertido y gratificante.
-
¿Cómo puedo actualizar Dungeon Quest mod apk?
-
Para actualizar Dungeon Quest mod apk, es necesario descargar la última versión del archivo apk mod de la misma fuente que lo descargó de antes. A continuación, es necesario desinstalar la versión anterior de la apk mod e instalar el nuevo. También debe comprobar si la nueva versión del apk mod es compatible con su dispositivo y la versión del juego.
-
¿Cuáles son algunos otros juegos como Dungeon Quest?
-
Si te gusta Dungeon Quest, también te pueden gustar otros juegos similares. Algunos de estos juegos son:
-
-
-Eternium: Un juego clásico de hack-and-slash RPG con gráficos impresionantes, misiones impulsadas por historias y sistema de elaboración.
-Nonstop Knight: Un juego de rol casual con mazmorras sin fin, botín y mejoras.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts b/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts
deleted file mode 100644
index 575cf637bfef812c40905e35570ba3ca1a31b241..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat_new/src/lib/server/abortedGenerations.ts
+++ /dev/null
@@ -1,29 +0,0 @@
-// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
-
-import { setTimeout } from "node:timers/promises";
-import { collections } from "./database";
-
-let closed = false;
-process.on("SIGINT", () => {
- closed = true;
-});
-
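-// Maps conversationId -> createdAt of its abort record; refreshed by the polling loop below.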
-export let abortedGenerations: Map<string, Date> = new Map();
-
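-// Polls the aborted-generations collection once per second (until SIGINT sets `closed`) and rebuilds the map.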
-async function maintainAbortedGenerations() {
- while (!closed) {
- await setTimeout(1000);
-
- try {
- const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
-
- abortedGenerations = new Map(
- aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
- );
- } catch (err) {
- console.error(err);
- }
- }
-}
-
-maintainAbortedGenerations();
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py
deleted file mode 100644
index 966ebc0e37d6104a8e0e1fefe9dc526f39409ce2..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/locators.py
+++ /dev/null
@@ -1,1300 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2015 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-
-import gzip
-from io import BytesIO
-import json
-import logging
-import os
-import posixpath
-import re
-try:
- import threading
-except ImportError: # pragma: no cover
- import dummy_threading as threading
-import zlib
-
-from . import DistlibException
-from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
- queue, quote, unescape, build_opener,
- HTTPRedirectHandler as BaseRedirectHandler, text_type,
- Request, HTTPError, URLError)
-from .database import Distribution, DistributionPath, make_dist
-from .metadata import Metadata, MetadataInvalidError
-from .util import (cached_property, ensure_slash, split_filename, get_project_data,
- parse_requirement, parse_name_and_version, ServerProxy,
- normalize_name)
-from .version import get_scheme, UnsupportedVersionError
-from .wheel import Wheel, is_compatible
-
-logger = logging.getLogger(__name__)
-
-HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
-CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
-HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
-DEFAULT_INDEX = 'https://pypi.org/pypi'
-
-def get_all_distribution_names(url=None):
- """
- Return all distribution names known by an index.
- :param url: The URL of the index.
- :return: A list of all known distribution names.
- """
- if url is None:
- url = DEFAULT_INDEX
- client = ServerProxy(url, timeout=3.0)
- try:
- return client.list_packages()
- finally:
- client('close')()
-
-class RedirectHandler(BaseRedirectHandler):
- """
- A class to work around a bug in some Python 3.2.x releases.
- """
- # There's a bug in the base version for some 3.2.x
- # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
- # returns e.g. /abc, it bails because it says the scheme ''
- # is bogus, when actually it should use the request's
- # URL for the scheme. See Python issue #13696.
- def http_error_302(self, req, fp, code, msg, headers):
- # Some servers (incorrectly) return multiple Location headers
- # (so probably same goes for URI). Use first header.
- newurl = None
- for key in ('location', 'uri'):
- if key in headers:
- newurl = headers[key]
- break
- if newurl is None: # pragma: no cover
- return
- urlparts = urlparse(newurl)
- if urlparts.scheme == '':
- newurl = urljoin(req.get_full_url(), newurl)
- if hasattr(headers, 'replace_header'):
- headers.replace_header(key, newurl)
- else:
- headers[key] = newurl
- return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
- headers)
-
- http_error_301 = http_error_303 = http_error_307 = http_error_302
-
-class Locator(object):
- """
- A base class for locators - things that locate distributions.
- """
- source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
- binary_extensions = ('.egg', '.exe', '.whl')
- excluded_extensions = ('.pdf',)
-
- # A list of tags indicating which wheels you want to match. The default
- # value of None matches against the tags compatible with the running
- # Python. If you want to match other values, set wheel_tags on a locator
- # instance to a list of tuples (pyver, abi, arch) which you want to match.
- wheel_tags = None
-
- downloadable_extensions = source_extensions + ('.whl',)
-
- def __init__(self, scheme='default'):
- """
- Initialise an instance.
- :param scheme: Because locators look for most recent versions, they
- need to know the version scheme to use. This specifies
- the current PEP-recommended scheme - use ``'legacy'``
- if you need to support existing distributions on PyPI.
- """
- self._cache = {}
- self.scheme = scheme
- # Because of bugs in some of the handlers on some of the platforms,
- # we use our own opener rather than just using urlopen.
- self.opener = build_opener(RedirectHandler())
- # If get_project() is called from locate(), the matcher instance
- # is set from the requirement passed to locate(). See issue #18 for
- # why this can be useful to know.
- self.matcher = None
- self.errors = queue.Queue()
-
- def get_errors(self):
- """
- Return any errors which have occurred.
- """
- result = []
- while not self.errors.empty(): # pragma: no cover
- try:
- e = self.errors.get(False)
- result.append(e)
- except self.errors.Empty:
- continue
- self.errors.task_done()
- return result
-
- def clear_errors(self):
- """
- Clear any errors which may have been logged.
- """
- # Just get the errors and throw them away
- self.get_errors()
-
- def clear_cache(self):
- self._cache.clear()
-
- def _get_scheme(self):
- return self._scheme
-
- def _set_scheme(self, value):
- self._scheme = value
-
- scheme = property(_get_scheme, _set_scheme)
-
- def _get_project(self, name):
- """
- For a given project, get a dictionary mapping available versions to Distribution
- instances.
-
- This should be implemented in subclasses.
-
- If called from a locate() request, self.matcher will be set to a
- matcher for the requirement to satisfy, otherwise it will be None.
- """
- raise NotImplementedError('Please implement in the subclass')
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Please implement in the subclass')
-
- def get_project(self, name):
- """
- For a given project, get a dictionary mapping available versions to Distribution
- instances.
-
- This calls _get_project to do all the work, and just implements a caching layer on top.
- """
- if self._cache is None: # pragma: no cover
- result = self._get_project(name)
- elif name in self._cache:
- result = self._cache[name]
- else:
- self.clear_errors()
- result = self._get_project(name)
- self._cache[name] = result
- return result
-
- def score_url(self, url):
- """
- Give an url a score which can be used to choose preferred URLs
- for a given project release.
- """
- t = urlparse(url)
- basename = posixpath.basename(t.path)
- compatible = True
- is_wheel = basename.endswith('.whl')
- is_downloadable = basename.endswith(self.downloadable_extensions)
- if is_wheel:
- compatible = is_compatible(Wheel(basename), self.wheel_tags)
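- # prefer_url() keeps the URL with the larger tuple: https, pypi.org hosting,
- # a downloadable extension, wheels, compatible wheels, then basename as tie-break.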
- return (t.scheme == 'https', 'pypi.org' in t.netloc,
- is_downloadable, is_wheel, compatible, basename)
-
- def prefer_url(self, url1, url2):
- """
- Choose one of two URLs where both are candidates for distribution
- archives for the same version of a distribution (for example,
- .tar.gz vs. zip).
-
- The current implementation favours https:// URLs over http://, archives
- from PyPI over those from other locations, wheel compatibility (if a
- wheel) and then the archive name.
- """
- result = url2
- if url1:
- s1 = self.score_url(url1)
- s2 = self.score_url(url2)
- if s1 > s2:
- result = url1
- if result != url2:
- logger.debug('Not replacing %r with %r', url1, url2)
- else:
- logger.debug('Replacing %r with %r', url1, url2)
- return result
-
- def split_filename(self, filename, project_name):
- """
- Attempt to split a filename into project name, version and Python version.
- """
- return split_filename(filename, project_name)
-
- def convert_url_to_download_info(self, url, project_name):
- """
- See if a URL is a candidate for a download URL for a project (the URL
- has typically been scraped from an HTML page).
-
- If it is, a dictionary is returned with keys "name", "version",
- "filename" and "url"; otherwise, None is returned.
- """
- def same_project(name1, name2):
- return normalize_name(name1) == normalize_name(name2)
-
- result = None
- scheme, netloc, path, params, query, frag = urlparse(url)
- if frag.lower().startswith('egg='): # pragma: no cover
- logger.debug('%s: version hint in fragment: %r',
- project_name, frag)
- m = HASHER_HASH.match(frag)
- if m:
- algo, digest = m.groups()
- else:
- algo, digest = None, None
- origpath = path
- if path and path[-1] == '/': # pragma: no cover
- path = path[:-1]
- if path.endswith('.whl'):
- try:
- wheel = Wheel(path)
- if not is_compatible(wheel, self.wheel_tags):
- logger.debug('Wheel not compatible: %s', path)
- else:
- if project_name is None:
- include = True
- else:
- include = same_project(wheel.name, project_name)
- if include:
- result = {
- 'name': wheel.name,
- 'version': wheel.version,
- 'filename': wheel.filename,
- 'url': urlunparse((scheme, netloc, origpath,
- params, query, '')),
- 'python-version': ', '.join(
- ['.'.join(list(v[2:])) for v in wheel.pyver]),
- }
- except Exception as e: # pragma: no cover
- logger.warning('invalid path for wheel: %s', path)
- elif not path.endswith(self.downloadable_extensions): # pragma: no cover
- logger.debug('Not downloadable: %s', path)
- else: # downloadable extension
- path = filename = posixpath.basename(path)
- for ext in self.downloadable_extensions:
- if path.endswith(ext):
- path = path[:-len(ext)]
- t = self.split_filename(path, project_name)
- if not t: # pragma: no cover
- logger.debug('No match for project/version: %s', path)
- else:
- name, version, pyver = t
- if not project_name or same_project(project_name, name):
- result = {
- 'name': name,
- 'version': version,
- 'filename': filename,
- 'url': urlunparse((scheme, netloc, origpath,
- params, query, '')),
- #'packagetype': 'sdist',
- }
- if pyver: # pragma: no cover
- result['python-version'] = pyver
- break
- if result and algo:
- result['%s_digest' % algo] = digest
- return result
-
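-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# convert_url_to_download_info() strips any "#algo=digest" fragment and
-# splits the archive basename into name/version.  A simplified standalone
-# version; the fragment regex and the naive rpartition() below are
-# stand-ins for the module's HASHER_HASH and split_filename(), not the
-# real thing:
-import posixpath
-import re
-from urllib.parse import urlparse
-
-_FRAG_DIGEST = re.compile(r'^(\w+)=([0-9a-f]+)$')    # e.g. sha256=abcd1234
-
-def _basic_download_info(url):
-    t = urlparse(url)
-    basename = posixpath.basename(t.path)
-    if not basename.endswith('.tar.gz'):
-        return None
-    name, _, version = basename[:-len('.tar.gz')].rpartition('-')
-    info = {'name': name, 'version': version, 'filename': basename}
-    m = _FRAG_DIGEST.match(t.fragment)
-    if m:
-        info['%s_digest' % m.group(1)] = m.group(2)
-    return info
-
-if __name__ == '__main__':
-    u = 'https://files.example.org/foo-1.2.3.tar.gz#sha256=abcd1234'
-    print(_basic_download_info(u))
-    # {'name': 'foo', 'version': '1.2.3', 'filename': 'foo-1.2.3.tar.gz',
-    #  'sha256_digest': 'abcd1234'}
-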
- def _get_digest(self, info):
- """
- Get a digest from a dictionary by looking at a "digests" dictionary
- or keys of the form 'algo_digest'.
-
- Returns a 2-tuple (algo, digest) if found, else None. Currently
- looks only for SHA256, then MD5.
- """
- result = None
- if 'digests' in info:
- digests = info['digests']
- for algo in ('sha256', 'md5'):
- if algo in digests:
- result = (algo, digests[algo])
- break
- if not result:
- for algo in ('sha256', 'md5'):
- key = '%s_digest' % algo
- if key in info:
- result = (algo, info[key])
- break
- return result
-
- def _update_version_data(self, result, info):
- """
- Update a result dictionary (the final result from _get_project) with a
- dictionary for a specific version, which typically holds information
- gleaned from a filename or URL for an archive for the distribution.
- """
- name = info.pop('name')
- version = info.pop('version')
- if version in result:
- dist = result[version]
- md = dist.metadata
- else:
- dist = make_dist(name, version, scheme=self.scheme)
- md = dist.metadata
- dist.digest = digest = self._get_digest(info)
- url = info['url']
- result['digests'][url] = digest
- if md.source_url != info['url']:
- md.source_url = self.prefer_url(md.source_url, url)
- result['urls'].setdefault(version, set()).add(url)
- dist.locator = self
- result[version] = dist
-
- def locate(self, requirement, prereleases=False):
- """
- Find the most recent distribution which matches the given
- requirement.
-
- :param requirement: A requirement of the form 'foo (1.0)' or perhaps
- 'foo (>= 1.0, < 2.0, != 1.3)'
- :param prereleases: If ``True``, allow pre-release versions
- to be located. Otherwise, pre-release versions
- are not returned.
- :return: A :class:`Distribution` instance, or ``None`` if no such
- distribution could be located.
- """
- result = None
- r = parse_requirement(requirement)
- if r is None: # pragma: no cover
- raise DistlibException('Not a valid requirement: %r' % requirement)
- scheme = get_scheme(self.scheme)
- self.matcher = matcher = scheme.matcher(r.requirement)
- logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
- versions = self.get_project(r.name)
- if len(versions) > 2: # urls and digests keys are present
- # sometimes, versions are invalid
- slist = []
- vcls = matcher.version_class
- for k in versions:
- if k in ('urls', 'digests'):
- continue
- try:
- if not matcher.match(k):
- pass # logger.debug('%s did not match %r', matcher, k)
- else:
- if prereleases or not vcls(k).is_prerelease:
- slist.append(k)
- # else:
- # logger.debug('skipping pre-release '
- # 'version %s of %s', k, matcher.name)
- except Exception: # pragma: no cover
- logger.warning('error matching %s with %r', matcher, k)
- pass # slist.append(k)
- if len(slist) > 1:
- slist = sorted(slist, key=scheme.key)
- if slist:
- logger.debug('sorted list: %s', slist)
- version = slist[-1]
- result = versions[version]
- if result:
- if r.extras:
- result.extras = r.extras
- result.download_urls = versions.get('urls', {}).get(version, set())
- d = {}
- sd = versions.get('digests', {})
- for url in result.download_urls:
- if url in sd: # pragma: no cover
- d[url] = sd[url]
- result.digests = d
- self.matcher = None
- return result
-
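-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# locate() keeps only the versions accepted by scheme.matcher(...).match()
-# and sorts the survivors with scheme.key, as in the loop above.  That
-# filtering step on its own (assumes distlib is installed and that
-# get_scheme/matcher behave as they are used in this module):
-from distlib.version import get_scheme
-
-scheme = get_scheme('legacy')
-matcher = scheme.matcher('foo (>= 1.0, < 2.0)')
-candidates = ['0.9', '1.0', '1.2b1', '1.4', '2.0']
-matched = sorted((v for v in candidates if matcher.match(v)), key=scheme.key)
-print(matched[-1] if matched else None)    # highest acceptable version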
-
-class PyPIRPCLocator(Locator):
- """
- This locator uses XML-RPC to locate distributions. It therefore
- cannot be used with simple mirrors (that only mirror file content).
- """
- def __init__(self, url, **kwargs):
- """
- Initialise an instance.
-
- :param url: The URL to use for XML-RPC.
- :param kwargs: Passed to the superclass constructor.
- """
- super(PyPIRPCLocator, self).__init__(**kwargs)
- self.base_url = url
- self.client = ServerProxy(url, timeout=3.0)
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- return set(self.client.list_packages())
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- versions = self.client.package_releases(name, True)
- for v in versions:
- urls = self.client.release_urls(name, v)
- data = self.client.release_data(name, v)
- metadata = Metadata(scheme=self.scheme)
- metadata.name = data['name']
- metadata.version = data['version']
- metadata.license = data.get('license')
- metadata.keywords = data.get('keywords', [])
- metadata.summary = data.get('summary')
- dist = Distribution(metadata)
- if urls:
- info = urls[0]
- metadata.source_url = info['url']
- dist.digest = self._get_digest(info)
- dist.locator = self
- result[v] = dist
- for info in urls:
- url = info['url']
- digest = self._get_digest(info)
- result['urls'].setdefault(v, set()).add(url)
- result['digests'][url] = digest
- return result
-
-
-class PyPIJSONLocator(Locator):
- """
- This locator uses PyPI's JSON interface. It's very limited in functionality
- and probably not worth using.
- """
- def __init__(self, url, **kwargs):
- super(PyPIJSONLocator, self).__init__(**kwargs)
- self.base_url = ensure_slash(url)
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Not available from this locator')
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- url = urljoin(self.base_url, '%s/json' % quote(name))
- try:
- resp = self.opener.open(url)
- data = resp.read().decode() # for now
- d = json.loads(data)
- md = Metadata(scheme=self.scheme)
- data = d['info']
- md.name = data['name']
- md.version = data['version']
- md.license = data.get('license')
- md.keywords = data.get('keywords', [])
- md.summary = data.get('summary')
- dist = Distribution(md)
- dist.locator = self
- urls = d['urls']
- result[md.version] = dist
- for info in d['urls']:
- url = info['url']
- dist.download_urls.add(url)
- dist.digests[url] = self._get_digest(info)
- result['urls'].setdefault(md.version, set()).add(url)
- result['digests'][url] = self._get_digest(info)
- # Now get other releases
- for version, infos in d['releases'].items():
- if version == md.version:
- continue # already done
- omd = Metadata(scheme=self.scheme)
- omd.name = md.name
- omd.version = version
- odist = Distribution(omd)
- odist.locator = self
- result[version] = odist
- for info in infos:
- url = info['url']
- odist.download_urls.add(url)
- odist.digests[url] = self._get_digest(info)
- result['urls'].setdefault(version, set()).add(url)
- result['digests'][url] = self._get_digest(info)
-# for info in urls:
-# md.source_url = info['url']
-# dist.digest = self._get_digest(info)
-# dist.locator = self
-# for info in urls:
-# url = info['url']
-# result['urls'].setdefault(md.version, set()).add(url)
-# result['digests'][url] = self._get_digest(info)
- except Exception as e:
- self.errors.put(text_type(e))
- logger.exception('JSON fetch failed: %s', e)
- return result
-
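-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# PyPIJSONLocator requests "<base>/<name>/json" and reads name/version from
-# the "info" block plus the per-release file lists from "releases".  The
-# same request with just the standard library (needs network access; the
-# base URL below is PyPI's public JSON endpoint):
-import json
-from urllib.request import urlopen
-
-def latest_version(name, base='https://pypi.org/pypi/'):
-    with urlopen('%s%s/json' % (base, name), timeout=10) as resp:
-        d = json.loads(resp.read().decode('utf-8'))
-    return d['info']['version'], sorted(d['releases'])
-
-if __name__ == '__main__':
-    version, releases = latest_version('pip')
-    print(version, len(releases))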
-
-class Page(object):
- """
- This class represents a scraped HTML page.
- """
- # The following slightly hairy-looking regex just looks for the contents of
- # an anchor link, which has an attribute "href" either immediately preceded
- # or immediately followed by a "rel" attribute. The attribute values can be
- # declared with double quotes, single quotes or no quotes - which leads to
- # the length of the expression.
- _href = re.compile("""
-(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
-href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
-(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
-""", re.I | re.S | re.X)
- _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
-
- def __init__(self, data, url):
- """
- Initialise an instance with the Unicode page contents and the URL they
- came from.
- """
- self.data = data
- self.base_url = self.url = url
- m = self._base.search(self.data)
- if m:
- self.base_url = m.group(1)
-
- _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
-
- @cached_property
- def links(self):
- """
- Return the URLs of all the links on a page together with information
- about their "rel" attribute, for determining which ones to treat as
- downloads and which ones to queue for further scraping.
- """
- def clean(url):
- "Tidy up an URL."
- scheme, netloc, path, params, query, frag = urlparse(url)
- return urlunparse((scheme, netloc, quote(path),
- params, query, frag))
-
- result = set()
- for match in self._href.finditer(self.data):
- d = match.groupdict('')
- rel = (d['rel1'] or d['rel2'] or d['rel3'] or
- d['rel4'] or d['rel5'] or d['rel6'])
- url = d['url1'] or d['url2'] or d['url3']
- url = urljoin(self.base_url, url)
- url = unescape(url)
- url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
- result.add((url, rel))
- # We sort the result, hoping to bring the most recent versions
- # to the front
- result = sorted(result, key=lambda t: t[0], reverse=True)
- return result
-
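-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# Page.links applies the _href regex, joins every hit against base_url and
-# HTML-unescapes it.  A much-reduced standalone version; the simple regex
-# below is a stand-in for _href and only handles double-quoted hrefs:
-import re
-from html import unescape
-from urllib.parse import urljoin
-
-_SIMPLE_HREF = re.compile(r'<a\s+[^>]*href="([^"]*)"[^>]*>', re.I)
-
-def find_links(html, base_url):
-    return sorted({unescape(urljoin(base_url, m.group(1)))
-                   for m in _SIMPLE_HREF.finditer(html)})
-
-if __name__ == '__main__':
-    page = ('<a href="foo-1.0.tar.gz">foo-1.0.tar.gz</a> '
-            '<a href="../bar/">bar</a>')
-    print(find_links(page, 'https://pypi.org/simple/foo/'))
-    # ['https://pypi.org/simple/bar/',
-    #  'https://pypi.org/simple/foo/foo-1.0.tar.gz']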
-
-class SimpleScrapingLocator(Locator):
- """
- A locator which scrapes HTML pages to locate downloads for a distribution.
- This runs multiple threads to do the I/O; performance is at least as good
- as pip's PackageFinder, which works in an analogous fashion.
- """
-
- # These are used to deal with various Content-Encoding schemes.
- decoders = {
- 'deflate': zlib.decompress,
- 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
- 'none': lambda b: b,
- }
-
- def __init__(self, url, timeout=None, num_workers=10, **kwargs):
- """
- Initialise an instance.
- :param url: The root URL to use for scraping.
- :param timeout: The timeout, in seconds, to be applied to requests.
- This defaults to ``None`` (no timeout specified).
- :param num_workers: The number of worker threads to use for I/O.
- This defaults to 10.
- :param kwargs: Passed to the superclass.
- """
- super(SimpleScrapingLocator, self).__init__(**kwargs)
- self.base_url = ensure_slash(url)
- self.timeout = timeout
- self._page_cache = {}
- self._seen = set()
- self._to_fetch = queue.Queue()
- self._bad_hosts = set()
- self.skip_externals = False
- self.num_workers = num_workers
- self._lock = threading.RLock()
- # See issue #45: we need to be resilient when the locator is used
- # in a thread, e.g. with concurrent.futures. We can't use self._lock
- # as it is for coordinating our internal threads - the ones created
- # in _prepare_threads.
- self._gplock = threading.RLock()
- self.platform_check = False # See issue #112
-
- def _prepare_threads(self):
- """
- Threads are created only when get_project is called, and terminate
- before it returns. They are there primarily to parallelise I/O (i.e.
- fetching web pages).
- """
- self._threads = []
- for i in range(self.num_workers):
- t = threading.Thread(target=self._fetch)
- t.daemon = True
- t.start()
- self._threads.append(t)
-
- def _wait_threads(self):
- """
- Tell all the threads to terminate (by sending a sentinel value) and
- wait for them to do so.
- """
- # Note that you need two loops, since you can't say which
- # thread will get each sentinel
- for t in self._threads:
- self._to_fetch.put(None) # sentinel
- for t in self._threads:
- t.join()
- self._threads = []
-
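-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# _prepare_threads()/_fetch()/_wait_threads() are the classic pattern of a
-# work queue drained by daemon threads and shut down with one None sentinel
-# per worker.  The same skeleton in isolation, with the page fetch replaced
-# by a trivial placeholder so it stays self-contained:
-import queue
-import threading
-
-work = queue.Queue()
-results = []
-lock = threading.Lock()
-
-def worker():
-    while True:
-        item = work.get()
-        try:
-            if item is not None:
-                with lock:                 # results is shared between threads
-                    results.append(item.upper())
-        finally:
-            work.task_done()               # always, to avoid hangs
-        if item is None:
-            break
-
-threads = [threading.Thread(target=worker, daemon=True) for _ in range(4)]
-for t in threads:
-    t.start()
-for url in ('a', 'b', 'c'):
-    work.put(url)
-work.join()                                # wait for the real work items
-for t in threads:
-    work.put(None)                         # one sentinel per thread
-for t in threads:
-    t.join()
-print(sorted(results))                     # ['A', 'B', 'C']
-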
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- with self._gplock:
- self.result = result
- self.project_name = name
- url = urljoin(self.base_url, '%s/' % quote(name))
- self._seen.clear()
- self._page_cache.clear()
- self._prepare_threads()
- try:
- logger.debug('Queueing %s', url)
- self._to_fetch.put(url)
- self._to_fetch.join()
- finally:
- self._wait_threads()
- del self.result
- return result
-
- platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
- r'win(32|_amd64)|macosx_?\d+)\b', re.I)
-
- def _is_platform_dependent(self, url):
- """
- Does an URL refer to a platform-specific download?
- """
- return self.platform_dependent.search(url)
-
- def _process_download(self, url):
- """
- See if an URL is a suitable download for a project.
-
- If it is, register information in the result dictionary (for
- _get_project) about the specific version it's for.
-
- Note that the return value isn't actually used other than as a boolean
- value.
- """
- if self.platform_check and self._is_platform_dependent(url):
- info = None
- else:
- info = self.convert_url_to_download_info(url, self.project_name)
- logger.debug('process_download: %s -> %s', url, info)
- if info:
- with self._lock: # needed because self.result is shared
- self._update_version_data(self.result, info)
- return info
-
- def _should_queue(self, link, referrer, rel):
- """
- Determine whether a link URL from a referring page and with a
- particular "rel" attribute should be queued for scraping.
- """
- scheme, netloc, path, _, _, _ = urlparse(link)
- if path.endswith(self.source_extensions + self.binary_extensions +
- self.excluded_extensions):
- result = False
- elif self.skip_externals and not link.startswith(self.base_url):
- result = False
- elif not referrer.startswith(self.base_url):
- result = False
- elif rel not in ('homepage', 'download'):
- result = False
- elif scheme not in ('http', 'https', 'ftp'):
- result = False
- elif self._is_platform_dependent(link):
- result = False
- else:
- host = netloc.split(':', 1)[0]
- if host.lower() == 'localhost':
- result = False
- else:
- result = True
- logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
- referrer, result)
- return result
-
- def _fetch(self):
- """
- Get a URL to fetch from the work queue, get the HTML page, examine its
- links for download candidates and candidates for further scraping.
-
- This is a handy method to run in a thread.
- """
- while True:
- url = self._to_fetch.get()
- try:
- if url:
- page = self.get_page(url)
- if page is None: # e.g. after an error
- continue
- for link, rel in page.links:
- if link not in self._seen:
- try:
- self._seen.add(link)
- if (not self._process_download(link) and
- self._should_queue(link, url, rel)):
- logger.debug('Queueing %s from %s', link, url)
- self._to_fetch.put(link)
- except MetadataInvalidError: # e.g. invalid versions
- pass
- except Exception as e: # pragma: no cover
- self.errors.put(text_type(e))
- finally:
- # always do this, to avoid hangs :-)
- self._to_fetch.task_done()
- if not url:
- #logger.debug('Sentinel seen, quitting.')
- break
-
- def get_page(self, url):
- """
- Get the HTML for an URL, possibly from an in-memory cache.
-
- XXX TODO Note: this cache is never actually cleared. It's assumed that
- the data won't get stale over the lifetime of a locator instance (not
- necessarily true for the default_locator).
- """
- # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
- scheme, netloc, path, _, _, _ = urlparse(url)
- if scheme == 'file' and os.path.isdir(url2pathname(path)):
- url = urljoin(ensure_slash(url), 'index.html')
-
- if url in self._page_cache:
- result = self._page_cache[url]
- logger.debug('Returning %s from cache: %s', url, result)
- else:
- host = netloc.split(':', 1)[0]
- result = None
- if host in self._bad_hosts:
- logger.debug('Skipping %s due to bad host %s', url, host)
- else:
- req = Request(url, headers={'Accept-encoding': 'identity'})
- try:
- logger.debug('Fetching %s', url)
- resp = self.opener.open(req, timeout=self.timeout)
- logger.debug('Fetched %s', url)
- headers = resp.info()
- content_type = headers.get('Content-Type', '')
- if HTML_CONTENT_TYPE.match(content_type):
- final_url = resp.geturl()
- data = resp.read()
- encoding = headers.get('Content-Encoding')
- if encoding:
- decoder = self.decoders[encoding] # fail if not found
- data = decoder(data)
- encoding = 'utf-8'
- m = CHARSET.search(content_type)
- if m:
- encoding = m.group(1)
- try:
- data = data.decode(encoding)
- except UnicodeError: # pragma: no cover
- data = data.decode('latin-1') # fallback
- result = Page(data, final_url)
- self._page_cache[final_url] = result
- except HTTPError as e:
- if e.code != 404:
- logger.exception('Fetch failed: %s: %s', url, e)
- except URLError as e: # pragma: no cover
- logger.exception('Fetch failed: %s: %s', url, e)
- with self._lock:
- self._bad_hosts.add(host)
- except Exception as e: # pragma: no cover
- logger.exception('Fetch failed: %s: %s', url, e)
- finally:
- self._page_cache[url] = result # even if None (failure)
- return result
-
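-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# get_page() picks a decompressor from the `decoders` table based on the
-# Content-Encoding header and a charset from Content-Type, falling back to
-# UTF-8 and then latin-1.  The decode step in isolation (the charset regex
-# here is a stand-in for the module's CHARSET constant):
-import gzip
-import re
-import zlib
-from io import BytesIO
-
-_DECODERS = {
-    'deflate': zlib.decompress,
-    'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
-    'none': lambda b: b,
-}
-_CHARSET = re.compile(r'charset\s*=\s*([-_\w]+)', re.I)
-
-def decode_body(raw, content_encoding, content_type):
-    data = _DECODERS[content_encoding or 'none'](raw)
-    m = _CHARSET.search(content_type or '')
-    encoding = m.group(1) if m else 'utf-8'
-    try:
-        return data.decode(encoding)
-    except UnicodeError:
-        return data.decode('latin-1')      # last-resort fallback
-
-if __name__ == '__main__':
-    body = gzip.compress('<html>ok</html>'.encode('utf-8'))
-    print(decode_body(body, 'gzip', 'text/html; charset=utf-8'))
-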
- _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
-
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- page = self.get_page(self.base_url)
- if not page:
- raise DistlibException('Unable to get %s' % self.base_url)
- for match in self._distname_re.finditer(page.data):
- result.add(match.group(1))
- return result
-
-
-class DirectoryLocator(Locator):
- """
- This class locates distributions in a directory tree.
- """
-
- def __init__(self, path, **kwargs):
- """
- Initialise an instance.
- :param path: The root of the directory tree to search.
- :param kwargs: Passed to the superclass constructor,
- except for:
- * recursive - if True (the default), subdirectories are
- recursed into. If False, only the top-level directory
- is searched.
- """
- self.recursive = kwargs.pop('recursive', True)
- super(DirectoryLocator, self).__init__(**kwargs)
- path = os.path.abspath(path)
- if not os.path.isdir(path): # pragma: no cover
- raise DistlibException('Not a directory: %r' % path)
- self.base_dir = path
-
- def should_include(self, filename, parent):
- """
- Should a filename be considered as a candidate for a distribution
- archive? As well as the filename, the directory which contains it
- is provided, though not used by the current implementation.
- """
- return filename.endswith(self.downloadable_extensions)
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- for root, dirs, files in os.walk(self.base_dir):
- for fn in files:
- if self.should_include(fn, root):
- fn = os.path.join(root, fn)
- url = urlunparse(('file', '',
- pathname2url(os.path.abspath(fn)),
- '', '', ''))
- info = self.convert_url_to_download_info(url, name)
- if info:
- self._update_version_data(result, info)
- if not self.recursive:
- break
- return result
-
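-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# DirectoryLocator turns every candidate archive under base_dir into a
-# file:// URL and then reuses convert_url_to_download_info() on it.  The
-# walk-and-convert step on its own (the extension list is an example):
-import os
-from urllib.parse import urlunparse
-from urllib.request import pathname2url
-
-def archive_urls(base_dir, extensions=('.whl', '.tar.gz', '.zip'),
-                 recursive=True):
-    for root, dirs, files in os.walk(base_dir):
-        for fn in files:
-            if fn.endswith(extensions):
-                path = os.path.abspath(os.path.join(root, fn))
-                yield urlunparse(('file', '', pathname2url(path), '', '', ''))
-        if not recursive:
-            break
-
-if __name__ == '__main__':
-    for url in archive_urls('.', recursive=False):
-        print(url)
-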
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- for root, dirs, files in os.walk(self.base_dir):
- for fn in files:
- if self.should_include(fn, root):
- fn = os.path.join(root, fn)
- url = urlunparse(('file', '',
- pathname2url(os.path.abspath(fn)),
- '', '', ''))
- info = self.convert_url_to_download_info(url, None)
- if info:
- result.add(info['name'])
- if not self.recursive:
- break
- return result
-
-
-class JSONLocator(Locator):
- """
- This locator uses special extended metadata (not available on PyPI) and is
- the basis of performant dependency resolution in distlib. Other locators
- require archive downloads before dependencies can be determined! As you
- might imagine, that can be slow.
- """
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- raise NotImplementedError('Not available from this locator')
-
- def _get_project(self, name):
- result = {'urls': {}, 'digests': {}}
- data = get_project_data(name)
- if data:
- for info in data.get('files', []):
- if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
- continue
- # We don't store summary in project metadata as it makes
- # the data bigger for no benefit during dependency
- # resolution
- dist = make_dist(data['name'], info['version'],
- summary=data.get('summary',
- 'Placeholder for summary'),
- scheme=self.scheme)
- md = dist.metadata
- md.source_url = info['url']
- # TODO SHA256 digest
- if 'digest' in info and info['digest']:
- dist.digest = ('md5', info['digest'])
- md.dependencies = info.get('requirements', {})
- dist.exports = info.get('exports', {})
- result[dist.version] = dist
- result['urls'].setdefault(dist.version, set()).add(info['url'])
- return result
-
-
-class DistPathLocator(Locator):
- """
- This locator finds installed distributions in a path. It can be useful for
- adding to an :class:`AggregatingLocator`.
- """
- def __init__(self, distpath, **kwargs):
- """
- Initialise an instance.
-
- :param distpath: A :class:`DistributionPath` instance to search.
- """
- super(DistPathLocator, self).__init__(**kwargs)
- assert isinstance(distpath, DistributionPath)
- self.distpath = distpath
-
- def _get_project(self, name):
- dist = self.distpath.get_distribution(name)
- if dist is None:
- result = {'urls': {}, 'digests': {}}
- else:
- result = {
- dist.version: dist,
- 'urls': {dist.version: set([dist.source_url])},
- 'digests': {dist.version: set([None])}
- }
- return result
-
-
-class AggregatingLocator(Locator):
- """
- This class allows you to chain and/or merge a list of locators.
- """
- def __init__(self, *locators, **kwargs):
- """
- Initialise an instance.
-
- :param locators: The list of locators to search.
- :param kwargs: Passed to the superclass constructor,
- except for:
- * merge - if False (the default), the first successful
- search from any of the locators is returned. If True,
- the results from all locators are merged (this can be
- slow).
- """
- self.merge = kwargs.pop('merge', False)
- self.locators = locators
- super(AggregatingLocator, self).__init__(**kwargs)
-
- def clear_cache(self):
- super(AggregatingLocator, self).clear_cache()
- for locator in self.locators:
- locator.clear_cache()
-
- def _set_scheme(self, value):
- self._scheme = value
- for locator in self.locators:
- locator.scheme = value
-
- scheme = property(Locator.scheme.fget, _set_scheme)
-
- def _get_project(self, name):
- result = {}
- for locator in self.locators:
- d = locator.get_project(name)
- if d:
- if self.merge:
- files = result.get('urls', {})
- digests = result.get('digests', {})
- # next line could overwrite result['urls'], result['digests']
- result.update(d)
- df = result.get('urls')
- if files and df:
- for k, v in files.items():
- if k in df:
- df[k] |= v
- else:
- df[k] = v
- dd = result.get('digests')
- if digests and dd:
- dd.update(digests)
- else:
- # See issue #18. If any dists are found and we're looking
- # for specific constraints, we only return something if
- # a match is found. For example, if a DirectoryLocator
- # returns just foo (1.0) while we're looking for
- # foo (>= 2.0), we'll pretend there was nothing there so
- # that subsequent locators can be queried. Otherwise we
- # would just return foo (1.0) which would then lead to a
- # failure to find foo (>= 2.0), because other locators
- # weren't searched. Note that this only matters when
- # merge=False.
- if self.matcher is None:
- found = True
- else:
- found = False
- for k in d:
- if self.matcher.match(k):
- found = True
- break
- if found:
- result = d
- break
- return result
-
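-# --- Illustrative sketch (editor's note, not part of distlib) -------------
-# With merge=True the loop above must union the per-version URL sets after
-# result.update(d) has overwritten result['urls'].  The union step on its
-# own, with a hypothetical merge_url_sets() helper:
-def merge_url_sets(accumulated, incoming):
-    for version, urls in incoming.items():
-        accumulated.setdefault(version, set()).update(urls)
-    return accumulated
-
-if __name__ == '__main__':
-    first = {'1.0': {'https://a.example/foo-1.0.tar.gz'}}
-    second = {'1.0': {'https://b.example/foo-1.0-py3-none-any.whl'},
-              '1.1': {'https://b.example/foo-1.1.tar.gz'}}
-    print(merge_url_sets(first, second))
-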
- def get_distribution_names(self):
- """
- Return all the distribution names known to this locator.
- """
- result = set()
- for locator in self.locators:
- try:
- result |= locator.get_distribution_names()
- except NotImplementedError:
- pass
- return result
-
-
-# We use a legacy scheme simply because most of the dists on PyPI use legacy
-# versions which don't conform to PEP 440.
-default_locator = AggregatingLocator(
- # JSONLocator(), # don't use as PEP 426 is withdrawn
- SimpleScrapingLocator('https://pypi.org/simple/',
- timeout=3.0),
- scheme='legacy')
-
-locate = default_locator.locate
-
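-# --- Illustrative usage sketch (editor's note, not part of distlib) -------
-# The module-level `locate` bound above is the usual entry point.  Typical
-# client code (needs network access; 'requests' is only an example name):
-from distlib.locators import locate
-
-dist = locate('requests (>= 2.0)')
-if dist is not None:
-    print(dist.name_and_version)
-    print(sorted(dist.download_urls))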
-
-class DependencyFinder(object):
- """
- Locate dependencies for distributions.
- """
-
- def __init__(self, locator=None):
- """
- Initialise an instance, using the specified locator
- to locate distributions.
- """
- self.locator = locator or default_locator
- self.scheme = get_scheme(self.locator.scheme)
-
- def add_distribution(self, dist):
- """
- Add a distribution to the finder. This will update internal information
- about who provides what.
- :param dist: The distribution to add.
- """
- logger.debug('adding distribution %s', dist)
- name = dist.key
- self.dists_by_name[name] = dist
- self.dists[(name, dist.version)] = dist
- for p in dist.provides:
- name, version = parse_name_and_version(p)
- logger.debug('Add to provided: %s, %s, %s', name, version, dist)
- self.provided.setdefault(name, set()).add((version, dist))
-
- def remove_distribution(self, dist):
- """
- Remove a distribution from the finder. This will update internal
- information about who provides what.
- :param dist: The distribution to remove.
- """
- logger.debug('removing distribution %s', dist)
- name = dist.key
- del self.dists_by_name[name]
- del self.dists[(name, dist.version)]
- for p in dist.provides:
- name, version = parse_name_and_version(p)
- logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
- s = self.provided[name]
- s.remove((version, dist))
- if not s:
- del self.provided[name]
-
- def get_matcher(self, reqt):
- """
- Get a version matcher for a requirement.
- :param reqt: The requirement
- :type reqt: str
- :return: A version matcher (an instance of
- :class:`distlib.version.Matcher`).
- """
- try:
- matcher = self.scheme.matcher(reqt)
- except UnsupportedVersionError: # pragma: no cover
- # XXX compat-mode if cannot read the version
- name = reqt.split()[0]
- matcher = self.scheme.matcher(name)
- return matcher
-
- def find_providers(self, reqt):
- """
- Find the distributions which can fulfill a requirement.
-
- :param reqt: The requirement.
- :type reqt: str
- :return: A set of distributions which can fulfill the requirement.
- """
- matcher = self.get_matcher(reqt)
- name = matcher.key # case-insensitive
- result = set()
- provided = self.provided
- if name in provided:
- for version, provider in provided[name]:
- try:
- match = matcher.match(version)
- except UnsupportedVersionError:
- match = False
-
- if match:
- result.add(provider)
- break
- return result
-
- def try_to_replace(self, provider, other, problems):
- """
- Attempt to replace one provider with another. This is typically used
- when resolving dependencies from multiple sources, e.g. A requires
- (B >= 1.0) while C requires (B >= 1.1).
-
- For successful replacement, ``provider`` must meet all the requirements
- which ``other`` fulfills.
-
- :param provider: The provider we are trying to replace with.
- :param other: The provider we're trying to replace.
- :param problems: If False is returned, this will contain what
- problems prevented replacement. This is currently
- a tuple of the literal string 'cantreplace',
- ``provider``, ``other`` and the set of requirements
- that ``provider`` couldn't fulfill.
- :return: True if we can replace ``other`` with ``provider``, else
- False.
- """
- rlist = self.reqts[other]
- unmatched = set()
- for s in rlist:
- matcher = self.get_matcher(s)
- if not matcher.match(provider.version):
- unmatched.add(s)
- if unmatched:
- # can't replace other with provider
- problems.add(('cantreplace', provider, other,
- frozenset(unmatched)))
- result = False
- else:
- # can replace other with provider
- self.remove_distribution(other)
- del self.reqts[other]
- for s in rlist:
- self.reqts.setdefault(provider, set()).add(s)
- self.add_distribution(provider)
- result = True
- return result
-
- def find(self, requirement, meta_extras=None, prereleases=False):
- """
- Find a distribution and all distributions it depends on.
-
- :param requirement: The requirement specifying the distribution to
- find, or a Distribution instance.
- :param meta_extras: A list of meta extras such as :test:, :build: and
- so on.
- :param prereleases: If ``True``, allow pre-release versions to be
- returned - otherwise, don't return prereleases
- unless they're all that's available.
-
- Return a set of :class:`Distribution` instances and a set of
- problems.
-
- The distributions returned should be such that they have the
- :attr:`required` attribute set to ``True`` if they were
- from the ``requirement`` passed to ``find()``, and they have the
- :attr:`build_time_dependency` attribute set to ``True`` unless they
- are post-installation dependencies of the ``requirement``.
-
- Each problem is a tuple consisting of the string
- ``'unsatisfied'`` and the requirement which couldn't be satisfied
- by any distribution known to the locator.
- """
-
- self.provided = {}
- self.dists = {}
- self.dists_by_name = {}
- self.reqts = {}
-
- meta_extras = set(meta_extras or [])
- if ':*:' in meta_extras:
- meta_extras.remove(':*:')
- # :meta: and :run: are implicitly included
- meta_extras |= set([':test:', ':build:', ':dev:'])
-
- if isinstance(requirement, Distribution):
- dist = odist = requirement
- logger.debug('passed %s as requirement', odist)
- else:
- dist = odist = self.locator.locate(requirement,
- prereleases=prereleases)
- if dist is None:
- raise DistlibException('Unable to locate %r' % requirement)
- logger.debug('located %s', odist)
- dist.requested = True
- problems = set()
- todo = set([dist])
- install_dists = set([odist])
- while todo:
- dist = todo.pop()
- name = dist.key # case-insensitive
- if name not in self.dists_by_name:
- self.add_distribution(dist)
- else:
- #import pdb; pdb.set_trace()
- other = self.dists_by_name[name]
- if other != dist:
- self.try_to_replace(dist, other, problems)
-
- ireqts = dist.run_requires | dist.meta_requires
- sreqts = dist.build_requires
- ereqts = set()
- if meta_extras and dist in install_dists:
- for key in ('test', 'build', 'dev'):
- e = ':%s:' % key
- if e in meta_extras:
- ereqts |= getattr(dist, '%s_requires' % key)
- all_reqts = ireqts | sreqts | ereqts
- for r in all_reqts:
- providers = self.find_providers(r)
- if not providers:
- logger.debug('No providers found for %r', r)
- provider = self.locator.locate(r, prereleases=prereleases)
- # If no provider is found and we didn't consider
- # prereleases, consider them now.
- if provider is None and not prereleases:
- provider = self.locator.locate(r, prereleases=True)
- if provider is None:
- logger.debug('Cannot satisfy %r', r)
- problems.add(('unsatisfied', r))
- else:
- n, v = provider.key, provider.version
- if (n, v) not in self.dists:
- todo.add(provider)
- providers.add(provider)
- if r in ireqts and dist in install_dists:
- install_dists.add(provider)
- logger.debug('Adding %s to install_dists',
- provider.name_and_version)
- for p in providers:
- name = p.key
- if name not in self.dists_by_name:
- self.reqts.setdefault(p, set()).add(r)
- else:
- other = self.dists_by_name[name]
- if other != p:
- # see if other can be replaced by p
- self.try_to_replace(p, other, problems)
-
- dists = set(self.dists.values())
- for dist in dists:
- dist.build_time_dependency = dist not in install_dists
- if dist.build_time_dependency:
- logger.debug('%s is a build-time dependency only.',
- dist.name_and_version)
- logger.debug('find done for %s', odist)
- return dists, problems
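-
-# --- Illustrative usage sketch (editor's note, not part of distlib) -------
-# find() returns the closure of distributions plus a set of problem tuples.
-# Typical use (needs network access; 'flask' is only an example project):
-from distlib.locators import DependencyFinder, default_locator
-
-finder = DependencyFinder(default_locator)
-dists, problems = finder.find('flask (>= 2.0)')
-for dist in sorted(dists, key=lambda d: d.key):
-    kind = 'build-time' if dist.build_time_dependency else 'run-time'
-    print('%-40s %s' % (dist.name_and_version, kind))
-for problem in problems:
-    print('problem:', problem)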
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py
deleted file mode 100644
index 2199cc7b7f004009493d032720c36d6568f9d89e..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
-
-
-def connection_requires_http_tunnel(
- proxy_url=None, proxy_config=None, destination_scheme=None
-):
- """
- Returns True if the connection requires an HTTP CONNECT through the proxy.
-
- :param URL proxy_url:
- URL of the proxy.
- :param ProxyConfig proxy_config:
- Proxy configuration from poolmanager.py
- :param str destination_scheme:
- The scheme of the destination (e.g. https or http).
- """
- # If we're not using a proxy, no way to use a tunnel.
- if proxy_url is None:
- return False
-
- # HTTP destinations never require tunneling, we always forward.
- if destination_scheme == "http":
- return False
-
- # Support for forwarding with HTTPS proxies and HTTPS destinations.
- if (
- proxy_url.scheme == "https"
- and proxy_config
- and proxy_config.use_forwarding_for_https
- ):
- return False
-
- # Otherwise always use a tunnel.
- return True
-
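-# --- Illustrative sketch (editor's note, not part of urllib3) -------------
-# Only proxy_url.scheme and proxy_config.use_forwarding_for_https are
-# consulted above, so a quick check works with minimal stand-ins (real
-# callers pass urllib3's parsed Url and ProxyConfig objects; assumes
-# urllib3 1.26.x is installed):
-from collections import namedtuple
-
-from urllib3.util.proxy import connection_requires_http_tunnel
-
-_FakeUrl = namedtuple('_FakeUrl', 'scheme')
-_FakeProxyConfig = namedtuple('_FakeProxyConfig', 'use_forwarding_for_https')
-
-print(connection_requires_http_tunnel(None, None, 'https'))             # False: no proxy
-print(connection_requires_http_tunnel(_FakeUrl('http'), None, 'http'))  # False: plain HTTP is forwarded
-print(connection_requires_http_tunnel(_FakeUrl('http'), None, 'https')) # True: HTTPS needs CONNECT
-print(connection_requires_http_tunnel(_FakeUrl('https'),
-                                      _FakeProxyConfig(True), 'https')) # False: TLS-in-TLS forwarding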
-
-def create_proxy_ssl_context(
- ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
-):
- """
- Generates a default proxy ssl context if one hasn't been provided by the
- user.
- """
- ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(ssl_version),
- cert_reqs=resolve_cert_reqs(cert_reqs),
- )
-
- if (
- not ca_certs
- and not ca_cert_dir
- and not ca_cert_data
- and hasattr(ssl_context, "load_default_certs")
- ):
- ssl_context.load_default_certs()
-
- return ssl_context
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py
deleted file mode 100644
index c23d736b186f50eb723eebbd6dfce281d91c2353..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/connectionpool.py
+++ /dev/null
@@ -1,1110 +0,0 @@
-from __future__ import absolute_import
-
-import errno
-import logging
-import re
-import socket
-import sys
-import warnings
-from socket import error as SocketError
-from socket import timeout as SocketTimeout
-
-from .connection import (
- BaseSSLError,
- BrokenPipeError,
- DummyConnection,
- HTTPConnection,
- HTTPException,
- HTTPSConnection,
- VerifiedHTTPSConnection,
- port_by_scheme,
-)
-from .exceptions import (
- ClosedPoolError,
- EmptyPoolError,
- HeaderParsingError,
- HostChangedError,
- InsecureRequestWarning,
- LocationValueError,
- MaxRetryError,
- NewConnectionError,
- ProtocolError,
- ProxyError,
- ReadTimeoutError,
- SSLError,
- TimeoutError,
-)
-from .packages import six
-from .packages.six.moves import queue
-from .request import RequestMethods
-from .response import HTTPResponse
-from .util.connection import is_connection_dropped
-from .util.proxy import connection_requires_http_tunnel
-from .util.queue import LifoQueue
-from .util.request import set_file_position
-from .util.response import assert_header_parsing
-from .util.retry import Retry
-from .util.ssl_match_hostname import CertificateError
-from .util.timeout import Timeout
-from .util.url import Url, _encode_target
-from .util.url import _normalize_host as normalize_host
-from .util.url import get_host, parse_url
-
-xrange = six.moves.xrange
-
-log = logging.getLogger(__name__)
-
-_Default = object()
-
-
-# Pool objects
-class ConnectionPool(object):
- """
- Base class for all connection pools, such as
- :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
-
- .. note::
- ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
- which is useful if your target server doesn't support percent-encoded
- target URIs.
- """
-
- scheme = None
- QueueCls = LifoQueue
-
- def __init__(self, host, port=None):
- if not host:
- raise LocationValueError("No host specified.")
-
- self.host = _normalize_host(host, scheme=self.scheme)
- self._proxy_host = host.lower()
- self.port = port
-
- def __str__(self):
- return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.close()
- # Return False to re-raise any potential exceptions
- return False
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- pass
-
-
-# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
-_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
-
-
-class HTTPConnectionPool(ConnectionPool, RequestMethods):
- """
- Thread-safe connection pool for one host.
-
- :param host:
- Host used for this HTTP Connection (e.g. "localhost"), passed into
- :class:`http.client.HTTPConnection`.
-
- :param port:
- Port used for this HTTP Connection (None is equivalent to 80), passed
- into :class:`http.client.HTTPConnection`.
-
- :param strict:
- Causes BadStatusLine to be raised if the status line can't be parsed
- as a valid HTTP/1.0 or 1.1 status line, passed into
- :class:`http.client.HTTPConnection`.
-
- .. note::
- Only works in Python 2. This parameter is ignored in Python 3.
-
- :param timeout:
- Socket timeout in seconds for each individual connection. This can
- be a float or integer, which sets the timeout for the HTTP request,
- or an instance of :class:`urllib3.util.Timeout` which gives you more
- fine-grained control over request timeouts. After the constructor has
- run, this is always a `urllib3.util.Timeout` object.
-
- :param maxsize:
- Number of connections to save that can be reused. More than 1 is useful
- in multithreaded situations. If ``block`` is set to False, more
- connections will be created but they will not be saved once they've
- been used.
-
- :param block:
- If set to True, no more than ``maxsize`` connections will be used at
- a time. When no free connections are available, the call will block
- until a connection has been released. This is a useful side effect for
- particular multithreaded situations where one does not want to use more
- than maxsize connections per host to prevent flooding.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param retries:
- Retry configuration to use by default with requests in this pool.
-
- :param _proxy:
- Parsed proxy URL, should not be used directly, instead, see
- :class:`urllib3.ProxyManager`
-
- :param _proxy_headers:
- A dictionary with proxy headers, should not be used directly,
- instead, see :class:`urllib3.ProxyManager`
-
- :param \\**conn_kw:
- Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
- :class:`urllib3.connection.HTTPSConnection` instances.
- """
-
- scheme = "http"
- ConnectionCls = HTTPConnection
- ResponseCls = HTTPResponse
-
- def __init__(
- self,
- host,
- port=None,
- strict=False,
- timeout=Timeout.DEFAULT_TIMEOUT,
- maxsize=1,
- block=False,
- headers=None,
- retries=None,
- _proxy=None,
- _proxy_headers=None,
- _proxy_config=None,
- **conn_kw
- ):
- ConnectionPool.__init__(self, host, port)
- RequestMethods.__init__(self, headers)
-
- self.strict = strict
-
- if not isinstance(timeout, Timeout):
- timeout = Timeout.from_float(timeout)
-
- if retries is None:
- retries = Retry.DEFAULT
-
- self.timeout = timeout
- self.retries = retries
-
- self.pool = self.QueueCls(maxsize)
- self.block = block
-
- self.proxy = _proxy
- self.proxy_headers = _proxy_headers or {}
- self.proxy_config = _proxy_config
-
- # Fill the queue up so that doing get() on it will block properly
- for _ in xrange(maxsize):
- self.pool.put(None)
-
- # These are mostly for testing and debugging purposes.
- self.num_connections = 0
- self.num_requests = 0
- self.conn_kw = conn_kw
-
- if self.proxy:
- # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
- # We cannot know if the user has added default socket options, so we cannot replace the
- # list.
- self.conn_kw.setdefault("socket_options", [])
-
- self.conn_kw["proxy"] = self.proxy
- self.conn_kw["proxy_config"] = self.proxy_config
-
- def _new_conn(self):
- """
- Return a fresh :class:`HTTPConnection`.
- """
- self.num_connections += 1
- log.debug(
- "Starting new HTTP connection (%d): %s:%s",
- self.num_connections,
- self.host,
- self.port or "80",
- )
-
- conn = self.ConnectionCls(
- host=self.host,
- port=self.port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict,
- **self.conn_kw
- )
- return conn
-
- def _get_conn(self, timeout=None):
- """
- Get a connection. Will return a pooled connection if one is available.
-
- If no connections are available and :prop:`.block` is ``False``, then a
- fresh connection is returned.
-
- :param timeout:
- Seconds to wait before giving up and raising
- :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
- :prop:`.block` is ``True``.
- """
- conn = None
- try:
- conn = self.pool.get(block=self.block, timeout=timeout)
-
- except AttributeError: # self.pool is None
- raise ClosedPoolError(self, "Pool is closed.")
-
- except queue.Empty:
- if self.block:
- raise EmptyPoolError(
- self,
- "Pool reached maximum size and no more connections are allowed.",
- )
- pass # Oh well, we'll create a new connection then
-
- # If this is a persistent connection, check if it got disconnected
- if conn and is_connection_dropped(conn):
- log.debug("Resetting dropped connection: %s", self.host)
- conn.close()
- if getattr(conn, "auto_open", 1) == 0:
- # This is a proxied connection that has been mutated by
- # http.client._tunnel() and cannot be reused (since it would
- # attempt to bypass the proxy)
- conn = None
-
- return conn or self._new_conn()
-
- def _put_conn(self, conn):
- """
- Put a connection back into the pool.
-
- :param conn:
- Connection object for the current host and port as returned by
- :meth:`._new_conn` or :meth:`._get_conn`.
-
- If the pool is already full, the connection is closed and discarded
- because we exceeded maxsize. If connections are discarded frequently,
- then maxsize should be increased.
-
- If the pool is closed, then the connection will be closed and discarded.
- """
- try:
- self.pool.put(conn, block=False)
- return # Everything is dandy, done.
- except AttributeError:
- # self.pool is None.
- pass
- except queue.Full:
- # This should never happen if self.block == True
- log.warning(
- "Connection pool is full, discarding connection: %s. Connection pool size: %s",
- self.host,
- self.pool.qsize(),
- )
- # Connection never got put back into the pool, close it.
- if conn:
- conn.close()
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- pass
-
- def _prepare_proxy(self, conn):
- # Nothing to do for HTTP connections.
- pass
-
- def _get_timeout(self, timeout):
- """Helper that always returns a :class:`urllib3.util.Timeout`"""
- if timeout is _Default:
- return self.timeout.clone()
-
- if isinstance(timeout, Timeout):
- return timeout.clone()
- else:
- # User passed us an int/float. This is for backwards compatibility,
- # can be removed later
- return Timeout.from_float(timeout)
-
- def _raise_timeout(self, err, url, timeout_value):
- """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
-
- if isinstance(err, SocketTimeout):
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
- )
-
- # See the above comment about EAGAIN in Python 3. In Python 2 we have
- # to specifically catch it and throw the timeout error
- if hasattr(err, "errno") and err.errno in _blocking_errnos:
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
- )
-
- # Catch possible read timeouts thrown as SSL errors. If not the
- # case, rethrow the original. We need to do this because of:
- # http://bugs.python.org/issue10272
- if "timed out" in str(err) or "did not complete (read)" in str(
- err
- ): # Python < 2.7.4
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
- )
-
- def _make_request(
- self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
- ):
- """
- Perform a request on a given urllib connection object taken from our
- pool.
-
- :param conn:
- a connection from one of our connection pools
-
- :param timeout:
- Socket timeout in seconds for the request. This can be a
- float or integer, which will set the same timeout value for
- the socket connect and the socket read, or an instance of
- :class:`urllib3.util.Timeout`, which gives you more fine-grained
- control over your timeouts.
- """
- self.num_requests += 1
-
- timeout_obj = self._get_timeout(timeout)
- timeout_obj.start_connect()
- conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
-
- # Trigger any extra validation we need to do.
- try:
- self._validate_conn(conn)
- except (SocketTimeout, BaseSSLError) as e:
- # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
- self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
- raise
-
- # conn.request() calls http.client.*.request, not the method in
- # urllib3.request. It also calls makefile (recv) on the socket.
- try:
- if chunked:
- conn.request_chunked(method, url, **httplib_request_kw)
- else:
- conn.request(method, url, **httplib_request_kw)
-
- # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
- # legitimately able to close the connection after sending a valid response.
- # With this behaviour, the received response is still readable.
- except BrokenPipeError:
- # Python 3
- pass
- except IOError as e:
- # Python 2 and macOS/Linux
- # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
- # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
- if e.errno not in {
- errno.EPIPE,
- errno.ESHUTDOWN,
- errno.EPROTOTYPE,
- }:
- raise
-
- # Reset the timeout for the recv() on the socket
- read_timeout = timeout_obj.read_timeout
-
- # App Engine doesn't have a sock attr
- if getattr(conn, "sock", None):
- # In Python 3 socket.py will catch EAGAIN and return None when you
- # try and read into the file pointer created by http.client, which
- # instead raises a BadStatusLine exception. Instead of catching
- # the exception and assuming all BadStatusLine exceptions are read
- # timeouts, check for a zero timeout before making the request.
- if read_timeout == 0:
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % read_timeout
- )
- if read_timeout is Timeout.DEFAULT_TIMEOUT:
- conn.sock.settimeout(socket.getdefaulttimeout())
- else: # None or a value
- conn.sock.settimeout(read_timeout)
-
- # Receive the response from the server
- try:
- try:
- # Python 2.7, use buffering of HTTP responses
- httplib_response = conn.getresponse(buffering=True)
- except TypeError:
- # Python 3
- try:
- httplib_response = conn.getresponse()
- except BaseException as e:
- # Remove the TypeError from the exception chain in
- # Python 3 (including for exceptions like SystemExit).
- # Otherwise it looks like a bug in the code.
- six.raise_from(e, None)
- except (SocketTimeout, BaseSSLError, SocketError) as e:
- self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
- raise
-
- # AppEngine doesn't have a version attr.
- http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
- log.debug(
- '%s://%s:%s "%s %s %s" %s %s',
- self.scheme,
- self.host,
- self.port,
- method,
- url,
- http_version,
- httplib_response.status,
- httplib_response.length,
- )
-
- try:
- assert_header_parsing(httplib_response.msg)
- except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
- log.warning(
- "Failed to parse headers (url=%s): %s",
- self._absolute_url(url),
- hpe,
- exc_info=True,
- )
-
- return httplib_response
-
- def _absolute_url(self, path):
- return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- if self.pool is None:
- return
- # Disable access to the pool
- old_pool, self.pool = self.pool, None
-
- try:
- while True:
- conn = old_pool.get(block=False)
- if conn:
- conn.close()
-
- except queue.Empty:
- pass # Done.
-
- def is_same_host(self, url):
- """
- Check if the given ``url`` is a member of the same host as this
- connection pool.
- """
- if url.startswith("/"):
- return True
-
- # TODO: Add optional support for socket.gethostbyname checking.
- scheme, host, port = get_host(url)
- if host is not None:
- host = _normalize_host(host, scheme=scheme)
-
- # Use explicit default port for comparison when none is given
- if self.port and not port:
- port = port_by_scheme.get(scheme)
- elif not self.port and port == port_by_scheme.get(scheme):
- port = None
-
- return (scheme, host, port) == (self.scheme, self.host, self.port)
-
- def urlopen(
- self,
- method,
- url,
- body=None,
- headers=None,
- retries=None,
- redirect=True,
- assert_same_host=True,
- timeout=_Default,
- pool_timeout=None,
- release_conn=None,
- chunked=False,
- body_pos=None,
- **response_kw
- ):
- """
- Get a connection from the pool and perform an HTTP request. This is the
- lowest level call for making a request, so you'll need to specify all
- the raw details.
-
- .. note::
-
- More commonly, it's appropriate to use a convenience method provided
- by :class:`.RequestMethods`, such as :meth:`request`.
-
- .. note::
-
- `release_conn` will only behave as expected if
- `preload_content=False` because we want to make
- `preload_content=False` the default behaviour someday soon without
- breaking backwards compatibility.
-
- :param method:
- HTTP request method (such as GET, POST, PUT, etc.)
-
- :param url:
- The URL to perform the request on.
-
- :param body:
- Data to send in the request body, either :class:`str`, :class:`bytes`,
- an iterable of :class:`str`/:class:`bytes`, or a file-like object.
-
- :param headers:
- Dictionary of custom headers to send, such as User-Agent,
- If-None-Match, etc. If None, pool headers are used. If provided,
- these headers completely replace any pool-specific headers.
-
- :param retries:
- Configure the number of retries to allow before raising a
- :class:`~urllib3.exceptions.MaxRetryError` exception.
-
- Pass ``None`` to retry until you receive a response. Pass a
- :class:`~urllib3.util.retry.Retry` object for fine-grained control
- over different types of retries.
- Pass an integer number to retry connection errors that many times,
- but no other types of errors. Pass zero to never retry.
-
- If ``False``, then retries are disabled and any exception is raised
- immediately. Also, instead of raising a MaxRetryError on redirects,
- the redirect response will be returned.
-
- :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
-
- :param redirect:
- If True, automatically handle redirects (status codes 301, 302,
- 303, 307, 308). Each redirect counts as a retry. Disabling retries
- will disable redirect, too.
-
- :param assert_same_host:
- If ``True``, will make sure that the host of the pool requests is
- consistent, else will raise HostChangedError. When ``False``, you can
- use the pool on an HTTP proxy and request foreign hosts.
-
- :param timeout:
- If specified, overrides the default timeout for this one
- request. It may be a float (in seconds) or an instance of
- :class:`urllib3.util.Timeout`.
-
- :param pool_timeout:
- If set and the pool is set to block=True, then this method will
- block for ``pool_timeout`` seconds and raise EmptyPoolError if no
- connection is available within the time period.
-
- :param release_conn:
- If False, then the urlopen call will not release the connection
- back into the pool once a response is received (but will release if
- you read the entire contents of the response such as when
- `preload_content=True`). This is useful if you're not preloading
- the response's content immediately. You will need to call
- ``r.release_conn()`` on the response ``r`` to return the connection
- back into the pool. If None, it takes the value of
- ``response_kw.get('preload_content', True)``.
-
- :param chunked:
- If True, urllib3 will send the body using chunked transfer
- encoding. Otherwise, urllib3 will send the body using the standard
- content-length form. Defaults to False.
-
- :param int body_pos:
- Position to seek to in file-like body in the event of a retry or
- redirect. Typically this won't need to be set because urllib3 will
- auto-populate the value when needed.
-
- :param \\**response_kw:
- Additional parameters are passed to
- :meth:`urllib3.response.HTTPResponse.from_httplib`
- """
-
- parsed_url = parse_url(url)
- destination_scheme = parsed_url.scheme
-
- if headers is None:
- headers = self.headers
-
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
- if release_conn is None:
- release_conn = response_kw.get("preload_content", True)
-
- # Check host
- if assert_same_host and not self.is_same_host(url):
- raise HostChangedError(self, url, retries)
-
- # Ensure that the URL we're connecting to is properly encoded
- if url.startswith("/"):
- url = six.ensure_str(_encode_target(url))
- else:
- url = six.ensure_str(parsed_url.url)
-
- conn = None
-
- # Track whether `conn` needs to be released before
- # returning/raising/recursing. Update this variable if necessary, and
- # leave `release_conn` constant throughout the function. That way, if
- # the function recurses, the original value of `release_conn` will be
- # passed down into the recursive call, and its value will be respected.
- #
- # See issue #651 [1] for details.
- #
- # [1] <https://github.com/urllib3/urllib3/issues/651>
- release_this_conn = release_conn
-
- http_tunnel_required = connection_requires_http_tunnel(
- self.proxy, self.proxy_config, destination_scheme
- )
-
- # Merge the proxy headers. Only done when not using HTTP CONNECT. We
- # have to copy the headers dict so we can safely change it without those
- # changes being reflected in anyone else's copy.
- if not http_tunnel_required:
- headers = headers.copy()
- headers.update(self.proxy_headers)
-
- # Must keep the exception bound to a separate variable or else Python 3
- # complains about UnboundLocalError.
- err = None
-
- # Keep track of whether we cleanly exited the except block. This
- # ensures we do proper cleanup in finally.
- clean_exit = False
-
- # Rewind body position, if needed. Record current position
- # for future rewinds in the event of a redirect/retry.
- body_pos = set_file_position(body, body_pos)
-
- try:
- # Request a connection from the queue.
- timeout_obj = self._get_timeout(timeout)
- conn = self._get_conn(timeout=pool_timeout)
-
- conn.timeout = timeout_obj.connect_timeout
-
- is_new_proxy_conn = self.proxy is not None and not getattr(
- conn, "sock", None
- )
- if is_new_proxy_conn and http_tunnel_required:
- self._prepare_proxy(conn)
-
- # Make the request on the httplib connection object.
- httplib_response = self._make_request(
- conn,
- method,
- url,
- timeout=timeout_obj,
- body=body,
- headers=headers,
- chunked=chunked,
- )
-
- # If we're going to release the connection in ``finally:``, then
- # the response doesn't need to know about the connection. Otherwise
- # it will also try to release it and we'll have a double-release
- # mess.
- response_conn = conn if not release_conn else None
-
- # Pass method to Response for length checking
- response_kw["request_method"] = method
-
- # Import httplib's response into our own wrapper object
- response = self.ResponseCls.from_httplib(
- httplib_response,
- pool=self,
- connection=response_conn,
- retries=retries,
- **response_kw
- )
-
- # Everything went great!
- clean_exit = True
-
- except EmptyPoolError:
- # Didn't get a connection from the pool, no need to clean up
- clean_exit = True
- release_this_conn = False
- raise
-
- except (
- TimeoutError,
- HTTPException,
- SocketError,
- ProtocolError,
- BaseSSLError,
- SSLError,
- CertificateError,
- ) as e:
- # Discard the connection for these exceptions. It will be
- # replaced during the next _get_conn() call.
- clean_exit = False
-
- def _is_ssl_error_message_from_http_proxy(ssl_error):
- # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
- # SSLErrors are kinda all over the place when it comes to the message,
- # so we try to cover our bases here!
- message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
- return (
- "wrong version number" in message or "unknown protocol" in message
- )
-
- # Try to detect a common user error with proxies which is to
- # set an HTTP proxy to be HTTPS when it should be 'http://'
- # (ie {'http': 'http://proxy', 'https': 'https://proxy'})
- # Instead we add a nice error message and point to a URL.
- if (
- isinstance(e, BaseSSLError)
- and self.proxy
- and _is_ssl_error_message_from_http_proxy(e)
- and conn.proxy
- and conn.proxy.scheme == "https"
- ):
- e = ProxyError(
- "Your proxy appears to only use HTTP and not HTTPS, "
- "try changing your proxy URL to be HTTP. See: "
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
- "#https-proxy-error-http-proxy",
- SSLError(e),
- )
- elif isinstance(e, (BaseSSLError, CertificateError)):
- e = SSLError(e)
- elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
- e = ProxyError("Cannot connect to proxy.", e)
- elif isinstance(e, (SocketError, HTTPException)):
- e = ProtocolError("Connection aborted.", e)
-
- retries = retries.increment(
- method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
- )
- retries.sleep()
-
- # Keep track of the error for the retry warning.
- err = e
-
- finally:
- if not clean_exit:
- # We hit some kind of exception, handled or otherwise. We need
- # to throw the connection away unless explicitly told not to.
- # Close the connection, set the variable to None, and make sure
- # we put the None back in the pool to avoid leaking it.
- conn = conn and conn.close()
- release_this_conn = True
-
- if release_this_conn:
- # Put the connection back to be reused. If the connection is
- # expired then it will be None, which will get replaced with a
- # fresh connection during _get_conn.
- self._put_conn(conn)
-
- if not conn:
- # Try again
- log.warning(
- "Retrying (%r) after connection broken by '%r': %s", retries, err, url
- )
- return self.urlopen(
- method,
- url,
- body,
- headers,
- retries,
- redirect,
- assert_same_host,
- timeout=timeout,
- pool_timeout=pool_timeout,
- release_conn=release_conn,
- chunked=chunked,
- body_pos=body_pos,
- **response_kw
- )
-
- # Handle redirect?
- redirect_location = redirect and response.get_redirect_location()
- if redirect_location:
- if response.status == 303:
- method = "GET"
-
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- response.drain_conn()
- raise
- return response
-
- response.drain_conn()
- retries.sleep_for_retry(response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(
- method,
- redirect_location,
- body,
- headers,
- retries=retries,
- redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout,
- pool_timeout=pool_timeout,
- release_conn=release_conn,
- chunked=chunked,
- body_pos=body_pos,
- **response_kw
- )
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(response.headers.get("Retry-After"))
- if retries.is_retry(method, response.status, has_retry_after):
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_status:
- response.drain_conn()
- raise
- return response
-
- response.drain_conn()
- retries.sleep(response)
- log.debug("Retry: %s", url)
- return self.urlopen(
- method,
- url,
- body,
- headers,
- retries=retries,
- redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout,
- pool_timeout=pool_timeout,
- release_conn=release_conn,
- chunked=chunked,
- body_pos=body_pos,
- **response_kw
- )
-
- return response
-
-
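-# A minimal sketch of how the retry/redirect handling above is typically driven
-# (illustrative only; the host, path, and Retry settings here are placeholders):
-#
-#     from urllib3.util.retry import Retry
-#
-#     pool = HTTPConnectionPool("example.com", maxsize=2)
-#     response = pool.urlopen(
-#         "GET",
-#         "/old-path",
-#         redirect=True,  # a 303 response is re-issued as GET, per the handling above
-#         retries=Retry(total=3, redirect=2, raise_on_redirect=False),
-#     )
-
-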
-class HTTPSConnectionPool(HTTPConnectionPool):
- """
- Same as :class:`.HTTPConnectionPool`, but HTTPS.
-
- :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
- ``assert_hostname`` and ``host`` in this order to verify connections.
- If ``assert_hostname`` is False, no verification is done.
-
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
- ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
- is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
- the connection socket into an SSL socket.
- """
-
- scheme = "https"
- ConnectionCls = HTTPSConnection
-
- def __init__(
- self,
- host,
- port=None,
- strict=False,
- timeout=Timeout.DEFAULT_TIMEOUT,
- maxsize=1,
- block=False,
- headers=None,
- retries=None,
- _proxy=None,
- _proxy_headers=None,
- key_file=None,
- cert_file=None,
- cert_reqs=None,
- key_password=None,
- ca_certs=None,
- ssl_version=None,
- assert_hostname=None,
- assert_fingerprint=None,
- ca_cert_dir=None,
- **conn_kw
- ):
-
- HTTPConnectionPool.__init__(
- self,
- host,
- port,
- strict,
- timeout,
- maxsize,
- block,
- headers,
- retries,
- _proxy,
- _proxy_headers,
- **conn_kw
- )
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.key_password = key_password
- self.ca_certs = ca_certs
- self.ca_cert_dir = ca_cert_dir
- self.ssl_version = ssl_version
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
-
- def _prepare_conn(self, conn):
- """
- Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
- and establish the tunnel if proxy is used.
- """
-
- if isinstance(conn, VerifiedHTTPSConnection):
- conn.set_cert(
- key_file=self.key_file,
- key_password=self.key_password,
- cert_file=self.cert_file,
- cert_reqs=self.cert_reqs,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- assert_hostname=self.assert_hostname,
- assert_fingerprint=self.assert_fingerprint,
- )
- conn.ssl_version = self.ssl_version
- return conn
-
- def _prepare_proxy(self, conn):
- """
- Establishes a tunnel connection through HTTP CONNECT.
-
- The tunnel connection is established early because otherwise httplib would
- improperly set the Host: header to the proxy's IP:port.
- """
-
- conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
-
- if self.proxy.scheme == "https":
- conn.tls_in_tls_required = True
-
- conn.connect()
-
- def _new_conn(self):
- """
- Return a fresh :class:`http.client.HTTPSConnection`.
- """
- self.num_connections += 1
- log.debug(
- "Starting new HTTPS connection (%d): %s:%s",
- self.num_connections,
- self.host,
- self.port or "443",
- )
-
- if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
- raise SSLError(
- "Can't connect to HTTPS URL because the SSL module is not available."
- )
-
- actual_host = self.host
- actual_port = self.port
- if self.proxy is not None:
- actual_host = self.proxy.host
- actual_port = self.proxy.port
-
- conn = self.ConnectionCls(
- host=actual_host,
- port=actual_port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict,
- cert_file=self.cert_file,
- key_file=self.key_file,
- key_password=self.key_password,
- **self.conn_kw
- )
-
- return self._prepare_conn(conn)
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- super(HTTPSConnectionPool, self)._validate_conn(conn)
-
- # Force connect early to allow us to validate the connection.
- if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
- conn.connect()
-
- if not conn.is_verified:
- warnings.warn(
- (
- "Unverified HTTPS request is being made to host '%s'. "
- "Adding certificate verification is strongly advised. See: "
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
- "#ssl-warnings" % conn.host
- ),
- InsecureRequestWarning,
- )
-
- if getattr(conn, "proxy_is_verified", None) is False:
- warnings.warn(
- (
- "Unverified HTTPS connection done to an HTTPS proxy. "
- "Adding certificate verification is strongly advised. See: "
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
- "#ssl-warnings"
- ),
- InsecureRequestWarning,
- )
-
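-# A minimal construction sketch for the class above (illustrative only; the
-# host and CA bundle path are placeholders):
-#
-#     pool = HTTPSConnectionPool(
-#         "example.com",
-#         port=443,
-#         cert_reqs="CERT_REQUIRED",
-#         ca_certs="/path/to/ca_bundle.pem",
-#     )
-#     response = pool.request("GET", "/")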
-
-def connection_from_url(url, **kw):
- """
- Given a url, return a :class:`.ConnectionPool` instance of its host.
-
- This is a shortcut for not having to parse out the scheme, host, and port
- of the url before creating a :class:`.ConnectionPool` instance.
-
- :param url:
- Absolute URL string that must include the scheme. Port is optional.
-
- :param \\**kw:
- Passes additional parameters to the constructor of the appropriate
- :class:`.ConnectionPool`. Useful for specifying things like
- timeout, maxsize, headers, etc.
-
- Example::
-
- >>> conn = connection_from_url('http://google.com/')
- >>> r = conn.request('GET', '/')
- """
- scheme, host, port = get_host(url)
- port = port or port_by_scheme.get(scheme, 80)
- if scheme == "https":
- return HTTPSConnectionPool(host, port=port, **kw)
- else:
- return HTTPConnectionPool(host, port=port, **kw)
-
-
-def _normalize_host(host, scheme):
- """
- Normalize hosts for comparisons and use with sockets.
- """
-
- host = normalize_host(host, scheme)
-
- # httplib doesn't like it when we include brackets in IPv6 addresses
- # Specifically, if we include brackets but also pass the port then
- # httplib crazily doubles up the square brackets on the Host header.
- # Instead, we need to make sure we never pass ``None`` as the port.
- # However, for backward compatibility reasons we can't actually
- # *assert* that. See http://bugs.python.org/issue28539
- if host.startswith("[") and host.endswith("]"):
- host = host[1:-1]
- return host
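-
-# A small sketch of the bracket handling above (illustrative only):
-#
-#     _normalize_host("[2001:db8::1]", scheme="http")  # -> "2001:db8::1"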
diff --git a/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h b/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h
deleted file mode 100644
index 9d7133ba711254d9284200173a453b2155f410c5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/device_new_allocator.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file device_new_allocator.h
- * \brief An allocator which allocates storage with \p device_new
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/device_ptr.h>
-#include <thrust/device_reference.h>
-#include <thrust/device_new.h>
-#include <thrust/device_delete.h>
-#include <limits>
-#include <stdexcept>
-
-namespace thrust
-{
-
-/*! \addtogroup memory_management_classes Memory Management Classes
- * \ingroup memory_management
- * \{
- */
-
-/*! \p device_new_allocator is a device memory allocator that employs the
- * \p device_new function for allocation.
- *
- * \see device_new
- * \see device_ptr
- * \see http://www.sgi.com/tech/stl/Allocators.html
- */
-template<typename T>
- class device_new_allocator
-{
- public:
- /*! Type of element allocated, \c T. */
- typedef T value_type;
-
- /*! Pointer to allocation, \c device_ptr<T>. */
- typedef device_ptr<T> pointer;
-
- /*! \c const pointer to allocation, \c device_ptr<const T>. */
- typedef device_ptr<const T> const_pointer;
-
- /*! Reference to allocated element, \c device_reference<T>. */
- typedef device_reference<T> reference;
-
- /*! \c const reference to allocated element, \c device_reference<const T>. */
- typedef device_reference<const T> const_reference;
-
- /*! Type of allocation size, \c std::size_t. */
- typedef std::size_t size_type;
-
- /*! Type of allocation difference, \c pointer::difference_type. */
- typedef typename pointer::difference_type difference_type;
-
- /*! The \p rebind metafunction provides the type of a \p device_new_allocator
- * instantiated with another type.
- *
- * \tparam U The other type to use for instantiation.
- */
- template<typename U>
- struct rebind
- {
- /*! The typedef \p other gives the type of the rebound \p device_new_allocator.
- */
- typedef device_new_allocator<U> other;
- }; // end rebind
-
- /*! No-argument constructor has no effect. */
- __host__ __device__
- inline device_new_allocator() {}
-
- /*! No-argument destructor has no effect. */
- __host__ __device__
- inline ~device_new_allocator() {}
-
- /*! Copy constructor has no effect. */
- __host__ __device__
- inline device_new_allocator(device_new_allocator const&) {}
-
- /*! Constructor from other \p device_new_allocator has no effect. */
- template<typename U>
- __host__ __device__
- inline device_new_allocator(device_new_allocator<U> const&) {}
-
- /*! Returns the address of an allocated object.
- * \return &r .
- */
- __host__ __device__
- inline pointer address(reference r) { return &r; }
-
- /*! Returns the address an allocated object.
- * \return &r .
- */
- __host__ __device__
- inline const_pointer address(const_reference r) { return &r; }
-
- /*! Allocates storage for \p cnt objects.
- * \param cnt The number of objects to allocate.
- * \return A \p pointer to uninitialized storage for \p cnt objects.
- * \note Memory allocated by this function must be deallocated with \p deallocate.
- */
- __host__
- inline pointer allocate(size_type cnt,
- const_pointer = const_pointer(static_cast<T*>(0)))
- {
- if(cnt > this->max_size())
- {
- throw std::bad_alloc();
- } // end if
-
- // use "::operator new" rather than keyword new
- return pointer(device_new<T>(cnt));
- } // end allocate()
-
- /*! Deallocates storage for objects allocated with \p allocate.
- * \param p A \p pointer to the storage to deallocate.
- * \param cnt The size of the previous allocation.
- * \note Memory deallocated by this function must previously have been
- * allocated with \p allocate.
- */
- __host__
- inline void deallocate(pointer p, size_type cnt)
- {
- // use "::operator delete" rather than keyword delete
- (void)cnt;
- device_delete(p);
- } // end deallocate()
-
- /*! Returns the largest value \c n for which allocate(n) might succeed.
- * \return The largest value \c n for which allocate(n) might succeed.
- */
- __host__ __device__
- inline size_type max_size() const
- {
- return std::numeric_limits<size_type>::max THRUST_PREVENT_MACRO_SUBSTITUTION () / sizeof(T);
- } // end max_size()
-
- /*! Compares against another \p device_new_allocator for equality.
- * \return \c true
- */
- __host__ __device__
- inline bool operator==(device_new_allocator const&) { return true; }
-
- /*! Compares against another \p device_new_allocator for inequality.
- * \return \c false
- */
- __host__ __device__
- inline bool operator!=(device_new_allocator const &a) {return !operator==(a); }
-}; // end device_new_allocator
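-
-// A minimal usage sketch for the allocator above (illustrative only):
-//
-//   thrust::device_new_allocator<int> alloc;
-//   thrust::device_ptr<int> p = alloc.allocate(10);
-//   alloc.deallocate(p, 10);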
-
-/*! \}
- */
-
-} // end thrust
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h
deleted file mode 100644
index 5d6f1f748ffea9d1b3a33c764cc2ac307b51a5f8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/count.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// the purpose of this header is to #include the count.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch count
-
-#include <thrust/system/detail/sequential/count.h>
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include <thrust/system/cpp/detail/count.h>
-#include <thrust/system/cuda/detail/count.h>
-#include <thrust/system/omp/detail/count.h>
-#include <thrust/system/tbb/detail/count.h>
-#endif
-
-#define __THRUST_HOST_SYSTEM_COUNT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/count.h>
-#include __THRUST_HOST_SYSTEM_COUNT_HEADER
-#undef __THRUST_HOST_SYSTEM_COUNT_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_COUNT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/count.h>
-#include __THRUST_DEVICE_SYSTEM_COUNT_HEADER
-#undef __THRUST_DEVICE_SYSTEM_COUNT_HEADER
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h
deleted file mode 100644
index 04388cbc008f031de63fc814b95d11485ec27fac..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/unique.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/detail/generic/tag.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-
-template<typename DerivedPolicy, typename ForwardIterator>
-__host__ __device__
-ForwardIterator unique(thrust::execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last);
-
-
-template<typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-__host__ __device__
-ForwardIterator unique(thrust::execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- BinaryPredicate binary_pred);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator>
-__host__ __device__
-OutputIterator unique_copy(thrust::execution_policy<DerivedPolicy> &exec,
- InputIterator first,
- InputIterator last,
- OutputIterator output);
-
-
-template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryPredicate>
-__host__ __device__
-OutputIterator unique_copy(thrust::execution_policy<DerivedPolicy> &exec,
- InputIterator first,
- InputIterator last,
- OutputIterator output,
- BinaryPredicate binary_pred);
-
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/unique.inl>
-
diff --git a/spaces/CVPR/time/README.md b/spaces/CVPR/time/README.md
deleted file mode 100644
index 3aec05e7b83e903d79963f55321a30aa7fe366e1..0000000000000000000000000000000000000000
--- a/spaces/CVPR/time/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: Time
-emoji: ⏰
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.0.17
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-This is the demo for It's About Time: Analog Clock Reading in the Wild
-Charig Yang, Weidi Xie, Andrew Zisserman
-CVPR 2022
-
-Project page: https://www.robots.ox.ac.uk/~vgg/research/time/
-Video: https://www.youtube.com/watch?v=6pYOi92XsGQ
-
-Note that the model takes in a cropped image (i.e. we do not run an object detector here).
diff --git a/spaces/Cecil8352/vits-models/transforms.py b/spaces/Cecil8352/vits-models/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/Cecil8352/vits-models/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
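-# A minimal shape sketch for the transform above (illustrative only; the batch,
-# channel, and bin sizes are arbitrary):
-#
-#     x = torch.randn(2, 16)
-#     w = torch.randn(2, 16, 8)   # unnormalized_widths, 8 bins
-#     h = torch.randn(2, 16, 8)   # unnormalized_heights
-#     d = torch.randn(2, 16, 7)   # unnormalized_derivatives (num_bins - 1; padded for 'linear' tails)
-#     y, logabsdet = piecewise_rational_quadratic_transform(
-#         x, w, h, d, tails='linear', tail_bound=5.0)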
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
diff --git a/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp b/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp
deleted file mode 100644
index ccec09b5c4fb529599889f729e65c48cbb8721ce..0000000000000000000000000000000000000000
--- a/spaces/Chomkwoy/Nilkessye/cpool_new/src/top_pool.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-// #include
-#include <torch/extension.h>
-
-#include <vector>
-
-std::vector<torch::Tensor> top_pool_forward(
- torch::Tensor input
-) {
- // Initialize output
- torch::Tensor output = torch::zeros_like(input);
-
- // Get height
- int64_t height = input.size(2);
-
- // Copy the last column
- torch::Tensor input_temp = input.select(2, height - 1);
- torch::Tensor output_temp = output.select(2, height - 1);
- output_temp.copy_(input_temp);
-
- torch::Tensor max_temp;
- for (int64_t ind = 1; ind < height; ++ind) {
- input_temp = input.select(2, height - ind - 1);
- output_temp = output.select(2, height - ind);
- max_temp = output.select(2, height - ind - 1);
-
- torch::max_out(max_temp, input_temp, output_temp);
- }
-
- return {
- output
- };
-}
-
-std::vector<torch::Tensor> top_pool_backward(
- torch::Tensor input,
- torch::Tensor grad_output
-) {
- auto output = torch::zeros_like(input);
-
- int32_t batch = input.size(0);
- int32_t channel = input.size(1);
- int32_t height = input.size(2);
- int32_t width = input.size(3);
-
- // auto max_val = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width});
- // auto max_ind = torch::zeros(torch::CUDA(torch::kLong), {batch, channel, width});
- auto max_val = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
- auto max_ind = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kLong).device(torch::kCUDA));
-
- auto input_temp = input.select(2, height - 1);
- max_val.copy_(input_temp);
-
- max_ind.fill_(height - 1);
-
- auto output_temp = output.select(2, height - 1);
- auto grad_output_temp = grad_output.select(2, height - 1);
- output_temp.copy_(grad_output_temp);
-
- auto un_max_ind = max_ind.unsqueeze(2);
- // auto gt_mask = torch::zeros(torch::CUDA(torch::kByte), {batch, channel, width});
- // auto max_temp = torch::zeros(torch::CUDA(torch::kFloat), {batch, channel, width});
- auto gt_mask = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kByte).device(torch::kCUDA));
- auto max_temp = torch::zeros({batch, channel, width}, torch::TensorOptions().dtype(torch::kFloat).device(torch::kCUDA));
-
- for (int32_t ind = 1; ind < height; ++ind) {
- input_temp = input.select(2, height - ind - 1);
- torch::gt_out(gt_mask, input_temp, max_val);
-
- torch::masked_select_out(max_temp, input_temp, gt_mask);
- max_val.masked_scatter_(gt_mask, max_temp);
- max_ind.masked_fill_(gt_mask, height - ind - 1);
-
- grad_output_temp = grad_output.select(2, height - ind - 1).unsqueeze(2);
- output.scatter_add_(2, un_max_ind, grad_output_temp);
- }
-
- return {
- output
- };
-}
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
- m.def(
- "forward", &top_pool_forward, "Top Pool Forward",
- py::call_guard<py::gil_scoped_release>()
- );
- m.def(
- "backward", &top_pool_backward, "Top Pool Backward",
- py::call_guard<py::gil_scoped_release>()
- );
-}
diff --git a/spaces/CofAI/CalculatorUI/README.md b/spaces/CofAI/CalculatorUI/README.md
deleted file mode 100644
index cfd8bd59c6513a80295610beb994d7bf287fed7f..0000000000000000000000000000000000000000
--- a/spaces/CofAI/CalculatorUI/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: CalculatorUI
-emoji: ➕️➖️✖️➗️
-colorFrom: gray
-colorTo: gray
-sdk: static
-pinned: false
----
-
-This is a calculator UI model from CofAI. Feel free to copy it and build on it; we don't mind, and you can even make money with it. Thank you!
\ No newline at end of file
diff --git a/spaces/CofAI/picgen/README.md b/spaces/CofAI/picgen/README.md
deleted file mode 100644
index 610aa37aeb8322d2009fee64cbe5a2f3b2a1e05d..0000000000000000000000000000000000000000
--- a/spaces/CofAI/picgen/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: PicGen
-emoji: 🖼☕🖼
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
-license: creativeml-openrail-m
-duplicated_from: null
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py
deleted file mode 100644
index 0dab12dc305b88e880d1babde3ba3c7825132802..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/logger.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# A simple torch style logger
-# (C) Wei YANG 2017
-from __future__ import absolute_import
-# import matplotlib.pyplot as plt
-import matplotlib
-matplotlib.use('pdf')
-import matplotlib.pyplot as plt
-import os
-import sys
-import numpy as np
-
-__all__ = ['Logger', 'LoggerMonitor', 'savefig']
-
-def savefig(fname, dpi=None):
- dpi = 150 if dpi == None else dpi
- plt.savefig(fname, dpi=dpi)
-
-def plot_overlap(logger, names=None):
- names = logger.names if names == None else names
- numbers = logger.numbers
- for _, name in enumerate(names):
- x = np.arange(len(numbers[name]))
- plt.plot(x, np.asarray(numbers[name]))
- return [logger.title + '(' + name + ')' for name in names]
-
-class Logger(object):
- '''Save training process to log file with simple plot function.'''
- def __init__(self, fpath, title=None, resume=False):
- self.file = None
- self.resume = resume
- self.title = '' if title == None else title
- if fpath is not None:
- if resume:
- self.file = open(fpath, 'r')
- name = self.file.readline()
- self.names = name.rstrip().split('\t')
- self.numbers = {}
- for _, name in enumerate(self.names):
- self.numbers[name] = []
-
- for numbers in self.file:
- numbers = numbers.rstrip().split('\t')
- for i in range(0, len(numbers)):
- self.numbers[self.names[i]].append(numbers[i])
- self.file.close()
- self.file = open(fpath, 'a')
- else:
- self.file = open(fpath, 'w')
-
- def set_names(self, names):
- if self.resume:
- pass
- # initialize numbers as empty list
- self.numbers = {}
- self.names = names
- for _, name in enumerate(self.names):
- self.file.write(name)
- self.file.write('\t')
- self.numbers[name] = []
- self.file.write('\n')
- self.file.flush()
-
-
- def append(self, numbers):
- assert len(self.names) == len(numbers), 'Numbers do not match names'
- for index, num in enumerate(numbers):
- self.file.write("{0:.6f}".format(num))
- self.file.write('\t')
- self.numbers[self.names[index]].append(num)
- self.file.write('\n')
- self.file.flush()
-
- def plot(self, names=None):
- print('plot')
- '''
- names = self.names if names == None else names
- numbers = self.numbers
- for _, name in enumerate(names):
- x = np.arange(len(numbers[name]))
- plt.plot(x, np.asarray(numbers[name]))
- plt.legend([self.title + '(' + name + ')' for name in names])
- plt.grid(True)
- '''
-
- def close(self):
- if self.file is not None:
- self.file.close()
-
-class LoggerMonitor(object):
- '''Load and visualize multiple logs.'''
- def __init__ (self, paths):
- '''paths is a dictionary with {name:filepath} pairs'''
- self.loggers = []
- for title, path in paths.items():
- logger = Logger(path, title=title, resume=True)
- self.loggers.append(logger)
-
- def plot(self, names=None):
- plt.figure()
- plt.subplot(121)
- legend_text = []
- for logger in self.loggers:
- legend_text += plot_overlap(logger, names)
- plt.legend(legend_text, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
- plt.grid(True)
-
-if __name__ == '__main__':
- # # Example
- # logger = Logger('test.txt')
- # logger.set_names(['Train loss', 'Valid loss','Test loss'])
-
- # length = 100
- # t = np.arange(length)
- # train_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
- # valid_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
- # test_loss = np.exp(-t / 10.0) + np.random.rand(length) * 0.1
-
- # for i in range(0, length):
- # logger.append([train_loss[i], valid_loss[i], test_loss[i]])
- # logger.plot()
-
- # Example: logger monitor
- paths = {
- 'resadvnet20':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet20/log.txt',
- 'resadvnet32':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet32/log.txt',
- 'resadvnet44':'/home/wyang/code/pytorch-classification/checkpoint/cifar10/resadvnet44/log.txt',
- }
-
- field = ['Valid Acc.']
-
- monitor = LoggerMonitor(paths)
- monitor.plot(names=field)
- savefig('test.eps')
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js
deleted file mode 100644
index 8592c28a0ef2444b42fa5e9d304fc94fdcb48e18..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-3610549a.js
+++ /dev/null
@@ -1,3 +0,0 @@
-import{S as I,e as J,s as K,J as U,K as u,p as j,M as y,n as P,A as E,N as R,O as V,P as D,L as F,Z as Le,ar as je,R as G,G as T,m as Z,V as Y,B as be,C as Ee,av as Q,aj as Ae,X as Ce,k as O,o as X,z as B,v as S,x as q,E as Me,ae as ze,q as Te,r as Be,u as pe,y as ke}from"./index-1d65707a.js";import{U as Se}from"./Upload-9bb55fba.js";import{M as Ue}from"./ModifyUpload-c89cfce3.js";import{B as Ne}from"./Button-f155035a.js";import{B as Fe}from"./BlockLabel-66866176.js";import{E as Oe}from"./Empty-eec13822.js";import{g as Xe}from"./color-90ab3aab.js";import{a as qe}from"./csv-b0b7514a.js";import{Z as x,_ as $,l as ee}from"./linear-58a44b5e.js";import{U as He}from"./UploadText-f599be03.js";import"./Blocks-c9e1499d.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import"./IconButton-d42f3661.js";import"./dsv-576afacd.js";function Pe(l){let e,n,t;return{c(){e=U("svg"),n=U("path"),t=U("path"),u(n,"d","M28.828 3.172a4.094 4.094 0 0 0-5.656 0L4.05 22.292A6.954 6.954 0 0 0 2 27.242V30h2.756a6.952 6.952 0 0 0 4.95-2.05L28.828 8.829a3.999 3.999 0 0 0 0-5.657zM10.91 18.26l2.829 2.829l-2.122 2.121l-2.828-2.828zm-2.619 8.276A4.966 4.966 0 0 1 4.756 28H4v-.759a4.967 4.967 0 0 1 1.464-3.535l1.91-1.91l2.829 2.828zM27.415 7.414l-12.261 12.26l-2.829-2.828l12.262-12.26a2.047 2.047 0 0 1 2.828 0a2 2 0 0 1 0 2.828z"),u(n,"fill","currentColor"),u(t,"d","M6.5 15a3.5 3.5 0 0 1-2.475-5.974l3.5-3.5a1.502 1.502 0 0 0 0-2.121a1.537 1.537 0 0 0-2.121 0L3.415 5.394L2 3.98l1.99-1.988a3.585 3.585 0 0 1 4.95 0a3.504 3.504 0 0 1 0 4.949L5.439 10.44a1.502 1.502 0 0 0 0 2.121a1.537 1.537 0 0 0 2.122 0l4.024-4.024L13 9.95l-4.025 4.024A3.475 3.475 0 0 1 6.5 15z"),u(t,"fill","currentColor"),u(e,"width","1em"),u(e,"height","1em"),u(e,"viewBox","0 0 32 32")},m(a,s){j(a,e,s),y(e,n),y(e,t)},p:P,i:P,o:P,d(a){a&&E(e)}}}let ye=class extends I{constructor(e){super(),J(this,e,null,Pe,K,{})}};function le(l){let e;return Array.isArray(l)?e=l.reduce((n,{values:t})=>[...n,...t.map(({y:a})=>a)],[]):e=l.values,[Math.min(...e),Math.max(...e)]}function te(l,e,n){const t=Object.entries(l[0]).reduce((a,s,o)=>(!e&&o===0||e&&s[0]===e?a.x.name=s[0]:(!n||n&&n.includes(s[0]))&&a.y.push({name:s[0],values:[]}),a),{x:{name:"",values:[]},y:[]});for(let a=0;al[6].call(e))},m(o,_){j(o,e,_),y(e,n),y(e,t),y(e,a),s=je(e,l[6].bind(e))},p(o,[_]){_&8&&F(n,"background",o[3]),_&1&&G(a,o[0]),_&36&&F(e,"top",o[2]-o[5]/2+"px"),_&18&&F(e,"left",o[1]-o[4]-7+"px")},i:P,o:P,d(o){o&&E(e),s()}}}function Ve(l,e,n){let{text:t}=e,{x:a}=e,{y:s}=e,{color:o}=e,_,i;function v(){_=this.offsetWidth,i=this.offsetHeight,n(4,_),n(5,i)}return l.$$set=g=>{"text"in g&&n(0,t=g.text),"x"in g&&n(1,a=g.x),"y"in g&&n(2,s=g.y),"color"in g&&n(3,o=g.color)},[t,a,s,o,_,i,v]}class Ye extends I{constructor(e){super(),J(this,e,Ve,Re,K,{text:0,x:1,y:2,color:3})}}function Ze(l,{color:e,text:n}){let t;function a(i){return t=new Ye({props:{text:n,x:i.pageX,y:i.pageY,color:e},target:document.body}),i}function s(i){t.$set({x:i.pageX,y:i.pageY})}function o(){t.$destroy()}const _=l;return _.addEventListener("mouseover",a),_.addEventListener("mouseleave",o),_.addEventListener("mousemove",s),{destroy(){_.removeEventListener("mouseover",a),_.removeEventListener("mouseleave",o),_.removeEventListener("mousemove",s)}}}function ne(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return t[18]=a,t}function ae(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function oe(l,e,n){const t=l.slice();t[16]=e[n].name,t[17]=e[n].values;const a=t[8][t[16]];return 
t[18]=a,t}function se(l,e,n){const t=l.slice();return t[0]=e[n].x,t[1]=e[n].y,t}function re(l,e,n){const t=l.slice();return t[27]=e[n],t}function ie(l,e,n){const t=l.slice();return t[27]=e[n],t}function fe(l,e,n){const t=l.slice();return t[16]=e[n].name,t}function _e(l){let e,n,t,a=l[16]+"",s,o;return{c(){e=R("div"),n=R("span"),t=V(),s=D(a),o=V(),u(n,"class","legend-box svelte-1mjxput"),F(n,"background-color",l[8][l[16]]),u(e,"class","legend-item svelte-1mjxput")},m(_,i){j(_,e,i),y(e,n),y(e,t),y(e,s),y(e,o)},p(_,i){i[0]&260&&F(n,"background-color",_[8][_[16]]),i[0]&4&&a!==(a=_[16]+"")&&G(s,a)},d(_){_&&E(e)}}}function ue(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"x1",n=l[5](l[27])),u(e,"x2",t=l[5](l[27])),u(e,"y1",a=l[4](l[9][0]l[9][l[9].length-1]?l[6][1]:l[9][l[9].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","middle"),u(o,"x",v=l[5](l[27])),u(o,"y",g=l[4](l[9][0])+30)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&1056&&n!==(n=f[5](f[27]))&&u(e,"x1",n),h[0]&1056&&t!==(t=f[5](f[27]))&&u(e,"x2",t),h[0]&592&&a!==(a=f[4](f[9][0]f[9][f[9].length-1]?f[6][1]:f[9][f[9].length-1]))&&u(e,"y2",s),h[0]&1024&&_!==(_=f[27]+"")&&G(i,_),h[0]&1056&&v!==(v=f[5](f[27]))&&u(o,"x",v),h[0]&528&&g!==(g=f[4](f[9][0])+30)&&u(o,"y",g)},d(f){f&&(E(e),E(o))}}}function ce(l){let e,n,t,a,s,o,_=l[27]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[27])),u(e,"y2",t=l[4](l[27])),u(e,"x1",a=l[5](l[10][0]l[10][l[10].length-1]?l[7][1]:l[10][l[10].length-1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[27])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&528&&n!==(n=f[4](f[27]))&&u(e,"y1",n),h[0]&528&&t!==(t=f[4](f[27]))&&u(e,"y2",t),h[0]&1184&&a!==(a=f[5](f[10][0]f[10][f[10].length-1]?f[7][1]:f[10][f[10].length-1]))&&u(e,"x2",s),h[0]&512&&_!==(_=f[27]+"")&&G(i,_),h[0]&528&&v!==(v=f[4](f[27])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function me(l){let e,n,t,a,s,o,_=l[6][1]+"",i,v,g;return{c(){e=U("line"),o=U("text"),i=D(_),u(e,"stroke-width","0.5"),u(e,"y1",n=l[4](l[6][1])),u(e,"y2",t=l[4](l[6][1])),u(e,"x1",a=l[5](l[10][0])),u(e,"x2",s=l[5](l[7][1])),u(e,"stroke","#aaa"),u(o,"class","label-text svelte-1mjxput"),u(o,"text-anchor","end"),u(o,"y",v=l[4](l[6][1])+4),u(o,"x",g=l[5](l[10][0])-20)},m(f,h){j(f,e,h),j(f,o,h),y(o,i)},p(f,h){h[0]&80&&n!==(n=f[4](f[6][1]))&&u(e,"y1",n),h[0]&80&&t!==(t=f[4](f[6][1]))&&u(e,"y2",t),h[0]&1056&&a!==(a=f[5](f[10][0]))&&u(e,"x1",a),h[0]&160&&s!==(s=f[5](f[7][1]))&&u(e,"x2",s),h[0]&64&&_!==(_=f[6][1]+"")&&G(i,_),h[0]&80&&v!==(v=f[4](f[6][1])+4)&&u(o,"y",v),h[0]&1056&&g!==(g=f[5](f[10][0])-20)&&u(o,"x",g)},d(f){f&&(E(e),E(o))}}}function he(l){let e,n,t,a;return{c(){e=U("circle"),u(e,"r","3.5"),u(e,"cx",n=l[5](l[0])),u(e,"cy",t=l[4](l[1])),u(e,"stroke-width","1.5"),u(e,"stroke",a=l[18]),u(e,"fill","none")},m(s,o){j(s,e,o)},p(s,o){o[0]&36&&n!==(n=s[5](s[0]))&&u(e,"cx",n),o[0]&20&&t!==(t=s[4](s[1]))&&u(e,"cy",t),o[0]&260&&a!==(a=s[18])&&u(e,"stroke",a)},d(s){s&&E(e)}}}function ge(l){let e,n,t,a=T(l[17]),s=[];for(let o=0;ol[9][l[9].length-1]&&me(l),C=T(l[2]),L=[];for(let c=0;cc[9][c[9].length-1]?d?d.p(c,z):(d=me(c),d.c(),d.m(s,null)):d&&(d.d(1),d=null),z[0]&308){C=T(c[2]);let r;for(r=0;r{b("process",{x:t,y:a})});const k=({x:d,y:C})=>[_(d),i(C)];return l.$$set=d=>{"value"in d&&n(11,f=d.value),"x"in 
d&&n(0,h=d.x),"y"in d&&n(1,A=d.y),"colors"in d&&n(12,m=d.colors)},l.$$.update=()=>{l.$$.dirty[0]&2051&&n(3,{x:t,y:a}=te(typeof f=="string"?qe(f):f,h,A),t,(n(2,a),n(11,f),n(0,h),n(1,A))),l.$$.dirty[0]&8&&n(7,s=le(t)),l.$$.dirty[0]&4&&n(6,o=le(a)),l.$$.dirty[0]&128&&n(5,_=x(s,[0,600]).nice()),l.$$.dirty[0]&64&&n(4,i=x(o,[350,0]).nice()),l.$$.dirty[0]&32&&n(10,v=_.ticks(8)),l.$$.dirty[0]&16&&n(9,g=i.ticks(8)),l.$$.dirty[0]&4&&n(8,p=a.reduce((d,C,L)=>({...d,[C.name]:N(L)}),{}))},[h,A,a,t,i,_,o,s,p,g,v,f,m,k]}class we extends I{constructor(e){super(),J(this,e,Ge,De,K,{value:11,x:0,y:1,colors:12},null,[-1,-1])}}function Ie(l){let e,n;return e=new Se({props:{filetype:"text/csv",include_file_metadata:!1,$$slots:{default:[We]},$$scope:{ctx:l}}}),e.$on("load",l[19]),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Je(l){let e,n,t,a,s;return n=new Ue({}),n.$on("clear",l[17]),a=new we({props:{value:l[14],y:l[4],x:l[5],colors:l[9]}}),a.$on("process",l[18]),{c(){e=R("div"),O(n.$$.fragment),t=V(),O(a.$$.fragment),u(e,"class","chart svelte-etmurc")},m(o,_){j(o,e,_),X(n,e,null),y(e,t),X(a,e,null),s=!0},p(o,_){const i={};_&16384&&(i.value=o[14]),_&16&&(i.y=o[4]),_&32&&(i.x=o[5]),_&512&&(i.colors=o[9]),a.$set(i)},i(o){s||(B(n.$$.fragment,o),B(a.$$.fragment,o),s=!0)},o(o){S(n.$$.fragment,o),S(a.$$.fragment,o),s=!1},d(o){o&&E(e),q(n),q(a)}}}function Ke(l){let e,n,t,a;const s=[xe,Qe],o=[];function _(i,v){return i[15]?0:1}return e=_(l),n=o[e]=s[e](l),{c(){n.c(),t=Z()},m(i,v){o[e].m(i,v),j(i,t,v),a=!0},p(i,v){let g=e;e=_(i),e===g?o[e].p(i,v):(pe(),S(o[g],1,1,()=>{o[g]=null}),ke(),n=o[e],n?n.p(i,v):(n=o[e]=s[e](i),n.c()),B(n,1),n.m(t.parentNode,t))},i(i){a||(B(n),a=!0)},o(i){S(n),a=!1},d(i){i&&E(t),o[e].d(i)}}}function We(l){let e,n;return e=new He({props:{type:"csv"}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p:P,i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function Qe(l){let e,n;return e=new Oe({props:{unpadded_box:!0,size:"large",$$slots:{default:[$e]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&8388608&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function xe(l){let e,n;return e=new we({props:{value:l[15],colors:l[9]}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,a){const s={};a&32768&&(s.value=t[15]),a&512&&(s.colors=t[9]),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function $e(l){let e,n;return e=new ye({}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function el(l){let e,n,t,a,s,o,_,i;e=new Fe({props:{show_label:l[8],Icon:ye,label:l[7]||"TimeSeries"}});const v=[l[13]];let g={};for(let m=0;m{h[k]=null}),ke()),~s?(o=h[s],o?o.p(m,b):(o=h[s]=f[s](m),o.c()),B(o,1),o.m(_.parentNode,_)):o=null)},i(m){i||(B(e.$$.fragment,m),B(t.$$.fragment,m),B(o),i=!0)},o(m){S(e.$$.fragment,m),S(t.$$.fragment,m),S(o),i=!1},d(m){m&&(E(n),E(a),E(_)),q(e,m),q(t,m),~s&&h[s].d(m)}}}function ll(l){let e,n;return e=new Ne({props:{visible:l[3],variant:l[6]==="dynamic"&&!l[14]?"dashed":"solid",padding:!1,elem_id:l[1],elem_classes:l[2],container:l[10],scale:l[11],min_width:l[12],$$slots:{default:[el]},$$scope:{ctx:l}}}),{c(){O(e.$$.fragment)},m(t,a){X(e,t,a),n=!0},p(t,[a]){const 
s={};a&8&&(s.visible=t[3]),a&16448&&(s.variant=t[6]==="dynamic"&&!t[14]?"dashed":"solid"),a&2&&(s.elem_id=t[1]),a&4&&(s.elem_classes=t[2]),a&1024&&(s.container=t[10]),a&2048&&(s.scale=t[11]),a&4096&&(s.min_width=t[12]),a&8446961&&(s.$$scope={dirty:a,ctx:t}),e.$set(s)},i(t){n||(B(e.$$.fragment,t),n=!0)},o(t){S(e.$$.fragment,t),n=!1},d(t){q(e,t)}}}function tl(l){return l.data.map(e=>e.reduce((n,t,a)=>({...n,[l.headers[a]]:t}),{}))}function nl(l){const e=atob(l.split(",")[1]),n=l.split(",")[0].split(":")[1].split(";")[0],t=new ArrayBuffer(e.length),a=new Uint8Array(t);for(let s=0;sn.push(a));for(let a=0;as.push(o[a].y)),t.push(s)}return{headers:n,data:t}}function ol(l,e,n){let t;const a=be();let{elem_id:s=""}=e,{elem_classes:o=[]}=e,{visible:_=!0}=e,{value:i}=e,{y:v}=e,{x:g}=e,{mode:f}=e,{label:h}=e,{show_label:A}=e,{colors:m}=e,{container:b=!0}=e,{scale:p=null}=e,{min_width:N=void 0}=e,{loading_status:k}=e,d;function C(r){const w=new FileReader;w.addEventListener("loadend",W=>{n(14,d=W.srcElement.result)}),w.readAsText(r)}function L(r){r.headers&&n(14,d=r.headers.join(",")),r.data.forEach(W=>{n(14,d=d+`
-`),n(14,d=d+W.join(","))})}function H(r){return n(0,i={data:r}),r}function M({detail:r}){n(0,i=null),a("change"),a("clear")}const c=({detail:{x:r,y:w}})=>n(0,i=al(r,w)),z=({detail:r})=>H(r);return l.$$set=r=>{"elem_id"in r&&n(1,s=r.elem_id),"elem_classes"in r&&n(2,o=r.elem_classes),"visible"in r&&n(3,_=r.visible),"value"in r&&n(0,i=r.value),"y"in r&&n(4,v=r.y),"x"in r&&n(5,g=r.x),"mode"in r&&n(6,f=r.mode),"label"in r&&n(7,h=r.label),"show_label"in r&&n(8,A=r.show_label),"colors"in r&&n(9,m=r.colors),"container"in r&&n(10,b=r.container),"scale"in r&&n(11,p=r.scale),"min_width"in r&&n(12,N=r.min_width),"loading_status"in r&&n(13,k=r.loading_status)},l.$$.update=()=>{l.$$.dirty&1&&(i&&i.data&&typeof i.data=="string"?i?C(nl(i.data)):n(14,d=null):i&&i.data&&typeof i.data!="string"&&(i||n(14,d=null),L(i))),l.$$.dirty&16385&&n(14,d=i==null?null:d),l.$$.dirty&65&&n(15,t=f==="static"&&i&&tl(i)),l.$$.dirty&1&&a("change")},[i,s,o,_,v,g,f,h,A,m,b,p,N,k,d,t,H,M,c,z]}class sl extends I{constructor(e){super(),J(this,e,ol,ll,K,{elem_id:1,elem_classes:2,visible:3,value:0,y:4,x:5,mode:6,label:7,show_label:8,colors:9,container:10,scale:11,min_width:12,loading_status:13})}}const wl=sl,Ll=["static","dynamic"],jl=l=>({type:{payload:"{data: Array> | string; headers?: Array;}"},description:{payload:"dataset of series"}});export{wl as Component,jl as document,Ll as modes};
-//# sourceMappingURL=index-3610549a.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js
deleted file mode 100644
index ea168e78136b367a8d320a007b4444d739585b7a..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-82eb6288.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{L as s}from"./index-f8ff95a1.js";import{s as o,t as r,L as n,i as P,w as a,f as i,a as Q,b as p}from"./index-3ba00a4a.js";import"./index-1d65707a.js";import"./Blocks-c9e1499d.js";import"./Button-f155035a.js";import"./BlockLabel-66866176.js";import"./Empty-eec13822.js";import"./Copy-9f1657c4.js";import"./Download-daff1959.js";const c=o({String:r.string,Number:r.number,"True False":r.bool,PropertyName:r.propertyName,Null:r.null,",":r.separator,"[ ]":r.squareBracket,"{ }":r.brace}),g=s.deserialize({version:14,states:"$bOVQPOOOOQO'#Cb'#CbOnQPO'#CeOvQPO'#CjOOQO'#Cp'#CpQOQPOOOOQO'#Cg'#CgO}QPO'#CfO!SQPO'#CrOOQO,59P,59PO![QPO,59PO!aQPO'#CuOOQO,59U,59UO!iQPO,59UOVQPO,59QOqQPO'#CkO!nQPO,59^OOQO1G.k1G.kOVQPO'#ClO!vQPO,59aOOQO1G.p1G.pOOQO1G.l1G.lOOQO,59V,59VOOQO-E6i-E6iOOQO,59W,59WOOQO-E6j-E6j",stateData:"#O~OcOS~OQSORSOSSOTSOWQO]ROePO~OVXOeUO~O[[O~PVOg^O~Oh_OVfX~OVaO~OhbO[iX~O[dO~Oh_OVfa~OhbO[ia~O",goto:"!kjPPPPPPkPPkqwPPk{!RPPP!XP!ePP!hXSOR^bQWQRf_TVQ_Q`WRg`QcZRicQTOQZRQe^RhbRYQR]R",nodeNames:"⚠ JsonText True False Null Number String } { Object Property PropertyName ] [ Array",maxTerm:25,nodeProps:[["openedBy",7,"{",12,"["],["closedBy",8,"}",13,"]"]],propSources:[c],skippedNodes:[0],repeatNodeCount:2,tokenData:"(p~RaXY!WYZ!W]^!Wpq!Wrs!]|}$i}!O$n!Q!R$w!R![&V![!]&h!}#O&m#P#Q&r#Y#Z&w#b#c'f#h#i'}#o#p(f#q#r(k~!]Oc~~!`Upq!]qr!]rs!rs#O!]#O#P!w#P~!]~!wOe~~!zXrs!]!P!Q!]#O#P!]#U#V!]#Y#Z!]#b#c!]#f#g!]#h#i!]#i#j#g~#jR!Q![#s!c!i#s#T#Z#s~#vR!Q![$P!c!i$P#T#Z$P~$SR!Q![$]!c!i$]#T#Z$]~$`R!Q![!]!c!i!]#T#Z!]~$nOh~~$qQ!Q!R$w!R![&V~$|RT~!O!P%V!g!h%k#X#Y%k~%YP!Q![%]~%bRT~!Q![%]!g!h%k#X#Y%k~%nR{|%w}!O%w!Q![%}~%zP!Q![%}~&SPT~!Q![%}~&[ST~!O!P%V!Q![&V!g!h%k#X#Y%k~&mOg~~&rO]~~&wO[~~&zP#T#U&}~'QP#`#a'T~'WP#g#h'Z~'^P#X#Y'a~'fOR~~'iP#i#j'l~'oP#`#a'r~'uP#`#a'x~'}OS~~(QP#f#g(T~(WP#i#j(Z~(^P#X#Y(a~(fOQ~~(kOW~~(pOV~",tokenizers:[0],topRules:{JsonText:[0,1]},tokenPrec:0}),V=()=>t=>{try{JSON.parse(t.state.doc.toString())}catch(O){if(!(O instanceof SyntaxError))throw O;const e=m(O,t.state.doc);return[{from:e,message:O.message,severity:"error",to:e}]}return[]};function m(t,O){let e;return(e=t.message.match(/at position (\d+)/))?Math.min(+e[1],O.length):(e=t.message.match(/at line (\d+) column (\d+)/))?Math.min(O.line(+e[1]).from+ +e[2]-1,O.length):0}const u=n.define({name:"json",parser:g.configure({props:[P.add({Object:a({except:/^\s*\}/}),Array:a({except:/^\s*\]/})}),i.add({"Object Array":Q})]}),languageData:{closeBrackets:{brackets:["[","{",'"']},indentOnInput:/^\s*[\}\]]$/}});function $(){return new p(u)}export{$ as json,u as jsonLanguage,V as jsonParseLinter};
-//# sourceMappingURL=index-82eb6288.js.map
diff --git a/spaces/Dauzy/whisper-webui/README.md b/spaces/Dauzy/whisper-webui/README.md
deleted file mode 100644
index 21124105a487c24c3bd6d1618d74cf7df4839a5f..0000000000000000000000000000000000000000
--- a/spaces/Dauzy/whisper-webui/README.md
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: Whisper Webui
-emoji: ⚡
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: aadnk/whisper-webui
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-# Running Locally
-
-To run this program locally, first install Python 3.9+ and Git. Then install PyTorch 1.10.1+ and all the other dependencies:
-```
-pip install -r requirements.txt
-```
-
-You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf).
-
-Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled:
-```
-python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True
-```
-
-You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments:
-```
-python cli.py \
-[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \
-[--vad_merge_window VAD_MERGE_WINDOW] \
-[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \
-[--vad_padding VAD_PADDING] \
-[--vad_prompt_window VAD_PROMPT_WINDOW]
-[--vad_cpu_cores NUMBER_OF_CORES]
-[--vad_parallel_devices COMMA_DELIMITED_DEVICES]
-[--auto_parallel BOOLEAN]
-```
-You may also use URLs in addition to file paths as input.
-```
-python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM"
-```
-
-Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information.
-If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file.
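-
-For example (the path below is just a placeholder):
-```
-WHISPER_WEBUI_CONFIG=/path/to/my-config.json5 python app.py
-```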
-
-### Multiple Files
-
-You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube.
-Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section.
-When more than one file is processed, the UI will also generate a "All_Output" zip file containing all the text output files.
-
-## Whisper Implementation
-
-You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) is a drop-in replacement for the
-default Whisper that achieves up to a 4x speedup and a 2x reduction in memory usage.
-
-You can install the requirements for a specific Whisper implementation in `requirements-fasterWhisper.txt`
-or `requirements-whisper.txt`:
-```
-pip install -r requirements-fasterWhisper.txt
-```
-And then run the App or the CLI with the `--whisper_implementation faster-whisper` flag:
-```
-python app.py --whisper_implementation faster-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True
-```
-You can also select the whisper implementation in `config.json5`:
-```json5
-{
- "whisper_implementation": "faster-whisper"
-}
-```
-### GPU Acceleration
-
-In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install them in a virtual environment such as Anaconda.
-
-## Google Colab
-
-You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models.
-
-See the [colab documentation](docs/colab.md) for more information.
-
-## Parallel Execution
-
-You can also run both the Web-UI or the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of
-device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently:
-```
-python cli.py --model large --vad silero-vad --language Japanese \
---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM"
-```
-
-Note that this requires a VAD to function properly; otherwise only the first GPU will be used. You could use `periodic-vad` to avoid taking the hit
-of running Silero-Vad, at a slight cost to accuracy.
-
-This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also
-set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory.
-The default value is 30 minutes.
-
-```
-python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600
-```
-
-To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option:
-```
-python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4
-```
-
-You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time.
-
-### Auto Parallel
-
-You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of
-cores (up to 8):
-```
-python app.py --input_audio_max_duration -1 --auto_parallel True
-```
-
-# Docker
-
-To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU.
-Then either use the GitLab hosted container below, or check out this repository and build an image:
-```
-sudo docker build -t whisper-webui:1 .
-```
-
-You can then start the WebUI with GPU support like so:
-```
-sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1
-```
-
-Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only:
-```
-sudo docker run -d -p 7860:7860 whisper-webui:1
-```
-
-# GitLab Docker Registry
-
-This Docker container is also hosted on GitLab:
-
-```
-sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest
-```
-
-## Custom Arguments
-
-You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user):
-```
-sudo docker run -d --gpus all -p 7860:7860 \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \
---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \
-app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \
---default_vad silero-vad --default_model_name large
-```
-
-You can also call `cli.py` the same way:
-```
-sudo docker run --gpus all \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \
---mount type=bind,source=${PWD},target=/app/data \
-registry.gitlab.com/aadnk/whisper-webui:latest \
-cli.py --model large --auto_parallel True --vad silero-vad \
---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4
-```
-
-## Caching
-
-Note that the models themselves are currently not included in the Docker images, and will be downloaded on demand.
-To avoid this, bind the directory `/root/.cache/whisper` to a directory on the host (for instance `/home/administrator/.cache/whisper`), which you can
-(optionally) prepopulate with the different Whisper models.
-```
-sudo docker run -d --gpus=all -p 7860:7860 \
---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \
-registry.gitlab.com/aadnk/whisper-webui:latest
-```
\ No newline at end of file
diff --git a/spaces/DemoLou/moe-tts/models.py b/spaces/DemoLou/moe-tts/models.py
deleted file mode 100644
index c214bbb0476ba4777093d8bcf032961f09e59496..0000000000000000000000000000000000000000
--- a/spaces/DemoLou/moe-tts/models.py
+++ /dev/null
@@ -1,549 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # NOTE: this override should be removed in a future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
- logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- emotion_embedding):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emotion_embedding = emotion_embedding
-
- if self.n_vocab != 0:
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- if emotion_embedding:
- self.emo_proj = nn.Linear(1024, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, emotion_embedding=None):
- if self.n_vocab != 0:
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- if emotion_embedding is not None:
- x = x + self.emo_proj(emotion_embedding.unsqueeze(1))
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
- gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
- k, u, padding=(k - u) // 2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- use_sdp=True,
- emotion_embedding=False,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.use_sdp = use_sdp
-
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- emotion_embedding)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
- gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- if use_sdp:
- self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- else:
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid=None, emotion_embedding=None):
-
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
- if self.n_speakers > 1:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
- s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
- if self.use_sdp:
- l_length = self.dp(x, x_mask, w, g=g)
- l_length = l_length / torch.sum(x_mask)
- else:
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None,
- emotion_embedding=None):
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding)
- if self.n_speakers > 1:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- if self.use_sdp:
- logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
- else:
- logw = self.dp(x, x_mask, g=g)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
- 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:, :, :max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
- def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
- assert self.n_speakers > 1, "n_speakers has to be larger than 1."
- g_src = self.emb_g(sid_src).unsqueeze(-1)
- g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
- z_p = self.flow(z, y_mask, g=g_src)
- z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
- o_hat = self.dec(z_hat * y_mask, g=g_tgt)
- return o_hat, y_mask, (z, z_p, z_hat)
diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py
deleted file mode 100644
index 0db82adecb60260393eaf82bd991575d79085787..0000000000000000000000000000000000000000
--- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/metric_base.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
-#
-# This work is licensed under the Creative Commons Attribution-NonCommercial
-# 4.0 International License. To view a copy of this license, visit
-# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
-# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
-
-"""Common definitions for GAN metrics."""
-
-import os
-import time
-import hashlib
-import numpy as np
-import tensorflow as tf
-import dnnlib
-import dnnlib.tflib as tflib
-
-import config
-from training import misc
-from training import dataset
-
-#----------------------------------------------------------------------------
-# Standard metrics.
-
-fid50k = dnnlib.EasyDict(func_name='metrics.frechet_inception_distance.FID', name='fid50k', num_images=50000, minibatch_per_gpu=8)
-ppl_zfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zfull', num_samples=100000, epsilon=1e-4, space='z', sampling='full', minibatch_per_gpu=16)
-ppl_wfull = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wfull', num_samples=100000, epsilon=1e-4, space='w', sampling='full', minibatch_per_gpu=16)
-ppl_zend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_zend', num_samples=100000, epsilon=1e-4, space='z', sampling='end', minibatch_per_gpu=16)
-ppl_wend = dnnlib.EasyDict(func_name='metrics.perceptual_path_length.PPL', name='ppl_wend', num_samples=100000, epsilon=1e-4, space='w', sampling='end', minibatch_per_gpu=16)
-ls = dnnlib.EasyDict(func_name='metrics.linear_separability.LS', name='ls', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4)
-dummy = dnnlib.EasyDict(func_name='metrics.metric_base.DummyMetric', name='dummy') # for debugging
-
-#----------------------------------------------------------------------------
-# Base class for metrics.
-
-class MetricBase:
- def __init__(self, name):
- self.name = name
- self._network_pkl = None
- self._dataset_args = None
- self._mirror_augment = None
- self._results = []
- self._eval_time = None
-
- def run(self, network_pkl, run_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True):
- self._network_pkl = network_pkl
- self._dataset_args = dataset_args
- self._mirror_augment = mirror_augment
- self._results = []
-
- if (dataset_args is None or mirror_augment is None) and run_dir is not None:
- run_config = misc.parse_config_for_previous_run(run_dir)
- self._dataset_args = dict(run_config['dataset'])
- self._dataset_args['shuffle_mb'] = 0
- self._mirror_augment = run_config['train'].get('mirror_augment', False)
-
- time_begin = time.time()
- with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
- _G, _D, Gs = misc.load_pkl(self._network_pkl)
- self._evaluate(Gs, num_gpus=num_gpus)
- self._eval_time = time.time() - time_begin
-
- if log_results:
- result_str = self.get_result_str()
- if run_dir is not None:
- log = os.path.join(run_dir, 'metric-%s.txt' % self.name)
- with dnnlib.util.Logger(log, 'a'):
- print(result_str)
- else:
- print(result_str)
-
- def get_result_str(self):
- network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
- if len(network_name) > 29:
- network_name = '...' + network_name[-26:]
- result_str = '%-30s' % network_name
- result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
- for res in self._results:
- result_str += ' ' + self.name + res.suffix + ' '
- result_str += res.fmt % res.value
- return result_str
-
- def update_autosummaries(self):
- for res in self._results:
- tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
-
- def _evaluate(self, Gs, num_gpus):
- raise NotImplementedError # to be overridden by subclasses
-
- def _report_result(self, value, suffix='', fmt='%-10.4f'):
- self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
-
- def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
- all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
- all_args.update(self._dataset_args)
- all_args.update(kwargs)
- md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
- dataset_name = self._dataset_args['tfrecord_dir'].replace('\\', '/').split('/')[-1]
- return os.path.join(config.cache_dir, '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
-
- def _iterate_reals(self, minibatch_size):
- dataset_obj = dataset.load_dataset(data_dir=config.data_dir, **self._dataset_args)
- while True:
- images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
- if self._mirror_augment:
- images = misc.apply_mirror_augment(images)
- yield images
-
- def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
- while True:
- latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
- fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
- images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
- yield images
-
-#----------------------------------------------------------------------------
-# Group of multiple metrics.
-
-class MetricGroup:
- def __init__(self, metric_kwarg_list):
- self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
-
- def run(self, *args, **kwargs):
- for metric in self.metrics:
- metric.run(*args, **kwargs)
-
- def get_result_str(self):
- return ' '.join(metric.get_result_str() for metric in self.metrics)
-
- def update_autosummaries(self):
- for metric in self.metrics:
- metric.update_autosummaries()
-
-#----------------------------------------------------------------------------
-# Dummy metric for debugging purposes.
-
-class DummyMetric(MetricBase):
- def _evaluate(self, Gs, num_gpus):
- _ = Gs, num_gpus
- self._report_result(0.0)
-
-#----------------------------------------------------------------------------
diff --git a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp b/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp
deleted file mode 100644
index 8306165304355fe6d3d6e244207211757f21a646..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/STrack.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-#include "STrack.h"
-
-STrack::STrack(vector<float> tlwh_, float score)
-{
- _tlwh.resize(4);
- _tlwh.assign(tlwh_.begin(), tlwh_.end());
-
- is_activated = false;
- track_id = 0;
- state = TrackState::New;
-
- tlwh.resize(4);
- tlbr.resize(4);
-
- static_tlwh();
- static_tlbr();
- frame_id = 0;
- tracklet_len = 0;
- this->score = score;
- start_frame = 0;
-}
-
-STrack::~STrack()
-{
-}
-
-void STrack::activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id)
-{
- this->kalman_filter = kalman_filter;
- this->track_id = this->next_id();
-
- vector<float> _tlwh_tmp(4);
- _tlwh_tmp[0] = this->_tlwh[0];
- _tlwh_tmp[1] = this->_tlwh[1];
- _tlwh_tmp[2] = this->_tlwh[2];
- _tlwh_tmp[3] = this->_tlwh[3];
- vector<float> xyah = tlwh_to_xyah(_tlwh_tmp);
- DETECTBOX xyah_box;
- xyah_box[0] = xyah[0];
- xyah_box[1] = xyah[1];
- xyah_box[2] = xyah[2];
- xyah_box[3] = xyah[3];
- auto mc = this->kalman_filter.initiate(xyah_box);
- this->mean = mc.first;
- this->covariance = mc.second;
-
- static_tlwh();
- static_tlbr();
-
- this->tracklet_len = 0;
- this->state = TrackState::Tracked;
- if (frame_id == 1)
- {
- this->is_activated = true;
- }
- //this->is_activated = true;
- this->frame_id = frame_id;
- this->start_frame = frame_id;
-}
-
-void STrack::re_activate(STrack &new_track, int frame_id, bool new_id)
-{
- vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
- DETECTBOX xyah_box;
- xyah_box[0] = xyah[0];
- xyah_box[1] = xyah[1];
- xyah_box[2] = xyah[2];
- xyah_box[3] = xyah[3];
- auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
- this->mean = mc.first;
- this->covariance = mc.second;
-
- static_tlwh();
- static_tlbr();
-
- this->tracklet_len = 0;
- this->state = TrackState::Tracked;
- this->is_activated = true;
- this->frame_id = frame_id;
- this->score = new_track.score;
- if (new_id)
- this->track_id = next_id();
-}
-
-void STrack::update(STrack &new_track, int frame_id)
-{
- this->frame_id = frame_id;
- this->tracklet_len++;
-
- vector<float> xyah = tlwh_to_xyah(new_track.tlwh);
- DETECTBOX xyah_box;
- xyah_box[0] = xyah[0];
- xyah_box[1] = xyah[1];
- xyah_box[2] = xyah[2];
- xyah_box[3] = xyah[3];
-
- auto mc = this->kalman_filter.update(this->mean, this->covariance, xyah_box);
- this->mean = mc.first;
- this->covariance = mc.second;
-
- static_tlwh();
- static_tlbr();
-
- this->state = TrackState::Tracked;
- this->is_activated = true;
-
- this->score = new_track.score;
-}
-
-void STrack::static_tlwh()
-{
- if (this->state == TrackState::New)
- {
- tlwh[0] = _tlwh[0];
- tlwh[1] = _tlwh[1];
- tlwh[2] = _tlwh[2];
- tlwh[3] = _tlwh[3];
- return;
- }
-
- tlwh[0] = mean[0];
- tlwh[1] = mean[1];
- tlwh[2] = mean[2];
- tlwh[3] = mean[3];
-
- tlwh[2] *= tlwh[3];
- tlwh[0] -= tlwh[2] / 2;
- tlwh[1] -= tlwh[3] / 2;
-}
-
-void STrack::static_tlbr()
-{
- tlbr.clear();
- tlbr.assign(tlwh.begin(), tlwh.end());
- tlbr[2] += tlbr[0];
- tlbr[3] += tlbr[1];
-}
-
-vector<float> STrack::tlwh_to_xyah(vector<float> tlwh_tmp)
-{
- vector<float> tlwh_output = tlwh_tmp;
- tlwh_output[0] += tlwh_output[2] / 2;
- tlwh_output[1] += tlwh_output[3] / 2;
- tlwh_output[2] /= tlwh_output[3];
- return tlwh_output;
-}
-
-vector<float> STrack::to_xyah()
-{
- return tlwh_to_xyah(tlwh);
-}
-
-vector<float> STrack::tlbr_to_tlwh(vector<float> &tlbr)
-{
- tlbr[2] -= tlbr[0];
- tlbr[3] -= tlbr[1];
- return tlbr;
-}
-
-void STrack::mark_lost()
-{
- state = TrackState::Lost;
-}
-
-void STrack::mark_removed()
-{
- state = TrackState::Removed;
-}
-
-int STrack::next_id()
-{
- static int _count = 0;
- _count++;
- return _count;
-}
-
-int STrack::end_frame()
-{
- return this->frame_id;
-}
-
-void STrack::multi_predict(vector<STrack*> &stracks, byte_kalman::KalmanFilter &kalman_filter)
-{
- for (int i = 0; i < stracks.size(); i++)
- {
- if (stracks[i]->state != TrackState::Tracked)
- {
- stracks[i]->mean[7] = 0;
- }
- kalman_filter.predict(stracks[i]->mean, stracks[i]->covariance);
- }
-}
\ No newline at end of file
diff --git a/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py b/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py
deleted file mode 100644
index ceb32810dd0c6970f93d819bcca886fd42451a61..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tools/convert_ethz_to_coco.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import os
-import numpy as np
-import json
-from PIL import Image
-
-DATA_PATH = 'datasets/ETHZ/'
-DATA_FILE_PATH = 'datasets/data_path/eth.train'
-OUT_PATH = DATA_PATH + 'annotations/'
-
-def load_paths(data_path):
- with open(data_path, 'r') as file:
- img_files = file.readlines()
- img_files = [x.replace('\n', '') for x in img_files]
- img_files = list(filter(lambda x: len(x) > 0, img_files))
- label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files]
- return img_files, label_files
-
-if __name__ == '__main__':
- if not os.path.exists(OUT_PATH):
- os.mkdir(OUT_PATH)
-
- out_path = OUT_PATH + 'train.json'
- out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}
- img_paths, label_paths = load_paths(DATA_FILE_PATH)
- image_cnt = 0
- ann_cnt = 0
- video_cnt = 0
- for img_path, label_path in zip(img_paths, label_paths):
- image_cnt += 1
- im = Image.open(img_path)
- image_info = {'file_name': img_path,
- 'id': image_cnt,
- 'height': im.size[1],
- 'width': im.size[0]}
- out['images'].append(image_info)
- # Load labels
- if os.path.isfile(label_path):
- labels0 = np.loadtxt(label_path, dtype=np.float32).reshape(-1, 6)
- # Normalized xywh to pixel xyxy format
- labels = labels0.copy()
- labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2)
- labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2)
- labels[:, 4] = image_info['width'] * labels0[:, 4]
- labels[:, 5] = image_info['height'] * labels0[:, 5]
- else:
- labels = np.array([])
- for i in range(len(labels)):
- ann_cnt += 1
- fbox = labels[i, 2:6].tolist()
- ann = {'id': ann_cnt,
- 'category_id': 1,
- 'image_id': image_cnt,
- 'track_id': -1,
- 'bbox': fbox,
- 'area': fbox[2] * fbox[3],
- 'iscrowd': 0}
- out['annotations'].append(ann)
- print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations'])))
- json.dump(out, open(out_path, 'w'))
\ No newline at end of file
diff --git a/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py b/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py
deleted file mode 100644
index 7f03a4d02aa579275d58290bc4f3714fd58bfe00..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/storydalle/dalle/models/stage1/vqgan.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# ------------------------------------------------------------------------------------
-# Modified from VQGAN (https://github.com/CompVis/taming-transformers)
-# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer. All Rights Reserved.
-# ------------------------------------------------------------------------------------
-
-import torch
-import torch.nn as nn
-from typing import List, Tuple, Optional
-from einops import rearrange
-from omegaconf import OmegaConf
-from .layers import Encoder, Decoder
-
-
-class VectorQuantizer(nn.Module):
- """
- Simplified VectorQuantizer from the original VQGAN repository,
- with the modules that are unnecessary for sampling removed
- """
- def __init__(self, dim: int, n_embed: int, beta: float) -> None:
- super().__init__()
- self.n_embed = n_embed
- self.dim = dim
- self.beta = beta
-
- self.embedding = nn.Embedding(self.n_embed, self.dim)
- self.embedding.weight.data.uniform_(-1.0 / self.n_embed, 1.0 / self.n_embed)
-
- def forward(self,
- z: torch.FloatTensor) -> Tuple[torch.FloatTensor, torch.LongTensor]:
- z = rearrange(z, 'b c h w -> b h w c').contiguous() # [B,C,H,W] -> [B,H,W,C]
- z_flattened = z.view(-1, self.dim)
-
- d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
- torch.sum(self.embedding.weight**2, dim=1) - 2 * \
- torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
-
- min_encoding_indices = torch.argmin(d, dim=1)
- z_q = self.embedding(min_encoding_indices).view(z.shape)
- return z_q, min_encoding_indices
-
- def get_codebook_entry(self,
- indices: torch.LongTensor,
- shape: Optional[List[int]] = None) -> torch.FloatTensor:
- z_q = self.embedding(indices)
- if shape is not None:
- z_q = z_q.view(shape)
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
- return z_q
-
-
-class VQGAN(nn.Module):
- def __init__(self, n_embed: int, embed_dim: int, hparams: OmegaConf) -> None:
- super().__init__()
- self.encoder = Encoder(**hparams)
- self.decoder = Decoder(**hparams)
- self.quantize = VectorQuantizer(dim=embed_dim, n_embed=n_embed, beta=0.25)
- self.quant_conv = torch.nn.Conv2d(hparams.z_channels, embed_dim, 1)
- self.post_quant_conv = torch.nn.Conv2d(embed_dim, hparams.z_channels, 1)
- self.latent_dim = hparams.attn_resolutions[0]
-
- def forward(self, x: torch.FloatTensor) -> torch.FloatTensor:
- quant = self.encode(x)
- dec = self.decode(quant)
- return dec
-
- def encode(self, x: torch.FloatTensor) -> torch.FloatTensor:
- h = self.encoder(x)
- h = self.quant_conv(h)
- quant = self.quantize(h)[0]
- quant = rearrange(quant, 'b h w c -> b c h w').contiguous()
- return quant
-
- def decode(self, quant: torch.FloatTensor) -> torch.FloatTensor:
- quant = self.post_quant_conv(quant)
- dec = self.decoder(quant)
- return dec
-
- def decode_code(self, code: torch.LongTensor) -> torch.FloatTensor:
- quant = self.quantize.get_codebook_entry(code)
- quant = quant.permute(0, 3, 1, 2)
- dec = self.decode(quant)
- return dec
-
- def get_codes(self, x: torch.FloatTensor) -> torch.LongTensor:
- h = self.encoder(x)
- h = self.quant_conv(h)
- codes = self.quantize(h)[1].view(x.shape[0], self.latent_dim ** 2)
- return codes
-
- def from_ckpt(self, path: str, strict: bool = True) -> None:
- ckpt = torch.load(path, map_location='cpu')['state_dict']
- self.load_state_dict(ckpt, strict=strict)
- print(f'{path} successfully restored..')
diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py
deleted file mode 100644
index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000
--- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1)
- bottle = self.bottleneck(out)
- return bottle
diff --git a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py b/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py
deleted file mode 100644
index f9030fbcb4b3fe25107c718abef5eda3bb28f031..0000000000000000000000000000000000000000
--- a/spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/dataset.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import config
-import torch
-
-
-class BERTDataset:
- def __init__(self, review, target):
- self.review = review
- self.target = target
- self.tokenizer = config.TOKENIZER
- self.max_len = config.MAX_LEN
-
- def __len__(self):
- return len(self.review)
-
- def __getitem__(self, item):
- review = str(self.review[item])
- review = " ".join(review.split())
-
- inputs = self.tokenizer.encode_plus(
- review,
- None,
- add_special_tokens=True,
- max_length=self.max_len
- )
-
- ids = inputs["input_ids"]
- mask = inputs["attention_mask"]
- token_type_ids = inputs["token_type_ids"]
-
- padding_length = self.max_len - len(ids)
- ids = ids + ([0] * padding_length)
- mask = mask + ([0] * padding_length)
- token_type_ids = token_type_ids + ([0] * padding_length)
-
- return {
- 'ids': torch.tensor(ids, dtype=torch.long),
- 'mask': torch.tensor(mask, dtype=torch.long),
- 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
- 'targets': torch.tensor(self.target[item], dtype=torch.float)
- }
diff --git a/spaces/Felix123456/bingo/src/app/layout.tsx b/spaces/Felix123456/bingo/src/app/layout.tsx
deleted file mode 100644
index 8b5122759987177b8dc4e4356d1d06cea25c15ea..0000000000000000000000000000000000000000
--- a/spaces/Felix123456/bingo/src/app/layout.tsx
+++ /dev/null
@@ -1,47 +0,0 @@
-import { Metadata } from 'next'
-import { Toaster } from 'react-hot-toast'
-import { TailwindIndicator } from '@/components/tailwind-indicator'
-import { Providers } from '@/components/providers'
-import { Header } from '@/components/header'
-
-import '@/app/globals.scss'
-
-
-export const metadata: Metadata = {
- title: {
- default: 'Bing AI Chatbot',
- template: `%s - Bing AI Chatbot`
- },
- description: 'Bing AI Chatbot Web App.',
- themeColor: [
- { media: '(prefers-color-scheme: light)', color: 'white' },
- { media: '(prefers-color-scheme: dark)', color: 'dark' }
- ],
- icons: {
- icon: '/favicon.ico',
- shortcut: '../assets/images/logo.svg',
- apple: '../assets/images/logo.svg'
- }
-}
-
-interface RootLayoutProps {
- children: React.ReactNode
-}
-
-export default function RootLayout({ children }: RootLayoutProps) {
- // NOTE: the original JSX markup was stripped in this diff; the structure below is a
- // minimal reconstruction using the components imported above, not the exact original.
- return (
-   <html lang="en">
-     <body>
-       <Toaster />
-       <Providers>
-         {/* @ts-ignore */}
-         <Header />
-         {children}
-         <TailwindIndicator />
-       </Providers>
-     </body>
-   </html>
- )
-}
diff --git a/spaces/Felix123456/bingo/src/components/toaster.tsx b/spaces/Felix123456/bingo/src/components/toaster.tsx
deleted file mode 100644
index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000
--- a/spaces/Felix123456/bingo/src/components/toaster.tsx
+++ /dev/null
@@ -1,3 +0,0 @@
-'use client'
-
-export { Toaster } from 'react-hot-toast'
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py
deleted file mode 100644
index 575452d9f844a928f7f42296c81635cfbadec7c2..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/data_sampler.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import math
-import torch
-from torch.utils.data.sampler import Sampler
-
-
-class EnlargedSampler(Sampler):
- """Sampler that restricts data loading to a subset of the dataset.
-
- Modified from torch.utils.data.distributed.DistributedSampler
- Supports enlarging the dataset for iteration-based training, saving
- time when restarting the dataloader after each epoch.
-
- Args:
- dataset (torch.utils.data.Dataset): Dataset used for sampling.
- num_replicas (int | None): Number of processes participating in
- the training. It is usually the world_size.
- rank (int | None): Rank of the current process within num_replicas.
- ratio (int): Enlarging ratio. Default: 1.
- """
-
- def __init__(self, dataset, num_replicas, rank, ratio=1):
- self.dataset = dataset
- self.num_replicas = num_replicas
- self.rank = rank
- self.epoch = 0
- self.num_samples = math.ceil(len(self.dataset) * ratio / self.num_replicas)
- self.total_size = self.num_samples * self.num_replicas
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
- indices = torch.randperm(self.total_size, generator=g).tolist()
-
- dataset_size = len(self.dataset)
- indices = [v % dataset_size for v in indices]
-
- # subsample
- indices = indices[self.rank:self.total_size:self.num_replicas]
- assert len(indices) == self.num_samples
-
- return iter(indices)
-
- def __len__(self):
- return self.num_samples
-
- def set_epoch(self, epoch):
- self.epoch = epoch
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py
deleted file mode 100644
index af2d06587b2d07b2eab199a8484380fde1de5c3c..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/detection/yolov5face/utils/torch_utils.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import torch
-from torch import nn
-
-
-def fuse_conv_and_bn(conv, bn):
- # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
- fusedconv = (
- nn.Conv2d(
- conv.in_channels,
- conv.out_channels,
- kernel_size=conv.kernel_size,
- stride=conv.stride,
- padding=conv.padding,
- groups=conv.groups,
- bias=True,
- )
- .requires_grad_(False)
- .to(conv.weight.device)
- )
-
- # prepare filters
- w_conv = conv.weight.clone().view(conv.out_channels, -1)
- w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
- fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
-
- # prepare spatial bias
- b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
- b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
- fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
-
- return fusedconv
-
-
-def copy_attr(a, b, include=(), exclude=()):
- # Copy attributes from b to a, options to only include [...] and to exclude [...]
- for k, v in b.__dict__.items():
- if (include and k not in include) or k.startswith("_") or k in exclude:
- continue
-
- setattr(a, k, v)
diff --git a/spaces/FoxMeo/fire-detector/utils/torch_utils.py b/spaces/FoxMeo/fire-detector/utils/torch_utils.py
deleted file mode 100644
index 1e631b555508457a4944c11a479176463719c0e8..0000000000000000000000000000000000000000
--- a/spaces/FoxMeo/fire-detector/utils/torch_utils.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# YOLOR PyTorch utils
-
-import datetime
-import logging
-import math
-import os
-import platform
-import subprocess
-import time
-from contextlib import contextmanager
-from copy import deepcopy
-from pathlib import Path
-
-import torch
-import torch.backends.cudnn as cudnn
-import torch.nn as nn
-import torch.nn.functional as F
-import torchvision
-
-try:
- import thop # for FLOPS computation
-except ImportError:
- thop = None
-logger = logging.getLogger(__name__)
-
-
-@contextmanager
-def torch_distributed_zero_first(local_rank: int):
- """
- Context manager to make all processes in distributed training wait for the local master to do something first.
- """
- if local_rank not in [-1, 0]:
- torch.distributed.barrier()
- yield
- if local_rank == 0:
- torch.distributed.barrier()
-
-
-def init_torch_seeds(seed=0):
- # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
- torch.manual_seed(seed)
- if seed == 0: # slower, more reproducible
- cudnn.benchmark, cudnn.deterministic = False, True
- else: # faster, less reproducible
- cudnn.benchmark, cudnn.deterministic = True, False
-
-
-def date_modified(path=__file__):
- # return human-readable file modification date, i.e. '2021-3-26'
- t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime)
- return f'{t.year}-{t.month}-{t.day}'
-
-
-def git_describe(path=Path(__file__).parent): # path must be a directory
- # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe
- s = f'git -C {path} describe --tags --long --always'
- try:
- return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
- except subprocess.CalledProcessError as e:
- return '' # not a git repository
-
-
-def select_device(device='', batch_size=None):
- # device = 'cpu' or '0' or '0,1,2,3'
- s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string
- cpu = device.lower() == 'cpu'
- if cpu:
- os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
- elif device: # non-cpu device requested
- os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
- assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
-
- cuda = not cpu and torch.cuda.is_available()
- if cuda:
- n = torch.cuda.device_count()
- if n > 1 and batch_size: # check that batch_size is compatible with device_count
- assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
- space = ' ' * len(s)
- for i, d in enumerate(device.split(',') if device else range(n)):
- p = torch.cuda.get_device_properties(i)
- s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
- else:
- s += 'CPU\n'
-
- logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe
- return torch.device('cuda:0' if cuda else 'cpu')
-
-
-def time_synchronized():
- # pytorch-accurate time
- if torch.cuda.is_available():
- torch.cuda.synchronize()
- return time.time()
-
-
-def profile(x, ops, n=100, device=None):
- # profile a pytorch module or list of modules. Example usage:
- # x = torch.randn(16, 3, 640, 640) # input
- # m1 = lambda x: x * torch.sigmoid(x)
- # m2 = nn.SiLU()
- # profile(x, [m1, m2], n=100) # profile speed over 100 iterations
-
- device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
- x = x.to(device)
- x.requires_grad = True
- print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
- print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}")
- for m in ops if isinstance(ops, list) else [ops]:
- m = m.to(device) if hasattr(m, 'to') else m # device
- m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type
- dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward
- try:
- flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS
- except:
- flops = 0
-
- for _ in range(n):
- t[0] = time_synchronized()
- y = m(x)
- t[1] = time_synchronized()
- try:
- _ = y.sum().backward()
- t[2] = time_synchronized()
- except: # no backward method
- t[2] = float('nan')
- dtf += (t[1] - t[0]) * 1000 / n # ms per op forward
- dtb += (t[2] - t[1]) * 1000 / n # ms per op backward
-
- s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
- s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
- p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters
- print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
-
-
-def is_parallel(model):
- return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
-
-
-def intersect_dicts(da, db, exclude=()):
- # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
- return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}
-
-
-def initialize_weights(model):
- for m in model.modules():
- t = type(m)
- if t is nn.Conv2d:
- pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif t is nn.BatchNorm2d:
- m.eps = 1e-3
- m.momentum = 0.03
- elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
- m.inplace = True
-
-
-def find_modules(model, mclass=nn.Conv2d):
- # Finds layer indices matching module class 'mclass'
- return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
-
-
-def sparsity(model):
- # Return global model sparsity
- a, b = 0., 0.
- for p in model.parameters():
- a += p.numel()
- b += (p == 0).sum()
- return b / a
-
-
-def prune(model, amount=0.3):
- # Prune model to requested global sparsity
- import torch.nn.utils.prune as prune
- print('Pruning model... ', end='')
- for name, m in model.named_modules():
- if isinstance(m, nn.Conv2d):
- prune.l1_unstructured(m, name='weight', amount=amount) # prune
- prune.remove(m, 'weight') # make permanent
- print(' %.3g global sparsity' % sparsity(model))
-
-
-def fuse_conv_and_bn(conv, bn):
- # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
- fusedconv = nn.Conv2d(conv.in_channels,
- conv.out_channels,
- kernel_size=conv.kernel_size,
- stride=conv.stride,
- padding=conv.padding,
- groups=conv.groups,
- bias=True).requires_grad_(False).to(conv.weight.device)
-
- # prepare filters
- w_conv = conv.weight.clone().view(conv.out_channels, -1)
- w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
- fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
-
- # prepare spatial bias
- b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
- b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
- fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
-
- return fusedconv
-
-
-def model_info(model, verbose=False, img_size=640):
- # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]
- n_p = sum(x.numel() for x in model.parameters()) # number parameters
- n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
- if verbose:
- print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
- for i, (name, p) in enumerate(model.named_parameters()):
- name = name.replace('module_list.', '')
- print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
- (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
-
- try: # FLOPS
- from thop import profile
- stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32
- img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input
- flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS
- img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float
- fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS
- except (ImportError, Exception):
- fs = ''
-
- logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}")
-
-
-def load_classifier(name='resnet101', n=2):
- # Loads a pretrained model reshaped to n-class output
- model = torchvision.models.__dict__[name](pretrained=True)
-
- # ResNet model properties
- # input_size = [3, 224, 224]
- # input_space = 'RGB'
- # input_range = [0, 1]
- # mean = [0.485, 0.456, 0.406]
- # std = [0.229, 0.224, 0.225]
-
- # Reshape output to n classes
- filters = model.fc.weight.shape[1]
- model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
- model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True)
- model.fc.out_features = n
- return model
-
-
-def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)
- # scales img(bs,3,y,x) by ratio constrained to gs-multiple
- if ratio == 1.0:
- return img
- else:
- h, w = img.shape[2:]
- s = (int(h * ratio), int(w * ratio)) # new size
- img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
- if not same_shape: # pad/crop img
- h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
- return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
-
-
-def copy_attr(a, b, include=(), exclude=()):
- # Copy attributes from b to a, options to only include [...] and to exclude [...]
- for k, v in b.__dict__.items():
- if (len(include) and k not in include) or k.startswith('_') or k in exclude:
- continue
- else:
- setattr(a, k, v)
-
-
-class ModelEMA:
- """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
- Keep a moving average of everything in the model state_dict (parameters and buffers).
- This is intended to allow functionality like
- https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
- A smoothed version of the weights is necessary for some training schemes to perform well.
- This class is sensitive to where it is initialized in the sequence of model init,
- GPU assignment, and distributed training wrappers.
- """
-
- def __init__(self, model, decay=0.9999, updates=0):
- # Create EMA
- self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA
- # if next(model.parameters()).device.type != 'cpu':
- # self.ema.half() # FP16 EMA
- self.updates = updates # number of EMA updates
- self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
- for p in self.ema.parameters():
- p.requires_grad_(False)
-
- def update(self, model):
- # Update EMA parameters
- with torch.no_grad():
- self.updates += 1
- d = self.decay(self.updates)
-
- msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
- for k, v in self.ema.state_dict().items():
- if v.dtype.is_floating_point:
- v *= d
- v += (1. - d) * msd[k].detach()
-
- def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
- # Update EMA attributes
- copy_attr(self.ema, model, include, exclude)
-
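-# Typical usage (a minimal sketch; `model`, `optimizer`, `dataloader` and `compute_loss`
-# are assumed to exist and are not part of this module):
-#
-#   ema = ModelEMA(model)
-#   for imgs, targets in dataloader:
-#       loss = compute_loss(model(imgs), targets)
-#       loss.backward()
-#       optimizer.step()
-#       optimizer.zero_grad()
-#       ema.update(model)        # refresh EMA weights after every optimizer step
-#   ema.update_attr(model)       # copy non-parameter attributes before saving ema.ema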
-
-class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
- def _check_input_dim(self, input):
- # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc.
- # is this method, which each sub-class overrides.
- # The original goal of this method was tensor sanity checks.
- # If you're ok bypassing those sanity checks (e.g. if you trust your inference
- # to provide inputs of the right dimensionality), then you can just use this method
- # for easy conversion from SyncBatchNorm
- # (unfortunately, SyncBatchNorm does not store the original class - if it did,
- # we could return the one that was originally created).
- return
-
-def revert_sync_batchnorm(module):
- # this is very similar to the function that it is trying to revert:
- # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679
- module_output = module
- if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm):
- new_cls = BatchNormXd
- module_output = BatchNormXd(module.num_features,
- module.eps, module.momentum,
- module.affine,
- module.track_running_stats)
- if module.affine:
- with torch.no_grad():
- module_output.weight = module.weight
- module_output.bias = module.bias
- module_output.running_mean = module.running_mean
- module_output.running_var = module.running_var
- module_output.num_batches_tracked = module.num_batches_tracked
- if hasattr(module, "qconfig"):
- module_output.qconfig = module.qconfig
- for name, child in module.named_children():
- module_output.add_module(name, revert_sync_batchnorm(child))
- del module
- return module_output
-
-
-class TracedModel(nn.Module):
-
- def __init__(self, model=None, device=None, img_size=(640,640)):
- super(TracedModel, self).__init__()
-
- print(" Convert model to Traced-model... ")
- self.stride = model.stride
- self.names = model.names
- self.model = model
-
- self.model = revert_sync_batchnorm(self.model)
- self.model.to('cpu')
- self.model.eval()
-
- self.detect_layer = self.model.model[-1]
- self.model.traced = True
-
- h, w = (img_size, img_size) if isinstance(img_size, int) else img_size
- rand_example = torch.rand(1, 3, h, w) # dummy input for tracing
-
- traced_script_module = torch.jit.trace(self.model, rand_example, strict=False)
- #traced_script_module = torch.jit.script(self.model)
- traced_script_module.save("traced_model.pt")
- print(" traced_script_module saved! ")
- self.model = traced_script_module
- self.model.to(device)
- self.detect_layer.to(device)
- print(" model is traced! \n")
-
- def forward(self, x, augment=False, profile=False):
- out = self.model(x)
- out = self.detect_layer(out)
- return out
\ No newline at end of file
diff --git a/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md b/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md
deleted file mode 100644
index a7d89d9e6602a9ddbb7f5092b1c6692fe362f265..0000000000000000000000000000000000000000
--- a/spaces/FrancXPT/stabilityai-stable-diffusion-2-1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stabilityai Stable Diffusion 2 1
-emoji: 📈
-colorFrom: yellow
-colorTo: purple
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GT4SD/protein_properties/model_cards/article.md b/spaces/GT4SD/protein_properties/model_cards/article.md
deleted file mode 100644
index 7230ba8a29575c08a3135ec262581199b9fedf8f..0000000000000000000000000000000000000000
--- a/spaces/GT4SD/protein_properties/model_cards/article.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Supported molecular properties
-
-
-### Instability
-Compute the protein instability as presented in [Guruprasad et al. (*Protein Engineering, Design and Selection*; 1990)](https://academic.oup.com/peds/article-abstract/4/2/155/1491271).
-
-### Aromaticity
-Compute the protein aromaticity as presented in [Lobry et al. (*Nucleic Acids Research*; 1994)](https://academic.oup.com/nar/article-abstract/22/15/3174/1087817).
-
-### Isoelectric point
-Computes the isoelectric point of every residue and aggregates.
-
-### Hydrophobicity
-"Computes the hydrophobicity of a protein, relative freq. of **A,C,F,I,L,M & V**.
-
-### Aliphaticity
-Compute the aliphatic index of globular proteins as presented in [Ikai (*The Journal of Biochemistry*; 1980)](https://academic.oup.com/jb/article-abstract/88/6/1895/773432).
-
-### Charge
-Compute the charge of a protein, based on a boolean for the amide (whether the sequences are C-terminally amidated) and a pH value, as presented in [Bjellqvist (*Electrophoresis*; 1993)](https://analyticalsciencejournals.onlinelibrary.wiley.com/doi/abs/10.1002/elps.11501401163).
-
-### Charge Density
-Computes the charge density of a protein, based on a boolean for the amide (whether the sequences are C-terminally amidated) and a pH value, as presented in [Bjellqvist (*Electrophoresis*; 1993)](https://analyticalsciencejournals.onlinelibrary.wiley.com/doi/abs/10.1002/elps.11501401163).
-
-### Boman index
-Compute the Boman index of a protein (its potential for binding to other proteins) as presented in [Boman (*Journal of Internal Medicine*; 2003)](https://onlinelibrary.wiley.com/doi/full/10.1046/j.1365-2796.2003.01228.x).
-
-### Protein weight
-Compute the molecular weight of a protein with [RDKit](https://www.rdkit.org/docs/GettingStartedInPython.html).
-
-### Length
-Retrieves the number of residues of a protein.
-
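-As a rough illustration, several of the sequence-based descriptors above can be reproduced with [Biopython](https://biopython.org)'s `ProteinAnalysis`. This is only a sketch, not the exact GT4SD implementation, and the example sequence below is made up:
-
-```python
-from Bio.SeqUtils.ProtParam import ProteinAnalysis
-
-seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # hypothetical example sequence
-pa = ProteinAnalysis(seq)
-
-print("instability index :", pa.instability_index())  # Guruprasad et al., 1990
-print("aromaticity       :", pa.aromaticity())        # Lobry et al., 1994
-print("isoelectric point :", pa.isoelectric_point())
-print("molecular weight  :", pa.molecular_weight())
-print("length            :", len(seq))
-```
-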
-Moreover, GT4SD also includes properties on other entities such as [molecules](https://gt4sd.github.io/gt4sd-core/api/gt4sd.properties.molecules.html) and [crystals](https://gt4sd.github.io/gt4sd-core/api/gt4sd.properties.crystals.html).
-The GT4SD web app for molecules can be found [here](https://huggingface.co/spaces/GT4SD/molecular_properties).
diff --git a/spaces/Gaurav261/medical_image_classification/app.py b/spaces/Gaurav261/medical_image_classification/app.py
deleted file mode 100644
index a699bc5b3c2e987102ca93e0ee28d601e0a93d02..0000000000000000000000000000000000000000
--- a/spaces/Gaurav261/medical_image_classification/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-
-def greet(name):
- return "Hello " + name + "!!"
-
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py b/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py
deleted file mode 100644
index 0f301476443f740b9311a4d208b35ce0265b003d..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/gensim/evaluate_finetune_model.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import openai
-import argparse
-import os
-from cliport import tasks
-from cliport.dataset import RavensDataset
-from cliport.environments.environment import Environment
-
-from pygments import highlight
-from pygments.lexers import PythonLexer
-from pygments.formatters import TerminalFormatter
-
-import time
-import random
-import json
-import traceback
-import pybullet as p
-import IPython
-from gensim.topdown_sim_runner import TopDownSimulationRunner
-import hydra
-from datetime import datetime
-
-from gensim.memory import Memory
-from gensim.utils import set_gpt_model, clear_messages, format_finetune_prompt
-
-@hydra.main(config_path='../cliport/cfg', config_name='data', version_base="1.2")
-def main(cfg):
- # parser.add_argument("--task", type=str, default='build-car')
- # parser.add_argument("--model", type=str, default='davinci:ft-wang-lab:gensim-2023-08-04-18-28-34')
-
- task = cfg.target_task
- model = cfg.target_model
- prompt = format_finetune_prompt(task)
-
- openai.api_key = cfg['openai_key']
- model_time = datetime.now().strftime("%d_%m_%Y_%H:%M:%S")
- cfg['model_output_dir'] = os.path.join(cfg['output_folder'], cfg['prompt_folder'] + "_" + cfg.target_model)
- if 'seed' in cfg:
- cfg['model_output_dir'] = cfg['model_output_dir'] + f"_{cfg['seed']}"
-
- set_gpt_model(cfg['gpt_model'])
- memory = Memory(cfg)
- simulation_runner = TopDownSimulationRunner(cfg, memory)
-
- for trial_i in range(cfg['trials']):
- if 'new_finetuned_model' in cfg or 'gpt-3.5-turbo' in cfg.target_model:
- # the chat completion version
- response = openai.ChatCompletion.create(
- model=model,
- messages=[{"role": "system", "content": "You are an AI in robot simulation code and task design."},
- {"role": "user", "content": prompt}],
- temperature=0.01,
- max_tokens=1000,
- n=1,
- stop=["\n```\n"])
- res = response["choices"][0]["message"]["content"]
- else:
- response = openai.Completion.create(
- model=model,
- prompt=prompt,
- temperature=0,
- max_tokens=1800,
- stop=["\n```\n"])
- res = response["choices"][0]["text"]
-
- simulation_runner.task_creation(res)
- simulation_runner.simulate_task()
- simulation_runner.print_current_stats()
-
- simulation_runner.save_stats()
-
-
-
-
-# load few shot prompts
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/GeorgeOrville/bingo/src/app/loading.css b/spaces/GeorgeOrville/bingo/src/app/loading.css
deleted file mode 100644
index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000
--- a/spaces/GeorgeOrville/bingo/src/app/loading.css
+++ /dev/null
@@ -1,68 +0,0 @@
-::-webkit-scrollbar {
- width: 10px;
- height: 10px;
- display: none;
-}
-
-::-webkit-scrollbar-button:start:decrement,
-::-webkit-scrollbar-button:end:increment {
- height: 30px;
- background-color: transparent;
-}
-
-::-webkit-scrollbar-track-piece {
- background-color: #3b3b3b;
- -webkit-border-radius: 16px;
-}
-
-::-webkit-scrollbar-thumb:vertical {
- height: 50px;
- background-color: #666;
- border: 1px solid #eee;
- -webkit-border-radius: 6px;
-}
-
-/* loading start */
-.loading-spinner {
- display: flex;
- justify-content: center;
- align-items: center;
- height: 100vh;
- opacity: 1;
- transition: opacity .8s ease-out;
-}
-
-.loading-spinner.hidden {
- opacity: 0;
-}
-
-.loading-spinner>div {
- width: 30px;
- height: 30px;
- background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%);
-
- border-radius: 100%;
- display: inline-block;
- animation: sk-bouncedelay 1.4s infinite ease-in-out both;
-}
-
-.loading-spinner .bounce1 {
- animation-delay: -0.32s;
-}
-
-.loading-spinner .bounce2 {
- animation-delay: -0.16s;
-}
-
-@keyframes sk-bouncedelay {
-
- 0%,
- 80%,
- 100% {
- transform: scale(0);
- }
-
- 40% {
- transform: scale(1.0);
- }
-}
diff --git a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py b/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py
deleted file mode 100644
index 1867604145736352dc51ab05b6caae8b541a6ebb..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/anime-colorization/pixel_guide_diffusion/train_util.py
+++ /dev/null
@@ -1,356 +0,0 @@
-import copy
-import functools
-import os
-
-import blobfile as bf
-import numpy as np
-import torch as th
-import torch.distributed as dist
-from torch.nn.parallel.distributed import DistributedDataParallel as DDP
-from torch.optim import AdamW
-
-from . import dist_util, logger
-from .fp16_util import (
- make_master_params,
- master_params_to_model_params,
- model_grads_to_master_grads,
- unflatten_master_params,
- zero_grad,
-)
-from .nn import update_ema
-from .resample import LossAwareSampler, UniformSampler
-
-# For ImageNet experiments, this was a good default value.
-# We found that the lg_loss_scale quickly climbed to
-# 20-21 within the first ~1K steps of training.
-INITIAL_LOG_LOSS_SCALE = 20.0
-
-
-class TrainLoop:
- def __init__(
- self,
- *,
- model,
- diffusion,
- data,
- batch_size,
- microbatch,
- lr,
- ema_rate,
- log_interval,
- save_interval,
- resume_checkpoint,
- use_fp16=False,
- fp16_scale_growth=1e-3,
- schedule_sampler=None,
- weight_decay=0.0,
- lr_anneal_steps=0,
- ):
- self.model = model
- self.diffusion = diffusion
- self.data = data
- self.batch_size = batch_size
- self.microbatch = microbatch if microbatch > 0 else batch_size
- self.lr = lr
- self.ema_rate = (
- [ema_rate]
- if isinstance(ema_rate, float)
- else [float(x) for x in ema_rate.split(",")]
- )
- self.log_interval = log_interval
- self.save_interval = save_interval
- self.resume_checkpoint = resume_checkpoint
- self.use_fp16 = use_fp16
- self.fp16_scale_growth = fp16_scale_growth
- self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
- self.weight_decay = weight_decay
- self.lr_anneal_steps = lr_anneal_steps
-
- self.step = 0
- self.resume_step = 0
- self.global_batch = self.batch_size * dist.get_world_size()
-
- self.model_params = list(self.model.parameters())
- self.master_params = self.model_params
- self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
- self.sync_cuda = th.cuda.is_available()
-
- self._load_and_sync_parameters()
- if self.use_fp16:
- self._setup_fp16()
-
- self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
- if self.resume_step:
- self._load_optimizer_state()
- # Model was resumed, either due to a restart or a checkpoint
- # being specified at the command line.
- self.ema_params = [
- self._load_ema_parameters(rate) for rate in self.ema_rate
- ]
- else:
- self.ema_params = [
- copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))
- ]
-
- if th.cuda.is_available():
- self.use_ddp = True
- self.ddp_model = DDP(
- self.model,
- device_ids=[dist_util.dev()],
- output_device=dist_util.dev(),
- broadcast_buffers=False,
- bucket_cap_mb=128,
- find_unused_parameters=False,
- )
- else:
- if dist.get_world_size() > 1:
- logger.warn(
- "Distributed training requires CUDA. "
- "Gradients will not be synchronized properly!"
- )
- self.use_ddp = False
- self.ddp_model = self.model
-
- def _load_and_sync_parameters(self):
- resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
-
- if resume_checkpoint:
- self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
- if dist.get_rank() == 0:
- logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
- self.model.load_state_dict(
- dist_util.load_state_dict(
- resume_checkpoint, map_location=dist_util.dev()
- )
- )
-
- dist_util.sync_params(self.model.parameters())
-
- def _load_ema_parameters(self, rate):
- ema_params = copy.deepcopy(self.master_params)
-
- main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
- ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
- if ema_checkpoint:
- if dist.get_rank() == 0:
- logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
- state_dict = dist_util.load_state_dict(
- ema_checkpoint, map_location=dist_util.dev()
- )
- ema_params = self._state_dict_to_master_params(state_dict)
-
- dist_util.sync_params(ema_params)
- return ema_params
-
- def _load_optimizer_state(self):
- main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
- opt_checkpoint = bf.join(
- bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
- )
- if bf.exists(opt_checkpoint):
- logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
- state_dict = dist_util.load_state_dict(
- opt_checkpoint, map_location=dist_util.dev()
- )
- self.opt.load_state_dict(state_dict)
-
- def _setup_fp16(self):
- self.master_params = make_master_params(self.model_params)
- self.model.convert_to_fp16()
-
- def run_loop(self):
- while (
- not self.lr_anneal_steps
- or self.step + self.resume_step < self.lr_anneal_steps
- ):
- batch, cond = next(self.data)
- self.run_step(batch, cond)
- if self.step % self.log_interval == 0:
- logger.dumpkvs()
- if self.step % self.save_interval == 0:
- self.save()
- # Run for a finite amount of time in integration tests.
- if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
- return
- self.step += 1
- # Save the last checkpoint if it wasn't already saved.
- if (self.step - 1) % self.save_interval != 0:
- self.save()
-
- def run_step(self, batch, cond):
- self.forward_backward(batch, cond)
- if self.use_fp16:
- self.optimize_fp16()
- else:
- self.optimize_normal()
- self.log_step()
-
- def forward_backward(self, batch, cond):
- zero_grad(self.model_params)
- for i in range(0, batch.shape[0], self.microbatch):
- micro = batch[i : i + self.microbatch].to(dist_util.dev())
- micro_cond = {
- k: v[i : i + self.microbatch].to(dist_util.dev())
- for k, v in cond.items()
- }
- last_batch = (i + self.microbatch) >= batch.shape[0]
- t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
-
- compute_losses = functools.partial(
- self.diffusion.training_losses,
- self.ddp_model,
- micro,
- t,
- model_kwargs=micro_cond,
- )
-
- if last_batch or not self.use_ddp:
- losses = compute_losses()
- else:
- with self.ddp_model.no_sync():
- losses = compute_losses()
-
- if isinstance(self.schedule_sampler, LossAwareSampler):
- self.schedule_sampler.update_with_local_losses(
- t, losses["loss"].detach()
- )
-
- loss = (losses["loss"] * weights).mean()
- log_loss_dict(
- self.diffusion, t, {k: v * weights for k, v in losses.items()}
- )
- if self.use_fp16:
- loss_scale = 2 ** self.lg_loss_scale
- (loss * loss_scale).backward()
- else:
- loss.backward()
-
- def optimize_fp16(self):
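- # Dynamic loss scaling: if any gradient is non-finite (overflow), shrink the scale
- # (lg_loss_scale -= 1) and skip this step; otherwise unscale the gradients, take an
- # optimizer step, and let the scale grow again by fp16_scale_growth.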
- if any(not th.isfinite(p.grad).all() for p in self.model_params):
- self.lg_loss_scale -= 1
- logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
- return
-
- model_grads_to_master_grads(self.model_params, self.master_params)
- self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale))
- self._log_grad_norm()
- self._anneal_lr()
- self.opt.step()
- for rate, params in zip(self.ema_rate, self.ema_params):
- update_ema(params, self.master_params, rate=rate)
- master_params_to_model_params(self.model_params, self.master_params)
- self.lg_loss_scale += self.fp16_scale_growth
-
- def optimize_normal(self):
- self._log_grad_norm()
- self._anneal_lr()
- self.opt.step()
- for rate, params in zip(self.ema_rate, self.ema_params):
- update_ema(params, self.master_params, rate=rate)
-
- def _log_grad_norm(self):
- sqsum = 0.0
- for p in self.master_params:
- sqsum += (p.grad ** 2).sum().item()
- logger.logkv_mean("grad_norm", np.sqrt(sqsum))
-
- def _anneal_lr(self):
- if not self.lr_anneal_steps:
- return
- frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
- lr = self.lr * (1 - frac_done)
- for param_group in self.opt.param_groups:
- param_group["lr"] = lr
-
- def log_step(self):
- logger.logkv("step", self.step + self.resume_step)
- logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
- if self.use_fp16:
- logger.logkv("lg_loss_scale", self.lg_loss_scale)
-
- def save(self):
- def save_checkpoint(rate, params):
- state_dict = self._master_params_to_state_dict(params)
- if dist.get_rank() == 0:
- logger.log(f"saving model {rate}...")
- if not rate:
- filename = f"model{(self.step+self.resume_step):06d}.pt"
- else:
- filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
- with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
- th.save(state_dict, f)
-
- save_checkpoint(0, self.master_params)
- for rate, params in zip(self.ema_rate, self.ema_params):
- save_checkpoint(rate, params)
-
- if dist.get_rank() == 0:
- with bf.BlobFile(
- bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
- "wb",
- ) as f:
- th.save(self.opt.state_dict(), f)
-
- dist.barrier()
-
- def _master_params_to_state_dict(self, master_params):
- if self.use_fp16:
- master_params = unflatten_master_params(
- self.model.parameters(), master_params
- )
- state_dict = self.model.state_dict()
- for i, (name, _value) in enumerate(self.model.named_parameters()):
- assert name in state_dict
- state_dict[name] = master_params[i]
- return state_dict
-
- def _state_dict_to_master_params(self, state_dict):
- params = [state_dict[name] for name, _ in self.model.named_parameters()]
- if self.use_fp16:
- return make_master_params(params)
- else:
- return params
-
-
-def parse_resume_step_from_filename(filename):
- """
- Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
- checkpoint's number of steps.
- """
- split = filename.split("model")
- if len(split) < 2:
- return 0
- split1 = split[-1].split(".")[0]
- try:
- return int(split1)
- except ValueError:
- return 0
-
-
-def get_blob_logdir():
- return os.environ.get("DIFFUSION_BLOB_LOGDIR", logger.get_dir())
-
-
-def find_resume_checkpoint():
- # On your infrastructure, you may want to override this to automatically
- # discover the latest checkpoint on your blob storage, etc.
- return None
-
-
-def find_ema_checkpoint(main_checkpoint, step, rate):
- if main_checkpoint is None:
- return None
- filename = f"ema_{rate}_{(step):06d}.pt"
- path = bf.join(bf.dirname(main_checkpoint), filename)
- if bf.exists(path):
- return path
- return None
-
-
-def log_loss_dict(diffusion, ts, losses):
- for key, values in losses.items():
- logger.logkv_mean(key, values.mean().item())
- # Log the quantiles (four quartiles, in particular).
- for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
- quartile = int(4 * sub_t / diffusion.num_timesteps)
- logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py
deleted file mode 100644
index c2819477abb070b724d0295ccf028025918b263a..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/instaboost/mask_rcnn_r101_fpn_instaboost_4x_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './mask_rcnn_r50_fpn_instaboost_4x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py
deleted file mode 100644
index 1e4f0bbf28f1ad893a301f1bfac1da8e97370337..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/cosine_lr_scheduler.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import _LRScheduler
-
-
-class CosineLRScheduler(_LRScheduler):
- """Cosine LR scheduler.
-
- Args:
- optimizer (Optimizer): Torch optimizer.
- warmup_steps (int): Number of warmup steps.
- total_steps (int): Total number of steps.
- lr_min_ratio (float): Minimum learning rate.
- cycle_length (float): Cycle length.
- """
- def __init__(self, optimizer: Optimizer, total_steps: int, warmup_steps: int,
- lr_min_ratio: float = 0.0, cycle_length: float = 1.0):
- self.warmup_steps = warmup_steps
- assert self.warmup_steps >= 0
- self.total_steps = total_steps
- assert self.total_steps >= 0
- self.lr_min_ratio = lr_min_ratio
- self.cycle_length = cycle_length
- super().__init__(optimizer)
-
- def _get_sched_lr(self, lr: float, step: int):
- if step < self.warmup_steps:
- lr_ratio = step / self.warmup_steps
- lr = lr_ratio * lr
- elif step <= self.total_steps:
- s = (step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
- lr_ratio = self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * \
- (1. + math.cos(math.pi * s / self.cycle_length))
- lr = lr_ratio * lr
- else:
- lr_ratio = self.lr_min_ratio
- lr = lr_ratio * lr
- return lr
-
- def get_lr(self):
- return [self._get_sched_lr(lr, self.last_epoch) for lr in self.base_lrs]
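-
-
-# Minimal usage sketch (illustrative only, not part of the original module):
-if __name__ == "__main__":
-    import torch
-
-    net = torch.nn.Linear(8, 2)
-    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
-    sched = CosineLRScheduler(opt, total_steps=1000, warmup_steps=100, lr_min_ratio=0.1)
-    for _ in range(1000):
-        opt.step()    # a real loop would compute a loss and call backward() first
-        sched.step()  # linear warmup for 100 steps, then cosine decay towards 0.1 * base lr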
diff --git a/spaces/Hallucinate/demo/ldm/modules/encoders/__init__.py b/spaces/Hallucinate/demo/ldm/modules/encoders/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
deleted file mode 100644
index 94bd71fb9c46a64a8b6e1960f47dfc43b78dda43..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
-
-from . import build_monotonic_attention
-
-from typing import Dict, Optional, List
-
-from torch import Tensor
-import torch
-
-
-class TransformerMonotonicEncoderLayer(TransformerEncoderLayer):
- def forward(self, x, encoder_padding_mask):
- seq_len, _, _ = x.size()
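- # Build a causal (upper-triangular) attention mask so each source position only
- # attends to itself and earlier positions, since the source arrives incrementally
- # in simultaneous translation.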
- attn_mask = x.new_ones([seq_len, seq_len]).triu(1)
- attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
- return super().forward(x, encoder_padding_mask, attn_mask)
-
-
-class TransformerMonotonicDecoderLayer(TransformerDecoderLayer):
- def __init__(self, args):
- super().__init__(args)
-
- assert args.simul_type is not None, "A --simul-type is needed."
- self.encoder_attn = build_monotonic_attention(args)
-
- def prune_incremental_state(
- self,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
- ):
- input_buffer = self.self_attn._get_input_buffer(incremental_state)
- for key in ["prev_key", "prev_value"]:
- input_buffer_key = input_buffer[key]
- assert input_buffer_key is not None
- if input_buffer_key.size(2) > 1:
- input_buffer[key] = input_buffer_key[:, :, :-1, :]
- else:
- typed_empty_dict: Dict[str, Optional[Tensor]] = {}
- input_buffer = typed_empty_dict
- break
- assert incremental_state is not None
- self.self_attn._set_input_buffer(incremental_state, input_buffer)
-
- def forward(
- self,
- x,
- encoder_out: Optional[Tensor] = None,
- encoder_padding_mask: Optional[Tensor] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- prev_self_attn_state: Optional[List[Tensor]] = None,
- prev_attn_state: Optional[List[Tensor]] = None,
- self_attn_mask: Optional[Tensor] = None,
- self_attn_padding_mask: Optional[Tensor] = None,
- need_attn: bool = False,
- need_head_weights: bool = False,
- ):
- """
- Args:
- x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
- encoder_padding_mask (ByteTensor, optional): binary
- ByteTensor of shape `(batch, src_len)` where padding
- elements are indicated by ``1``.
- need_attn (bool, optional): return attention weights
- need_head_weights (bool, optional): return attention weights
- for each head (default: return average over heads).
-
- Returns:
- encoded output of shape `(seq_len, batch, embed_dim)`
- """
- if need_head_weights:
- need_attn = True
-
- residual = x
- if self.normalize_before:
- x = self.self_attn_layer_norm(x)
- if prev_self_attn_state is not None:
- prev_key, prev_value = prev_self_attn_state[:2]
- saved_state: Dict[str, Optional[Tensor]] = {
- "prev_key": prev_key,
- "prev_value": prev_value,
- }
- if len(prev_self_attn_state) >= 3:
- saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
- assert incremental_state is not None
- self.self_attn._set_input_buffer(incremental_state, saved_state)
- _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
- if self.cross_self_attention and not (
- incremental_state is not None
- and _self_attn_input_buffer is not None
- and "prev_key" in _self_attn_input_buffer
- ):
- if self_attn_mask is not None:
- assert encoder_out is not None
- self_attn_mask = torch.cat(
- (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
- )
- if self_attn_padding_mask is not None:
- if encoder_padding_mask is None:
- assert encoder_out is not None
- encoder_padding_mask = self_attn_padding_mask.new_zeros(
- encoder_out.size(1), encoder_out.size(0)
- )
- self_attn_padding_mask = torch.cat(
- (encoder_padding_mask, self_attn_padding_mask), dim=1
- )
- assert encoder_out is not None
- y = torch.cat((encoder_out, x), dim=0)
- else:
- y = x
-
- x, attn = self.self_attn(
- query=x,
- key=y,
- value=y,
- key_padding_mask=self_attn_padding_mask,
- incremental_state=incremental_state,
- need_weights=False,
- attn_mask=self_attn_mask,
- )
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- assert self.encoder_attn is not None
- residual = x
- if self.normalize_before:
- x = self.encoder_attn_layer_norm(x)
- if prev_attn_state is not None:
- prev_key, prev_value = prev_attn_state[:2]
- saved_state: Dict[str, Optional[Tensor]] = {
- "prev_key": prev_key,
- "prev_value": prev_value,
- }
- if len(prev_attn_state) >= 3:
- saved_state["prev_key_padding_mask"] = prev_attn_state[2]
- assert incremental_state is not None
- self.encoder_attn._set_input_buffer(incremental_state, saved_state)
-
- x, attn = self.encoder_attn(
- query=x,
- key=encoder_out,
- value=encoder_out,
- key_padding_mask=encoder_padding_mask,
- incremental_state=incremental_state,
- static_kv=True,
- need_weights=need_attn or (not self.training and self.need_attn),
- need_head_weights=need_head_weights,
- )
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.encoder_attn_layer_norm(x)
-
- residual = x
- if self.normalize_before:
- x = self.final_layer_norm(x)
-
- x = self.activation_fn(self.fc1(x))
- x = self.activation_dropout_module(x)
- x = self.fc2(x)
- x = self.dropout_module(x)
- x = self.residual_connection(x, residual)
- if not self.normalize_before:
- x = self.final_layer_norm(x)
- if self.onnx_trace and incremental_state is not None:
- saved_state = self.self_attn._get_input_buffer(incremental_state)
- assert saved_state is not None
- if self_attn_padding_mask is not None:
- self_attn_state = [
- saved_state["prev_key"],
- saved_state["prev_value"],
- saved_state["prev_key_padding_mask"],
- ]
- else:
- self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
- return x, attn, self_attn_state
- return x, attn, None
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py
deleted file mode 100644
index eddc2eb34602403f10979f54cd23a45bc2f104d5..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/quantization/pq/pq.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .em import EM, EmptyClusterResolveError
-
-
-class PQ(EM):
- """
- Quantizes the layer weights W with the standard Product Quantization
- technique. This learns a codebook of codewords or centroids of size
- block_size from W. For further reference on using PQ to quantize
- neural networks, see "And the Bit Goes Down: Revisiting the Quantization
- of Neural Networks", Stock et al., ICLR 2020.
-
- PQ is performed in two steps:
- (1) The matrix W (weights or fully-connected or convolutional layer)
- is reshaped to (block_size, -1).
- - If W is fully-connected (2D), its columns are split into
- blocks of size block_size.
- - If W is convolutional (4D), its filters are split along the
- spatial dimension.
- (2) We apply the standard EM/k-means algorithm to the resulting reshaped matrix.
-
- Args:
- - W: weight matrix to quantize of size (in_features x out_features)
- - block_size: size of the blocks (subvectors)
- - n_centroids: number of centroids
- - n_iter: number of k-means iterations
- - eps: for cluster reassignment when an empty cluster is found
- - max_tentatives for cluster reassignment when an empty cluster is found
- - verbose: print information after each iteration
-
- Remarks:
- - block_size be compatible with the shape of W
- """
-
- def __init__(
- self,
- W,
- block_size,
- n_centroids=256,
- n_iter=20,
- eps=1e-6,
- max_tentatives=30,
- verbose=True,
- ):
- self.block_size = block_size
- W_reshaped = self._reshape(W)
- super(PQ, self).__init__(
- W_reshaped,
- n_centroids=n_centroids,
- n_iter=n_iter,
- eps=eps,
- max_tentatives=max_tentatives,
- verbose=verbose,
- )
-
- def _reshape(self, W):
- """
- Reshapes the matrix W as explained in step (1).
- """
-
- # fully connected: by convention the weight has size out_features x in_features
- if len(W.size()) == 2:
- self.out_features, self.in_features = W.size()
- assert (
- self.in_features % self.block_size == 0
- ), "Linear: n_blocks must be a multiple of in_features"
- return (
- W.reshape(self.out_features, -1, self.block_size)
- .permute(2, 1, 0)
- .flatten(1, 2)
- )
-
- # convolutional: we reshape along the spatial dimension
- elif len(W.size()) == 4:
- self.out_channels, self.in_channels, self.k_h, self.k_w = W.size()
- assert (
- self.in_channels * self.k_h * self.k_w
- ) % self.block_size == 0, (
- "Conv2d: n_blocks must be a multiple of in_channels * k_h * k_w"
- )
- return (
- W.reshape(self.out_channels, -1, self.block_size)
- .permute(2, 1, 0)
- .flatten(1, 2)
- )
- # not implemented
- else:
- raise NotImplementedError(W.size())
-
- def encode(self):
- """
- Performs self.n_iter EM steps.
- """
-
- self.initialize_centroids()
- for i in range(self.n_iter):
- try:
- self.step(i)
- except EmptyClusterResolveError:
- break
-
- def decode(self):
- """
- Returns the encoded full weight matrix. Must be called after
- the encode function.
- """
-
- # fully connected case
- if "k_h" not in self.__dict__:
- return (
- self.centroids[self.assignments]
- .reshape(-1, self.out_features, self.block_size)
- .permute(1, 0, 2)
- .flatten(1, 2)
- )
-
- # convolutional case
- else:
- return (
- self.centroids[self.assignments]
- .reshape(-1, self.out_channels, self.block_size)
- .permute(1, 0, 2)
- .reshape(self.out_channels, self.in_channels, self.k_h, self.k_w)
- )
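-
-
-# Minimal usage sketch (illustrative only, not part of the original module):
-if __name__ == "__main__":
-    import torch
-
-    layer = torch.nn.Linear(16, 4)
-    quantizer = PQ(layer.weight.data, block_size=4, n_centroids=8, n_iter=10, verbose=False)
-    quantizer.encode()                      # run the EM / k-means steps on the reshaped weight
-    layer.weight.data = quantizer.decode()  # replace the weight by its quantized reconstruction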
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py
deleted file mode 100644
index 9e6ce4b8a2f77ed889a6e1451321a8e3ac21dc67..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/multilingual_masked_lm.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-
-import numpy as np
-import torch
-from fairseq import utils
-from fairseq.data import (
- ConcatDataset,
- Dictionary,
- IdDataset,
- MaskTokensDataset,
- NestedDictionaryDataset,
- NumelDataset,
- NumSamplesDataset,
- PadDataset,
- PrependTokenDataset,
- RawLabelDataset,
- ResamplingDataset,
- SortDataset,
- TokenBlockDataset,
- data_utils,
- encoders,
-)
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-
-logger = logging.getLogger(__name__)
-
-
-@register_task("multilingual_masked_lm")
-class MultiLingualMaskedLMTask(LegacyFairseqTask):
- """Task for training masked language models (e.g., BERT, RoBERTa)."""
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- parser.add_argument(
- "data",
- help="colon separated path to data directories list, \
- will be iterated upon during epochs in round-robin manner",
- )
- parser.add_argument(
- "--sample-break-mode",
- default="complete",
- choices=["none", "complete", "complete_doc", "eos"],
- help='If omitted or "none", fills each sample with tokens-per-sample '
- 'tokens. If set to "complete", splits samples only at the end '
- "of sentence, but may include multiple sentences per sample. "
- '"complete_doc" is similar but respects doc boundaries. '
- 'If set to "eos", includes only one sentence per sample.',
- )
- parser.add_argument(
- "--tokens-per-sample",
- default=512,
- type=int,
- help="max number of total tokens over all segments "
- "per sample for BERT dataset",
- )
- parser.add_argument(
- "--mask-prob",
- default=0.15,
- type=float,
- help="probability of replacing a token with mask",
- )
- parser.add_argument(
- "--leave-unmasked-prob",
- default=0.1,
- type=float,
- help="probability that a masked token is unmasked",
- )
- parser.add_argument(
- "--random-token-prob",
- default=0.1,
- type=float,
- help="probability of replacing a token with a random token",
- )
- parser.add_argument(
- "--freq-weighted-replacement",
- action="store_true",
- help="sample random replacement words based on word frequencies",
- )
- parser.add_argument(
- "--mask-whole-words",
- default=False,
- action="store_true",
- help="mask whole words; you may also want to set --bpe",
- )
- parser.add_argument(
- "--multilang-sampling-alpha",
- type=float,
- default=1.0,
- help="smoothing alpha for sample rations across multiple datasets",
- )
-
- def __init__(self, args, dictionary):
- super().__init__(args)
- self.dictionary = dictionary
- self.seed = args.seed
-
- # add mask token
- self.mask_idx = dictionary.add_symbol("<mask>")
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- paths = utils.split_paths(args.data)
- assert len(paths) > 0
- dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
- logger.info("dictionary: {} types".format(len(dictionary)))
- return cls(args, dictionary)
-
- def _get_whole_word_mask(self):
- # create masked input and targets
- if self.args.mask_whole_words:
- bpe = encoders.build_bpe(self.args)
- if bpe is not None:
-
- def is_beginning_of_word(i):
- if i < self.source_dictionary.nspecial:
- # special elements are always considered beginnings
- return True
- tok = self.source_dictionary[i]
- if tok.startswith("madeupword"):
- return True
- try:
- return bpe.is_beginning_of_word(tok)
- except ValueError:
- return True
-
- mask_whole_words = torch.ByteTensor(
- list(map(is_beginning_of_word, range(len(self.source_dictionary))))
- )
- else:
- mask_whole_words = None
- return mask_whole_words
-
- def _get_sample_prob(self, dataset_lens):
- """
- Get smoothed sampling probability by languages. This helps low-resource
- languages by upsampling them.
- """
- prob = dataset_lens / dataset_lens.sum()
- smoothed_prob = prob ** self.args.multilang_sampling_alpha
- smoothed_prob = smoothed_prob / smoothed_prob.sum()
- return smoothed_prob
-
- def load_dataset(self, split, epoch=1, combine=False, **kwargs):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- paths = utils.split_paths(self.args.data)
- assert len(paths) > 0
- data_path = paths[(epoch - 1) % len(paths)]
-
- languages = sorted(
- name
- for name in os.listdir(data_path)
- if os.path.isdir(os.path.join(data_path, name))
- )
-
- logger.info("Training on {0} languages: {1}".format(len(languages), languages))
- logger.info(
- "Language to id mapping: ", {lang: id for id, lang in enumerate(languages)}
- )
-
- mask_whole_words = self._get_whole_word_mask()
- lang_datasets = []
- for lang_id, language in enumerate(languages):
- split_path = os.path.join(data_path, language, split)
-
- dataset = data_utils.load_indexed_dataset(
- split_path,
- self.source_dictionary,
- self.args.dataset_impl,
- combine=combine,
- )
- if dataset is None:
- raise FileNotFoundError(
- "Dataset not found: {} ({})".format(split, split_path)
- )
-
- # create continuous blocks of tokens
- dataset = TokenBlockDataset(
- dataset,
- dataset.sizes,
- self.args.tokens_per_sample - 1, # one less for <s>
- pad=self.source_dictionary.pad(),
- eos=self.source_dictionary.eos(),
- break_mode=self.args.sample_break_mode,
- )
- logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
-
- # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
- dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
-
- src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
- dataset,
- self.source_dictionary,
- pad_idx=self.source_dictionary.pad(),
- mask_idx=self.mask_idx,
- seed=self.args.seed,
- mask_prob=self.args.mask_prob,
- leave_unmasked_prob=self.args.leave_unmasked_prob,
- random_token_prob=self.args.random_token_prob,
- freq_weighted_replacement=self.args.freq_weighted_replacement,
- mask_whole_words=mask_whole_words,
- )
-
- lang_dataset = NestedDictionaryDataset(
- {
- "net_input": {
- "src_tokens": PadDataset(
- src_dataset,
- pad_idx=self.source_dictionary.pad(),
- left_pad=False,
- ),
- "src_lengths": NumelDataset(src_dataset, reduce=False),
- },
- "target": PadDataset(
- tgt_dataset,
- pad_idx=self.source_dictionary.pad(),
- left_pad=False,
- ),
- "nsentences": NumSamplesDataset(),
- "ntokens": NumelDataset(src_dataset, reduce=True),
- "lang_id": RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
- },
- sizes=[src_dataset.sizes],
- )
- lang_datasets.append(lang_dataset)
-
- dataset_lengths = np.array(
- [len(d) for d in lang_datasets],
- dtype=float,
- )
- logger.info(
- "loaded total {} blocks for all languages".format(
- dataset_lengths.sum(),
- )
- )
- if split == self.args.train_subset:
- # For train subset, additionally up or down sample languages.
- sample_probs = self._get_sample_prob(dataset_lengths)
- logger.info(
- "Sample probability by language: ",
- {
- lang: "{0:.4f}".format(sample_probs[id])
- for id, lang in enumerate(languages)
- },
- )
- size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
- logger.info(
- "Up/Down Sampling ratio by language: ",
- {
- lang: "{0:.2f}".format(size_ratio[id])
- for id, lang in enumerate(languages)
- },
- )
-
- resampled_lang_datasets = [
- ResamplingDataset(
- lang_datasets[i],
- size_ratio=size_ratio[i],
- seed=self.args.seed,
- epoch=epoch,
- replace=size_ratio[i] >= 1.0,
- )
- for i, d in enumerate(lang_datasets)
- ]
- dataset = ConcatDataset(resampled_lang_datasets)
- else:
- dataset = ConcatDataset(lang_datasets)
- lang_splits = [split]
- for lang_id, lang_dataset in enumerate(lang_datasets):
- split_name = split + "_" + languages[lang_id]
- lang_splits.append(split_name)
- self.datasets[split_name] = lang_dataset
-
- # [TODO]: This is hacky for now to print validation ppl for each
- # language individually. Maybe need task API changes to allow it
- # in more generic ways.
- if split in self.args.valid_subset:
- self.args.valid_subset = self.args.valid_subset.replace(
- split, ",".join(lang_splits)
- )
-
- with data_utils.numpy_seed(self.args.seed + epoch):
- shuffle = np.random.permutation(len(dataset))
-
- self.datasets[split] = SortDataset(
- dataset,
- sort_order=[
- shuffle,
- dataset.sizes,
- ],
- )
-
- def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
- src_dataset = PadDataset(
- TokenBlockDataset(
- src_tokens,
- src_lengths,
- self.args.tokens_per_sample - 1, # one less for <s>
- pad=self.source_dictionary.pad(),
- eos=self.source_dictionary.eos(),
- break_mode="eos",
- ),
- pad_idx=self.source_dictionary.pad(),
- left_pad=False,
- )
- src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
- src_dataset = NestedDictionaryDataset(
- {
- "id": IdDataset(),
- "net_input": {
- "src_tokens": src_dataset,
- "src_lengths": NumelDataset(src_dataset, reduce=False),
- },
- },
- sizes=src_lengths,
- )
- if sort:
- src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
- return src_dataset
-
- @property
- def source_dictionary(self):
- return self.dictionary
-
- @property
- def target_dictionary(self):
- return self.dictionary
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py
deleted file mode 100644
index 11c7dc1ea966a54f8915ef164377e40f90e851a1..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/simultaneous_translation.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from fairseq.tasks import register_task
-from fairseq.tasks.speech_to_text import SpeechToTextTask
-from fairseq.tasks.translation import (
- TranslationTask, TranslationConfig
-)
-
-try:
- import examples.simultaneous_translation # noqa
- import_successful = True
-except BaseException:
- import_successful = False
-
-
-logger = logging.getLogger(__name__)
-
-
-def check_import(flag):
- if not flag:
- raise ImportError(
- "'examples.simultaneous_translation' is not correctly imported. "
- "Please considering `pip install -e $FAIRSEQ_DIR`."
- )
-
-
-@register_task("simul_speech_to_text")
-class SimulSpeechToTextTask(SpeechToTextTask):
- def __init__(self, args, tgt_dict):
- check_import(import_successful)
- super().__init__(args, tgt_dict)
-
-
-@register_task("simul_text_to_text", dataclass=TranslationConfig)
-class SimulTextToTextTask(TranslationTask):
- def __init__(self, cfg, src_dict, tgt_dict):
- check_import(import_successful)
- super().__init__(cfg, src_dict, tgt_dict)
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py
deleted file mode 100644
index 3a3892f92e3fbb866e3111199a9a4cf1f88e3959..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/monotonic_align/setup.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import numpy
-from setuptools import Extension, find_packages
-from distutils.core import setup
-from Cython.Build import cythonize
-
-
-_VERSION = "1.1"
-
-
-ext_modules = cythonize(
- "monotonic_align/core.pyx",
- compiler_directives={"language_level": "3"},
-)
-
-setup(
- name="monotonic_align",
- ext_modules=ext_modules,
- include_dirs=[numpy.get_include(), "monotonic_align"],
- packages=find_packages(),
- setup_requires=["numpy", "cython"],
- install_requires=["numpy"],
- version=_VERSION,
-)
diff --git a/spaces/HawkingChen/LangFlow/Dockerfile b/spaces/HawkingChen/LangFlow/Dockerfile
deleted file mode 100644
index 5507d40d099b33aeaeb69447359d62714cc505f2..0000000000000000000000000000000000000000
--- a/spaces/HawkingChen/LangFlow/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.10-slim
-
-RUN apt-get update && apt-get install gcc g++ git make -y
-RUN useradd -m -u 1000 user
-USER user
-ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
-WORKDIR $HOME/app
-
-COPY --chown=user . $HOME/app
-
-RUN pip install "langflow>=0.1.7" -U --user
-CMD ["python", "-m", "langflow", "--host", "0.0.0.0", "--port", "7860", "--remove-api-keys" , "--database-url", "sqlite:////home/langflow/langflow.db"]
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md b/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md
deleted file mode 100644
index fb9d82576f80b4405564a99774fc98ac2fe6ad3b..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/simultaneous_translation/docs/enja-waitk.md
+++ /dev/null
@@ -1,106 +0,0 @@
-# An example of an English to Japanese Simultaneous Translation System
-
-This is an example of training and evaluating a transformer *wait-k* English to Japanese simultaneous text-to-text translation model.
-
-## Data Preparation
-This section introduces the data preparation for training and evaluation.
-If you only want to evaluate the model, please jump to [Inference & Evaluation](#inference-&-evaluation)
-
-For illustration, we only use the following subsets of the available data from [WMT20 news translation task](http://www.statmt.org/wmt20/translation-task.html), which results in 7,815,391 sentence pairs.
-- News Commentary v16
-- Wiki Titles v3
-- WikiMatrix V1
-- Japanese-English Subtitle Corpus
-- The Kyoto Free Translation Task Corpus
-
-We use the WMT20 development data as the development set. Training a `transformer_vaswani_wmt_en_de_big` model on this amount of data results in 17.3 BLEU with greedy search and 19.7 with beam search (beam size 10). Note that better performance can be achieved with the full WMT training data.
-
-We use [sentencepiece](https://github.com/google/sentencepiece) toolkit to tokenize the data with a vocabulary size of 32000.
-Additionally, we filtered out the sentences longer than 200 words after tokenization.
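-
-For reference, a subword model of that size could be trained through the sentencepiece Python API roughly as follows (file names here are placeholders, not the exact recipe used for this system):
-
-```python
-import sentencepiece as spm
-
-# train one joint English/Japanese model with a 32k vocabulary
-spm.SentencePieceTrainer.train(
-    input="train.en,train.ja",   # hypothetical raw parallel text files
-    model_prefix="enja_spm32k",
-    vocab_size=32000,
-)
-```
-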
-Assuming the tokenized text data is saved at `${DATA_DIR}`,
-we prepare the data binary with the following command.
-
-```bash
-fairseq-preprocess \
- --source-lang en --target-lang ja \
- --trainpref ${DATA_DIR}/train \
- --validpref ${DATA_DIR}/dev \
- --testpref ${DATA_DIR}/test \
- --destdir ${WMT20_ENJA_DATA_BIN} \
- --nwordstgt 32000 --nwordssrc 32000 \
- --workers 20
-```
-
-## Simultaneous Translation Model Training
-To train a wait-k `(k=10)` model.
-```bash
-fairseq-train ${WMT20_ENJA_DATA_BIN} \
- --save-dir ${SAVE_DIR} \
- --simul-type waitk \
- --waitk-lagging 10 \
- --max-epoch 70 \
- --arch transformer_monotonic_vaswani_wmt_en_de_big \
- --optimizer adam \
- --adam-betas '(0.9, 0.98)' \
- --lr-scheduler inverse_sqrt \
- --warmup-init-lr 1e-07 \
- --warmup-updates 4000 \
- --lr 0.0005 \
- --stop-min-lr 1e-09 \
- --clip-norm 10.0 \
- --dropout 0.3 \
- --weight-decay 0.0 \
- --criterion label_smoothed_cross_entropy \
- --label-smoothing 0.1 \
- --max-tokens 3584
-```
-This command is for training on 8 GPUs. Equivalently, the model can be trained on one GPU with `--update-freq 8`.
-
-## Inference & Evaluation
-First of all, install [SimulEval](https://github.com/facebookresearch/SimulEval) for evaluation.
-
-```bash
-git clone https://github.com/facebookresearch/SimulEval.git
-cd SimulEval
-pip install -e .
-```
-
-The following command is for the evaluation.
-Assuming the source and reference files are `${SRC_FILE}` and `${TGT_FILE}`, and the sentencepiece model file for English is saved at `${SRC_SPM_PATH}`:
-
-
-```bash
-simuleval \
- --source ${SRC_FILE} \
- --target ${TGT_FILE} \
- --data-bin ${WMT20_ENJA_DATA_BIN} \
- --sacrebleu-tokenizer ja-mecab \
- --eval-latency-unit char \
- --no-space \
- --src-splitter-type sentencepiecemodel \
- --src-splitter-path ${SRC_SPM_PATH} \
- --agent ${FAIRSEQ}/examples/simultaneous_translation/agents/simul_trans_text_agent_enja.py \
- --model-path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \
- --output ${OUTPUT} \
- --scores
-```
-
-The `--data-bin` should be the same as in the previous sections if you prepared the data from scratch.
-If only for evaluation, a prepared data directory can be found [here](https://dl.fbaipublicfiles.com/simultaneous_translation/wmt20_enja_medium_databin.tgz) and a pretrained checkpoint (wait-k=10 model) can be downloaded from [here](https://dl.fbaipublicfiles.com/simultaneous_translation/wmt20_enja_medium_wait10_ckpt.pt).
-
-The output should look like this:
-```bash
-{
- "Quality": {
- "BLEU": 11.442253287568398
- },
- "Latency": {
- "AL": 8.6587861866951,
- "AP": 0.7863304776251316,
- "DAL": 9.477850951194764
- }
-}
-```
-Latency is evaluated in characters on the target side (`--eval-latency-unit char`). Quality is evaluated with `sacrebleu` using the `MeCab` tokenizer (`--sacrebleu-tokenizer ja-mecab`). `--no-space` indicates that no space is added when merging the predicted words.
-
-If `--output ${OUTPUT}` option is used, the detailed log and scores will be stored under the `${OUTPUT}` directory.
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py
deleted file mode 100644
index 2ec6af3fcb09ccaf853be15a84ed8181f9e2f546..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/utils.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-from operator import attrgetter
-
-import torch.distributed as dist
-import torch.nn as nn
-
-from ..pq.utils import attrsetter, get_layers
-from .modules import ActivationQuantizer, IntConv2d, IntEmbedding, IntLinear
-
-
-MAPPING = {nn.Linear: IntLinear, nn.Embedding: IntEmbedding, nn.Conv2d: IntConv2d}
-
-
-def quantize_model_(model, p=0.2, bits=8, update_step=3000, method="histogram", remove_weights=False):
- """
- Replaces all modules with their scalar quantized counterpart and
- registers hooks to quantize the post-activations of those modules.
-
- Args:
- - model: a nn.Module
- - p: amount of noise (0 for no noise, 1 to quantize all the weights/activations)
- - bits: number of bits
- - update_step: update quantization parameters every update_step steps
- """
- # quantize all layers
- # remove weights indicates whether the weights extension should be removed, in addition to
- # weight_orig and weight extension on names
- quantized_layers = get_layers(model, "(.*?)", remove_weights=remove_weights)
-
- for layer in quantized_layers:
-
- # book-keeping
- is_master_process = (not dist.is_initialized()) or (
- dist.is_initialized() and dist.get_rank() == 0
- )
-
- # recover module
- module = attrgetter(layer)(model)
- if is_master_process:
- logging.info(
- f"Quantizing layer {layer} with bits={bits} and QuantNoise={p}"
- )
-
- # quantization params
- q_params = {
- "p": p,
- "update_step": update_step,
- "bits": bits,
- "method": method,
- "counter": 0,
- }
-
- # instantiate the quantized counterpart
- if isinstance(module, tuple(MAPPING.keys())):
- QuantizedModule = MAPPING[module.__class__]
- quantized_module = QuantizedModule.__new__(QuantizedModule)
- params = module.__dict__
- params.update(q_params)
- quantized_module.__dict__.update(params)
-
- else:
- if is_master_process:
- logging.info(f"Module {module} not yet supported for quantization")
- continue
-
- # activation quantization
- a_q = ActivationQuantizer(quantized_module, p=0, bits=bits, method=method)
-
- # replace layer by its quantized counterpart
- attrsetter(layer)(model, quantized_module)
-
- # return name of quantized layers
- return quantized_layers
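-
-
-# Minimal usage sketch (illustrative only, not part of the original module):
-if __name__ == "__main__":
-    model = nn.Sequential(nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 4))
-    layers = quantize_model_(model, p=0.2, bits=8, update_step=1000, method="histogram")
-    logging.info("scalar-quantized layers: %s", layers)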
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py b/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py
deleted file mode 100644
index 854125149448a2d37ad2773cd1e6d614e73e0e79..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/ngram_repeat_block.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# Originally from Microsoft Corporation.
-# Licensed under the MIT License.
-
-""" Wrapper for ngram_repeat_block cuda extension """
-import torch
-from torch import nn
-
-import math
-from typing import Dict, List, Optional
-import warnings
-
-try:
- from fairseq import ngram_repeat_block_cuda
-
- EXTENSION_BUILT = True
-except ImportError:
- EXTENSION_BUILT = False
-
-
-def is_cuda_extension_usable() -> bool:
- """Check whether ngram_repeat_block_cuda is built properly"""
- if not EXTENSION_BUILT or not torch.cuda.is_available():
- return False
- bsz = 2
- tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device="cuda")
- lprobs = torch.rand((8, 12), device="cuda")
- try:
- outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3)
- outputs = outputs + 4 # This line breaks if the extension is built incorrectly.
- return True
- except RuntimeError:
- warnings.warn(
- "NGramRepeatBlock extension must be rebuilt."
- 'Run TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0" python setup.py build_ext --inplace'
- )
- return False
-
-
-class NGramRepeatBlock(nn.Module):
- """ Wrapper class for calling ngram_repeat_block cuda extension """
-
- def __init__(self, no_repeat_ngram_size: int, use_extension: bool = True):
- super().__init__()
- self.use_extension = is_cuda_extension_usable() if use_extension else False
- self.no_repeat_ngram_size = no_repeat_ngram_size
-
- def reset_parameters(self):
- pass
-
- @torch.jit.unused
- def call_cuda_extension(
- self,
- tokens,
- lprobs,
- bsz: int,
- beam_size: int,
- step: int,
- ):
- return ngram_repeat_block_cuda.forward(
- tokens, lprobs, bsz, step, beam_size, self.no_repeat_ngram_size
- )
-
- def forward(
- self,
- tokens,
- lprobs,
- bsz: int,
- beam_size: int,
- step: int,
- ):
- """
- Args:
- tokens (Tensor): input tokens of shape (bsz * beam_size, seq_len)
- lprobs (Tensor): log-probabilities of shape (bsz * beam_size, vocab_size),
- expected to be updated in place
- bsz (int): batch size
- beam_size (int): beam size
- step (int): current decoding step
- (the n-gram size is taken from self.no_repeat_ngram_size)
- """
- msg = f"expected {bsz *beam_size} got"
- assert tokens.size(0) == bsz * beam_size, f"{msg} {tokens.size(0)}"
- assert lprobs.size(0) == bsz * beam_size, f"{msg} {lprobs.size(0)}"
- if self.use_extension:
- return self.call_cuda_extension(tokens, lprobs, bsz, beam_size, step)
-
- else:
- return self._no_repeat_ngram(
- tokens,
- lprobs,
- bsz,
- beam_size,
- step,
- )
-
- def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):
- """For each hypothesis generate a list of previous ngrams and set associated lprobs to -inf"""
- gen_ngrams: List[Dict[str, List[int]]] = [
- torch.jit.annotate(Dict[str, List[int]], {})
- for bbsz_idx in range(bsz * beam_size)
- ]
- cpu_tokens = tokens.cpu()
- for bbsz_idx in range(bsz * beam_size):
- gen_tokens: List[int] = cpu_tokens[bbsz_idx].tolist()
- for ngram in self.transpose_list(
- [gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]
- ):
- key = ",".join([str(x) for x in ngram[:-1]])
- gen_ngrams[bbsz_idx][key] = gen_ngrams[bbsz_idx].get(
- key, torch.jit.annotate(List[int], [])
- ) + [ngram[-1]]
- if step + 2 - self.no_repeat_ngram_size >= 0:
- # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
- banned_tokens = [
- self.calculate_banned_tokens(
- tokens, step, gen_ngrams, self.no_repeat_ngram_size, bbsz_idx
- )
- for bbsz_idx in range(bsz * beam_size)
- ]
- else:
- banned_tokens = [
- torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)
- ]
- for bbsz_idx in range(bsz * beam_size):
- lprobs[bbsz_idx][
- torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64)
- ] = torch.tensor(-math.inf).to(lprobs)
- return lprobs
-
- @staticmethod
- def calculate_banned_tokens(
- tokens,
- step: int,
- gen_ngrams: List[Dict[str, List[int]]],
- no_repeat_ngram_size: int,
- bbsz_idx: int,
- ):
- tokens_list: List[int] = tokens[
- bbsz_idx, step + 2 - no_repeat_ngram_size : step + 1
- ].tolist()
- # before decoding the next token, prevent decoding of ngrams that have already appeared
- ngram_index = ",".join([str(x) for x in tokens_list])
- return gen_ngrams[bbsz_idx].get(ngram_index, torch.jit.annotate(List[int], []))
-
- @staticmethod
- def transpose_list(l: List[List[int]]):
- # GeneratorExp aren't supported in TS so ignoring the lint
- min_len = min([len(x) for x in l]) # noqa
- l2 = [[row[i] for row in l] for i in range(min_len)]
- return l2
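
To make the fallback path above concrete: blocking a repeated n-gram means looking up the last n-1 generated tokens and pushing the log-probability of any continuation already seen to -inf. A standalone sketch of that bookkeeping (illustrative names, independent of fairseq):

    import math

    def banned_continuations(prev_tokens, ngram_size):
        # map each (n-1)-gram seen so far to the tokens that followed it
        seen = {}
        for i in range(len(prev_tokens) - ngram_size + 1):
            *prefix, nxt = prev_tokens[i:i + ngram_size]
            seen.setdefault(tuple(prefix), []).append(nxt)
        return seen

    prev = [4, 4, 3, 4, 4]                   # tokens generated so far for one hypothesis
    seen = banned_continuations(prev, ngram_size=3)
    banned = seen.get(tuple(prev[-2:]), [])  # continuations that would repeat a trigram -> [3]
    lprobs = [0.0] * 8
    for tok in banned:
        lprobs[tok] = -math.inf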
diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py
deleted file mode 100644
index f490c4bbd598a35de43d36ceafcbd769e7ff21bf..0000000000000000000000000000000000000000
--- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/config/GroundingDINO_SwinB.cfg.py
+++ /dev/null
@@ -1,43 +0,0 @@
-batch_size = 1
-modelname = "groundingdino"
-backbone = "swin_B_384_22k"
-position_embedding = "sine"
-pe_temperatureH = 20
-pe_temperatureW = 20
-return_interm_indices = [1, 2, 3]
-backbone_freeze_keywords = None
-enc_layers = 6
-dec_layers = 6
-pre_norm = False
-dim_feedforward = 2048
-hidden_dim = 256
-dropout = 0.0
-nheads = 8
-num_queries = 900
-query_dim = 4
-num_patterns = 0
-num_feature_levels = 4
-enc_n_points = 4
-dec_n_points = 4
-two_stage_type = "standard"
-two_stage_bbox_embed_share = False
-two_stage_class_embed_share = False
-transformer_activation = "relu"
-dec_pred_bbox_embed_share = True
-dn_box_noise_scale = 1.0
-dn_label_noise_ratio = 0.5
-dn_label_coef = 1.0
-dn_bbox_coef = 1.0
-embed_init_tgt = True
-dn_labelbook_size = 2000
-max_text_len = 256
-text_encoder_type = "bert-base-uncased"
-use_text_enhancer = True
-use_fusion_layer = True
-use_checkpoint = True
-use_transformer_ckpt = True
-use_text_cross_attention = True
-text_dropout = 0.0
-fusion_dropout = 0.0
-fusion_droppath = 0.1
-sub_sentence_present = True
diff --git a/spaces/IPN/demo_/app.py b/spaces/IPN/demo_/app.py
deleted file mode 100644
index 60bf76f875a2cd9840d27d747f4371b754dae4c2..0000000000000000000000000000000000000000
--- a/spaces/IPN/demo_/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("huggingface/google/vit-base-patch16-224").launch();
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py b/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py
deleted file mode 100644
index 05200334e477e59feefd1e4a0b5e94204e4eb2fa..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/archs/vgg_arch.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import os
-import torch
-from collections import OrderedDict
-from torch import nn as nn
-from torchvision.models import vgg as vgg
-
-from basicsr.utils.registry import ARCH_REGISTRY
-
-VGG_PRETRAIN_PATH = 'experiments/pretrained_models/vgg19-dcbb9e9d.pth'
-NAMES = {
- 'vgg11': [
- 'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2',
- 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2',
- 'pool5'
- ],
- 'vgg13': [
- 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
- 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'pool4',
- 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5'
- ],
- 'vgg16': [
- 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
- 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2',
- 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
- 'pool5'
- ],
- 'vgg19': [
- 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
- 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
- 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
- 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4', 'pool5'
- ]
-}
-
-
-def insert_bn(names):
- """Insert bn layer after each conv.
-
- Args:
- names (list): The list of layer names.
-
- Returns:
- list: The list of layer names with bn layers.
- """
- names_bn = []
- for name in names:
- names_bn.append(name)
- if 'conv' in name:
- position = name.replace('conv', '')
- names_bn.append('bn' + position)
- return names_bn
-
-
-@ARCH_REGISTRY.register()
-class VGGFeatureExtractor(nn.Module):
- """VGG network for feature extraction.
-
- In this implementation, we allow users to choose whether to use normalization
- of the input feature and the type of VGG network. Note that the pretrained
- path must match the VGG type.
-
- Args:
- layer_name_list (list[str]): Forward function returns the corresponding
- features according to the layer_name_list.
- Example: {'relu1_1', 'relu2_1', 'relu3_1'}.
- vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
- use_input_norm (bool): If True, normalize the input image. Importantly,
- the input feature must be in the range [0, 1]. Default: True.
- range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
- Default: False.
- requires_grad (bool): If true, the parameters of VGG network will be
- optimized. Default: False.
- remove_pooling (bool): If true, the max pooling operations in VGG net
- will be removed. Default: False.
- pooling_stride (int): The stride of max pooling operation. Default: 2.
- """
-
- def __init__(self,
- layer_name_list,
- vgg_type='vgg19',
- use_input_norm=True,
- range_norm=False,
- requires_grad=False,
- remove_pooling=False,
- pooling_stride=2):
- super(VGGFeatureExtractor, self).__init__()
-
- self.layer_name_list = layer_name_list
- self.use_input_norm = use_input_norm
- self.range_norm = range_norm
-
- self.names = NAMES[vgg_type.replace('_bn', '')]
- if 'bn' in vgg_type:
- self.names = insert_bn(self.names)
-
- # only borrow layers that will be used to avoid unused params
- max_idx = 0
- for v in layer_name_list:
- idx = self.names.index(v)
- if idx > max_idx:
- max_idx = idx
-
- if os.path.exists(VGG_PRETRAIN_PATH):
- vgg_net = getattr(vgg, vgg_type)(pretrained=False)
- state_dict = torch.load(VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage)
- vgg_net.load_state_dict(state_dict)
- else:
- vgg_net = getattr(vgg, vgg_type)(pretrained=True)
-
- features = vgg_net.features[:max_idx + 1]
-
- modified_net = OrderedDict()
- for k, v in zip(self.names, features):
- if 'pool' in k:
- # if remove_pooling is true, pooling operation will be removed
- if remove_pooling:
- continue
- else:
- # in some cases, we may want to change the default stride
- modified_net[k] = nn.MaxPool2d(kernel_size=2, stride=pooling_stride)
- else:
- modified_net[k] = v
-
- self.vgg_net = nn.Sequential(modified_net)
-
- if not requires_grad:
- self.vgg_net.eval()
- for param in self.parameters():
- param.requires_grad = False
- else:
- self.vgg_net.train()
- for param in self.parameters():
- param.requires_grad = True
-
- if self.use_input_norm:
- # the mean is for image with range [0, 1]
- self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
- # the std is for image with range [0, 1]
- self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
-
- def forward(self, x):
- """Forward function.
-
- Args:
- x (Tensor): Input tensor with shape (n, c, h, w).
-
- Returns:
- Tensor: Forward results.
- """
- if self.range_norm:
- x = (x + 1) / 2
- if self.use_input_norm:
- x = (x - self.mean) / self.std
-
- output = {}
- for key, layer in self.vgg_net._modules.items():
- x = layer(x)
- if key in self.layer_name_list:
- output[key] = x.clone()
-
- return output
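
A hedged usage sketch for the extractor above, e.g. as the backbone of a perceptual loss; torchvision downloads the weights if the local VGG_PRETRAIN_PATH is absent, and the layer names come from the NAMES table:

    import torch

    extractor = VGGFeatureExtractor(layer_name_list=['relu1_1', 'relu2_1', 'relu3_1'],
                                    vgg_type='vgg19', use_input_norm=True)
    x = torch.rand(2, 3, 128, 128)          # images in [0, 1]
    feats = extractor(x)                    # dict: layer name -> feature tensor
    print({k: v.shape for k, v in feats.items()})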
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py
deleted file mode 100644
index 11e1c6c7a74f5233accda52370f92681d3d3cecf..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/lr_scheduler.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import math
-from collections import Counter
-from torch.optim.lr_scheduler import _LRScheduler
-
-
-class MultiStepRestartLR(_LRScheduler):
- """ MultiStep with restarts learning rate scheme.
-
- Args:
- optimizer (torch.nn.optimizer): Torch optimizer.
- milestones (list): Iterations that will decrease learning rate.
- gamma (float): Decrease ratio. Default: 0.1.
- restarts (list): Restart iterations. Default: [0].
- restart_weights (list): Restart weights at each restart iteration.
- Default: [1].
- last_epoch (int): Used in _LRScheduler. Default: -1.
- """
-
- def __init__(self, optimizer, milestones, gamma=0.1, restarts=(0, ), restart_weights=(1, ), last_epoch=-1):
- self.milestones = Counter(milestones)
- self.gamma = gamma
- self.restarts = restarts
- self.restart_weights = restart_weights
- assert len(self.restarts) == len(self.restart_weights), 'restarts and their weights do not match.'
- super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)
-
- def get_lr(self):
- if self.last_epoch in self.restarts:
- weight = self.restart_weights[self.restarts.index(self.last_epoch)]
- return [group['initial_lr'] * weight for group in self.optimizer.param_groups]
- if self.last_epoch not in self.milestones:
- return [group['lr'] for group in self.optimizer.param_groups]
- return [group['lr'] * self.gamma**self.milestones[self.last_epoch] for group in self.optimizer.param_groups]
-
-
-def get_position_from_periods(iteration, cumulative_period):
- """Get the position from a period list.
-
- It will return the index of the right-closest number in the period list.
- For example, the cumulative_period = [100, 200, 300, 400],
- if iteration == 50, return 0;
- if iteration == 210, return 2;
- if iteration == 300, return 2.
-
- Args:
- iteration (int): Current iteration.
- cumulative_period (list[int]): Cumulative period list.
-
- Returns:
- int: The position of the right-closest number in the period list.
- """
- for i, period in enumerate(cumulative_period):
- if iteration <= period:
- return i
-
-
-class CosineAnnealingRestartLR(_LRScheduler):
- """ Cosine annealing with restarts learning rate scheme.
-
- An example of config:
- periods = [10, 10, 10, 10]
- restart_weights = [1, 0.5, 0.5, 0.5]
- eta_min=1e-7
-
- It has four cycles, each with 10 iterations. At the 10th, 20th and 30th
- iterations, the scheduler will restart with the weights in restart_weights.
-
- Args:
- optimizer (torch.nn.optimizer): Torch optimizer.
- periods (list): Period for each cosine annealing cycle.
- restart_weights (list): Restart weights at each restart iteration.
- Default: [1].
- eta_min (float): The minimum lr. Default: 0.
- last_epoch (int): Used in _LRScheduler. Default: -1.
- """
-
- def __init__(self, optimizer, periods, restart_weights=(1, ), eta_min=0, last_epoch=-1):
- self.periods = periods
- self.restart_weights = restart_weights
- self.eta_min = eta_min
- assert (len(self.periods) == len(
- self.restart_weights)), 'periods and restart_weights should have the same length.'
- self.cumulative_period = [sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))]
- super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)
-
- def get_lr(self):
- idx = get_position_from_periods(self.last_epoch, self.cumulative_period)
- current_weight = self.restart_weights[idx]
- nearest_restart = 0 if idx == 0 else self.cumulative_period[idx - 1]
- current_period = self.periods[idx]
-
- return [
- self.eta_min + current_weight * 0.5 * (base_lr - self.eta_min) *
- (1 + math.cos(math.pi * ((self.last_epoch - nearest_restart) / current_period)))
- for base_lr in self.base_lrs
- ]
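
Following the example config in the CosineAnnealingRestartLR docstring, a small sketch of how the scheduler would typically be driven (the parameter and optimizer are placeholders):

    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.Adam(params, lr=1e-4)
    scheduler = CosineAnnealingRestartLR(optimizer,
                                         periods=[10, 10, 10, 10],
                                         restart_weights=[1, 0.5, 0.5, 0.5],
                                         eta_min=1e-7)
    for iteration in range(40):
        optimizer.step()        # normally preceded by forward/backward
        scheduler.step()        # one scheduler step per training iteration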
diff --git a/spaces/Ikaros521/moe-tts/text/english.py b/spaces/Ikaros521/moe-tts/text/english.py
deleted file mode 100644
index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000
--- a/spaces/Ikaros521/moe-tts/text/english.py
+++ /dev/null
@@ -1,188 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-
-'''
-Cleaners are transformations that run over the input text at both training and eval time.
-
-Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
-hyperparameter. Some cleaners are English-specific. You'll typically want to use:
- 1. "english_cleaners" for English text
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
- the symbols in symbols.py to match your data).
-'''
-
-
-# Regular expression matching whitespace:
-
-
-import re
-import inflect
-from unidecode import unidecode
-import eng_to_ipa as ipa
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
-_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
-_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
-_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
-_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
-_number_re = re.compile(r'[0-9]+')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
- ('mrs', 'misess'),
- ('mr', 'mister'),
- ('dr', 'doctor'),
- ('st', 'saint'),
- ('co', 'company'),
- ('jr', 'junior'),
- ('maj', 'major'),
- ('gen', 'general'),
- ('drs', 'doctors'),
- ('rev', 'reverend'),
- ('lt', 'lieutenant'),
- ('hon', 'honorable'),
- ('sgt', 'sergeant'),
- ('capt', 'captain'),
- ('esq', 'esquire'),
- ('ltd', 'limited'),
- ('col', 'colonel'),
- ('ft', 'fort'),
-]]
-
-
-# List of (ipa, lazy ipa) pairs:
-_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('r', 'ɹ'),
- ('æ', 'e'),
- ('ɑ', 'a'),
- ('ɔ', 'o'),
- ('ð', 'z'),
- ('θ', 's'),
- ('ɛ', 'e'),
- ('ɪ', 'i'),
- ('ʊ', 'u'),
- ('ʒ', 'ʥ'),
- ('ʤ', 'ʥ'),
- ('ˈ', '↓'),
-]]
-
-# List of (ipa, lazy ipa2) pairs:
-_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('r', 'ɹ'),
- ('ð', 'z'),
- ('θ', 's'),
- ('ʒ', 'ʑ'),
- ('ʤ', 'dʑ'),
- ('ˈ', '↓'),
-]]
-
-# List of (ipa, ipa2) pairs
-_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('r', 'ɹ'),
- ('ʤ', 'dʒ'),
- ('ʧ', 'tʃ')
-]]
-
-
-def expand_abbreviations(text):
- for regex, replacement in _abbreviations:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def collapse_whitespace(text):
- return re.sub(r'\s+', ' ', text)
-
-
-def _remove_commas(m):
- return m.group(1).replace(',', '')
-
-
-def _expand_decimal_point(m):
- return m.group(1).replace('.', ' point ')
-
-
-def _expand_dollars(m):
- match = m.group(1)
- parts = match.split('.')
- if len(parts) > 2:
- return match + ' dollars' # Unexpected format
- dollars = int(parts[0]) if parts[0] else 0
- cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
- if dollars and cents:
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
- cent_unit = 'cent' if cents == 1 else 'cents'
- return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
- elif dollars:
- dollar_unit = 'dollar' if dollars == 1 else 'dollars'
- return '%s %s' % (dollars, dollar_unit)
- elif cents:
- cent_unit = 'cent' if cents == 1 else 'cents'
- return '%s %s' % (cents, cent_unit)
- else:
- return 'zero dollars'
-
-
-def _expand_ordinal(m):
- return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
- num = int(m.group(0))
- if num > 1000 and num < 3000:
- if num == 2000:
- return 'two thousand'
- elif num > 2000 and num < 2010:
- return 'two thousand ' + _inflect.number_to_words(num % 100)
- elif num % 100 == 0:
- return _inflect.number_to_words(num // 100) + ' hundred'
- else:
- return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
- else:
- return _inflect.number_to_words(num, andword='')
-
-
-def normalize_numbers(text):
- text = re.sub(_comma_number_re, _remove_commas, text)
- text = re.sub(_pounds_re, r'\1 pounds', text)
- text = re.sub(_dollars_re, _expand_dollars, text)
- text = re.sub(_decimal_number_re, _expand_decimal_point, text)
- text = re.sub(_ordinal_re, _expand_ordinal, text)
- text = re.sub(_number_re, _expand_number, text)
- return text
-
-
-def mark_dark_l(text):
- return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text)
-
-
-def english_to_ipa(text):
- text = unidecode(text).lower()
- text = expand_abbreviations(text)
- text = normalize_numbers(text)
- phonemes = ipa.convert(text)
- phonemes = collapse_whitespace(phonemes)
- return phonemes
-
-
-def english_to_lazy_ipa(text):
- text = english_to_ipa(text)
- for regex, replacement in _lazy_ipa:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def english_to_ipa2(text):
- text = english_to_ipa(text)
- text = mark_dark_l(text)
- for regex, replacement in _ipa_to_ipa2:
- text = re.sub(regex, replacement, text)
- return text.replace('...', '…')
-
-
-def english_to_lazy_ipa2(text):
- text = english_to_ipa(text)
- for regex, replacement in _lazy_ipa2:
- text = re.sub(regex, replacement, text)
- return text
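
For illustration, possible calls into the helpers above (they require the inflect, unidecode and eng_to_ipa packages imported at the top of the file):

    print(normalize_numbers("I paid $3.50 for 2 apples"))
    # -> 'I paid three dollars, fifty cents for two apples'

    print(english_to_ipa2("Hello world"))   # IPA string with dark-l marking applied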
diff --git a/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md b/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md
deleted file mode 100644
index fa71574e81452b4c99e4eb227b75faad9bf0a2ca..0000000000000000000000000000000000000000
--- a/spaces/Irnkvezz/SIC98-GPT2-python-code-generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: SIC98 GPT2 Python Code Generator
-emoji: 🌍
-colorFrom: blue
-colorTo: gray
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/JLD/docker-hello-world/Dockerfile b/spaces/JLD/docker-hello-world/Dockerfile
deleted file mode 100644
index 4a5a821629c9a08569f0e83004405a13032cd177..0000000000000000000000000000000000000000
--- a/spaces/JLD/docker-hello-world/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM python:3.9
-
-WORKDIR /code
-
-COPY ./requirements.txt /code/requirements.txt
-
-RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
-COPY . .
-
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/spaces/JUNGU/face-swap/utils/utils.py b/spaces/JUNGU/face-swap/utils/utils.py
deleted file mode 100644
index beccaf08edc411529a66d4c11498cd6b43423d0d..0000000000000000000000000000000000000000
--- a/spaces/JUNGU/face-swap/utils/utils.py
+++ /dev/null
@@ -1,377 +0,0 @@
-import json
-from tensorflow.keras.models import model_from_json
-from networks.layers import AdaIN, AdaptiveAttention
-import tensorflow as tf
-
-import numpy as np
-import cv2
-import math
-from skimage import transform as trans
-from scipy.signal import convolve2d
-from skimage.color import rgb2yuv, yuv2rgb
-
-from PIL import Image
-
-
-def save_model_internal(model, path, name, num):
- json_model = model.to_json()
- with open(path + name + '.json', "w") as json_file:
- json_file.write(json_model)
-
- model.save_weights(path + name + '_' + str(num) + '.h5')
-
-
-def load_model_internal(path, name, num):
- with open(path + name + '.json', 'r') as json_file:
- model_dict = json_file.read()
-
- mod = model_from_json(model_dict, custom_objects={'AdaIN': AdaIN, 'AdaptiveAttention': AdaptiveAttention})
- mod.load_weights(path + name + '_' + str(num) + '.h5')
-
- return mod
-
-
-def save_training_meta(state_dict, path, num):
- with open(path + str(num) + '.json', 'w') as json_file:
- json.dump(state_dict, json_file, indent=2)
-
-
-def load_training_meta(path, num):
- with open(path + str(num) + '.json', 'r') as json_file:
- state_dict = json.load(json_file)
- return state_dict
-
-
-def log_info(sw, results_dict, iteration):
- with sw.as_default():
- for key in results_dict.keys():
- tf.summary.scalar(key, results_dict[key], step=iteration)
-
-
-src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007],
- [51.157, 89.050], [57.025, 89.702]],
- dtype=np.float32)
-# <--left
-src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111],
- [45.177, 86.190], [64.246, 86.758]],
- dtype=np.float32)
-
-# ---frontal
-src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493],
- [42.463, 87.010], [69.537, 87.010]],
- dtype=np.float32)
-
-# -->right
-src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111],
- [48.167, 86.758], [67.236, 86.190]],
- dtype=np.float32)
-
-# -->right profile
-src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007],
- [55.388, 89.702], [61.257, 89.050]],
- dtype=np.float32)
-
-src = np.array([src1, src2, src3, src4, src5])
-src_map = {112: src, 224: src * 2}
-
-# Left eye, right eye, nose, left mouth, right mouth
-arcface_src = np.array(
- [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
- [41.5493, 92.3655], [70.7299, 92.2041]],
- dtype=np.float32)
-
-arcface_src = np.expand_dims(arcface_src, axis=0)
-
-
-def extract_face(img, bb, absolute_center, mode='arcface', extention_rate=0.05, debug=False):
- """Extract face from image given a bounding box"""
- # bbox
- x1, y1, x2, y2 = bb + 60
- adjusted_absolute_center = (absolute_center[0] + 60, absolute_center[1] + 60)
- if debug:
- print(bb + 60)
- x1, y1, x2, y2 = bb
- cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
- cv2.circle(img, absolute_center, 1, (255, 0, 255), 2)
- Image.fromarray(img).show()
- x1, y1, x2, y2 = bb + 60
- # Pad image in case face is out of frame
- padded_img = np.zeros(shape=(248, 248, 3), dtype=np.uint8)
- padded_img[60:-60, 60:-60, :] = img
-
- if debug:
- cv2.rectangle(padded_img, (x1, y1), (x2, y2), (0, 255, 255), 3)
- cv2.circle(padded_img, adjusted_absolute_center, 1, (255, 255, 255), 2)
- Image.fromarray(padded_img).show()
-
- y_len = abs(y1 - y2)
- x_len = abs(x1 - x2)
-
- new_len = (y_len + x_len) // 2
-
- extension = int(new_len * extention_rate)
-
- x_adjust = (x_len - new_len) // 2
- y_adjust = (y_len - new_len) // 2
-
- x_1_adjusted = x1 + x_adjust - extension
- x_2_adjusted = x2 - x_adjust + extension
-
- if mode == 'arcface':
- y_1_adjusted = y1 - extension
- y_2_adjusted = y2 - 2 * y_adjust + extension
- else:
- y_1_adjusted = y1 + 2 * y_adjust - extension
- y_2_adjusted = y2 + extension
-
- move_x = adjusted_absolute_center[0] - (x_1_adjusted + x_2_adjusted) // 2
- move_y = adjusted_absolute_center[1] - (y_1_adjusted + y_2_adjusted) // 2
-
- x_1_adjusted = x_1_adjusted + move_x
- x_2_adjusted = x_2_adjusted + move_x
- y_1_adjusted = y_1_adjusted + move_y
- y_2_adjusted = y_2_adjusted + move_y
-
- # print(y_1_adjusted, y_2_adjusted, x_1_adjusted, x_2_adjusted)
-
- return padded_img[y_1_adjusted:y_2_adjusted, x_1_adjusted:x_2_adjusted]
-
-
-def distance(a, b):
- return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
-
-
-def euclidean_distance(a, b):
- x1 = a[0]; y1 = a[1]
- x2 = b[0]; y2 = b[1]
- return np.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
-
-
-def align_face(img, landmarks, debug=False):
- nose, right_eye, left_eye = landmarks
-
- left_eye_x = left_eye[0]
- left_eye_y = left_eye[1]
-
- right_eye_x = right_eye[0]
- right_eye_y = right_eye[1]
-
- center_eye = ((left_eye[0] + right_eye[0]) // 2, (left_eye[1] + right_eye[1]) // 2)
-
- if left_eye_y < right_eye_y:
- point_3rd = (right_eye_x, left_eye_y)
- direction = -1
- else:
- point_3rd = (left_eye_x, right_eye_y)
- direction = 1
-
- if debug:
- cv2.circle(img, point_3rd, 1, (255, 0, 0), 1)
- cv2.circle(img, center_eye, 1, (255, 0, 0), 1)
-
- cv2.line(img, right_eye, left_eye, (0, 0, 0), 1)
- cv2.line(img, left_eye, point_3rd, (0, 0, 0), 1)
- cv2.line(img, right_eye, point_3rd, (0, 0, 0), 1)
-
- a = euclidean_distance(left_eye, point_3rd)
- b = euclidean_distance(right_eye, left_eye)
- c = euclidean_distance(right_eye, point_3rd)
-
- cos_a = (b * b + c * c - a * a) / (2 * b * c)
-
- angle = np.arccos(cos_a)
-
- angle = (angle * 180) / np.pi
-
- if direction == -1:
- angle = 90 - angle
- ang = math.radians(direction * angle)
- else:
- ang = math.radians(direction * angle)
- angle = 0 - angle
-
- M = cv2.getRotationMatrix2D((64, 64), angle, 1)
- new_img = cv2.warpAffine(img, M, (128, 128),
- flags=cv2.INTER_CUBIC)
-
- rotated_nose = (int((nose[0] - 64) * np.cos(ang) - (nose[1] - 64) * np.sin(ang) + 64),
- int((nose[0] - 64) * np.sin(ang) + (nose[1] - 64) * np.cos(ang) + 64))
-
- rotated_center_eye = (int((center_eye[0] - 64) * np.cos(ang) - (center_eye[1] - 64) * np.sin(ang) + 64),
- int((center_eye[0] - 64) * np.sin(ang) + (center_eye[1] - 64) * np.cos(ang) + 64))
-
- abolute_center = (rotated_center_eye[0], (rotated_nose[1] + rotated_center_eye[1]) // 2)
-
- if debug:
- cv2.circle(new_img, rotated_nose, 1, (0, 0, 255), 1)
- cv2.circle(new_img, rotated_center_eye, 1, (0, 0, 255), 1)
- cv2.circle(new_img, abolute_center, 1, (0, 0, 255), 1)
-
- return new_img, abolute_center
-
-
-def estimate_norm(lmk, image_size=112, mode='arcface', shrink_factor=1.0):
- assert lmk.shape == (5, 2)
- tform = trans.SimilarityTransform()
- lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
- min_M = []
- min_index = []
- min_error = float('inf')
- src_factor = image_size / 112
- if mode == 'arcface':
- src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
- src = src * src_factor
- else:
- src = src_map[image_size] * src_factor
- for i in np.arange(src.shape[0]):
- tform.estimate(lmk, src[i])
- M = tform.params[0:2, :]
- results = np.dot(M, lmk_tran.T)
- results = results.T
- error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1)))
- # print(error)
- if error < min_error:
- min_error = error
- min_M = M
- min_index = i
- return min_M, min_index
-
-
-def inverse_estimate_norm(lmk, t_lmk, image_size=112, mode='arcface', shrink_factor=1.0):
- assert lmk.shape == (5, 2)
- tform = trans.SimilarityTransform()
- lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
- min_M = []
- min_index = []
- min_error = float('inf')
- src_factor = image_size / 112
- if mode == 'arcface':
- src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
- src = src * src_factor
- else:
- src = src_map[image_size] * src_factor
- for i in np.arange(src.shape[0]):
- tform.estimate(t_lmk, lmk)
- M = tform.params[0:2, :]
- results = np.dot(M, lmk_tran.T)
- results = results.T
- error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1)))
- # print(error)
- if error < min_error:
- min_error = error
- min_M = M
- min_index = i
- return min_M, min_index
-
-
-def norm_crop(img, landmark, image_size=112, mode='arcface', shrink_factor=1.0):
- """
- Align and crop the image based on the facial landmarks in the image. The alignment is done with
- a similarity transformation based on source coordinates.
- :param img: Image to transform.
- :param landmark: Five landmark coordinates in the image.
- :param image_size: Desired output size after transformation.
- :param mode: 'arcface' aligns the face for the use of Arcface facial recognition model. Useful for
- both facial recognition tasks and face swapping tasks.
- :param shrink_factor: Shrink factor that shrinks the source landmark coordinates. This will include more border
- information around the face. Useful when you want to include more background information when performing face swaps.
- The lower the shrink factor the more of the face is included. Default value 1.0 will align the image to be ready
- for the Arcface recognition model, but usually omits part of the chin. Value of 0.0 would transform all source points
- to the middle of the image, probably rendering the alignment procedure useless.
-
- If you process the image with a shrink factor of 0.85 and then want to extract the identity embedding with Arcface,
- you simply do a central crop of factor 0.85 to yield the same cropped result as using shrink factor 1.0. Since this
- reduces the resolution, the recommendation is to process images to output resolutions higher than 112 when using
- Arcface. This will make sure no information is lost by resampling the image after the central crop.
- :return: Returns the transformed image.
- """
- M, pose_index = estimate_norm(landmark, image_size, mode, shrink_factor=shrink_factor)
- warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
- return warped
-
-
-def transform_landmark_points(M, points):
- lmk_tran = np.insert(points, 2, values=np.ones(5), axis=1)
- transformed_lmk = np.dot(M, lmk_tran.T)
- transformed_lmk = transformed_lmk.T
-
- return transformed_lmk
-
-
-def multi_convolver(image, kernel, iterations):
- if kernel == "Sharpen":
- kernel = np.array([[0, -1, 0],
- [-1, 5, -1],
- [0, -1, 0]])
- elif kernel == "Unsharp_mask":
- kernel = np.array([[1, 4, 6, 4, 1],
- [4, 16, 24, 16, 1],
- [6, 24, -476, 24, 1],
- [4, 16, 24, 16, 1],
- [1, 4, 6, 4, 1]]) * (-1 / 256)
- elif kernel == "Blur":
- kernel = (1 / 16.0) * np.array([[1., 2., 1.],
- [2., 4., 2.],
- [1., 2., 1.]])
- for i in range(iterations):
- image = convolve2d(image, kernel, 'same', boundary='fill', fillvalue = 0)
- return image
-
-
-def convolve_rgb(image, kernel, iterations=1):
- img_yuv = rgb2yuv(image)
- img_yuv[:, :, 0] = multi_convolver(img_yuv[:, :, 0], kernel,
- iterations)
- final_image = yuv2rgb(img_yuv)
-
- return final_image.astype('float32')
-
-
-def generate_mask_from_landmarks(lms, im_size):
- blend_mask_lm = np.zeros(shape=(im_size, im_size, 3), dtype='float32')
-
- # EYES
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int(lms[0][0]), int(lms[0][1])), 12, (255, 255, 255), 30)
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int(lms[1][0]), int(lms[1][1])), 12, (255, 255, 255), 30)
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int((lms[0][0] + lms[1][0]) / 2), int((lms[0][1] + lms[1][1]) / 2)),
- 16, (255, 255, 255), 65)
-
- # NOSE
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int(lms[2][0]), int(lms[2][1])), 5, (255, 255, 255), 5)
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int((lms[0][0] + lms[1][0]) / 2), int(lms[2][1])), 16, (255, 255, 255), 100)
-
- # MOUTH
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int(lms[3][0]), int(lms[3][1])), 6, (255, 255, 255), 30)
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int(lms[4][0]), int(lms[4][1])), 6, (255, 255, 255), 30)
-
- blend_mask_lm = cv2.circle(blend_mask_lm,
- (int((lms[3][0] + lms[4][0]) / 2), int((lms[3][1] + lms[4][1]) / 2)),
- 16, (255, 255, 255), 40)
- return blend_mask_lm
-
-
-def display_distance_text(im, distance, lms, im_w, im_h, scale=2):
- blended_insert = cv2.putText(im, str(distance)[:4],
- (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
- cv2.FONT_HERSHEY_SIMPLEX, scale * 0.5, (0.08, 0.16, 0.08), int(scale * 2))
- blended_insert = cv2.putText(blended_insert, str(distance)[:4],
- (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
- cv2.FONT_HERSHEY_SIMPLEX, scale* 0.5, (0.3, 0.7, 0.32), int(scale * 1))
- return blended_insert
-
-
-def get_lm(annotation, im_w, im_h):
- lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
- [annotation[6] * im_w, annotation[7] * im_h],
- [annotation[8] * im_w, annotation[9] * im_h],
- [annotation[10] * im_w, annotation[11] * im_h],
- [annotation[12] * im_w, annotation[13] * im_h]],
- dtype=np.float32)
- return lm_align
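
A rough sketch of how the alignment helpers above fit together; the landmark coordinates are made up here, whereas in the original pipeline they come from a face detector:

    import numpy as np

    img = np.zeros((256, 256, 3), dtype=np.uint8)               # placeholder image
    lmk = np.array([[90, 100], [160, 100], [128, 140],          # eyes, nose,
                    [100, 180], [155, 180]], dtype=np.float32)  # mouth corners

    # similarity-transform the face so the landmarks match the Arcface template
    aligned = norm_crop(img, lmk, image_size=112, mode='arcface', shrink_factor=1.0)
    print(aligned.shape)                                        # (112, 112, 3)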
diff --git a/spaces/JammyMachina/the-jam-machine-app/playback.py b/spaces/JammyMachina/the-jam-machine-app/playback.py
deleted file mode 100644
index b11503a5292a84bb33614f3e02cb24e4ffdc435a..0000000000000000000000000000000000000000
--- a/spaces/JammyMachina/the-jam-machine-app/playback.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import matplotlib.pyplot as plt
-import librosa.display
-from pretty_midi import PrettyMIDI
-
-
-# Note: these functions are meant to be run within an interactive Python shell
-# Please refer to the synth.ipynb for an example of how to use them
-
-
-def get_music(midi_file):
- """
- Load a midi file and return the PrettyMIDI object and the audio signal
- """
- print(f"Getting MIDI music from: {midi_file}")
- music = PrettyMIDI(midi_file=midi_file)
- waveform = music.fluidsynth()
- return music, waveform
-
-
-def show_piano_roll(music_notes, fs=100):
- """
- Show the piano roll of a music piece, with all instruments squashed onto a single 128xN matrix
- :param music_notes: PrettyMIDI object
- :param fs: sampling frequency
- """
- # get the piano roll
- piano_roll = music_notes.get_piano_roll(fs)
- print("Piano roll shape: {}".format(piano_roll.shape))
-
- # plot the piano roll
- plt.figure(figsize=(12, 4))
- librosa.display.specshow(piano_roll, sr=100, x_axis="time", y_axis="cqt_note")
- plt.colorbar()
- plt.title("Piano roll")
- plt.tight_layout()
- plt.show()
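
For example (run interactively as noted above; the MIDI path is a placeholder, and a FluidSynth install plus soundfont are needed for PrettyMIDI.fluidsynth):

    music, waveform = get_music("example.mid")   # placeholder path
    show_piano_roll(music, fs=100)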
diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx b/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx
deleted file mode 100644
index 88aeea9d9368f2bd7385f0a0885829bf6d789492..0000000000000000000000000000000000000000
--- a/spaces/Jeff2323/ai-comic-factory/src/components/ui/avatar.tsx
+++ /dev/null
@@ -1,50 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as AvatarPrimitive from "@radix-ui/react-avatar"
-
-import { cn } from "@/lib/utils"
-
-const Avatar = React.forwardRef<
- React.ElementRef<typeof AvatarPrimitive.Root>,
- React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Root>
->(({ className, ...props }, ref) => (
- <AvatarPrimitive.Root
- ref={ref}
- className={cn("relative flex h-10 w-10 shrink-0 overflow-hidden rounded-full", className)}
- {...props}
- />
-))
-Avatar.displayName = AvatarPrimitive.Root.displayName
-
-const AvatarImage = React.forwardRef<
- React.ElementRef<typeof AvatarPrimitive.Image>,
- React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Image>
->(({ className, ...props }, ref) => (
- <AvatarPrimitive.Image
- ref={ref}
- className={cn("aspect-square h-full w-full", className)}
- {...props}
- />
-))
-AvatarImage.displayName = AvatarPrimitive.Image.displayName
-
-const AvatarFallback = React.forwardRef<
- React.ElementRef<typeof AvatarPrimitive.Fallback>,
- React.ComponentPropsWithoutRef<typeof AvatarPrimitive.Fallback>
->(({ className, ...props }, ref) => (
- <AvatarPrimitive.Fallback
- ref={ref}
- className={cn("flex h-full w-full items-center justify-center rounded-full bg-muted", className)}
- {...props}
- />
-))
-AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName
-
-export { Avatar, AvatarImage, AvatarFallback }
diff --git a/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py b/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py
deleted file mode 100644
index c86e5d0fb3103c458b9b5df3aad69ef249117f18..0000000000000000000000000000000000000000
--- a/spaces/JeffJing/ZookChatBot/OpenAIAuth/OpenAIAuth.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# Credits to github.com/rawandahmad698/PyChatGPT
-import re
-import urllib
-
-import tls_client
-
-
-class Debugger:
- def __init__(self, debug: bool = False):
- if debug:
- print("Debugger enabled on OpenAIAuth")
- self.debug = debug
-
- def set_debug(self, debug: bool):
- self.debug = debug
-
- def log(self, message: str, end: str = "\n"):
- if self.debug:
- print(message, end=end)
-
-
-class OpenAIAuth:
- def __init__(
- self,
- email_address: str,
- password: str,
- proxy: str = None,
- debug: bool = False,
- ):
- self.session_token = None
- self.email_address = email_address
- self.password = password
- self.proxy = proxy
- self.session = tls_client.Session(
- client_identifier="chrome_109",
- )
- self.access_token: str = None
- self.debugger = Debugger(debug)
- self.user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36"
-
- @staticmethod
- def url_encode(string: str) -> str:
- """
- URL encode a string
- :param string:
- :return:
- """
- return urllib.parse.quote(string)
-
- def begin(self) -> None:
- """
- Begin the auth process
- """
- self.debugger.log("Beginning auth process")
- if not self.email_address or not self.password:
- return
-
- if self.proxy:
- proxies = {
- "http": self.proxy,
- "https": self.proxy,
- }
- self.session.proxies = proxies
-
- # First, make a request to https://explorer.api.openai.com/auth/login
- url = "https://explorer.api.openai.com/"
- headers = {
- "Host": "ask.openai.com",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "User-Agent": self.user_agent,
- "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
- "Accept-Encoding": "gzip, deflate, br",
- "Connection": "keep-alive",
- }
-
- response = self.session.get(
- url=url,
- headers=headers,
- )
- if response.status_code == 200:
- self.__part_two()
- else:
- self.debugger.log("Error in part one")
- self.debugger.log("Response: ", end="")
- self.debugger.log(response.text)
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise Exception("API error")
-
- def __part_two(self) -> None:
- """
- In part two, We make a request to https://explorer.api.openai.com/api/auth/csrf and grab a fresh csrf token
- """
- self.debugger.log("Beginning part two")
-
- url = "https://explorer.api.openai.com/api/auth/csrf"
- headers = {
- "Host": "ask.openai.com",
- "Accept": "*/*",
- "Connection": "keep-alive",
- "User-Agent": self.user_agent,
- "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
- "Referer": "https://explorer.api.openai.com/auth/login",
- "Accept-Encoding": "gzip, deflate, br",
- }
- response = self.session.get(
- url=url,
- headers=headers,
- )
- if response.status_code == 200 and "json" in response.headers["Content-Type"]:
- csrf_token = response.json()["csrfToken"]
- self.__part_three(token=csrf_token)
- else:
- self.debugger.log("Error in part two")
- self.debugger.log("Response: ", end="")
- self.debugger.log(response.text)
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise Exception("Error logging in")
-
- def __part_three(self, token: str) -> None:
- """
- We reuse the csrf token from part two to make a request to /api/auth/signin/auth0?prompt=login
- """
- self.debugger.log("Beginning part three")
- url = "https://explorer.api.openai.com/api/auth/signin/auth0?prompt=login"
- payload = f"callbackUrl=%2F&csrfToken={token}&json=true"
- headers = {
- "Host": "explorer.api.openai.com",
- "User-Agent": self.user_agent,
- "Content-Type": "application/x-www-form-urlencoded",
- "Accept": "*/*",
- "Sec-Gpc": "1",
- "Accept-Language": "en-US,en;q=0.8",
- "Origin": "https://explorer.api.openai.com",
- "Sec-Fetch-Site": "same-origin",
- "Sec-Fetch-Mode": "cors",
- "Sec-Fetch-Dest": "empty",
- "Referer": "https://explorer.api.openai.com/auth/login",
- "Accept-Encoding": "gzip, deflate",
- }
- self.debugger.log("Payload: " + payload)
- self.debugger.log("Payload length: " + str(len(payload)))
- response = self.session.post(url=url, headers=headers, data=payload)
- if response.status_code == 200 and "json" in response.headers["Content-Type"]:
- url = response.json()["url"]
- if (
- url
- == "https://explorer.api.openai.com/api/auth/error?error=OAuthSignin"
- or "error" in url
- ):
- self.debugger.log("You have been rate limited")
- raise Exception("You have been rate limited.")
- self.__part_four(url=url)
- else:
- self.debugger.log("Error in part three")
- self.debugger.log("Response: ", end="")
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- self.debugger.log(response.headers)
- self.debugger.log(self.session.cookies.get_dict())
- raise Exception("Unknown error")
-
- def __part_four(self, url: str) -> None:
- """
- We make a GET request to url
- :param url:
- :return:
- """
- self.debugger.log("Beginning part four")
- headers = {
- "Host": "auth0.openai.com",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "Connection": "keep-alive",
- "User-Agent": self.user_agent,
- "Accept-Language": "en-US,en;q=0.9",
- "Referer": "https://explorer.api.openai.com/",
- }
- response = self.session.get(
- url=url,
- headers=headers,
- )
- if response.status_code == 302:
- try:
- state = re.findall(r"state=(.*)", response.text)[0]
- state = state.split('"')[0]
- self.__part_five(state=state)
- except IndexError as exc:
- self.debugger.log("Error in part four")
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- self.debugger.log("Rate limit hit")
- self.debugger.log("Response: " + str(response.text))
- raise Exception("Rate limit hit") from exc
- else:
- self.debugger.log("Error in part four")
- self.debugger.log("Response: ", end="")
- self.debugger.log(response.text)
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- self.debugger.log("Wrong response code")
- raise Exception("Unknown error")
-
- def __part_five(self, state: str) -> None:
- """
- We use the state to get the login page & check for a captcha
- """
- self.debugger.log("Beginning part five")
- url = f"https://auth0.openai.com/u/login/identifier?state={state}"
-
- headers = {
- "Host": "auth0.openai.com",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "Connection": "keep-alive",
- "User-Agent": self.user_agent,
- "Accept-Language": "en-US,en;q=0.9",
- "Referer": "https://explorer.api.openai.com/",
- }
- response = self.session.get(url, headers=headers)
- if response.status_code == 200:
- self.__part_six(state=state)
- else:
- self.debugger.log("Error in part five")
- self.debugger.log("Response: ", end="")
- self.debugger.log(response.text)
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise ValueError("Invalid response code")
-
- def __part_six(self, state: str) -> None:
- """
- We make a POST request to the login page with the captcha, email
- :param state:
- :return:
- """
- self.debugger.log("Beginning part six")
- url = f"https://auth0.openai.com/u/login/identifier?state={state}"
- email_url_encoded = self.url_encode(self.email_address)
-
- payload = (
- f"state={state}&username={email_url_encoded}&js-available=false&webauthn-available=true&is"
- f"-brave=false&webauthn-platform-available=true&action=default "
- )
-
- headers = {
- "Host": "auth0.openai.com",
- "Origin": "https://auth0.openai.com",
- "Connection": "keep-alive",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "User-Agent": self.user_agent,
- "Referer": f"https://auth0.openai.com/u/login/identifier?state={state}",
- "Accept-Language": "en-US,en;q=0.9",
- "Content-Type": "application/x-www-form-urlencoded",
- }
- response = self.session.post(
- url,
- headers=headers,
- data=payload,
- )
- if response.status_code == 302:
- self.__part_seven(state=state)
- else:
- self.debugger.log("Error in part six")
- self.debugger.log("Response: ", end="")
- self.debugger.log(response.text)
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise Exception("Unknown error")
-
- def __part_seven(self, state: str) -> None:
- """
- We enter the password
- :param state:
- :return:
- """
- url = f"https://auth0.openai.com/u/login/password?state={state}"
- self.debugger.log("Beginning part seven")
- email_url_encoded = self.url_encode(self.email_address)
- password_url_encoded = self.url_encode(self.password)
- payload = f"state={state}&username={email_url_encoded}&password={password_url_encoded}&action=default"
- headers = {
- "Host": "auth0.openai.com",
- "Origin": "https://auth0.openai.com",
- "Connection": "keep-alive",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "User-Agent": self.user_agent,
- "Referer": f"https://auth0.openai.com/u/login/password?state={state}",
- "Accept-Language": "en-US,en;q=0.9",
- "Content-Type": "application/x-www-form-urlencoded",
- }
- try:
- response = self.session.post(
- url,
- headers=headers,
- data=payload,
- )
- self.debugger.log("Request went through")
- except Exception as exc:
- self.debugger.log("Error in part seven")
- self.debugger.log("Exception: ", end="")
- self.debugger.log(exc)
- raise Exception("Could not get response") from exc
- if response.status_code == 302:
- self.debugger.log("Response code is 302")
- try:
- new_state = re.findall(r"state=(.*)", response.text)[0]
- new_state = new_state.split('"')[0]
- self.debugger.log("New state found")
- self.__part_eight(old_state=state, new_state=new_state)
- except Exception as exc:
- raise Exception("Could not find new state") from exc
- else:
- self.debugger.log("Error in part seven")
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise Exception("Wrong status code")
-
- def __part_eight(self, old_state: str, new_state) -> None:
- self.debugger.log("Beginning part eight")
- url = f"https://auth0.openai.com/authorize/resume?state={new_state}"
- headers = {
- "Host": "auth0.openai.com",
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
- "Connection": "keep-alive",
- "User-Agent": self.user_agent,
- "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
- "Referer": f"https://auth0.openai.com/u/login/password?state={old_state}",
- }
- response = self.session.get(
- url,
- headers=headers,
- allow_redirects=True,
- )
- if response.status_code == 200:
- self.session_token = response.cookies.get_dict()[
- "__Secure-next-auth.session-token"
- ]
- self.get_access_token()
-
- def get_access_token(self):
- """
- Gets access token
- """
- self.session.cookies.set(
- "__Secure-next-auth.session-token",
- self.session_token,
- )
- response = self.session.get(
- "https://explorer.api.openai.com/api/auth/session",
- )
- if response.status_code == 200:
- self.access_token = response.json()["accessToken"]
- self.debugger.log("Access token found")
- return self.access_token
- else:
- self.debugger.log("Error in part nine")
- self.debugger.log("Status code: ", end="")
- self.debugger.log(response.status_code)
- raise Exception("Wrong status code")
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py b/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py
deleted file mode 100644
index 2b997ee4ed99a90cc43db7812383927e6fe1a3e8..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/diffq/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# flake8: noqa
-"""
-This package implements different quantization strategies:
-
-- `diffq.uniform.UniformQuantizer`: classic uniform quantization over n bits.
-- `diffq.diffq.DiffQuantizer`: differentiable quantizer based on scaled noise injection.
-
-Also, do check `diffq.base.BaseQuantizer` for the common methods of all Quantizers.
-"""
-
-from .uniform import UniformQuantizer
-from .diffq import DiffQuantizer
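
The quantizers themselves live in the sibling uniform.py and diffq.py modules, which are not part of this diff. Very roughly, and assuming the upstream diffq API (DiffQuantizer(model), setup_optimizer and model_size are assumptions here, not taken from this file), training-time usage looks like:

    import torch
    from diffq import DiffQuantizer

    model = torch.nn.Linear(16, 4)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    quantizer = DiffQuantizer(model)        # assumed constructor
    quantizer.setup_optimizer(optimizer)    # assumed: registers the bit-width parameters

    # penalize the differentiable model size alongside the task loss
    loss = model(torch.randn(2, 16)).sum() + 1e-3 * quantizer.model_size()
    loss.backward()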
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py b/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py
deleted file mode 100644
index 71c0eff1815469a52399dc90a093a2f8a29223eb..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/train/data_utils.py
+++ /dev/null
@@ -1,512 +0,0 @@
-import os, traceback
-import numpy as np
-import torch
-import torch.utils.data
-
-from mel_processing import spectrogram_torch
-from utils import load_wav_to_torch, load_filepaths_and_text
-
-
-class TextAudioLoaderMultiNSFsid(torch.utils.data.Dataset):
- """
- 1) loads audio, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_and_text, hparams):
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 5000)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
- audiopaths_and_text_new = []
- lengths = []
- for audiopath, text, pitch, pitchf, dv in self.audiopaths_and_text:
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
- audiopaths_and_text_new.append([audiopath, text, pitch, pitchf, dv])
- lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
- self.audiopaths_and_text = audiopaths_and_text_new
- self.lengths = lengths
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def get_audio_text_pair(self, audiopath_and_text):
- # separate filename and text
- file = audiopath_and_text[0]
- phone = audiopath_and_text[1]
- pitch = audiopath_and_text[2]
- pitchf = audiopath_and_text[3]
- dv = audiopath_and_text[4]
-
- phone, pitch, pitchf = self.get_labels(phone, pitch, pitchf)
- spec, wav = self.get_audio(file)
- dv = self.get_sid(dv)
-
- len_phone = phone.size()[0]
- len_spec = spec.size()[-1]
- # print(123,phone.shape,pitch.shape,spec.shape)
- if len_phone != len_spec:
- len_min = min(len_phone, len_spec)
- # amor
- len_wav = len_min * self.hop_length
-
- spec = spec[:, :len_min]
- wav = wav[:, :len_wav]
-
- phone = phone[:len_min, :]
- pitch = pitch[:len_min]
- pitchf = pitchf[:len_min]
-
- return (spec, wav, phone, pitch, pitchf, dv)
-
- def get_labels(self, phone, pitch, pitchf):
- phone = np.load(phone)
- phone = np.repeat(phone, 2, axis=0)
- pitch = np.load(pitch)
- pitchf = np.load(pitchf)
- n_num = min(phone.shape[0], 900) # DistributedBucketSampler
- # print(234,phone.shape,pitch.shape)
- phone = phone[:n_num, :]
- pitch = pitch[:n_num]
- pitchf = pitchf[:n_num]
- phone = torch.FloatTensor(phone)
- pitch = torch.LongTensor(pitch)
- pitchf = torch.FloatTensor(pitchf)
- return phone, pitch, pitchf
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError(
- "{} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate
- )
- )
- audio_norm = audio
- # audio_norm = audio / self.max_wav_value
- # audio_norm = audio / np.abs(audio).max()
-
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- try:
- spec = torch.load(spec_filename)
- except:
- print(spec_filename, traceback.format_exc())
- spec = spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
- else:
- spec = spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
- return spec, audio_norm
-
- def __getitem__(self, index):
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
-
- def __len__(self):
- return len(self.audiopaths_and_text)
-
-
-class TextAudioCollateMultiNSFsid:
- """Zero-pads model inputs and targets"""
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text and aduio
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
- )
-
- max_spec_len = max([x[0].size(1) for x in batch])
- max_wave_len = max([x[1].size(1) for x in batch])
- spec_lengths = torch.LongTensor(len(batch))
- wave_lengths = torch.LongTensor(len(batch))
- spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
- wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
- spec_padded.zero_()
- wave_padded.zero_()
-
- max_phone_len = max([x[2].size(0) for x in batch])
- phone_lengths = torch.LongTensor(len(batch))
- phone_padded = torch.FloatTensor(
- len(batch), max_phone_len, batch[0][2].shape[1]
- ) # (spec, wav, phone, pitch)
- pitch_padded = torch.LongTensor(len(batch), max_phone_len)
- pitchf_padded = torch.FloatTensor(len(batch), max_phone_len)
- phone_padded.zero_()
- pitch_padded.zero_()
- pitchf_padded.zero_()
- # dv = torch.FloatTensor(len(batch), 256)#gin=256
- sid = torch.LongTensor(len(batch))
-
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- spec = row[0]
- spec_padded[i, :, : spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wave = row[1]
- wave_padded[i, :, : wave.size(1)] = wave
- wave_lengths[i] = wave.size(1)
-
- phone = row[2]
- phone_padded[i, : phone.size(0), :] = phone
- phone_lengths[i] = phone.size(0)
-
- pitch = row[3]
- pitch_padded[i, : pitch.size(0)] = pitch
- pitchf = row[4]
- pitchf_padded[i, : pitchf.size(0)] = pitchf
-
- # dv[i] = row[5]
- sid[i] = row[5]
-
- return (
- phone_padded,
- phone_lengths,
- pitch_padded,
- pitchf_padded,
- spec_padded,
- spec_lengths,
- wave_padded,
- wave_lengths,
- # dv
- sid,
- )
-
-
-class TextAudioLoader(torch.utils.data.Dataset):
- """
- 1) loads audio, text pairs
- 2) normalizes text and converts them to sequences of integers
- 3) computes spectrograms from audio files.
- """
-
- def __init__(self, audiopaths_and_text, hparams):
- self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
- self.max_wav_value = hparams.max_wav_value
- self.sampling_rate = hparams.sampling_rate
- self.filter_length = hparams.filter_length
- self.hop_length = hparams.hop_length
- self.win_length = hparams.win_length
- self.sampling_rate = hparams.sampling_rate
- self.min_text_len = getattr(hparams, "min_text_len", 1)
- self.max_text_len = getattr(hparams, "max_text_len", 5000)
- self._filter()
-
- def _filter(self):
- """
- Filter text & store spec lengths
- """
- # Store spectrogram lengths for Bucketing
- # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
- # spec_length = wav_length // hop_length
- audiopaths_and_text_new = []
- lengths = []
- for audiopath, text, dv in self.audiopaths_and_text:
- if self.min_text_len <= len(text) and len(text) <= self.max_text_len:
- audiopaths_and_text_new.append([audiopath, text, dv])
- lengths.append(os.path.getsize(audiopath) // (3 * self.hop_length))
- self.audiopaths_and_text = audiopaths_and_text_new
- self.lengths = lengths
-
- def get_sid(self, sid):
- sid = torch.LongTensor([int(sid)])
- return sid
-
- def get_audio_text_pair(self, audiopath_and_text):
- # separate filename and text
- file = audiopath_and_text[0]
- phone = audiopath_and_text[1]
- dv = audiopath_and_text[2]
-
- phone = self.get_labels(phone)
- spec, wav = self.get_audio(file)
- dv = self.get_sid(dv)
-
- len_phone = phone.size()[0]
- len_spec = spec.size()[-1]
- if len_phone != len_spec:
- len_min = min(len_phone, len_spec)
- len_wav = len_min * self.hop_length
- spec = spec[:, :len_min]
- wav = wav[:, :len_wav]
- phone = phone[:len_min, :]
- return (spec, wav, phone, dv)
-
- def get_labels(self, phone):
- phone = np.load(phone)
- phone = np.repeat(phone, 2, axis=0)
- n_num = min(phone.shape[0], 900) # DistributedBucketSampler
- phone = phone[:n_num, :]
- phone = torch.FloatTensor(phone)
- return phone
-
- def get_audio(self, filename):
- audio, sampling_rate = load_wav_to_torch(filename)
- if sampling_rate != self.sampling_rate:
- raise ValueError(
- "{} SR doesn't match target {} SR".format(
- sampling_rate, self.sampling_rate
- )
- )
- audio_norm = audio
- # audio_norm = audio / self.max_wav_value
- # audio_norm = audio / np.abs(audio).max()
-
- audio_norm = audio_norm.unsqueeze(0)
- spec_filename = filename.replace(".wav", ".spec.pt")
- if os.path.exists(spec_filename):
- try:
- spec = torch.load(spec_filename)
- except Exception:
- print(spec_filename, traceback.format_exc())
- spec = spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
- else:
- spec = spectrogram_torch(
- audio_norm,
- self.filter_length,
- self.sampling_rate,
- self.hop_length,
- self.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_filename, _use_new_zipfile_serialization=False)
- return spec, audio_norm
-
- def __getitem__(self, index):
- return self.get_audio_text_pair(self.audiopaths_and_text[index])
-
- def __len__(self):
- return len(self.audiopaths_and_text)
-
-
-class TextAudioCollate:
- """Zero-pads model inputs and targets"""
-
- def __init__(self, return_ids=False):
- self.return_ids = return_ids
-
- def __call__(self, batch):
- """Collate's training batch from normalized text and aduio
- PARAMS
- ------
- batch: [text_normalized, spec_normalized, wav_normalized]
- """
- # Right zero-pad all one-hot text sequences to max input length
- _, ids_sorted_decreasing = torch.sort(
- torch.LongTensor([x[0].size(1) for x in batch]), dim=0, descending=True
- )
-
- max_spec_len = max([x[0].size(1) for x in batch])
- max_wave_len = max([x[1].size(1) for x in batch])
- spec_lengths = torch.LongTensor(len(batch))
- wave_lengths = torch.LongTensor(len(batch))
- spec_padded = torch.FloatTensor(len(batch), batch[0][0].size(0), max_spec_len)
- wave_padded = torch.FloatTensor(len(batch), 1, max_wave_len)
- spec_padded.zero_()
- wave_padded.zero_()
-
- max_phone_len = max([x[2].size(0) for x in batch])
- phone_lengths = torch.LongTensor(len(batch))
- phone_padded = torch.FloatTensor(
- len(batch), max_phone_len, batch[0][2].shape[1]
- )
- phone_padded.zero_()
- sid = torch.LongTensor(len(batch))
-
- for i in range(len(ids_sorted_decreasing)):
- row = batch[ids_sorted_decreasing[i]]
-
- spec = row[0]
- spec_padded[i, :, : spec.size(1)] = spec
- spec_lengths[i] = spec.size(1)
-
- wave = row[1]
- wave_padded[i, :, : wave.size(1)] = wave
- wave_lengths[i] = wave.size(1)
-
- phone = row[2]
- phone_padded[i, : phone.size(0), :] = phone
- phone_lengths[i] = phone.size(0)
-
- sid[i] = row[3]
-
- return (
- phone_padded,
- phone_lengths,
- spec_padded,
- spec_lengths,
- wave_padded,
- wave_lengths,
- sid,
- )
-
-
-class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
- """
- Maintain similar input lengths in a batch.
- Length groups are specified by boundaries.
- Ex) boundaries = [b1, b2, b3] -> any batch is included in either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
- It removes samples which are not included in the boundaries.
- Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 is discarded.
- """
-
- def __init__(
- self,
- dataset,
- batch_size,
- boundaries,
- num_replicas=None,
- rank=None,
- shuffle=True,
- ):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- self.lengths = dataset.lengths
- self.batch_size = batch_size
- self.boundaries = boundaries
-
- self.buckets, self.num_samples_per_bucket = self._create_buckets()
- self.total_size = sum(self.num_samples_per_bucket)
- self.num_samples = self.total_size // self.num_replicas
-
- def _create_buckets(self):
- buckets = [[] for _ in range(len(self.boundaries) - 1)]
- for i in range(len(self.lengths)):
- length = self.lengths[i]
- idx_bucket = self._bisect(length)
- if idx_bucket != -1:
- buckets[idx_bucket].append(i)
-
- for i in range(len(buckets) - 1, -1, -1):  # drop empty buckets, iterating backwards so removals keep indices valid
- if len(buckets[i]) == 0:
- buckets.pop(i)
- self.boundaries.pop(i + 1)
-
- num_samples_per_bucket = []
- for i in range(len(buckets)):
- len_bucket = len(buckets[i])
- total_batch_size = self.num_replicas * self.batch_size
- rem = (
- total_batch_size - (len_bucket % total_batch_size)
- ) % total_batch_size
- num_samples_per_bucket.append(len_bucket + rem)
- return buckets, num_samples_per_bucket
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch)
-
- indices = []
- if self.shuffle:
- for bucket in self.buckets:
- indices.append(torch.randperm(len(bucket), generator=g).tolist())
- else:
- for bucket in self.buckets:
- indices.append(list(range(len(bucket))))
-
- batches = []
- for i in range(len(self.buckets)):
- bucket = self.buckets[i]
- len_bucket = len(bucket)
- ids_bucket = indices[i]
- num_samples_bucket = self.num_samples_per_bucket[i]
-
- # add extra samples to make it evenly divisible
- rem = num_samples_bucket - len_bucket
- ids_bucket = (
- ids_bucket
- + ids_bucket * (rem // len_bucket)
- + ids_bucket[: (rem % len_bucket)]
- )
-
- # subsample
- ids_bucket = ids_bucket[self.rank :: self.num_replicas]
-
- # batching
- for j in range(len(ids_bucket) // self.batch_size):
- batch = [
- bucket[idx]
- for idx in ids_bucket[
- j * self.batch_size : (j + 1) * self.batch_size
- ]
- ]
- batches.append(batch)
-
- if self.shuffle:
- batch_ids = torch.randperm(len(batches), generator=g).tolist()
- batches = [batches[i] for i in batch_ids]
- self.batches = batches
-
- assert len(self.batches) * self.batch_size == self.num_samples
- return iter(self.batches)
-
- def _bisect(self, x, lo=0, hi=None):
- if hi is None:
- hi = len(self.boundaries) - 1
-
- if hi > lo:
- mid = (hi + lo) // 2
- if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
- return mid
- elif x <= self.boundaries[mid]:
- return self._bisect(x, lo, mid)
- else:
- return self._bisect(x, mid + 1, hi)
- else:
- return -1
-
- def __len__(self):
- return self.num_samples // self.batch_size
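
For context, a minimal wiring sketch (not part of the deleted file; the module path, hyperparameter values, and bucket boundaries are illustrative assumptions): the dataset, collate, and bucket-sampler classes removed above are normally combined into a `DataLoader` roughly like this.

```python
from torch.utils.data import DataLoader

# Assumed import path; in the deleted Space these classes live together in data_utils.py.
from data_utils import TextAudioLoader, TextAudioCollate, DistributedBucketSampler


class HParams:
    # Illustrative values only; the real values come from the training config.
    max_wav_value = 32768.0
    sampling_rate = 40000
    filter_length = 2048
    hop_length = 400
    win_length = 2048


def build_train_loader(filelist_path, batch_size=4, num_replicas=1, rank=0):
    dataset = TextAudioLoader(filelist_path, HParams())
    # Buckets group samples of similar spectrogram length to cut padding waste.
    sampler = DistributedBucketSampler(
        dataset,
        batch_size,
        boundaries=[100, 200, 300, 400, 500, 600, 700, 800, 900],
        num_replicas=num_replicas,
        rank=rank,
        shuffle=True,
    )
    return DataLoader(
        dataset,
        num_workers=2,
        collate_fn=TextAudioCollate(),
        batch_sampler=sampler,  # yields one bucketed batch of indices per step
        pin_memory=True,
    )
```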
diff --git a/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md b/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md
deleted file mode 100644
index d2331b4c60b9fb27f06953273355dcf53b8d4321..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/AutoGPT/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Code of Conduct for auto-gpt
-
-## 1. Purpose
-
-The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
-
-## 2. Scope
-
-This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
-
-## 3. Our Standards
-
-We encourage the following behavior:
-
-* Being respectful and considerate to others
-* Actively seeking diverse perspectives
-* Providing constructive feedback and assistance
-* Demonstrating empathy and understanding
-
-We discourage the following behavior:
-
-* Harassment or discrimination of any kind
-* Disrespectful, offensive, or inappropriate language or content
-* Personal attacks or insults
-* Unwarranted criticism or negativity
-
-## 4. Reporting and Enforcement
-
-If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
-
-Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
-
-## 5. Acknowledgements
-
-This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
-
-## 6. Contact
-
-If you have any questions or concerns, please contact the project maintainers.
-
diff --git a/spaces/KyanChen/FunSR/models/rdn.py b/spaces/KyanChen/FunSR/models/rdn.py
deleted file mode 100644
index 39a95054857d485edf08310fed1ac665a2db196a..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/FunSR/models/rdn.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Residual Dense Network for Image Super-Resolution
-# https://arxiv.org/abs/1802.08797
-# modified from: https://github.com/thstkdgus35/EDSR-PyTorch
-
-from argparse import Namespace
-
-import torch
-import torch.nn as nn
-
-from models import register
-
-
-class RDB_Conv(nn.Module):
- def __init__(self, inChannels, growRate, kSize=3):
- super(RDB_Conv, self).__init__()
- Cin = inChannels
- G = growRate
- self.conv = nn.Sequential(*[
- nn.Conv2d(Cin, G, kSize, padding=(kSize-1)//2, stride=1),
- nn.ReLU()
- ])
-
- def forward(self, x):
- out = self.conv(x)
- return torch.cat((x, out), 1)
-
-class RDB(nn.Module):
- def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
- super(RDB, self).__init__()
- G0 = growRate0
- G = growRate
- C = nConvLayers
-
- convs = []
- for c in range(C):
- convs.append(RDB_Conv(G0 + c*G, G))
- self.convs = nn.Sequential(*convs)
-
- # Local Feature Fusion
- self.LFF = nn.Conv2d(G0 + C*G, G0, 1, padding=0, stride=1)
-
- def forward(self, x):
- return self.LFF(self.convs(x)) + x
-
-class RDN(nn.Module):
- def __init__(self, args):
- super(RDN, self).__init__()
- self.args = args
- r = args.scale[0]
- G0 = args.G0
- kSize = args.RDNkSize
-
- # number of RDB blocks, conv layers, out channels
- self.D, C, G = {
- 'A': (20, 6, 32),
- 'B': (16, 8, 64),
- }[args.RDNconfig]
-
- # Shallow feature extraction net
- self.SFENet1 = nn.Conv2d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
- self.SFENet2 = nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
-
- # Residual dense blocks and dense feature fusion
- self.RDBs = nn.ModuleList()
- for i in range(self.D):
- self.RDBs.append(
- RDB(growRate0 = G0, growRate = G, nConvLayers = C)
- )
-
- # Global Feature Fusion
- self.GFF = nn.Sequential(*[
- nn.Conv2d(self.D * G0, G0, 1, padding=0, stride=1),
- nn.Conv2d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
- ])
-
- if args.no_upsampling:
- self.out_dim = G0
- else:
- self.out_dim = args.n_colors
- # Up-sampling net
- if r == 2 or r == 3:
- self.UPNet = nn.Sequential(*[
- nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
- nn.PixelShuffle(r),
- nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
- ])
- elif r == 4:
- self.UPNet = nn.Sequential(*[
- nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
- nn.PixelShuffle(2),
- nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
- nn.PixelShuffle(2),
- nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
- ])
- else:
- raise ValueError("scale must be 2 or 3 or 4.")
-
- def forward(self, x):
- f__1 = self.SFENet1(x)
- x = self.SFENet2(f__1)
-
- RDBs_out = []
- for i in range(self.D):
- x = self.RDBs[i](x)
- RDBs_out.append(x)
-
- x = self.GFF(torch.cat(RDBs_out,1))
- x += f__1
-
- if self.args.no_upsampling:
- return x
- else:
- return self.UPNet(x)
-
-
-@register('rdn')
-def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
- scale=2, no_upsampling=False):
- args = Namespace()
- args.G0 = G0
- args.RDNkSize = RDNkSize
- args.RDNconfig = RDNconfig
-
- args.scale = [scale]
- args.no_upsampling = no_upsampling
-
- args.n_colors = 3
- return RDN(args)
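
A quick sanity-check sketch (assumed, not part of the deleted file; the import path follows the repository layout shown in the diff header): build the registered model through `make_rdn` and run a dummy forward pass.

```python
import torch

from models.rdn import make_rdn  # assumes the Space's models package (with its register helper) is importable

model = make_rdn(G0=64, RDNkSize=3, RDNconfig='B', scale=2, no_upsampling=False)
x = torch.randn(1, 3, 48, 48)  # N x C x H x W low-resolution input
with torch.no_grad():
    y = model(x)
print(y.shape)  # torch.Size([1, 3, 96, 96]) for scale=2
```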
diff --git a/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py b/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py
deleted file mode 100644
index 09aabea151d06a6fc91eda4b3e35139ab43cff7c..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/configs/rsprompter/samseg_mask2former_whu_config.py
+++ /dev/null
@@ -1,349 +0,0 @@
-custom_imports = dict(imports=['mmseg.datasets', 'mmseg.models'], allow_failed_imports=False)
-
-sub_model_train = [
- 'panoptic_head',
- 'sam_neck',
- 'data_preprocessor'
-]
-
-sub_model_optim = {
- 'sam_neck': {'lr_mult': 1},
- 'panoptic_head': {'lr_mult': 1},
-}
-
-max_epochs = 400
-
-optimizer = dict(
- type='AdamW',
- sub_model=sub_model_optim,
- lr=0.0005,
- weight_decay=1e-3
-)
-
-param_scheduler = [
- # warm up learning rate scheduler
- dict(
- type='LinearLR',
- start_factor=5e-4,
- by_epoch=True,
- begin=0,
- end=1,
- # update by iter
- convert_to_iter_based=True),
- # main learning rate scheduler
- dict(
- type='CosineAnnealingLR',
- T_max=max_epochs,
- by_epoch=True,
- begin=1,
- end=max_epochs,
- ),
-]
-
-param_scheduler_callback = dict(
- type='ParamSchedulerHook'
-)
-
-evaluator_ = dict(
- type='CocoPLMetric',
- metric=['bbox', 'segm'],
- proposal_nums=[1, 10, 100]
-)
-
-evaluator = dict(
- # train_evaluator=evaluator_,
- val_evaluator=evaluator_,
-)
-
-
-image_size = (1024, 1024)
-
-data_preprocessor = dict(
- type='mmdet.DetDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True,
- pad_size_divisor=32,
- pad_mask=True,
- mask_pad_value=0,
-)
-
-num_things_classes = 1
-num_stuff_classes = 0
-num_classes = num_things_classes + num_stuff_classes
-
-num_queries = 100
-model_cfg = dict(
- type='SegSAMPLer',
- hyperparameters=dict(
- optimizer=optimizer,
- param_scheduler=param_scheduler,
- evaluator=evaluator,
- ),
- need_train_names=sub_model_train,
- data_preprocessor=data_preprocessor,
- backbone=dict(
- type='vit_h',
- checkpoint='pretrain/sam/sam_vit_h_4b8939.pth',
- # type='vit_b',
- # checkpoint='pretrain/sam/sam_vit_b_01ec64.pth',
- ),
- sam_neck=dict(
- type='SAMAggregatorNeck',
- in_channels=[1280] * 32,
- # in_channels=[768] * 12,
- inner_channels=32,
- selected_channels=range(4, 32, 2),
- # selected_channels=range(4, 12, 2),
- out_channels=256,
- up_sample_scale=4,
- ),
- panoptic_head=dict(
- type='mmdet.Mask2FormerHead',
- in_channels=[256, 256, 256], # pass to pixel_decoder inside
- strides=[8, 16, 32],
- feat_channels=256,
- out_channels=256,
- num_things_classes=num_things_classes,
- num_stuff_classes=num_stuff_classes,
- num_queries=num_queries,
- num_transformer_feat_level=3,
- pixel_decoder=dict(
- type='mmdet.MSDeformAttnPixelDecoder',
- num_outs=3,
- norm_cfg=dict(type='GN', num_groups=32),
- act_cfg=dict(type='ReLU'),
- encoder=dict( # DeformableDetrTransformerEncoder
- # num_layers=6,
- num_layers=2,
- layer_cfg=dict( # DeformableDetrTransformerEncoderLayer
- self_attn_cfg=dict( # MultiScaleDeformableAttention
- embed_dims=256,
- num_heads=8,
- num_levels=3,
- num_points=4,
- dropout=0.1,
- batch_first=True),
- ffn_cfg=dict(
- embed_dims=256,
- feedforward_channels=1024,
- num_fcs=2,
- ffn_drop=0.1,
- act_cfg=dict(type='ReLU', inplace=True)))),
- positional_encoding=dict(num_feats=128, normalize=True)),
- enforce_decoder_input_project=False,
- positional_encoding=dict(num_feats=128, normalize=True),
- transformer_decoder=dict( # Mask2FormerTransformerDecoder
- return_intermediate=True,
- # num_layers=9,
- num_layers=3,
- layer_cfg=dict( # Mask2FormerTransformerDecoderLayer
- self_attn_cfg=dict( # MultiheadAttention
- embed_dims=256,
- num_heads=8,
- dropout=0.1,
- batch_first=True),
- cross_attn_cfg=dict( # MultiheadAttention
- embed_dims=256,
- num_heads=8,
- dropout=0.1,
- batch_first=True),
- ffn_cfg=dict(
- embed_dims=256,
- feedforward_channels=2048,
- num_fcs=2,
- ffn_drop=0.1,
- act_cfg=dict(type='ReLU', inplace=True))),
- init_cfg=None),
- loss_cls=dict(
- type='mmdet.CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=2.0,
- reduction='mean',
- class_weight=[1.0] * num_classes + [0.1]),
- loss_mask=dict(
- type='mmdet.CrossEntropyLoss',
- use_sigmoid=True,
- reduction='mean',
- loss_weight=5.0),
- loss_dice=dict(
- type='mmdet.DiceLoss',
- use_sigmoid=True,
- activate=True,
- reduction='mean',
- naive_dice=True,
- eps=1.0,
- loss_weight=5.0)),
- panoptic_fusion_head=dict(
- type='mmdet.MaskFormerFusionHead',
- num_things_classes=num_things_classes,
- num_stuff_classes=num_stuff_classes,
- loss_panoptic=None,
- init_cfg=None),
- train_cfg=dict(
- num_points=12544,
- oversample_ratio=3.0,
- importance_sample_ratio=0.75,
- assigner=dict(
- type='mmdet.HungarianAssigner',
- match_costs=[
- dict(type='mmdet.ClassificationCost', weight=2.0),
- dict(
- type='mmdet.CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
- dict(type='mmdet.DiceCost', weight=5.0, pred_act=True, eps=1.0)
- ]),
- sampler=dict(type='mmdet.MaskPseudoSampler')),
- test_cfg=dict(
- panoptic_on=False,
- # For now, the dataset does not support
- # evaluating semantic segmentation metric.
- semantic_on=False,
- instance_on=True,
- # max_per_image is for instance segmentation.
- max_per_image=num_queries,
- iou_thr=0.8,
- # In Mask2Former's panoptic postprocessing,
- # it will filter mask area where score is less than 0.5 .
- filter_low_score=True),
- init_cfg=None)
-
-task_name = 'whu_ins'
-exp_name = 'E20230531_2'
-logger = dict(
- type='WandbLogger',
- project=task_name,
- group='samcls-mask2former',
- name=exp_name
-)
-# logger = None
-
-callbacks = [
- param_scheduler_callback,
- dict(
- type='ModelCheckpoint',
- dirpath=f'results/{task_name}/{exp_name}/checkpoints',
- save_last=True,
- mode='max',
- monitor='valsegm_map_0',
- save_top_k=2,
- filename='epoch_{epoch}-map_{valsegm_map_0:.4f}'
- ),
- dict(
- type='LearningRateMonitor',
- logging_interval='step'
- )
-]
-
-
-trainer_cfg = dict(
- compiled_model=False,
- accelerator="auto",
- strategy="auto",
- # strategy="ddp",
- # strategy='ddp_find_unused_parameters_true',
- # precision='32',
- # precision='16-mixed',
- devices=8,
- default_root_dir=f'results/{task_name}/{exp_name}',
- # default_root_dir='results/tmp',
- max_epochs=max_epochs,
- logger=logger,
- callbacks=callbacks,
- log_every_n_steps=20,
- check_val_every_n_epoch=5,
- benchmark=True,
- # sync_batchnorm=True,
- # fast_dev_run=True,
-
- # limit_train_batches=1,
- # limit_val_batches=0,
- # limit_test_batches=None,
- # limit_predict_batches=None,
- # overfit_batches=0.0,
-
- # val_check_interval=None,
- # num_sanity_val_steps=0,
- # enable_checkpointing=None,
- # enable_progress_bar=None,
- # enable_model_summary=None,
- # accumulate_grad_batches=32,
- # gradient_clip_val=15,
- # gradient_clip_algorithm='norm',
- # deterministic=None,
- # inference_mode: bool=True,
- use_distributed_sampler=True,
- # profiler="simple",
- # detect_anomaly=False,
- # barebones=False,
- # plugins=None,
- # reload_dataloaders_every_n_epochs=0,
-)
-
-
-backend_args = None
-train_pipeline = [
- dict(type='mmdet.LoadImageFromFile'),
- dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='mmdet.Resize', scale=image_size),
- dict(type='mmdet.RandomFlip', prob=0.5),
- dict(type='mmdet.PackDetInputs')
-]
-
-test_pipeline = [
- dict(type='mmdet.LoadImageFromFile', backend_args=backend_args),
- dict(type='mmdet.Resize', scale=image_size),
- # If you don't have a gt annotation, delete the pipeline
- dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True),
- dict(
- type='mmdet.PackDetInputs',
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
- 'scale_factor'))
-]
-
-
-train_batch_size_per_gpu = 6
-train_num_workers = 4
-test_batch_size_per_gpu = 6
-test_num_workers = 4
-persistent_workers = True
-
-data_parent = '/mnt/search01/dataset/cky_data/WHU'
-train_data_prefix = 'train/'
-val_data_prefix = 'test/'
-dataset_type = 'WHUInsSegDataset'
-
-val_loader = dict(
- batch_size=test_batch_size_per_gpu,
- num_workers=test_num_workers,
- persistent_workers=persistent_workers,
- pin_memory=True,
- dataset=dict(
- type=dataset_type,
- data_root=data_parent,
- ann_file='annotations/WHU_building_test.json',
- data_prefix=dict(img_path=val_data_prefix + '/image', seg_path=val_data_prefix + '/label'),
- test_mode=True,
- filter_cfg=dict(filter_empty_gt=True, min_size=32),
- pipeline=test_pipeline,
- backend_args=backend_args))
-
-datamodule_cfg = dict(
- type='PLDataModule',
- train_loader=dict(
- batch_size=train_batch_size_per_gpu,
- num_workers=train_num_workers,
- persistent_workers=persistent_workers,
- pin_memory=True,
- dataset=dict(
- type=dataset_type,
- data_root=data_parent,
- ann_file='annotations/WHU_building_train.json',
- data_prefix=dict(img_path=train_data_prefix + '/image', seg_path=train_data_prefix + '/label'),
- filter_cfg=dict(filter_empty_gt=True, min_size=32),
- pipeline=train_pipeline,
- backend_args=backend_args)
- ),
- val_loader=val_loader,
- # test_loader=val_loader
- predict_loader=val_loader
-)
\ No newline at end of file
diff --git a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py b/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py
deleted file mode 100644
index 6ca2f9b3ffb7c780ab25cc3704b67589763259e0..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/datasets/samplers/class_aware_sampler.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-from typing import Dict, Iterator, Optional, Union
-
-import numpy as np
-import torch
-from mmengine.dataset import BaseDataset
-from mmengine.dist import get_dist_info, sync_random_seed
-from torch.utils.data import Sampler
-
-from mmdet.registry import DATA_SAMPLERS
-
-
-@DATA_SAMPLERS.register_module()
-class ClassAwareSampler(Sampler):
- r"""Sampler that restricts data loading to the label of the dataset.
-
- A class-aware sampling strategy to effectively tackle the
- non-uniform class distribution. The length of the training data is
- consistent with source data. Simple improvements based on `Relay
- Backpropagation for Effective Learning of Deep Convolutional
- Neural Networks `_
-
- The implementation logic is referred to
- https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py
-
- Args:
- dataset: Dataset used for sampling.
- seed (int, optional): random seed used to shuffle the sampler.
- This number should be identical across all
- processes in the distributed group. Defaults to None.
- num_sample_class (int): The number of samples taken from each
- per-label list. Defaults to 1.
- """
-
- def __init__(self,
- dataset: BaseDataset,
- seed: Optional[int] = None,
- num_sample_class: int = 1) -> None:
- rank, world_size = get_dist_info()
- self.rank = rank
- self.world_size = world_size
-
- self.dataset = dataset
- self.epoch = 0
- # Must be the same across all workers. If None, will use a
- # random seed shared among workers
- # (require synchronization among all workers)
- if seed is None:
- seed = sync_random_seed()
- self.seed = seed
-
- # The number of samples taken from each per-label list
- assert num_sample_class > 0 and isinstance(num_sample_class, int)
- self.num_sample_class = num_sample_class
- # Get per-label image list from dataset
- self.cat_dict = self.get_cat2imgs()
-
- self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / world_size))
- self.total_size = self.num_samples * self.world_size
-
- # get number of images containing each category
- self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]
- # filter labels without images
- self.valid_cat_inds = [
- i for i, length in enumerate(self.num_cat_imgs) if length != 0
- ]
- self.num_classes = len(self.valid_cat_inds)
-
- def get_cat2imgs(self) -> Dict[int, list]:
- """Get a dict with class as key and img_ids as values.
-
- Returns:
- dict[int, list]: A dict of per-label image list,
- the item of the dict indicates a label index,
- corresponds to the image index that contains the label.
- """
- classes = self.dataset.metainfo.get('classes', None)
- if classes is None:
- raise ValueError('dataset metainfo must contain `classes`')
- # sort the label index
- cat2imgs = {i: [] for i in range(len(classes))}
- for i in range(len(self.dataset)):
- cat_ids = set(self.dataset.get_cat_ids(i))
- for cat in cat_ids:
- cat2imgs[cat].append(i)
- return cat2imgs
-
- def __iter__(self) -> Iterator[int]:
- # deterministically shuffle based on epoch
- g = torch.Generator()
- g.manual_seed(self.epoch + self.seed)
-
- # initialize label list
- label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)
- # initialize each per-label image list
- data_iter_dict = dict()
- for i in self.valid_cat_inds:
- data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)
-
- def gen_cat_img_inds(cls_list, data_dict, num_sample_cls):
- """Traverse the categories and extract `num_sample_cls` image
- indexes of the corresponding categories one by one."""
- id_indices = []
- for _ in range(len(cls_list)):
- cls_idx = next(cls_list)
- for _ in range(num_sample_cls):
- id = next(data_dict[cls_idx])
- id_indices.append(id)
- return id_indices
-
- # deterministically shuffle based on epoch
- num_bins = int(
- math.ceil(self.total_size * 1.0 / self.num_classes /
- self.num_sample_class))
- indices = []
- for i in range(num_bins):
- indices += gen_cat_img_inds(label_iter_list, data_iter_dict,
- self.num_sample_class)
-
- # fix extra samples to make it evenly divisible
- if len(indices) >= self.total_size:
- indices = indices[:self.total_size]
- else:
- indices += indices[:(self.total_size - len(indices))]
- assert len(indices) == self.total_size
-
- # subsample
- offset = self.num_samples * self.rank
- indices = indices[offset:offset + self.num_samples]
- assert len(indices) == self.num_samples
-
- return iter(indices)
-
- def __len__(self) -> int:
- """The number of samples in this rank."""
- return self.num_samples
-
- def set_epoch(self, epoch: int) -> None:
- """Sets the epoch for this sampler.
-
- When :attr:`shuffle=True`, this ensures all replicas use a different
- random ordering for each epoch. Otherwise, the next iteration of this
- sampler will yield the same ordering.
-
- Args:
- epoch (int): Epoch number.
- """
- self.epoch = epoch
-
-
-class RandomCycleIter:
- """Shuffle the list and do it again after the list have traversed.
-
- The implementation logic is referred to
- https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py
-
- Example:
- >>> label_list = [0, 1, 2, 4, 5]
- >>> g = torch.Generator()
- >>> g.manual_seed(0)
- >>> label_iter_list = RandomCycleIter(label_list, generator=g)
- >>> index = next(label_iter_list)
- Args:
- data (list or ndarray): The data that needs to be shuffled.
- generator: An torch.Generator object, which is used in setting the seed
- for generating random numbers.
- """ # noqa: W605
-
- def __init__(self,
- data: Union[list, np.ndarray],
- generator: torch.Generator = None) -> None:
- self.data = data
- self.length = len(data)
- self.index = torch.randperm(self.length, generator=generator).numpy()
- self.i = 0
- self.generator = generator
-
- def __iter__(self) -> Iterator:
- return self
-
- def __len__(self) -> int:
- return len(self.data)
-
- def __next__(self):
- if self.i == self.length:
- self.index = torch.randperm(
- self.length, generator=self.generator).numpy()
- self.i = 0
- idx = self.data[self.index[self.i]]
- self.i += 1
- return idx
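
For reference, a hedged sketch of how this sampler is typically selected in an MMDetection 3.x dataloader config; the dataset fields below are placeholders, not values from this repository.

```python
# Enable ClassAwareSampler by name in the train dataloader config (mmdet 3.x style).
train_dataloader = dict(
    batch_size=2,
    num_workers=2,
    sampler=dict(type='ClassAwareSampler', num_sample_class=1),
    dataset=dict(
        type='CocoDataset',
        ann_file='annotations/instances_train2017.json',
        data_prefix=dict(img='train2017/'),
    ),
)
```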
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py b/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py
deleted file mode 100644
index f15fb7b70263b0c4018751067771b1365af96f67..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/detectors/yolact.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmdet.registry import MODELS
-from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
-from .single_stage_instance_seg import SingleStageInstanceSegmentor
-
-
-@MODELS.register_module()
-class YOLACT(SingleStageInstanceSegmentor):
- """Implementation of `YOLACT `_"""
-
- def __init__(self,
- backbone: ConfigType,
- neck: ConfigType,
- bbox_head: ConfigType,
- mask_head: ConfigType,
- train_cfg: OptConfigType = None,
- test_cfg: OptConfigType = None,
- data_preprocessor: OptConfigType = None,
- init_cfg: OptMultiConfig = None) -> None:
- super().__init__(
- backbone=backbone,
- neck=neck,
- bbox_head=bbox_head,
- mask_head=mask_head,
- train_cfg=train_cfg,
- test_cfg=test_cfg,
- data_preprocessor=data_preprocessor,
- init_cfg=init_cfg)
diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py b/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py
deleted file mode 100644
index 7bff6bd18675a3a0996dcd09081a15728311657f..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpretrain/apis/base.py
+++ /dev/null
@@ -1,390 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from abc import abstractmethod
-from math import ceil
-from typing import Callable, Iterable, List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-from mmengine.config import Config
-from mmengine.dataset import default_collate
-from mmengine.fileio import get_file_backend
-from mmengine.model import BaseModel
-from mmengine.runner import load_checkpoint
-
-from mmpretrain.structures import DataSample
-from mmpretrain.utils import track
-from .model import get_model, list_models
-
-ModelType = Union[BaseModel, str, Config]
-InputType = Union[str, np.ndarray, list]
-
-
-class BaseInferencer:
- """Base inferencer for various tasks.
-
- The BaseInferencer provides the standard workflow for inference as follows:
-
- 1. Preprocess the input data by :meth:`preprocess`.
- 2. Forward the data to the model by :meth:`forward`. ``BaseInferencer``
- assumes the model inherits from :class:`mmengine.models.BaseModel` and
- will call `model.test_step` in :meth:`forward` by default.
- 3. Visualize the results by :meth:`visualize`.
- 4. Postprocess and return the results by :meth:`postprocess`.
-
- When we call the subclasses inherited from BaseInferencer (not overriding
- ``__call__``), the workflow will be executed in order.
-
- All subclasses of BaseInferencer could define the following class
- attributes for customization:
-
- - ``preprocess_kwargs``: The keys of the kwargs that will be passed to
- :meth:`preprocess`.
- - ``forward_kwargs``: The keys of the kwargs that will be passed to
- :meth:`forward`
- - ``visualize_kwargs``: The keys of the kwargs that will be passed to
- :meth:`visualize`
- - ``postprocess_kwargs``: The keys of the kwargs that will be passed to
- :meth:`postprocess`
-
- All attributes mentioned above should be a ``set`` of keys (strings),
- and each key should not be duplicated. Actually, :meth:`__call__` will
- dispatch all the arguments to the corresponding methods according to the
- ``xxx_kwargs`` mentioned above.
-
- Subclasses inherited from ``BaseInferencer`` should implement
- :meth:`_init_pipeline`, :meth:`visualize` and :meth:`postprocess`:
-
- - _init_pipeline: Return a callable object to preprocess the input data.
- - visualize: Visualize the results returned by :meth:`forward`.
- - postprocess: Postprocess the results returned by :meth:`forward` and
- :meth:`visualize`.
-
- Args:
- model (BaseModel | str | Config): A model name or a path to the config
- file, or a :obj:`BaseModel` object. The model name can be found
- by ``cls.list_models()`` and you can also query it in
- :doc:`/modelzoo_statistics`.
- pretrained (str, optional): Path to the checkpoint. If None, it will
- try to find a pre-defined weight from the model you specified
- (only work if the ``model`` is a model name). Defaults to None.
- device (str | torch.device | None): Transfer the model to the target
- device. Defaults to None.
- device_map (str | dict | None): A map that specifies where each
- submodule should go. It doesn't need to be refined down to each
- parameter/buffer name; once a given module name is included, every
- submodule of it will be sent to the same device. You can use
- `device_map="auto"` to automatically generate the device map.
- Defaults to None.
- offload_folder (str | None): If the `device_map` contains any value
- `"disk"`, the folder where we will offload weights.
- **kwargs: Other keyword arguments to initialize the model (only work if
- the ``model`` is a model name).
- """
-
- preprocess_kwargs: set = set()
- forward_kwargs: set = set()
- visualize_kwargs: set = set()
- postprocess_kwargs: set = set()
-
- def __init__(self,
- model: ModelType,
- pretrained: Union[bool, str] = True,
- device: Union[str, torch.device, None] = None,
- device_map=None,
- offload_folder=None,
- **kwargs) -> None:
-
- if isinstance(model, BaseModel):
- if isinstance(pretrained, str):
- load_checkpoint(model, pretrained, map_location='cpu')
- if device_map is not None:
- from .utils import dispatch_model
- model = dispatch_model(
- model,
- device_map=device_map,
- offload_folder=offload_folder)
- elif device is not None:
- model.to(device)
- else:
- model = get_model(
- model,
- pretrained,
- device=device,
- device_map=device_map,
- offload_folder=offload_folder,
- **kwargs)
-
- model.eval()
-
- self.config = model._config
- self.model = model
- self.pipeline = self._init_pipeline(self.config)
- self.visualizer = None
-
- def __call__(
- self,
- inputs,
- return_datasamples: bool = False,
- batch_size: int = 1,
- **kwargs,
- ) -> dict:
- """Call the inferencer.
-
- Args:
- inputs (InputsType): Inputs for the inferencer.
- return_datasamples (bool): Whether to return results as
- :obj:`BaseDataElement`. Defaults to False.
- batch_size (int): Batch size. Defaults to 1.
- **kwargs: Key words arguments passed to :meth:`preprocess`,
- :meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
- Each key in kwargs should be in the corresponding set of
- ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
- and ``postprocess_kwargs``.
-
- Returns:
- dict: Inference and visualization results.
- """
- (
- preprocess_kwargs,
- forward_kwargs,
- visualize_kwargs,
- postprocess_kwargs,
- ) = self._dispatch_kwargs(**kwargs)
-
- ori_inputs = self._inputs_to_list(inputs)
- inputs = self.preprocess(
- ori_inputs, batch_size=batch_size, **preprocess_kwargs)
- preds = []
- for data in track(
- inputs, 'Inference', total=ceil(len(ori_inputs) / batch_size)):
- preds.extend(self.forward(data, **forward_kwargs))
- visualization = self.visualize(ori_inputs, preds, **visualize_kwargs)
- results = self.postprocess(preds, visualization, return_datasamples,
- **postprocess_kwargs)
- return results
-
- def _inputs_to_list(self, inputs: InputType) -> list:
- """Preprocess the inputs to a list.
-
- Cast the input data to a list of data.
-
- - list or tuple: return inputs
- - str:
- - Directory path: return all files in the directory
- - other cases: return a list containing the string. The string
- could be a path to file, a url or other types of string according
- to the task.
- - other: return a list with one item.
-
- Args:
- inputs (str | array | list): Inputs for the inferencer.
-
- Returns:
- list: List of input for the :meth:`preprocess`.
- """
- if isinstance(inputs, str):
- backend = get_file_backend(inputs)
- if hasattr(backend, 'isdir') and backend.isdir(inputs):
- # Backends like HttpsBackend do not implement `isdir`, so only
- # those backends that implement `isdir` could accept the inputs
- # as a directory
- file_list = backend.list_dir_or_file(inputs, list_dir=False)
- inputs = [
- backend.join_path(inputs, file) for file in file_list
- ]
-
- if not isinstance(inputs, (list, tuple)):
- inputs = [inputs]
-
- return list(inputs)
-
- def preprocess(self, inputs: InputType, batch_size: int = 1, **kwargs):
- """Process the inputs into a model-feedable format.
-
- Customize your preprocess by overriding this method. Preprocess should
- return an iterable object, of which each item will be used as the
- input of ``model.test_step``.
-
- ``BaseInferencer.preprocess`` will return an iterable chunked data,
- which will be used in __call__ like this:
-
- .. code-block:: python
-
- def __call__(self, inputs, batch_size=1, **kwargs):
- chunked_data = self.preprocess(inputs, batch_size, **kwargs)
- for batch in chunked_data:
- preds = self.forward(batch, **kwargs)
-
- Args:
- inputs (InputsType): Inputs given by user.
- batch_size (int): batch size. Defaults to 1.
-
- Yields:
- Any: Data processed by the ``pipeline`` and ``default_collate``.
- """
- chunked_data = self._get_chunk_data(
- map(self.pipeline, inputs), batch_size)
- yield from map(default_collate, chunked_data)
-
- @torch.no_grad()
- def forward(self, inputs: Union[dict, tuple], **kwargs):
- """Feed the inputs to the model."""
- return self.model.test_step(inputs)
-
- def visualize(self,
- inputs: list,
- preds: List[DataSample],
- show: bool = False,
- **kwargs) -> List[np.ndarray]:
- """Visualize predictions.
-
- Customize your visualization by overriding this method. visualize
- should return visualization results, which could be np.ndarray or any
- other objects.
-
- Args:
- inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`.
- preds (Any): Predictions of the model.
- show (bool): Whether to display the image in a popup window.
- Defaults to False.
-
- Returns:
- List[np.ndarray]: Visualization results.
- """
- if show:
- raise NotImplementedError(
- f'The `visualize` method of {self.__class__.__name__} '
- 'is not implemented.')
-
- @abstractmethod
- def postprocess(
- self,
- preds: List[DataSample],
- visualization: List[np.ndarray],
- return_datasample=False,
- **kwargs,
- ) -> dict:
- """Process the predictions and visualization results from ``forward``
- and ``visualize``.
-
- This method should be responsible for the following tasks:
-
- 1. Convert datasamples into a json-serializable dict if needed.
- 2. Pack the predictions and visualization results and return them.
- 3. Dump or log the predictions.
-
- Customize your postprocess by overriding this method. Make sure
- ``postprocess`` will return a dict with visualization results and
- inference results.
-
- Args:
- preds (List[Dict]): Predictions of the model.
- visualization (np.ndarray): Visualized predictions.
- return_datasample (bool): Whether to return results as datasamples.
- Defaults to False.
-
- Returns:
- dict: Inference and visualization results with key ``predictions``
- and ``visualization``
-
- - ``visualization (Any)``: Returned by :meth:`visualize`
- - ``predictions`` (dict or DataSample): Returned by
- :meth:`forward` and processed in :meth:`postprocess`.
- If ``return_datasample=False``, it usually should be a
- json-serializable dict containing only basic data elements such
- as strings and numbers.
- """
-
- @abstractmethod
- def _init_pipeline(self, cfg: Config) -> Callable:
- """Initialize the test pipeline.
-
- Return a pipeline to handle various input data, such as ``str``,
- ``np.ndarray``. It is an abstract method in BaseInferencer, and should
- be implemented in subclasses.
-
- The returned pipeline will be used to process a single data.
- It will be used in :meth:`preprocess` like this:
-
- .. code-block:: python
- def preprocess(self, inputs, batch_size, **kwargs):
- ...
- dataset = map(self.pipeline, dataset)
- ...
- """
-
- def _get_chunk_data(self, inputs: Iterable, chunk_size: int):
- """Get batch data from dataset.
-
- Args:
- inputs (Iterable): An iterable dataset.
- chunk_size (int): Equivalent to batch size.
-
- Yields:
- list: batch data.
- """
- inputs_iter = iter(inputs)
- while True:
- try:
- chunk_data = []
- for _ in range(chunk_size):
- processed_data = next(inputs_iter)
- chunk_data.append(processed_data)
- yield chunk_data
- except StopIteration:
- if chunk_data:
- yield chunk_data
- break
-
- def _dispatch_kwargs(self, **kwargs) -> Tuple[dict, dict, dict, dict]:
- """Dispatch kwargs to preprocess(), forward(), visualize() and
- postprocess() according to the actual demands.
-
- Returns:
- Tuple[Dict, Dict, Dict, Dict]: kwargs passed to preprocess,
- forward, visualize and postprocess respectively.
- """
- # Ensure each argument only matches one function
- method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \
- self.visualize_kwargs | self.postprocess_kwargs
-
- union_kwargs = method_kwargs | set(kwargs.keys())
- if union_kwargs != method_kwargs:
- unknown_kwargs = union_kwargs - method_kwargs
- raise ValueError(
- f'unknown argument {unknown_kwargs} for `preprocess`, '
- '`forward`, `visualize` and `postprocess`')
-
- preprocess_kwargs = {}
- forward_kwargs = {}
- visualize_kwargs = {}
- postprocess_kwargs = {}
-
- for key, value in kwargs.items():
- if key in self.preprocess_kwargs:
- preprocess_kwargs[key] = value
- if key in self.forward_kwargs:
- forward_kwargs[key] = value
- if key in self.visualize_kwargs:
- visualize_kwargs[key] = value
- if key in self.postprocess_kwargs:
- postprocess_kwargs[key] = value
-
- return (
- preprocess_kwargs,
- forward_kwargs,
- visualize_kwargs,
- postprocess_kwargs,
- )
-
- @staticmethod
- def list_models(pattern: Optional[str] = None):
- """List models defined in metafile of corresponding packages.
-
- Args:
- pattern (str | None): A wildcard pattern to match model names.
-
- Returns:
- List[str]: a list of model names.
- """
- return list_models(pattern=pattern)
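
Usage sketch for context (hedged; the model name, image path, and result keys are assumptions): concrete subclasses such as `ImageClassificationInferencer` are driven entirely through `__call__`, which chains the preprocess/forward/visualize/postprocess steps described above.

```python
from mmpretrain.apis import ImageClassificationInferencer

inferencer = ImageClassificationInferencer('resnet18_8xb32_in1k', device='cpu')
# __call__ dispatches keyword arguments to preprocess/forward/visualize/postprocess.
result = inferencer('demo.JPEG', batch_size=1)[0]
print(result.get('pred_class'), result.get('pred_score'))
```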
diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py"
deleted file mode 100644
index 336d7cfc85ac159841758123fa057bd20a0bbbec..0000000000000000000000000000000000000000
--- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/chatglm\345\276\256\350\260\203\345\267\245\345\205\267.py"
+++ /dev/null
@@ -1,141 +0,0 @@
-from toolbox import CatchException, update_ui, promote_file_to_downloadzone
-from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-import datetime, json
-
-def fetch_items(list_of_items, batch_size):
- for i in range(0, len(list_of_items), batch_size):
- yield list_of_items[i:i + batch_size]
-
-def string_to_options(arguments):
- import argparse
- import shlex
-
- # Create an argparse.ArgumentParser instance
- parser = argparse.ArgumentParser()
-
- # Add command-line arguments
- parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
- parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
- parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
- parser.add_argument("--batch", type=int, help="System prompt", default=50)
- parser.add_argument("--pre_seq_len", type=int, help="pre_seq_len", default=50)
- parser.add_argument("--learning_rate", type=float, help="learning_rate", default=2e-2)
- parser.add_argument("--num_gpus", type=int, help="num_gpus", default=1)
- parser.add_argument("--json_dataset", type=str, help="json_dataset", default="")
- parser.add_argument("--ptuning_directory", type=str, help="ptuning_directory", default="")
-
-
-
- # Parse the arguments
- args = parser.parse_args(shlex.split(arguments))
-
- return args
-
-@CatchException
-def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
- txt              Text entered by the user in the input box, e.g. a passage to translate, or a path containing files to process
- llm_kwargs       GPT model parameters such as temperature and top_p; usually passed through unchanged
- plugin_kwargs    Parameters for the plugin model
- chatbot          Handle of the chat display box, used to show output to the user
- history          Chat history (prior context)
- system_prompt    Silent system prompt given to GPT
- web_port         Port the software is currently running on
- """
- history = [] # clear the history to avoid input overflow
- chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- args = plugin_kwargs.get("advanced_arg", None)
- if args is None:
- chatbot.append(("没给定指令", "退出"))
- yield from update_ui(chatbot=chatbot, history=history); return
- else:
- arguments = string_to_options(arguments=args)
-
- dat = []
- with open(txt, 'r', encoding='utf8') as f:
- for line in f.readlines():
- json_dat = json.loads(line)
- dat.append(json_dat["content"])
-
- llm_kwargs['llm_model'] = arguments.llm_to_learn
- for batch in fetch_items(dat, arguments.batch):
- res = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=[f"{arguments.prompt_prefix}\n\n{b}" for b in (batch)],
- inputs_show_user_array=[f"Show Nothing" for _ in (batch)],
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[] for _ in (batch)],
- sys_prompt_array=[arguments.system_prompt for _ in (batch)],
- max_workers=10 # maximum parallelism allowed by OpenAI
- )
-
- with open(txt+'.generated.json', 'a+', encoding='utf8') as f:
- for b, r in zip(batch, res[1::2]):
- f.write(json.dumps({"content":b, "summary":r}, ensure_ascii=False)+'\n')
-
- promote_file_to_downloadzone(txt+'.generated.json', rename_file='generated.json', chatbot=chatbot)
- return
-
-
-
-@CatchException
-def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- """
- txt              Text entered by the user in the input box, e.g. a passage to translate, or a path containing files to process
- llm_kwargs       GPT model parameters such as temperature and top_p; usually passed through unchanged
- plugin_kwargs    Parameters for the plugin model
- chatbot          Handle of the chat display box, used to show output to the user
- history          Chat history (prior context)
- system_prompt    Silent system prompt given to GPT
- web_port         Port the software is currently running on
- """
- import subprocess
- history = [] # clear the history to avoid input overflow
- chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- args = plugin_kwargs.get("advanced_arg", None)
- if args is None:
- chatbot.append(("没给定指令", "退出"))
- yield from update_ui(chatbot=chatbot, history=history); return
- else:
- arguments = string_to_options(arguments=args)
-
-
-
- pre_seq_len = arguments.pre_seq_len # 128
- learning_rate = arguments.learning_rate # 2e-2
- num_gpus = arguments.num_gpus # 1
- json_dataset = arguments.json_dataset # 't_code.json'
- ptuning_directory = arguments.ptuning_directory # '/home/hmp/ChatGLM2-6B/ptuning'
-
- command = f"torchrun --standalone --nnodes=1 --nproc-per-node={num_gpus} main.py \
- --do_train \
- --train_file AdvertiseGen/{json_dataset} \
- --validation_file AdvertiseGen/{json_dataset} \
- --preprocessing_num_workers 20 \
- --prompt_column content \
- --response_column summary \
- --overwrite_cache \
- --model_name_or_path THUDM/chatglm2-6b \
- --output_dir output/clothgen-chatglm2-6b-pt-{pre_seq_len}-{learning_rate} \
- --overwrite_output_dir \
- --max_source_length 256 \
- --max_target_length 256 \
- --per_device_train_batch_size 1 \
- --per_device_eval_batch_size 1 \
- --gradient_accumulation_steps 16 \
- --predict_with_generate \
- --max_steps 100 \
- --logging_steps 10 \
- --save_steps 20 \
- --learning_rate {learning_rate} \
- --pre_seq_len {pre_seq_len} \
- --quantization_bit 4"
-
- process = subprocess.Popen(command, shell=True, cwd=ptuning_directory)
- try:
- process.communicate(timeout=3600*24)
- except subprocess.TimeoutExpired:
- process.kill()
- return
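
For reference, a hedged sketch of how the plugin's `advanced_arg` string is parsed by `string_to_options` above; the import path and the flag values are placeholders, not taken from the repository.

```python
# Assumes the deleted module is importable from the GPT-academic plugin package.
from crazy_functions.chatglm微调工具 import string_to_options

args = string_to_options(
    "--llm_to_learn gpt-3.5-turbo --batch 50 "
    "--json_dataset t_code.json --ptuning_directory /home/user/ChatGLM2-6B/ptuning"
)
print(args.batch, args.json_dataset, args.ptuning_directory)
```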
diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py b/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py
deleted file mode 100644
index 8dd10db04d90f336f96a2447555264a488913c02..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/MyGO_VIts-bert/text/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from text.symbols import *
-
-
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
-
-def cleaned_text_to_sequence(cleaned_text, tones, language):
- """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- """
- phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
- tone_start = language_tone_start_map[language]
- tones = [i + tone_start for i in tones]
- lang_id = language_id_map[language]
- lang_ids = [lang_id for i in phones]
- return phones, tones, lang_ids
-
-
-def get_bert(norm_text, word2ph, language, device):
- from .chinese_bert import get_bert_feature as zh_bert
- from .english_bert_mock import get_bert_feature as en_bert
- from .japanese_bert import get_bert_feature as jp_bert
-
- lang_bert_func_map = {"ZH": zh_bert, "EN": en_bert, "JP": jp_bert}
- bert = lang_bert_func_map[language](norm_text, word2ph, device)
- return bert
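
A small illustrative sketch (hedged; the phoneme symbols and language code are assumptions that must exist in `symbols` and the language maps imported above): converting cleaned phonemes plus tones into ID sequences.

```python
from text import cleaned_text_to_sequence

phones = ["k", "a", "N", "o"]  # hypothetical cleaned phonemes
tones = [0, 1, 0, 1]           # one tone per phoneme
phone_ids, tone_ids, lang_ids = cleaned_text_to_sequence(phones, tones, "JP")
print(phone_ids, tone_ids, lang_ids)
```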
diff --git a/spaces/Manjushri/Dall-E-Mini/style.css b/spaces/Manjushri/Dall-E-Mini/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/Manjushri/Dall-E-Mini/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/Manjushri/MusicGen/app.py b/spaces/Manjushri/MusicGen/app.py
deleted file mode 100644
index 247513bacaea7bcf797b936d7da52449a17be3a5..0000000000000000000000000000000000000000
--- a/spaces/Manjushri/MusicGen/app.py
+++ /dev/null
@@ -1,351 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-# Updated to account for UI changes from https://github.com/rkfg/audiocraft/blob/long/app.py
-# also released under the MIT license.
-
-import argparse
-from concurrent.futures import ProcessPoolExecutor
-import os
-import subprocess as sp
-from tempfile import NamedTemporaryFile
-import time
-import warnings
-import modin.pandas as pd
-import torch
-import gradio as gr
-
-from audiocraft.data.audio_utils import convert_audio
-from audiocraft.data.audio import audio_write
-from audiocraft.models import MusicGen
-
-
-MODEL = None # Last used model
-IS_BATCHED = "facebook/MusicGen" in os.environ.get('SPACE_ID', '')
-MAX_BATCH_SIZE = 6
-BATCHED_DURATION = 15
-INTERRUPTING = False
-# We have to wrap subprocess call to clean a bit the log when using gr.make_waveform
-_old_call = sp.call
-
-
-def _call_nostderr(*args, **kwargs):
- # Avoid ffmpeg vomiting on the logs.
- kwargs['stderr'] = sp.DEVNULL
- kwargs['stdout'] = sp.DEVNULL
- _old_call(*args, **kwargs)
-
-
-sp.call = _call_nostderr
-# Preallocating the pool of processes.
-pool = ProcessPoolExecutor(3)
-pool.__enter__()
-
-
-def interrupt():
- global INTERRUPTING
- INTERRUPTING = True
-
-
-def make_waveform(*args, **kwargs):
- # Further remove some warnings.
- be = time.time()
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- out = gr.make_waveform(*args, **kwargs)
- print("Make a video took", time.time() - be)
- return out
-
-
-def load_model(version='melody'):
- global MODEL
- print("Loading model", version)
- if MODEL is None or MODEL.name != version:
- MODEL = MusicGen.get_pretrained(version)
-
-
-def _do_predictions(texts, melodies, duration, progress=False, **gen_kwargs):
- MODEL.set_generation_params(duration=duration, **gen_kwargs)
- print("new batch", len(texts), texts, [None if m is None else (m[0], m[1].shape) for m in melodies])
- be = time.time()
- processed_melodies = []
- target_sr = 32000
- target_ac = 1
- for melody in melodies:
- if melody is None:
- processed_melodies.append(None)
- else:
- sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t()
- if melody.dim() == 1:
- melody = melody[None]
- melody = melody[..., :int(sr * duration)]
- melody = convert_audio(melody, sr, target_sr, target_ac)
- processed_melodies.append(melody)
-
- if any(m is not None for m in processed_melodies):
- outputs = MODEL.generate_with_chroma(
- descriptions=texts,
- melody_wavs=processed_melodies,
- melody_sample_rate=target_sr,
- progress=progress,
- )
- else:
- outputs = MODEL.generate(texts, progress=progress)
-
- outputs = outputs.detach().cpu().float()
- out_files = []
- for output in outputs:
- with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
- audio_write(
- file.name, output, MODEL.sample_rate, strategy="loudness",
- loudness_headroom_db=16, loudness_compressor=True, add_suffix=False)
- out_files.append(pool.submit(make_waveform, file.name))
- res = [out_file.result() for out_file in out_files]
- print("batch finished", len(texts), time.time() - be)
- return res
-
-
-def predict_batched(texts, melodies):
- max_text_length = 512
- texts = [text[:max_text_length] for text in texts]
- load_model('melody')
- res = _do_predictions(texts, melodies, BATCHED_DURATION)
- return [res]
-
-
-def predict_full(model, text, melody, duration, topk, topp, temperature, cfg_coef, progress=gr.Progress()):
- global INTERRUPTING
- INTERRUPTING = False
- topk = int(topk)
- load_model(model)
-
- def _progress(generated, to_generate):
- progress((generated, to_generate))
- if INTERRUPTING:
- raise gr.Error("Interrupted.")
- MODEL.set_custom_progress_callback(_progress)
-
- outs = _do_predictions(
- [text], [melody], duration, progress=True,
- top_k=topk, top_p=topp, temperature=temperature, cfg_coef=cfg_coef)
- return outs[0]
-
-def toggle_audio_src(choice):
- if choice == "mic":
- return gr.update(source="microphone", value=None, label="Microphone")
- else:
- return gr.update(source="upload", value=None, label="File")
-
-
-def ui_full(launch_kwargs):
- with gr.Blocks() as interface:
- gr.Markdown(
- """
- # MusicGen
- This is a demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284)
- """
- )
- with gr.Row():
- with gr.Column():
- with gr.Row():
- text = gr.Text(label="Input Text", interactive=True)
- with gr.Column():
- radio = gr.Radio(["file", "mic"], value="file", label="Condition on a melody (optional) File or Mic")
- melody = gr.Audio(source="upload", type="numpy", label="Melody Condition (optional)", interactive=True)
- with gr.Row():
- submit = gr.Button("Submit")
- # Adapted from https://github.com/rkfg/audiocraft/blob/long/app.py, MIT license.
- #_ = gr.Button("Interrupt").click(fn=interrupt, queue=False)
- with gr.Row():
- model = gr.Radio(["melody", "medium", "small"], label="Model", value="melody", interactive=True)
- with gr.Row():
- duration = gr.Slider(minimum=1, maximum=16, value=8, label="Duration", interactive=True)
- with gr.Row():
- topk = gr.Number(label="Top-k", value=250, interactive=True)
- topp = gr.Number(label="Top-p", value=0, interactive=True)
- temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
- cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
- with gr.Column():
- output = gr.Video(label="Generated Music")
- submit.click(predict_full, inputs=[model, text, melody, duration, topk, topp, temperature, cfg_coef], outputs=[output])
- radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
- gr.Examples(
- fn=predict_full,
- examples=[
- [
- "An 80s driving pop song with heavy drums and synth pads in the background",
- "./assets/bach.mp3",
- "melody"
- ],
- [
- "A cheerful country song with acoustic guitars",
- "./assets/bolero_ravel.mp3",
- "melody"
- ],
- [
- "90s rock song with electric guitar and heavy drums",
- None,
- "medium"
- ],
- [
- "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions",
- "./assets/bach.mp3",
- "melody"
- ],
- [
- "lofi slow bpm electro chill with organic samples",
- None,
- "medium",
- ],
- ],
- inputs=[text, melody, model],
- outputs=[output]
- )
- gr.Markdown(
- """
- ### More details
-
- The model will generate a short music extract based on the description you provided.
- The model can generate up to 30 seconds of audio in one pass. It is now possible
- to extend the generation by feeding back the end of the previous chunk of audio.
- This can take a long time, and the model might lose consistency. The model might also
- decide at arbitrary positions that the song ends.
-
- **WARNING:** Choosing long durations will take a long time to generate (2min might take ~10min). An overlap of 12 seconds
- is kept with the previously generated chunk, and 18 "new" seconds are generated each time.
-
- We present 4 model variations:
- 1. Melody -- a music generation model capable of generating music conditioned on text and melody inputs. **Note**, you can also use text only.
- 2. Small -- a 300M transformer decoder conditioned on text only.
- 3. Medium -- a 1.5B transformer decoder conditioned on text only.
- 4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.)
-
- When using `melody`, you can optionally provide a reference audio from
- which a broad melody will be extracted. The model will then try to follow both the description and melody provided.
-
- You can also use your own GPU or a Google Colab by following the instructions on our repo.
- See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
- for more details.
- """
- )
-
- interface.queue(max_size=2).launch(**launch_kwargs)
-
-
-def ui_batched(launch_kwargs):
- with gr.Blocks() as demo:
- gr.Markdown(
- """
- # MusicGen
- This is the demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation
- presented at: ["Simple and Controllable Music Generation"](https://huggingface.co/papers/2306.05284).
-
- Duplicate this Space for longer sequences, more control and no queue.
- """
- )
- with gr.Row():
- with gr.Column():
- with gr.Row():
- text = gr.Text(label="Describe your music", lines=2, interactive=True)
- with gr.Column():
- radio = gr.Radio(["file", "mic"], value="file", label="Condition on a melody (optional) File or Mic")
- melody = gr.Audio(source="upload", type="numpy", label="Melody Condition (optional)", interactive=True)
- with gr.Row():
- submit = gr.Button("Generate")
- with gr.Column():
- output = gr.Video(label="Generated Music")
- submit.click(predict_batched, inputs=[text, melody], outputs=[output], batch=True, max_batch_size=MAX_BATCH_SIZE)
- radio.change(toggle_audio_src, radio, [melody], queue=False, show_progress=False)
- gr.Examples(
- fn=predict_batched,
- examples=[
- [
- "An 80s driving pop song with heavy drums and synth pads in the background",
- "./assets/bach.mp3",
- ],
- [
- "A cheerful country song with acoustic guitars",
- "./assets/bolero_ravel.mp3",
- ],
- [
- "90s rock song with electric guitar and heavy drums",
- None,
- ],
- [
- "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions bpm: 130",
- "./assets/bach.mp3",
- ],
- [
- "lofi slow bpm electro chill with organic samples",
- None,
- ],
- ],
- inputs=[text, melody],
- outputs=[output]
- )
- gr.Markdown("""
- ### More details
- The model will generate 12 seconds of audio based on the description you provided.
- You can optionally provide a reference audio from which a broad melody will be extracted.
- The model will then try to follow both the description and melody provided.
- All samples are generated with the `melody` model.
- You can also use your own GPU or a Google Colab by following the instructions on our repo.
- See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
- for more details.
- """)
-
- demo.queue(max_size=3).launch(**launch_kwargs)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--listen',
- type=str,
- default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
- help='IP to listen on for connections to Gradio',
- )
- parser.add_argument(
- '--username', type=str, default='', help='Username for authentication'
- )
- parser.add_argument(
- '--password', type=str, default='', help='Password for authentication'
- )
- parser.add_argument(
- '--server_port',
- type=int,
- default=0,
- help='Port to run the server listener on',
- )
- parser.add_argument(
- '--inbrowser', action='store_true', help='Open in browser'
- )
- parser.add_argument(
- '--share', action='store_true', help='Share the gradio UI'
- )
-
- args = parser.parse_args()
-
- launch_kwargs = {}
- launch_kwargs['server_name'] = args.listen
-
- if args.username and args.password:
- launch_kwargs['auth'] = (args.username, args.password)
- if args.server_port:
- launch_kwargs['server_port'] = args.server_port
- if args.inbrowser:
- launch_kwargs['inbrowser'] = args.inbrowser
- if args.share:
- launch_kwargs['share'] = args.share
-
- # Show the interface
- if IS_BATCHED:
- ui_batched(launch_kwargs)
- else:
- ui_full(launch_kwargs)
\ No newline at end of file
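
The demo above is a thin Gradio wrapper around audiocraft's MusicGen API (`MODEL.generate`, `audio_write`). A minimal sketch of the same generation path without the UI, assuming the `MusicGen.get_pretrained` and `set_generation_params` entry points used in the omitted top half of `app.py`; the prompt and output names are placeholders:

    from audiocraft.models import MusicGen
    from audiocraft.data.audio import audio_write

    model = MusicGen.get_pretrained('melody')        # 'small' / 'medium' / 'large' also exist
    model.set_generation_params(duration=8, top_k=250, top_p=0.0,
                                temperature=1.0, cfg_coef=3.0)

    # text-only generation; returns a [batch, channels, samples] tensor
    wavs = model.generate(['An 80s driving pop song with heavy drums'], progress=True)

    for i, wav in enumerate(wavs):
        # same write settings as _do_predictions above
        audio_write(f'musicgen_out_{i}', wav.cpu(), model.sample_rate,
                    strategy='loudness', loudness_headroom_db=16,
                    loudness_compressor=True)
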
diff --git a/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py b/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py
deleted file mode 100644
index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000
--- a/spaces/MarcusSu1216/XingTong/vdecoder/nsf_hifigan/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern) # get checkpoint paths
- cp_list = sorted(cp_list)# sort by iter
- if len(cp_list) > n_models: # if more than n_models models are found
-        for cp in cp_list[:-n_models]:# delete the oldest models other than latest n_models
- open(cp, 'w').close()# empty file contents
- os.unlink(cp)# delete file (move to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
-
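A minimal usage sketch for the checkpoint helpers above, assuming the imports at the top of `utils.py`; the directory and the `g_` prefix are placeholders, and the eight `?` wildcards in `scan_checkpoint` match the zero-padded step number in the file name:

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    cp_g = scan_checkpoint('checkpoints/nsf_hifigan', 'g_')   # newest g_???????? file, or None
    if cp_g is not None:
        state_dict = load_checkpoint(cp_g, device)

    # after writing a new checkpoint, keep only the two most recent ones
    del_old_checkpoints('checkpoints/nsf_hifigan', 'g_', n_models=2)
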
diff --git a/spaces/Marshalls/testmtd/training/options/train_options.py b/spaces/Marshalls/testmtd/training/options/train_options.py
deleted file mode 100644
index aef495cd05dbb06c4285a34c69d44f283eb84f06..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/training/options/train_options.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from .base_options import BaseOptions
-from pytorch_lightning import Trainer
-
-class TrainOptions(BaseOptions):
-
- def __init__(self):
- super(TrainOptions, self).__init__()
- parser = self.parser
- parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
- parser.add_argument('--nepoch_decay', type=int, default=100, help='# of epochs to linearly decay learning rate to zero')
- parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...')
- parser.add_argument('--optimizer', type=str, default='adam', help='the optimizer to use')
- parser.add_argument('-lr', '--learning_rate', default=1e-4, type=float, help="learning rate")
- parser.add_argument('--momentum', default=0, type=float)
- parser.add_argument('--weight_decay', default=0, type=float)
- parser.add_argument('--lr_policy', type=str, default='lambda', help='learning rate policy: lambda|step|plateau|cosine')
- parser.add_argument('--warmup_epochs', type=int, default=10, help='the number of warmup epochs when using lr policy LinearWarmupCosineAnnealing')
- parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
- parser.add_argument('--lr_decay_factor', default=0.1, type=float, help="decay factor to use with multiplicative learning rate schedulers")
- parser.add_argument('--lr_decay_milestones', type=str, default='[500,1000]', help='the milestones at which to decay the learning rate, when using the multi step lr policy')
- parser = Trainer.add_argparse_args(parser)
- self.parser = parser
- self.is_train = True
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py
deleted file mode 100644
index cd514cc204c1d571ea5dc7e74b038c0f477a008b..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/masked_conv.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import math
-
-import torch
-import torch.nn as nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.utils import _pair
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext(
- '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
-
-
-class MaskedConv2dFunction(Function):
-
- @staticmethod
- def symbolic(g, features, mask, weight, bias, padding, stride):
- return g.op(
- 'mmcv::MMCVMaskedConv2d',
- features,
- mask,
- weight,
- bias,
- padding_i=padding,
- stride_i=stride)
-
- @staticmethod
- def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
- assert mask.dim() == 3 and mask.size(0) == 1
- assert features.dim() == 4 and features.size(0) == 1
- assert features.size()[2:] == mask.size()[1:]
- pad_h, pad_w = _pair(padding)
- stride_h, stride_w = _pair(stride)
- if stride_h != 1 or stride_w != 1:
- raise ValueError(
-                'Stride must be 1 in masked_conv2d currently.')
- out_channel, in_channel, kernel_h, kernel_w = weight.size()
-
- batch_size = features.size(0)
- out_h = int(
- math.floor((features.size(2) + 2 * pad_h -
- (kernel_h - 1) - 1) / stride_h + 1))
- out_w = int(
- math.floor((features.size(3) + 2 * pad_w -
- (kernel_h - 1) - 1) / stride_w + 1))
- mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
- output = features.new_zeros(batch_size, out_channel, out_h, out_w)
- if mask_inds.numel() > 0:
- mask_h_idx = mask_inds[:, 0].contiguous()
- mask_w_idx = mask_inds[:, 1].contiguous()
- data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
- mask_inds.size(0))
- ext_module.masked_im2col_forward(
- features,
- mask_h_idx,
- mask_w_idx,
- data_col,
- kernel_h=kernel_h,
- kernel_w=kernel_w,
- pad_h=pad_h,
- pad_w=pad_w)
-
- masked_output = torch.addmm(1, bias[:, None], 1,
- weight.view(out_channel, -1), data_col)
- ext_module.masked_col2im_forward(
- masked_output,
- mask_h_idx,
- mask_w_idx,
- output,
- height=out_h,
- width=out_w,
- channels=out_channel)
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- return (None, ) * 5
-
-
-masked_conv2d = MaskedConv2dFunction.apply
-
-
-class MaskedConv2d(nn.Conv2d):
- """A MaskedConv2d which inherits the official Conv2d.
-
-    The masked forward doesn't implement the backward function and currently
-    only supports a stride of 1.
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True):
- super(MaskedConv2d,
- self).__init__(in_channels, out_channels, kernel_size, stride,
- padding, dilation, groups, bias)
-
- def forward(self, input, mask=None):
- if mask is None: # fallback to the normal Conv2d
- return super(MaskedConv2d, self).forward(input)
- else:
- return masked_conv2d(input, mask, self.weight, self.bias,
- self.padding)
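
A minimal sketch of calling `MaskedConv2d`, following the shape asserts in `MaskedConv2dFunction.forward` (batch size 1, a single-channel spatial mask matching the feature map). The masked path requires mmcv's compiled `_ext` ops and a CUDA device; passing `mask=None` falls back to the ordinary `Conv2d` forward:

    import torch

    conv = MaskedConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1)
    x = torch.randn(1, 16, 64, 64)

    y_dense = conv(x)                                  # mask=None -> plain Conv2d

    mask = (torch.rand(1, 64, 64) > 0.5).float()       # 1 where outputs should be computed
    if torch.cuda.is_available():
        y_sparse = conv.cuda()(x.cuda(), mask.cuda())  # only masked positions are convolved
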
diff --git a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py b/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py
deleted file mode 100644
index 78eeb1003aa45d27bdbfc6b4a1d7ccbff57cd2e3..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/sd1/ldm/models/diffusion/plms.py
+++ /dev/null
@@ -1,236 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-from functools import partial
-
-from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-
-
-class PLMSSampler(object):
- def __init__(self, model, schedule="linear", **kwargs):
- super().__init__()
- self.model = model
- self.ddpm_num_timesteps = model.num_timesteps
- self.schedule = schedule
-
- def register_buffer(self, name, attr):
- if type(attr) == torch.Tensor:
- if attr.device != torch.device("cuda"):
- attr = attr.to(torch.device("cuda"))
- setattr(self, name, attr)
-
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
- if ddim_eta != 0:
- raise ValueError('ddim_eta must be 0 for PLMS')
- self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
- alphas_cumprod = self.model.alphas_cumprod
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
- self.register_buffer('betas', to_torch(self.model.betas))
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
- # calculations for diffusion q(x_t | x_{t-1}) and others
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
- # ddim sampling parameters
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
- ddim_timesteps=self.ddim_timesteps,
- eta=ddim_eta,verbose=verbose)
- self.register_buffer('ddim_sigmas', ddim_sigmas)
- self.register_buffer('ddim_alphas', ddim_alphas)
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
- @torch.no_grad()
- def sample(self,
- S,
- batch_size,
- shape,
- conditioning=None,
- callback=None,
- normals_sequence=None,
- img_callback=None,
- quantize_x0=False,
- eta=0.,
- mask=None,
- x0=None,
- temperature=1.,
- noise_dropout=0.,
- score_corrector=None,
- corrector_kwargs=None,
- verbose=True,
- x_T=None,
- log_every_t=100,
- unconditional_guidance_scale=1.,
- unconditional_conditioning=None,
- # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs
- ):
- if conditioning is not None:
- if isinstance(conditioning, dict):
- cbs = conditioning[list(conditioning.keys())[0]].shape[0]
- if cbs != batch_size:
- print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
- else:
- if conditioning.shape[0] != batch_size:
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
- # sampling
- C, H, W = shape
- size = (batch_size, C, H, W)
- print(f'Data shape for PLMS sampling is {size}')
-
- samples, intermediates = self.plms_sampling(conditioning, size,
- callback=callback,
- img_callback=img_callback,
- quantize_denoised=quantize_x0,
- mask=mask, x0=x0,
- ddim_use_original_steps=False,
- noise_dropout=noise_dropout,
- temperature=temperature,
- score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- x_T=x_T,
- log_every_t=log_every_t,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- )
- return samples, intermediates
-
- @torch.no_grad()
- def plms_sampling(self, cond, shape,
- x_T=None, ddim_use_original_steps=False,
- callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, log_every_t=100,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None,):
- device = self.model.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- if timesteps is None:
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
- elif timesteps is not None and not ddim_use_original_steps:
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
- timesteps = self.ddim_timesteps[:subset_end]
-
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
- time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
- print(f"Running PLMS Sampling with {total_steps} timesteps")
-
- iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
- old_eps = []
-
- for i, step in enumerate(iterator):
- index = total_steps - i - 1
- ts = torch.full((b,), step, device=device, dtype=torch.long)
- ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
-
- if mask is not None:
- assert x0 is not None
- img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
- img = img_orig * mask + (1. - mask) * img
-
- outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
- quantize_denoised=quantize_denoised, temperature=temperature,
- noise_dropout=noise_dropout, score_corrector=score_corrector,
- corrector_kwargs=corrector_kwargs,
- unconditional_guidance_scale=unconditional_guidance_scale,
- unconditional_conditioning=unconditional_conditioning,
- old_eps=old_eps, t_next=ts_next)
- img, pred_x0, e_t = outs
- old_eps.append(e_t)
- if len(old_eps) >= 4:
- old_eps.pop(0)
- if callback: callback(i)
- if img_callback: img_callback(pred_x0, i)
-
- if index % log_every_t == 0 or index == total_steps - 1:
- intermediates['x_inter'].append(img)
- intermediates['pred_x0'].append(pred_x0)
-
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
- unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
- b, *_, device = *x.shape, x.device
-
- def get_model_output(x, t):
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
- e_t = self.model.apply_model(x, t, c)
- else:
- x_in = torch.cat([x] * 2)
- t_in = torch.cat([t] * 2)
- c_in = torch.cat([unconditional_conditioning, c])
- e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
- e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
- if score_corrector is not None:
- assert self.model.parameterization == "eps"
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
- return e_t
-
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
- def get_x_prev_and_pred_x0(e_t, index):
- # select parameters corresponding to the currently considered timestep
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
-
- # current prediction for x_0
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
- if quantize_denoised:
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
- # direction pointing to x_t
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
- return x_prev, pred_x0
-
- e_t = get_model_output(x, t)
- if len(old_eps) == 0:
- # Pseudo Improved Euler (2nd order)
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
- e_t_next = get_model_output(x_prev, t_next)
- e_t_prime = (e_t + e_t_next) / 2
- elif len(old_eps) == 1:
- # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (3 * e_t - old_eps[-1]) / 2
- elif len(old_eps) == 2:
-            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
- elif len(old_eps) >= 3:
-            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
- e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
- x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
- return x_prev, pred_x0, e_t
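
In `p_sample_plms` above, the `old_eps` buffer keeps up to three previous noise predictions, so once it is full the update uses the 4th-order Adams-Bashforth combination (55*e_t - 59*e_{t-1} + 37*e_{t-2} - 9*e_{t-3}) / 24, as the code shows. A minimal sketch of driving the sampler, assuming `model` is an LDM-style model exposing `num_timesteps`, `betas`, `alphas_cumprod`, `apply_model` and `q_sample` as used above, and that the conditioning tensors `c` / `uc` have been produced elsewhere:

    sampler = PLMSSampler(model)

    samples, intermediates = sampler.sample(
        S=50,                               # number of PLMS steps
        batch_size=c.shape[0],
        shape=(4, 64, 64),                  # latent (C, H, W)
        conditioning=c,
        unconditional_guidance_scale=7.5,
        unconditional_conditioning=uc,
        eta=0.0,                            # PLMS requires eta == 0
        verbose=False,
    )
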
diff --git a/spaces/Miuzarte/SUI-svc-4.0/train.py b/spaces/Miuzarte/SUI-svc-4.0/train.py
deleted file mode 100644
index 0fc80bf4aacf143feaf08575eb285910c0c8ce0a..0000000000000000000000000000000000000000
--- a/spaces/Miuzarte/SUI-svc-4.0/train.py
+++ /dev/null
@@ -1,297 +0,0 @@
-import logging
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-import os
-import json
-import argparse
-import itertools
-import math
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torch.utils.tensorboard import SummaryWriter
-import torch.multiprocessing as mp
-import torch.distributed as dist
-from torch.nn.parallel import DistributedDataParallel as DDP
-from torch.cuda.amp import autocast, GradScaler
-
-import modules.commons as commons
-import utils
-from data_utils import TextAudioSpeakerLoader, TextAudioCollate
-from models import (
- SynthesizerTrn,
- MultiPeriodDiscriminator,
-)
-from modules.losses import (
- kl_loss,
- generator_loss, discriminator_loss, feature_loss
-)
-
-from modules.mel_processing import mel_spectrogram_torch, spec_to_mel_torch
-
-torch.backends.cudnn.benchmark = True
-global_step = 0
-
-
-# os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'
-
-
-def main():
- """Assume Single Node Multi GPUs Training Only"""
- assert torch.cuda.is_available(), "CPU training is not allowed."
- hps = utils.get_hparams()
-
- n_gpus = torch.cuda.device_count()
- os.environ['MASTER_ADDR'] = 'localhost'
- os.environ['MASTER_PORT'] = hps.train.port
-
- mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
-
-
-def run(rank, n_gpus, hps):
- global global_step
- if rank == 0:
- logger = utils.get_logger(hps.model_dir)
- logger.info(hps)
- utils.check_git_hash(hps.model_dir)
- writer = SummaryWriter(log_dir=hps.model_dir)
- writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
-
- # for pytorch on win, backend use gloo
- dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank)
- torch.manual_seed(hps.train.seed)
- torch.cuda.set_device(rank)
- collate_fn = TextAudioCollate()
- train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps)
- train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
- batch_size=hps.train.batch_size,collate_fn=collate_fn)
- if rank == 0:
- eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps)
- eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False,
- batch_size=1, pin_memory=False,
- drop_last=False, collate_fn=collate_fn)
-
- net_g = SynthesizerTrn(
- hps.data.filter_length // 2 + 1,
- hps.train.segment_size // hps.data.hop_length,
- **hps.model).cuda(rank)
- net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
- optim_g = torch.optim.AdamW(
- net_g.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- optim_d = torch.optim.AdamW(
- net_d.parameters(),
- hps.train.learning_rate,
- betas=hps.train.betas,
- eps=hps.train.eps)
- net_g = DDP(net_g, device_ids=[rank]) # , find_unused_parameters=True)
- net_d = DDP(net_d, device_ids=[rank])
-
- skip_optimizer = True
- try:
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
- optim_g, skip_optimizer)
- _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
- optim_d, skip_optimizer)
- global_step = (epoch_str - 1) * len(train_loader)
- except:
- print("load old checkpoint failed...")
- epoch_str = 1
- global_step = 0
- if skip_optimizer:
- epoch_str = 1
- global_step = 0
-
- scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
- scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
-
- scaler = GradScaler(enabled=hps.train.fp16_run)
-
- for epoch in range(epoch_str, hps.train.epochs + 1):
- if rank == 0:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, eval_loader], logger, [writer, writer_eval])
- else:
- train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler,
- [train_loader, None], None, None)
- scheduler_g.step()
- scheduler_d.step()
-
-
-def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
- net_g, net_d = nets
- optim_g, optim_d = optims
- scheduler_g, scheduler_d = schedulers
- train_loader, eval_loader = loaders
- if writers is not None:
- writer, writer_eval = writers
-
- # train_loader.batch_sampler.set_epoch(epoch)
- global global_step
-
- net_g.train()
- net_d.train()
- for batch_idx, items in enumerate(train_loader):
- c, f0, spec, y, spk, lengths, uv = items
- g = spk.cuda(rank, non_blocking=True)
- spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True)
- c = c.cuda(rank, non_blocking=True)
- f0 = f0.cuda(rank, non_blocking=True)
- uv = uv.cuda(rank, non_blocking=True)
- lengths = lengths.cuda(rank, non_blocking=True)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
-
- with autocast(enabled=hps.train.fp16_run):
- y_hat, ids_slice, z_mask, \
- (z, z_p, m_p, logs_p, m_q, logs_q), pred_lf0, norm_lf0, lf0 = net_g(c, f0, uv, spec, g=g, c_lengths=lengths,
- spec_lengths=lengths)
-
- y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
- y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice
-
- # Discriminator
- y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
-
- with autocast(enabled=False):
- loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
- loss_disc_all = loss_disc
-
- optim_d.zero_grad()
- scaler.scale(loss_disc_all).backward()
- scaler.unscale_(optim_d)
- grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
- scaler.step(optim_d)
-
- with autocast(enabled=hps.train.fp16_run):
- # Generator
- y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
- with autocast(enabled=False):
- loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
- loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
- loss_fm = feature_loss(fmap_r, fmap_g)
- loss_gen, losses_gen = generator_loss(y_d_hat_g)
- loss_lf0 = F.mse_loss(pred_lf0, lf0)
- loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl + loss_lf0
- optim_g.zero_grad()
- scaler.scale(loss_gen_all).backward()
- scaler.unscale_(optim_g)
- grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
- scaler.step(optim_g)
- scaler.update()
-
- if rank == 0:
- if global_step % hps.train.log_interval == 0:
- lr = optim_g.param_groups[0]['lr']
- losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
- logger.info('Train Epoch: {} [{:.0f}%]'.format(
- epoch,
- 100. * batch_idx / len(train_loader)))
- logger.info([x.item() for x in losses] + [global_step, lr])
-
- scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
- "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
- scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl,
- "loss/g/lf0": loss_lf0})
-
- # scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
- # scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
- # scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
- image_dict = {
- "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
- "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
- "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
- "all/lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
- pred_lf0[0, 0, :].detach().cpu().numpy()),
- "all/norm_lf0": utils.plot_data_to_numpy(lf0[0, 0, :].cpu().numpy(),
- norm_lf0[0, 0, :].detach().cpu().numpy())
- }
-
- utils.summarize(
- writer=writer,
- global_step=global_step,
- images=image_dict,
- scalars=scalar_dict
- )
-
- if global_step % hps.train.eval_interval == 0:
- evaluate(hps, net_g, eval_loader, writer_eval)
- utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), hps.train.eval_interval, global_step)
- utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
- os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), hps.train.eval_interval, global_step)
- global_step += 1
-
- if rank == 0:
- logger.info('====> Epoch: {}'.format(epoch))
-
-
-def evaluate(hps, generator, eval_loader, writer_eval):
- generator.eval()
- image_dict = {}
- audio_dict = {}
- with torch.no_grad():
- for batch_idx, items in enumerate(eval_loader):
- c, f0, spec, y, spk, _, uv = items
- g = spk[:1].cuda(0)
- spec, y = spec[:1].cuda(0), y[:1].cuda(0)
- c = c[:1].cuda(0)
- f0 = f0[:1].cuda(0)
- uv= uv[:1].cuda(0)
- mel = spec_to_mel_torch(
- spec,
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.mel_fmin,
- hps.data.mel_fmax)
- y_hat = generator.module.infer(c, f0, uv, g=g)
-
- y_hat_mel = mel_spectrogram_torch(
- y_hat.squeeze(1).float(),
- hps.data.filter_length,
- hps.data.n_mel_channels,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- hps.data.mel_fmin,
- hps.data.mel_fmax
- )
-
- audio_dict.update({
- f"gen/audio_{batch_idx}": y_hat[0],
- f"gt/audio_{batch_idx}": y[0]
- })
- image_dict.update({
- f"gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
- "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())
- })
- utils.summarize(
- writer=writer_eval,
- global_step=global_step,
- images=image_dict,
- audios=audio_dict,
- audio_sampling_rate=hps.data.sampling_rate
- )
- generator.train()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py b/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py
deleted file mode 100644
index 7174055dae61e8e4406e891359aa38957acf6a24..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/textdet/dbnetpp/dbnetpp_resnet50-dcnv2_fpnc_100k_synthtext.py
+++ /dev/null
@@ -1,44 +0,0 @@
-_base_ = [
- '_base_dbnetpp_resnet50-dcnv2_fpnc.py',
- '../_base_/pretrain_runtime.py',
- '../_base_/datasets/synthtext.py',
- '../_base_/schedules/schedule_sgd_100k.py',
-]
-
-train_pipeline = [
- dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
- dict(
- type='LoadOCRAnnotations',
- with_bbox=True,
- with_polygon=True,
- with_label=True,
- ),
- dict(type='FixInvalidPolygon'),
- dict(
- type='TorchVisionWrapper',
- op='ColorJitter',
- brightness=32.0 / 255,
- saturation=0.5),
- dict(
- type='ImgAugWrapper',
- args=[['Fliplr', 0.5],
- dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
- dict(type='RandomCrop', min_side_ratio=0.1),
- dict(type='Resize', scale=(640, 640), keep_ratio=True),
- dict(type='Pad', size=(640, 640)),
- dict(
- type='PackTextDetInputs',
- meta_keys=('img_path', 'ori_shape', 'img_shape'))
-]
-
-synthtext_textdet_train = _base_.synthtext_textdet_train
-synthtext_textdet_train.pipeline = train_pipeline
-
-train_dataloader = dict(
- batch_size=16,
- num_workers=8,
- persistent_workers=True,
- sampler=dict(type='DefaultSampler', shuffle=True),
- dataset=synthtext_textdet_train)
-
-auto_scale_lr = dict(base_batch_size=16)
diff --git a/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py b/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py
deleted file mode 100644
index ffc7dc2678e85006b9f66d910fcae3e307c521a8..0000000000000000000000000000000000000000
--- a/spaces/MrBodean/VoiceClone/synthesizer/synthesize.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-from torch.utils.data import DataLoader
-from synthesizer.hparams import hparams_debug_string
-from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer
-from synthesizer.models.tacotron import Tacotron
-from synthesizer.utils.text import text_to_sequence
-from synthesizer.utils.symbols import symbols
-import numpy as np
-from pathlib import Path
-from tqdm import tqdm
-import platform
-
-def run_synthesis(in_dir, out_dir, model_dir, hparams):
- # This generates ground truth-aligned mels for vocoder training
- synth_dir = Path(out_dir).joinpath("mels_gta")
- synth_dir.mkdir(exist_ok=True)
- print(hparams_debug_string())
-
- # Check for GPU
- if torch.cuda.is_available():
- device = torch.device("cuda")
- if hparams.synthesis_batch_size % torch.cuda.device_count() != 0:
- raise ValueError("`hparams.synthesis_batch_size` must be evenly divisible by n_gpus!")
- else:
- device = torch.device("cpu")
- print("Synthesizer using device:", device)
-
- # Instantiate Tacotron model
- model = Tacotron(embed_dims=hparams.tts_embed_dims,
- num_chars=len(symbols),
- encoder_dims=hparams.tts_encoder_dims,
- decoder_dims=hparams.tts_decoder_dims,
- n_mels=hparams.num_mels,
- fft_bins=hparams.num_mels,
- postnet_dims=hparams.tts_postnet_dims,
- encoder_K=hparams.tts_encoder_K,
- lstm_dims=hparams.tts_lstm_dims,
- postnet_K=hparams.tts_postnet_K,
- num_highways=hparams.tts_num_highways,
- dropout=0., # Use zero dropout for gta mels
- stop_threshold=hparams.tts_stop_threshold,
- speaker_embedding_size=hparams.speaker_embedding_size).to(device)
-
- # Load the weights
- model_dir = Path(model_dir)
- model_fpath = model_dir.joinpath(model_dir.stem).with_suffix(".pt")
- print("\nLoading weights at %s" % model_fpath)
- model.load(model_fpath)
- print("Tacotron weights loaded from step %d" % model.step)
-
- # Synthesize using same reduction factor as the model is currently trained
- r = np.int32(model.r)
-
- # Set model to eval mode (disable gradient and zoneout)
- model.eval()
-
- # Initialize the dataset
- in_dir = Path(in_dir)
- metadata_fpath = in_dir.joinpath("train.txt")
- mel_dir = in_dir.joinpath("mels")
- embed_dir = in_dir.joinpath("embeds")
-
- dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams)
- data_loader = DataLoader(dataset,
- collate_fn=lambda batch: collate_synthesizer(batch, r, hparams),
- batch_size=hparams.synthesis_batch_size,
- num_workers=2 if platform.system() != "Windows" else 0,
- shuffle=False,
- pin_memory=True)
-
- # Generate GTA mels
- meta_out_fpath = Path(out_dir).joinpath("synthesized.txt")
- with open(meta_out_fpath, "w") as file:
- for i, (texts, mels, embeds, idx) in tqdm(enumerate(data_loader), total=len(data_loader)):
- texts = texts.to(device)
- mels = mels.to(device)
- embeds = embeds.to(device)
-
- # Parallelize model onto GPUS using workaround due to python bug
- if device.type == "cuda" and torch.cuda.device_count() > 1:
- _, mels_out, _ = data_parallel_workaround(model, texts, mels, embeds)
- else:
- _, mels_out, _, _ = model(texts, mels, embeds)
-
- for j, k in enumerate(idx):
- # Note: outputs mel-spectrogram files and target ones have same names, just different folders
- mel_filename = Path(synth_dir).joinpath(dataset.metadata[k][1])
- mel_out = mels_out[j].detach().cpu().numpy().T
-
- # Use the length of the ground truth mel to remove padding from the generated mels
- mel_out = mel_out[:int(dataset.metadata[k][4])]
-
- # Write the spectrogram to disk
- np.save(mel_filename, mel_out, allow_pickle=False)
-
- # Write metadata into the synthesized file
- file.write("|".join(dataset.metadata[k]))
diff --git a/spaces/Mrchuw/text-to-image_6_by_6/README.md b/spaces/Mrchuw/text-to-image_6_by_6/README.md
deleted file mode 100644
index 99d798609ac4e3ed9667f7eda6752cc40766fa54..0000000000000000000000000000000000000000
--- a/spaces/Mrchuw/text-to-image_6_by_6/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Text To Image
-emoji: 🌖
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.20.0
-app_file: app.py
-pinned: false
-duplicated_from: xp3857/text-to-image
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py b/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py
deleted file mode 100644
index e89aeb4795e749c64e565ecb26dfd0c8e3232801..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/DiffSpeech/data_gen/tts/runs/binarize.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import utils.commons.single_thread_env # NOQA
-from utils.commons.hparams import hparams, set_hparams
-import importlib
-
-
-def binarize():
- binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
- pkg = ".".join(binarizer_cls.split(".")[:-1])
- cls_name = binarizer_cls.split(".")[-1]
- binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
- print("| Binarizer: ", binarizer_cls)
- binarizer_cls().process()
-
-
-if __name__ == '__main__':
- set_hparams()
- binarize()
diff --git a/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py b/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py
deleted file mode 100644
index 0ab88f4e78be66ba1821e5a6720193b1d614f4f5..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/PortaSpeech/tasks/tts/vocoder_infer/base_vocoder.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import librosa
-from utils.audio import librosa_wav2spec
-from utils.commons.hparams import hparams
-import numpy as np
-
-REGISTERED_VOCODERS = {}
-
-
-def register_vocoder(name):
- def _f(cls):
- REGISTERED_VOCODERS[name] = cls
- return cls
-
- return _f
-
-
-def get_vocoder_cls(vocoder_name):
- return REGISTERED_VOCODERS.get(vocoder_name)
-
-
-class BaseVocoder:
- def spec2wav(self, mel):
- """
-
- :param mel: [T, 80]
- :return: wav: [T']
- """
-
- raise NotImplementedError
-
- @staticmethod
- def wav2spec(wav_fn):
- """
-
- :param wav_fn: str
- :return: wav, mel: [T, 80]
- """
- wav_spec_dict = librosa_wav2spec(wav_fn, fft_size=hparams['fft_size'],
- hop_size=hparams['hop_size'],
- win_length=hparams['win_size'],
- num_mels=hparams['audio_num_mel_bins'],
- fmin=hparams['fmin'],
- fmax=hparams['fmax'],
- sample_rate=hparams['audio_sample_rate'],
- loud_norm=hparams['loud_norm'])
- wav = wav_spec_dict['wav']
- mel = wav_spec_dict['mel']
- return wav, mel
-
- @staticmethod
- def wav2mfcc(wav_fn):
- fft_size = hparams['fft_size']
- hop_size = hparams['hop_size']
- win_length = hparams['win_size']
- sample_rate = hparams['audio_sample_rate']
- wav, _ = librosa.core.load(wav_fn, sr=sample_rate)
- mfcc = librosa.feature.mfcc(y=wav, sr=sample_rate, n_mfcc=13,
- n_fft=fft_size, hop_length=hop_size,
- win_length=win_length, pad_mode="constant", power=1.0)
- mfcc_delta = librosa.feature.delta(mfcc, order=1)
- mfcc_delta_delta = librosa.feature.delta(mfcc, order=2)
- mfcc = np.concatenate([mfcc, mfcc_delta, mfcc_delta_delta]).T
- return mfcc
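
A minimal sketch of the registry pattern above: decorating a `BaseVocoder` subclass with `register_vocoder` makes it retrievable through `get_vocoder_cls`. The vocoder name, the body of `spec2wav`, and the audio path are placeholders, and `wav2spec` needs the project's `hparams` to be populated first:

    @register_vocoder('MyHifiGAN')
    class MyHifiGAN(BaseVocoder):
        def spec2wav(self, mel):
            # mel: [T, 80] -> wav: [T']
            raise NotImplementedError

    vocoder_cls = get_vocoder_cls('MyHifiGAN')
    wav, mel = vocoder_cls.wav2spec('example.wav')     # uses hparams for the STFT settings
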
diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py
deleted file mode 100644
index ed3fd173d4379a75ab1e2e5a9ba0bbdcbaa0be42..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/nlp/bert/input_pipeline.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""BERT model input pipelines."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import tensorflow as tf
-
-
-def decode_record(record, name_to_features):
- """Decodes a record to a TensorFlow example."""
- example = tf.io.parse_single_example(record, name_to_features)
-
- # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
- # So cast all int64 to int32.
- for name in list(example.keys()):
- t = example[name]
- if t.dtype == tf.int64:
- t = tf.cast(t, tf.int32)
- example[name] = t
-
- return example
-
-
-def single_file_dataset(input_file, name_to_features):
- """Creates a single-file dataset to be passed for BERT custom training."""
- # For training, we want a lot of parallel reading and shuffling.
- # For eval, we want no shuffling and parallel reading doesn't matter.
- d = tf.data.TFRecordDataset(input_file)
- d = d.map(
- lambda record: decode_record(record, name_to_features),
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
-
- # When `input_file` is a path to a single file or a list
- # containing a single path, disable auto sharding so that
- # same input file is sent to all workers.
- if isinstance(input_file, str) or len(input_file) == 1:
- options = tf.data.Options()
- options.experimental_distribute.auto_shard_policy = (
- tf.data.experimental.AutoShardPolicy.OFF)
- d = d.with_options(options)
- return d
-
-
-def create_pretrain_dataset(input_patterns,
- seq_length,
- max_predictions_per_seq,
- batch_size,
- is_training=True,
- input_pipeline_context=None,
- use_next_sentence_label=True,
- use_position_id=False,
- output_fake_labels=True):
- """Creates input dataset from (tf)records files for pretraining."""
- name_to_features = {
- 'input_ids':
- tf.io.FixedLenFeature([seq_length], tf.int64),
- 'input_mask':
- tf.io.FixedLenFeature([seq_length], tf.int64),
- 'segment_ids':
- tf.io.FixedLenFeature([seq_length], tf.int64),
- 'masked_lm_positions':
- tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
- 'masked_lm_ids':
- tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
- 'masked_lm_weights':
- tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
- }
- if use_next_sentence_label:
- name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1],
- tf.int64)
- if use_position_id:
- name_to_features['position_ids'] = tf.io.FixedLenFeature([seq_length],
- tf.int64)
- for input_pattern in input_patterns:
- if not tf.io.gfile.glob(input_pattern):
- raise ValueError('%s does not match any files.' % input_pattern)
-
- dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training)
-
- if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
- dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
- input_pipeline_context.input_pipeline_id)
- if is_training:
- dataset = dataset.repeat()
-
- # We set shuffle buffer to exactly match total number of
- # training files to ensure that training data is well shuffled.
- input_files = []
- for input_pattern in input_patterns:
- input_files.extend(tf.io.gfile.glob(input_pattern))
- dataset = dataset.shuffle(len(input_files))
-
-  # In parallel, create a TFRecord dataset for each training file.
- # cycle_length = 8 means that up to 8 files will be read and deserialized in
- # parallel. You may want to increase this number if you have a large number of
- # CPU cores.
- dataset = dataset.interleave(
- tf.data.TFRecordDataset,
- cycle_length=8,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
-
- if is_training:
- dataset = dataset.shuffle(100)
-
- decode_fn = lambda record: decode_record(record, name_to_features)
- dataset = dataset.map(
- decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
-
- def _select_data_from_record(record):
- """Filter out features to use for pretraining."""
- x = {
- 'input_word_ids': record['input_ids'],
- 'input_mask': record['input_mask'],
- 'input_type_ids': record['segment_ids'],
- 'masked_lm_positions': record['masked_lm_positions'],
- 'masked_lm_ids': record['masked_lm_ids'],
- 'masked_lm_weights': record['masked_lm_weights'],
- }
- if use_next_sentence_label:
- x['next_sentence_labels'] = record['next_sentence_labels']
- if use_position_id:
- x['position_ids'] = record['position_ids']
-
- # TODO(hongkuny): Remove the fake labels after migrating bert pretraining.
- if output_fake_labels:
- return (x, record['masked_lm_weights'])
- else:
- return x
-
- dataset = dataset.map(
- _select_data_from_record,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
- dataset = dataset.batch(batch_size, drop_remainder=is_training)
- dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
- return dataset
-
-
-def create_classifier_dataset(file_path,
- seq_length,
- batch_size,
- is_training=True,
- input_pipeline_context=None,
- label_type=tf.int64,
- include_sample_weights=False):
- """Creates input dataset from (tf)records files for train/eval."""
- name_to_features = {
- 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'label_ids': tf.io.FixedLenFeature([], label_type),
- }
- if include_sample_weights:
- name_to_features['weight'] = tf.io.FixedLenFeature([], tf.float32)
- dataset = single_file_dataset(file_path, name_to_features)
-
- # The dataset is always sharded by number of hosts.
- # num_input_pipelines is the number of hosts rather than number of cores.
- if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
- dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
- input_pipeline_context.input_pipeline_id)
-
- def _select_data_from_record(record):
- x = {
- 'input_word_ids': record['input_ids'],
- 'input_mask': record['input_mask'],
- 'input_type_ids': record['segment_ids']
- }
- y = record['label_ids']
- if include_sample_weights:
- w = record['weight']
- return (x, y, w)
- return (x, y)
-
- if is_training:
- dataset = dataset.shuffle(100)
- dataset = dataset.repeat()
-
- dataset = dataset.map(
- _select_data_from_record,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
- dataset = dataset.batch(batch_size, drop_remainder=is_training)
- dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
- return dataset
-
-
-def create_squad_dataset(file_path,
- seq_length,
- batch_size,
- is_training=True,
- input_pipeline_context=None):
- """Creates input dataset from (tf)records files for train/eval."""
- name_to_features = {
- 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- }
- if is_training:
- name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)
- name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)
- else:
- name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)
-
- dataset = single_file_dataset(file_path, name_to_features)
-
- # The dataset is always sharded by number of hosts.
- # num_input_pipelines is the number of hosts rather than number of cores.
- if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
- dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
- input_pipeline_context.input_pipeline_id)
-
- def _select_data_from_record(record):
- """Dispatches record to features and labels."""
- x, y = {}, {}
- for name, tensor in record.items():
- if name in ('start_positions', 'end_positions'):
- y[name] = tensor
- elif name == 'input_ids':
- x['input_word_ids'] = tensor
- elif name == 'segment_ids':
- x['input_type_ids'] = tensor
- else:
- x[name] = tensor
- return (x, y)
-
- if is_training:
- dataset = dataset.shuffle(100)
- dataset = dataset.repeat()
-
- dataset = dataset.map(
- _select_data_from_record,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
- dataset = dataset.batch(batch_size, drop_remainder=True)
- dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
- return dataset
-
-
-def create_retrieval_dataset(file_path,
- seq_length,
- batch_size,
- input_pipeline_context=None):
- """Creates input dataset from (tf)records files for scoring."""
- name_to_features = {
- 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
- 'int_iden': tf.io.FixedLenFeature([1], tf.int64),
- }
- dataset = single_file_dataset(file_path, name_to_features)
-
- # The dataset is always sharded by number of hosts.
- # num_input_pipelines is the number of hosts rather than number of cores.
- if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
- dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
- input_pipeline_context.input_pipeline_id)
-
- def _select_data_from_record(record):
- x = {
- 'input_word_ids': record['input_ids'],
- 'input_mask': record['input_mask'],
- 'input_type_ids': record['segment_ids']
- }
- y = record['int_iden']
- return (x, y)
-
- dataset = dataset.map(
- _select_data_from_record,
- num_parallel_calls=tf.data.experimental.AUTOTUNE)
- dataset = dataset.batch(batch_size, drop_remainder=False)
- dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
- return dataset
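
A minimal usage sketch for the classifier pipeline above; the TFRecord path, sequence length and batch size are placeholders:

    train_ds = create_classifier_dataset(
        'data/train.tf_record', seq_length=128, batch_size=32, is_training=True)

    for features, labels in train_ds.take(1):
        print(features['input_word_ids'].shape)   # (32, 128), int32 after decode_record's cast
        print(labels.shape)                       # (32,)
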
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py
deleted file mode 100644
index 0af2ce495ad53c9df0f8d2eb79f7431b02ab430e..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/region_similarity_calculator.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Region Similarity Calculators for BoxLists.
-
-Region Similarity Calculators compare a pairwise measure of similarity
-between the boxes in two BoxLists.
-"""
-from abc import ABCMeta
-from abc import abstractmethod
-
-import tensorflow as tf
-
-
-def area(boxlist, scope=None):
- """Computes area of boxes.
-
- Args:
- boxlist: BoxList holding N boxes
- scope: name scope.
-
- Returns:
- a tensor with shape [N] representing box areas.
- """
- if not scope:
- scope = 'Area'
- with tf.name_scope(scope):
- y_min, x_min, y_max, x_max = tf.split(
- value=boxlist.get(), num_or_size_splits=4, axis=1)
- return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
-
-
-def intersection(boxlist1, boxlist2, scope=None):
- """Compute pairwise intersection areas between boxes.
-
- Args:
- boxlist1: BoxList holding N boxes
- boxlist2: BoxList holding M boxes
- scope: name scope.
-
- Returns:
- a tensor with shape [N, M] representing pairwise intersections
- """
- if not scope:
- scope = 'Intersection'
- with tf.name_scope(scope):
- y_min1, x_min1, y_max1, x_max1 = tf.split(
- value=boxlist1.get(), num_or_size_splits=4, axis=1)
- y_min2, x_min2, y_max2, x_max2 = tf.split(
- value=boxlist2.get(), num_or_size_splits=4, axis=1)
- all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2))
- all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2))
- intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
- all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2))
- all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2))
- intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
- return intersect_heights * intersect_widths
-
-
-def iou(boxlist1, boxlist2, scope=None):
- """Computes pairwise intersection-over-union between box collections.
-
- Args:
- boxlist1: BoxList holding N boxes
- boxlist2: BoxList holding M boxes
- scope: name scope.
-
- Returns:
- a tensor with shape [N, M] representing pairwise iou scores.
- """
- if not scope:
- scope = 'IOU'
- with tf.name_scope(scope):
- intersections = intersection(boxlist1, boxlist2)
- areas1 = area(boxlist1)
- areas2 = area(boxlist2)
- unions = (
- tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
- return tf.where(
- tf.equal(intersections, 0.0), tf.zeros_like(intersections),
- tf.truediv(intersections, unions))
-
-
-class RegionSimilarityCalculator(object):
- """Abstract base class for region similarity calculator."""
- __metaclass__ = ABCMeta
-
- def compare(self, boxlist1, boxlist2, scope=None):
- """Computes matrix of pairwise similarity between BoxLists.
-
-    This op (to be overridden) computes a measure of pairwise similarity between
- the boxes in the given BoxLists. Higher values indicate more similarity.
-
- Note that this method simply measures similarity and does not explicitly
- perform a matching.
-
- Args:
- boxlist1: BoxList holding N boxes.
- boxlist2: BoxList holding M boxes.
- scope: Op scope name. Defaults to 'Compare' if None.
-
- Returns:
- a (float32) tensor of shape [N, M] with pairwise similarity score.
- """
- if not scope:
- scope = 'Compare'
- with tf.name_scope(scope) as scope:
- return self._compare(boxlist1, boxlist2)
-
- @abstractmethod
- def _compare(self, boxlist1, boxlist2):
- pass
-
-
-class IouSimilarity(RegionSimilarityCalculator):
- """Class to compute similarity based on Intersection over Union (IOU) metric.
-
- This class computes pairwise similarity between two BoxLists based on IOU.
- """
-
- def _compare(self, boxlist1, boxlist2):
- """Compute pairwise IOU similarity between the two BoxLists.
-
- Args:
- boxlist1: BoxList holding N boxes.
- boxlist2: BoxList holding M boxes.
-
- Returns:
- A tensor with shape [N, M] representing pairwise iou scores.
- """
- return iou(boxlist1, boxlist2)
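
The helpers above all reduce to arithmetic on `[N, 4]` corner tensors; the BoxList wrapper only supplies `.get()`. Below is a minimal sketch of the same pairwise IoU computation on plain tensors (the `pairwise_iou` helper and the sample boxes are illustrative, not part of the deleted module), assuming boxes are given in `[y_min, x_min, y_max, x_max]` order:

```python
import tensorflow as tf

def pairwise_iou(boxes1, boxes2):
    # boxes1: [N, 4], boxes2: [M, 4], corners in [y_min, x_min, y_max, x_max] order
    y_min1, x_min1, y_max1, x_max1 = tf.split(boxes1, 4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(boxes2, 4, axis=1)
    inter_h = tf.maximum(0.0, tf.minimum(y_max1, tf.transpose(y_max2))
                              - tf.maximum(y_min1, tf.transpose(y_min2)))
    inter_w = tf.maximum(0.0, tf.minimum(x_max1, tf.transpose(x_max2))
                              - tf.maximum(x_min1, tf.transpose(x_min2)))
    inter = inter_h * inter_w                                      # [N, M]
    area1 = tf.squeeze((y_max1 - y_min1) * (x_max1 - x_min1), [1])
    area2 = tf.squeeze((y_max2 - y_min2) * (x_max2 - x_min2), [1])
    union = area1[:, None] + area2[None, :] - inter
    return tf.where(tf.equal(inter, 0.0), tf.zeros_like(inter), inter / union)

boxes_a = tf.constant([[0.0, 0.0, 1.0, 1.0]])
boxes_b = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.5, 0.5, 1.5, 1.5]])
print(pairwise_iou(boxes_a, boxes_b))  # approx [[1.0, 0.1429]]
```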
diff --git a/spaces/Naveentalluri/NaveenGenAIAvatar/README.md b/spaces/Naveentalluri/NaveenGenAIAvatar/README.md
deleted file mode 100644
index d5f9d88d03c625785bdc54e40a2fc5b7f0719dfa..0000000000000000000000000000000000000000
--- a/spaces/Naveentalluri/NaveenGenAIAvatar/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: NaveenGenAIAvatar
-emoji: 🔥
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py
deleted file mode 100644
index 8075bba6a9efc5f8421368ee0b2ae66afe3f5009..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/pad_dataset.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.data import data_utils
-
-from . import BaseWrapperDataset
-
-
-class PadDataset(BaseWrapperDataset):
- def __init__(self, dataset, pad_idx, left_pad):
- super().__init__(dataset)
- self.pad_idx = pad_idx
- self.left_pad = left_pad
-
- def collater(self, samples):
- return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)
-
-
-class LeftPadDataset(PadDataset):
- def __init__(self, dataset, pad_idx):
- super().__init__(dataset, pad_idx, left_pad=True)
-
-
-class RightPadDataset(PadDataset):
- def __init__(self, dataset, pad_idx):
- super().__init__(dataset, pad_idx, left_pad=False)
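
`PadDataset` defers the actual padding to `data_utils.collate_tokens`; `left_pad` only decides which side the pad symbols go on. A rough standalone sketch of that collation step (the `collate` helper below is illustrative, not fairseq's `collate_tokens`), assuming 1-D LongTensor samples and pad index 1:

```python
import torch

def collate(samples, pad_idx, left_pad):
    # pad variable-length 1-D tensors up to the longest sample in the batch
    max_len = max(s.size(0) for s in samples)
    out = samples[0].new_full((len(samples), max_len), pad_idx)
    for i, s in enumerate(samples):
        if left_pad:
            out[i, max_len - s.size(0):] = s   # tokens flush right, pads on the left
        else:
            out[i, :s.size(0)] = s             # tokens flush left, pads on the right
    return out

batch = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
print(collate(batch, pad_idx=1, left_pad=True))   # tensor([[5, 6, 7], [1, 8, 9]])
print(collate(batch, pad_idx=1, left_pad=False))  # tensor([[5, 6, 7], [8, 9, 1]])
```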
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py
deleted file mode 100644
index 44bb24ae614941f23fea29c56d60167650c39bcb..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-try:
- from fairseq.version import __version__ # noqa
-except ImportError:
- pass
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py
deleted file mode 100644
index bc28000f59a3b9e8098f9fe710cc8335d39eea3e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/nat/insertion_transformer.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.nat import (
- FairseqNATModel,
- LevenshteinTransformerDecoder,
- LevenshteinTransformerModel,
- ensemble_decoder,
-)
-from fairseq.models.transformer import Linear
-from fairseq.modules.transformer_sentence_encoder import init_bert_params
-from fairseq.utils import new_arange
-
-
-class NegativeDistanceScore(object):
- def __init__(self):
-
- # pre-compute some values
- self.scores = {}
-
- self.scores[0.5] = self.compute_score_full(50, 0.5)
- self.scores[1.0] = self.compute_score_full(50, 1.0)
- self.scores[2.0] = self.compute_score_full(50, 2.0)
-
- def __call__(self, i, L, tau):
- if (tau is None) or (tau > 1000):
- return 1 / L
-
- if tau in self.scores:
- if L < self.scores[tau].shape[0]:
- return self.scores[tau][L - 1, i]
- return self.compute_score(L, tau)[i]
-
- def compute_score(self, L, tau):
- s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
- s = np.exp(s - s.max())
- return s / s.sum()
-
- def compute_score_full(self, L, tau):
- s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
- s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
- s = np.exp(s - s.max(1, keepdims=True))
- return s / s.sum(1, keepdims=True)
-
-
-neg_scorer = NegativeDistanceScore()
-
-
-def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
- try:
- from fairseq import libnat
- except ImportError as e:
- import sys
-
- sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n")
- raise e
-
- B = in_tokens.size(0)
- T = in_tokens.size(1)
- V = vocab_size
-
- with torch.cuda.device_of(in_tokens):
- in_tokens_list = [
- [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
- ]
- out_tokens_list = [
- [t for t in s if t != padding_idx]
- for i, s in enumerate(out_tokens.tolist())
- ]
-
- full_labels = libnat.suggested_ed2_path(
- in_tokens_list, out_tokens_list, padding_idx
- )
- insert_labels = [a[:-1] for a in full_labels]
-
-    # numericalize the insertion labels into a flat [B * (T - 1) * V] score tensor
- insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
- insert_index, insert_labels = zip(
- *[
- (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
- for i, labels in enumerate(insert_labels)
- for j, label in enumerate(labels[1:-1])
- for k, w in enumerate(label)
- ]
- ) # HACK 1:-1
- insert_index, insert_labels = [
- torch.tensor(list(a), device=in_tokens.device)
- for a in [insert_index, insert_labels]
- ]
- insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
- insert_label_tensors = insert_label_tensors.view(B, T - 1, V)
-
- return insert_label_tensors
-
-
-def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
-
- padding_masks = in_tokens[:, 1:].eq(padding_idx)
- word_ins_scores.masked_fill_(padding_masks, 0.0)
- word_ins_pred.masked_fill_(padding_masks, padding_idx)
-
- in_coords = new_arange(in_tokens).type_as(in_scores)
-
- # shift all padding predictions to infinite
- out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
- word_ins_pred.eq(padding_idx), float("inf")
- )
- out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
- out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
- out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
- return out_tokens, out_scores
-
-
-@register_model("insertion_transformer")
-class InsertionTransformerModel(LevenshteinTransformerModel):
- def __init__(self, args, encoder, decoder):
- super().__init__(args, encoder, decoder)
-
- @staticmethod
- def add_args(parser):
- FairseqNATModel.add_args(parser)
- parser.add_argument("--label-tau", default=None, type=float)
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens)
- if getattr(args, "apply_bert_init", False):
- decoder.apply(init_bert_params)
- return decoder
-
- def forward(
- self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
- ):
-
- assert tgt_tokens is not None, "forward function only supports training."
-
- # encoding
- encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
-
- # generate training labels for insertion
- word_ins_out = self.decoder.forward_word_ins(
- normalize=False,
- prev_output_tokens=prev_output_tokens,
- encoder_out=encoder_out,
- )
-
- word_ins_tgt = _get_ins_targets(
- prev_output_tokens,
- tgt_tokens,
- self.pad,
- self.unk,
- len(self.tgt_dict),
- tau=self.decoder.label_tau,
- ).type_as(word_ins_out)
- word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
-
- return {
- "word_ins": {
- "out": word_ins_out,
- "tgt": word_ins_tgt,
- "mask": word_ins_masks,
- "ls": self.args.label_smoothing,
- "nll_loss": True,
- }
- }
-
- def forward_decoder(
- self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
- ):
-
- output_tokens = decoder_out.output_tokens
- output_scores = decoder_out.output_scores
- history = decoder_out.history
-
- # TODO: decoding for InsertionTransformer
- word_ins_score = self.decoder.forward_word_ins(
- normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out
- )
-
- if eos_penalty > 0.0:
- word_ins_score[:, :, self.pad] -= eos_penalty
- word_ins_score, word_ins_pred = word_ins_score.max(-1)
- output_tokens, output_scores = _apply_ins_words(
- output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad
- )
-
- # delete some unnecessary paddings
- cut_off = output_tokens.ne(self.pad).sum(1).max()
- output_tokens = output_tokens[:, :cut_off]
- output_scores = output_scores[:, :cut_off]
-
- if history is not None:
- history.append(output_tokens.clone())
-
- return decoder_out._replace(
- output_tokens=output_tokens,
- output_scores=output_scores,
- attn=None,
- history=history,
- )
-
-
-class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
- def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
- # use the TransformerDecoder's __init__
- super(LevenshteinTransformerDecoder, self).__init__(
- args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
- )
-
- self.dictionary = dictionary
- self.bos = dictionary.bos()
- self.unk = dictionary.unk()
- self.eos = dictionary.eos()
- self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim)
-
- self.label_tau = getattr(args, "label_tau", None)
-
- @ensemble_decoder
- def forward_word_ins(self, normalize, encoder_out, prev_output_tokens):
- features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0]
- features = self.pool_out(
- torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
- )
- decoder_out = self.output_layer(features)
- return F.log_softmax(decoder_out, -1) if normalize else decoder_out
-
- def forward_mask_ins(self, *args, **kwargs):
- raise NotImplementedError
-
- def forward_word_del(self, *args, **kwargs):
- raise NotImplementedError
-
-
-@register_model_architecture("insertion_transformer", "insertion_transformer")
-def insertion_base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.apply_bert_init = getattr(args, "apply_bert_init", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- # special for insertion transformer
- args.label_tau = getattr(args, "label_tau", None)
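
`NegativeDistanceScore` above turns each insertion slot into a soft label distribution: candidate positions near the middle of the missing span get more mass, and `tau` controls how quickly it decays (with `tau=None` the labels fall back to uniform `1/L`). A small numeric sketch of the same rule as `compute_score` (the `slot_scores` helper and the toy values are illustrative):

```python
import numpy as np

def slot_scores(L, tau):
    # softmax of -|L/2 - i| / tau over the L candidate positions of one slot
    s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
    s = np.exp(s - s.max())
    return s / s.sum()

for tau in (0.5, 1.0, 2.0):
    print(tau, np.round(slot_scores(5, tau), 3))
# small tau concentrates the mass near the centre position;
# large tau approaches the uniform 1/L labels used when tau is None
```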
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py
deleted file mode 100644
index d6cf06e5872cb86e5c2e726153c7a80c78db9d1e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/modules/quantization/scalar/modules/qemb.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from ..ops import emulate_int
-
-
-class IntEmbedding(nn.Module):
- """
- Quantized counterpart of the nn.Embedding module that applies QuantNoise during training.
-
- Args:
- - num_embeddings: number of tokens
- - embedding_dim: embedding dimension
- - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights)
- - bits: number of bits
- - method: choose among {"tensor", "histogram", "channel"}
- - update_step: recompute scale and zero_point every update_steps iterations
-
- Remarks:
- - We use the straight-through estimator so that the gradients
-      back-propagate nicely in the network; this is implemented with
- the detach() trick
- - Parameters scale and zero_point are recomputed every update_step
- forward pass to reduce the overhead
- - At test time, the weights are fully quantized
- """
-
- def __init__(
- self,
- num_embeddings,
- embedding_dim,
- padding_idx=None,
- max_norm=None,
- norm_type=2.0,
- scale_grad_by_freq=False,
- sparse=False,
- _weight=None,
- p=0,
- update_step=1000,
- bits=8,
- method="histogram",
- ):
- super(IntEmbedding, self).__init__()
- self.num_embeddings = num_embeddings
- self.embedding_dim = embedding_dim
- if padding_idx is not None:
- if padding_idx > 0:
- assert (
- padding_idx < self.num_embeddings
- ), "Padding_idx must be within num_embeddings"
- elif padding_idx < 0:
- assert (
- padding_idx >= -self.num_embeddings
- ), "Padding_idx must be within num_embeddings"
- padding_idx = self.num_embeddings + padding_idx
- self.padding_idx = padding_idx
- self.max_norm = max_norm
- self.norm_type = norm_type
- self.scale_grad_by_freq = scale_grad_by_freq
- if _weight is None:
- self.weight = nn.Parameter(torch.Tensor(num_embeddings, embedding_dim))
- self.reset_parameters()
- else:
- assert list(_weight.shape) == [
- num_embeddings,
- embedding_dim,
- ], "Shape of weight does not match num_embeddings and embedding_dim"
- self.weight = nn.Parameter(_weight)
- self.sparse = sparse
-
- # quantization parameters
- self.p = p
- self.bits = bits
- self.method = method
- self.update_step = update_step
- self.counter = 0
-
- def reset_parameters(self):
- nn.init.normal_(self.weight)
- if self.padding_idx is not None:
- with torch.no_grad():
- self.weight[self.padding_idx].fill_(0)
-
- def forward(self, input):
- # train with QuantNoise and evaluate the fully quantized network
- p = self.p if self.training else 1
-
-        # recompute scale and zero_point every update_step forward passes
- if self.counter % self.update_step == 0:
- self.scale = None
- self.zero_point = None
- self.counter += 1
-
- # quantize weight
- weight_quantized, self.scale, self.zero_point = emulate_int(
- self.weight.detach(),
- bits=self.bits,
- method=self.method,
- scale=self.scale,
- zero_point=self.zero_point,
- )
-
- # mask to apply noise
- mask = torch.zeros_like(self.weight)
- mask.bernoulli_(1 - p)
- noise = (weight_quantized - self.weight).masked_fill(mask.bool(), 0)
-
- # using straight-through estimator (STE)
- clamp_low = -self.scale * self.zero_point
- clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
- weight = (
- torch.clamp(self.weight, clamp_low.item(), clamp_high.item())
- + noise.detach()
- )
-
- # return output
- output = F.embedding(
- input,
- weight,
- self.padding_idx,
- self.max_norm,
- self.norm_type,
- self.scale_grad_by_freq,
- self.sparse,
- )
- return output
-
- def extra_repr(self):
- s = "{num_embeddings}, {embedding_dim}"
- if self.padding_idx is not None:
- s += ", padding_idx={padding_idx}"
- if self.max_norm is not None:
- s += ", max_norm={max_norm}"
- if self.norm_type != 2:
- s += ", norm_type={norm_type}"
- if self.scale_grad_by_freq is not False:
- s += ", scale_grad_by_freq={scale_grad_by_freq}"
- if self.sparse is not False:
- s += ", sparse=True"
-        s += ", quant_noise={p}, bits={bits}, method={method}"
- return s.format(**self.__dict__)
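
`IntEmbedding` is meant as a drop-in replacement for `nn.Embedding` with the extra quant-noise arguments. A minimal usage sketch, assuming fairseq is installed so that the module path in this tree is importable; the hyper-parameter values below are illustrative, not taken from the file:

```python
import torch
from fairseq.modules.quantization.scalar.modules.qemb import IntEmbedding  # path as in this tree

emb = IntEmbedding(
    num_embeddings=1000,   # vocabulary size
    embedding_dim=64,
    padding_idx=0,
    p=0.5,                 # quantize roughly half the weights (via noise) during training
    bits=8,
    method="histogram",
    update_step=1000,      # recompute scale / zero_point every 1000 forward passes
)
tokens = torch.randint(0, 1000, (2, 16))

emb.train()
out_train = emb(tokens)    # partially quantized weights, STE gradients flow to emb.weight
emb.eval()
out_eval = emb(tokens)     # p is forced to 1, i.e. fully quantized weights
```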
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh
deleted file mode 100644
index c6d6aaa193f6059299bc98909324fe4b9b060372..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/backtranslation/tokenized_bleu.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-
-if [ $# -ne 5 ]; then
- echo "usage: $0 [dataset=wmt14/full] [langpair=en-de] [databin] [bpecode] [model]"
- exit
-fi
-
-
-DATASET=$1
-LANGPAIR=$2
-DATABIN=$3
-BPECODE=$4
-MODEL=$5
-
-SRCLANG=$(echo $LANGPAIR | cut -d '-' -f 1)
-TGTLANG=$(echo $LANGPAIR | cut -d '-' -f 2)
-
-
-BPEROOT=examples/backtranslation/subword-nmt/subword_nmt
-if [ ! -e $BPEROOT ]; then
- BPEROOT=subword-nmt/subword_nmt
- if [ ! -e $BPEROOT ]; then
- echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
- git clone https://github.com/rsennrich/subword-nmt.git
- fi
-fi
-
-
-TMP_REF=$(mktemp)
-
-sacrebleu -t $DATASET -l $LANGPAIR --echo ref -q \
-| sacremoses normalize -l $TGTLANG -q \
-| sacremoses tokenize -a -l $TGTLANG -q \
-> $TMP_REF
-
-sacrebleu -t $DATASET -l $LANGPAIR --echo src -q \
-| sacremoses normalize -l $SRCLANG -q \
-| sacremoses tokenize -a -l $SRCLANG -q \
-| python $BPEROOT/apply_bpe.py -c $BPECODE \
-| fairseq-interactive $DATABIN --path $MODEL \
- -s $SRCLANG -t $TGTLANG \
- --beam 5 --remove-bpe --buffer-size 1024 --max-tokens 8000 \
-| grep ^H- | cut -f 3- \
-| fairseq-score --ref $TMP_REF
-
-rm -f $TMP_REF
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md
deleted file mode 100644
index f639d300d342f8de1392c98bfc44ec8690188539..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/speech_to_text/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Speech-to-Text (S2T) Modeling
-
-[https://www.aclweb.org/anthology/2020.aacl-demo.6](https://www.aclweb.org/anthology/2020.aacl-demo.6.pdf)
-
-Speech recognition (ASR) and speech-to-text translation (ST) with fairseq.
-
-## Data Preparation
-S2T modeling data consists of source speech features, target text and other optional information
-(source text, speaker id, etc.). Fairseq S2T uses per-dataset-split TSV manifest files
-to store this information. Each data field is represented by a column in the TSV file.
-
-Unlike text token embeddings, speech features (e.g. log mel-scale filter banks) are usually fixed
-during model training and can be pre-computed. The manifest file contains the path to
-either the feature file in NumPy format or the WAV/FLAC audio file. For the latter,
-features will be extracted on-the-fly by fairseq S2T. Optionally, feature/audio files can be packed
-into uncompressed ZIP files (then accessed via byte offset and length) to improve I/O performance.
-
-Fairseq S2T also employs a YAML file for data related configurations: tokenizer type and dictionary path
-for the target text, feature transforms such as CMVN (cepstral mean and variance normalization) and SpecAugment,
-temperature-based resampling, etc.
-
-## Model Training
-Fairseq S2T uses the unified `fairseq-train` interface for model training. It requires arguments `--task speech_to_text`,
- `--arch <model architecture>` and `--config-yaml <config YAML filename>`.
-
-## Inference & Evaluation
-Fairseq S2T uses the unified `fairseq-generate`/`fairseq-interactive` interface for inference and evaluation. It
-requires arguments `--task speech_to_text` and `--config-yaml <config YAML filename>`. The interactive console takes
-audio paths (one per line) as inputs.
-
-
-## Examples
-- [Speech Recognition (ASR) on LibriSpeech](docs/librispeech_example.md)
-
-- [Speech-to-Text Translation (ST) on MuST-C](docs/mustc_example.md)
-
-- [Speech-to-Text Translation (ST) on CoVoST 2](docs/covost_example.md)
-
-- [Speech-to-Text Translation (ST) on Multilingual TEDx](docs/mtedx_example.md)
-
-- [Simultaneous Speech-to-Text Translation (SimulST) on MuST-C](docs/simulst_mustc_example.md)
-
-## Updates
-- 02/04/2021: Added interactive decoding (`fairseq-interactive`) support. Examples:
- [ASR (LibriSpeech)](docs/librispeech_example.md#interactive-decoding)
- and [ST (CoVoST 2)](docs/covost_example.md#interactive-decoding).
-- 01/08/2021: Several fixes for S2T Transformer model, inference-time de-tokenization, scorer configuration and data
- preparation scripts. We also add pre-trained models to the examples and revise the instructions.
- Breaking changes: the data preparation scripts now extract filterbank features without CMVN. CMVN is instead applied
- on-the-fly (defined in the config YAML).
-
-## What's Next
-- We are migrating the old fairseq [ASR example](../speech_recognition) into this S2T framework and
- merging the features from both sides.
-- The following papers also base their experiments on fairseq S2T. We are adding more examples for replication.
- - [Improving Cross-Lingual Transfer Learning for End-to-End Speech Recognition with Speech Translation (Wang et al., 2020)](https://arxiv.org/abs/2006.05474)
- - [Self-Supervised Representations Improve End-to-End Speech Translation (Wu et al., 2020)](https://arxiv.org/abs/2006.12124)
- - [Self-Training for End-to-End Speech Translation (Pino et al., 2020)](https://arxiv.org/abs/2006.02490)
- - [CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus (Wang et al., 2020)](https://arxiv.org/abs/2002.01320)
- - [Harnessing Indirect Training Data for End-to-End Automatic Speech Translation: Tricks of the Trade (Pino et al., 2019)](https://arxiv.org/abs/1909.06515)
-
-## Citation
-Please cite as:
-```
-@inproceedings{wang2020fairseqs2t,
- title = {fairseq S2T: Fast Speech-to-Text Modeling with fairseq},
- author = {Changhan Wang and Yun Tang and Xutai Ma and Anne Wu and Dmytro Okhonko and Juan Pino},
- booktitle = {Proceedings of the 2020 Conference of the Asian Chapter of the Association for Computational Linguistics (AACL): System Demonstrations},
- year = {2020},
-}
-
-@inproceedings{ott2019fairseq,
- title = {fairseq: A Fast, Extensible Toolkit for Sequence Modeling},
- author = {Myle Ott and Sergey Edunov and Alexei Baevski and Angela Fan and Sam Gross and Nathan Ng and David Grangier and Michael Auli},
- booktitle = {Proceedings of NAACL-HLT 2019: Demonstrations},
- year = {2019},
-}
-```
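
The per-split TSV manifests described under "Data Preparation" are plain tab-separated tables with one row per utterance. Below is a rough sketch of writing one by hand; the `id`/`audio`/`n_frames`/`tgt_text` column names are an assumption about the usual layout, so check the data preparation scripts for the exact schema:

```python
import csv

# one row per utterance; "audio" may point to a pre-computed .npy feature file
# or directly to a WAV/FLAC file for on-the-fly feature extraction
rows = [
    {"id": "utt0001", "audio": "feats/utt0001.npy", "n_frames": 624, "tgt_text": "hello world"},
    {"id": "utt0002", "audio": "audio/utt0002.flac", "n_frames": 810, "tgt_text": "good morning"},
]

with open("train_asr.tsv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["id", "audio", "n_frames", "tgt_text"], delimiter="\t")
    writer.writeheader()
    writer.writerows(rows)
```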
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/wav2vec/unsupervised/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py
deleted file mode 100644
index 8075bba6a9efc5f8421368ee0b2ae66afe3f5009..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/data/pad_dataset.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.data import data_utils
-
-from . import BaseWrapperDataset
-
-
-class PadDataset(BaseWrapperDataset):
- def __init__(self, dataset, pad_idx, left_pad):
- super().__init__(dataset)
- self.pad_idx = pad_idx
- self.left_pad = left_pad
-
- def collater(self, samples):
- return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)
-
-
-class LeftPadDataset(PadDataset):
- def __init__(self, dataset, pad_idx):
- super().__init__(dataset, pad_idx, left_pad=True)
-
-
-class RightPadDataset(PadDataset):
- def __init__(self, dataset, pad_idx):
- super().__init__(dataset, pad_idx, left_pad=False)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py
deleted file mode 100644
index e1e66a7d50fa1b1b313e9d1a6e7862ac9bfaa074..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/lstm.py
+++ /dev/null
@@ -1,753 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Dict, List, Optional, Tuple
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.models import (
- FairseqEncoder,
- FairseqEncoderDecoderModel,
- FairseqIncrementalDecoder,
- register_model,
- register_model_architecture,
-)
-from fairseq.modules import AdaptiveSoftmax, FairseqDropout
-from torch import Tensor
-
-
-DEFAULT_MAX_SOURCE_POSITIONS = 1e5
-DEFAULT_MAX_TARGET_POSITIONS = 1e5
-
-
-@register_model("lstm")
-class LSTMModel(FairseqEncoderDecoderModel):
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
-
- @staticmethod
- def add_args(parser):
- """Add model-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--dropout', type=float, metavar='D',
- help='dropout probability')
- parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained encoder embedding')
- parser.add_argument('--encoder-freeze-embed', action='store_true',
- help='freeze encoder embeddings')
- parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
- help='encoder hidden size')
- parser.add_argument('--encoder-layers', type=int, metavar='N',
- help='number of encoder layers')
- parser.add_argument('--encoder-bidirectional', action='store_true',
- help='make all layers of encoder bidirectional')
- parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
- help='path to pre-trained decoder embedding')
- parser.add_argument('--decoder-freeze-embed', action='store_true',
- help='freeze decoder embeddings')
- parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
- help='decoder hidden size')
- parser.add_argument('--decoder-layers', type=int, metavar='N',
- help='number of decoder layers')
- parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
- help='decoder output embedding dimension')
- parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
- help='decoder attention')
- parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
- help='comma separated list of adaptive softmax cutoff points. '
- 'Must be used with adaptive_loss criterion')
- parser.add_argument('--share-decoder-input-output-embed', default=False,
- action='store_true',
- help='share decoder input and output embeddings')
- parser.add_argument('--share-all-embeddings', default=False, action='store_true',
- help='share encoder, decoder and output embeddings'
- ' (requires shared dictionary and embed dim)')
-
- # Granular dropout settings (if not specified these default to --dropout)
- parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
- help='dropout probability for encoder input embedding')
- parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
- help='dropout probability for encoder output')
- parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
- help='dropout probability for decoder input embedding')
- parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
- help='dropout probability for decoder output')
- # fmt: on
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
- # make sure that all args are properly defaulted (in case there are any new ones)
- base_architecture(args)
-
- if args.encoder_layers != args.decoder_layers:
- raise ValueError("--encoder-layers must match --decoder-layers")
-
- max_source_positions = getattr(
- args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
- )
- max_target_positions = getattr(
- args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS
- )
-
- def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- embed_dict = utils.parse_embedding(embed_path)
- utils.print_embed_overlap(embed_dict, dictionary)
- return utils.load_embedding(embed_dict, dictionary, embed_tokens)
-
- if args.encoder_embed_path:
- pretrained_encoder_embed = load_pretrained_embedding_from_file(
- args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
- )
- else:
- num_embeddings = len(task.source_dictionary)
- pretrained_encoder_embed = Embedding(
- num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
- )
-
- if args.share_all_embeddings:
- # double check all parameters combinations are valid
- if task.source_dictionary != task.target_dictionary:
- raise ValueError("--share-all-embeddings requires a joint dictionary")
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
-                    "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to "
- "match --decoder-embed-dim"
- )
- pretrained_decoder_embed = pretrained_encoder_embed
- args.share_decoder_input_output_embed = True
- else:
- # separate decoder input embeddings
- pretrained_decoder_embed = None
- if args.decoder_embed_path:
- pretrained_decoder_embed = load_pretrained_embedding_from_file(
- args.decoder_embed_path,
- task.target_dictionary,
- args.decoder_embed_dim,
- )
- # one last double check of parameter combinations
- if args.share_decoder_input_output_embed and (
- args.decoder_embed_dim != args.decoder_out_embed_dim
- ):
- raise ValueError(
-                "--share-decoder-input-output-embed requires "
- "--decoder-embed-dim to match --decoder-out-embed-dim"
- )
-
- if args.encoder_freeze_embed:
- pretrained_encoder_embed.weight.requires_grad = False
- if args.decoder_freeze_embed:
- pretrained_decoder_embed.weight.requires_grad = False
-
- encoder = LSTMEncoder(
- dictionary=task.source_dictionary,
- embed_dim=args.encoder_embed_dim,
- hidden_size=args.encoder_hidden_size,
- num_layers=args.encoder_layers,
- dropout_in=args.encoder_dropout_in,
- dropout_out=args.encoder_dropout_out,
- bidirectional=args.encoder_bidirectional,
- pretrained_embed=pretrained_encoder_embed,
- max_source_positions=max_source_positions,
- )
- decoder = LSTMDecoder(
- dictionary=task.target_dictionary,
- embed_dim=args.decoder_embed_dim,
- hidden_size=args.decoder_hidden_size,
- out_embed_dim=args.decoder_out_embed_dim,
- num_layers=args.decoder_layers,
- dropout_in=args.decoder_dropout_in,
- dropout_out=args.decoder_dropout_out,
- attention=utils.eval_bool(args.decoder_attention),
- encoder_output_units=encoder.output_units,
- pretrained_embed=pretrained_decoder_embed,
- share_input_output_embed=args.share_decoder_input_output_embed,
- adaptive_softmax_cutoff=(
- utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
- if args.criterion == "adaptive_loss"
- else None
- ),
- max_target_positions=max_target_positions,
- residuals=False,
- )
- return cls(encoder, decoder)
-
- def forward(
- self,
- src_tokens,
- src_lengths,
- prev_output_tokens,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- ):
- encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)
- decoder_out = self.decoder(
- prev_output_tokens,
- encoder_out=encoder_out,
- incremental_state=incremental_state,
- )
- return decoder_out
-
-
-class LSTMEncoder(FairseqEncoder):
- """LSTM encoder."""
-
- def __init__(
- self,
- dictionary,
- embed_dim=512,
- hidden_size=512,
- num_layers=1,
- dropout_in=0.1,
- dropout_out=0.1,
- bidirectional=False,
- left_pad=True,
- pretrained_embed=None,
- padding_idx=None,
- max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS,
- ):
- super().__init__(dictionary)
- self.num_layers = num_layers
- self.dropout_in_module = FairseqDropout(
- dropout_in*1.0, module_name=self.__class__.__name__
- )
- self.dropout_out_module = FairseqDropout(
- dropout_out*1.0, module_name=self.__class__.__name__
- )
- self.bidirectional = bidirectional
- self.hidden_size = hidden_size
- self.max_source_positions = max_source_positions
-
- num_embeddings = len(dictionary)
- self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad()
- if pretrained_embed is None:
- self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
- else:
- self.embed_tokens = pretrained_embed
-
- self.lstm = LSTM(
- input_size=embed_dim,
- hidden_size=hidden_size,
- num_layers=num_layers,
- dropout=self.dropout_out_module.p if num_layers > 1 else 0.0,
- bidirectional=bidirectional,
- )
- self.left_pad = left_pad
-
- self.output_units = hidden_size
- if bidirectional:
- self.output_units *= 2
-
- def forward(
- self,
- src_tokens: Tensor,
- src_lengths: Tensor,
- enforce_sorted: bool = True,
- ):
- """
- Args:
- src_tokens (LongTensor): tokens in the source language of
- shape `(batch, src_len)`
- src_lengths (LongTensor): lengths of each source sentence of
- shape `(batch)`
- enforce_sorted (bool, optional): if True, `src_tokens` is
- expected to contain sequences sorted by length in a
- decreasing order. If False, this condition is not
- required. Default: True.
- """
- if self.left_pad:
- # nn.utils.rnn.pack_padded_sequence requires right-padding;
- # convert left-padding to right-padding
- src_tokens = utils.convert_padding_direction(
- src_tokens,
- torch.zeros_like(src_tokens).fill_(self.padding_idx),
- left_to_right=True,
- )
-
- bsz, seqlen = src_tokens.size()
-
- # embed tokens
- x = self.embed_tokens(src_tokens)
- x = self.dropout_in_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # pack embedded source tokens into a PackedSequence
- packed_x = nn.utils.rnn.pack_padded_sequence(
- x, src_lengths.cpu(), enforce_sorted=enforce_sorted
- )
-
- # apply LSTM
- if self.bidirectional:
- state_size = 2 * self.num_layers, bsz, self.hidden_size
- else:
- state_size = self.num_layers, bsz, self.hidden_size
- h0 = x.new_zeros(*state_size)
- c0 = x.new_zeros(*state_size)
- packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
-
- # unpack outputs and apply dropout
- x, _ = nn.utils.rnn.pad_packed_sequence(
- packed_outs, padding_value=self.padding_idx * 1.0
- )
- x = self.dropout_out_module(x)
- assert list(x.size()) == [seqlen, bsz, self.output_units]
-
- if self.bidirectional:
- final_hiddens = self.combine_bidir(final_hiddens, bsz)
- final_cells = self.combine_bidir(final_cells, bsz)
-
- encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
-
- return tuple(
- (
- x, # seq_len x batch x hidden
- final_hiddens, # num_layers x batch x num_directions*hidden
- final_cells, # num_layers x batch x num_directions*hidden
- encoder_padding_mask, # seq_len x batch
- )
- )
-
- def combine_bidir(self, outs, bsz: int):
- out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()
- return out.view(self.num_layers, bsz, -1)
-
- def reorder_encoder_out(self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order):
- return tuple(
- (
- encoder_out[0].index_select(1, new_order),
- encoder_out[1].index_select(1, new_order),
- encoder_out[2].index_select(1, new_order),
- encoder_out[3].index_select(1, new_order),
- )
- )
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return self.max_source_positions
-
-
-class AttentionLayer(nn.Module):
- def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False):
- super().__init__()
-
- self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
- self.output_proj = Linear(
- input_embed_dim + source_embed_dim, output_embed_dim, bias=bias
- )
-
- def forward(self, input, source_hids, encoder_padding_mask):
- # input: bsz x input_embed_dim
- # source_hids: srclen x bsz x source_embed_dim
-
- # x: bsz x source_embed_dim
- x = self.input_proj(input)
-
- # compute attention
- attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
-
- # don't attend over padding
- if encoder_padding_mask is not None:
- attn_scores = (
- attn_scores.float()
- .masked_fill_(encoder_padding_mask, float("-inf"))
- .type_as(attn_scores)
- ) # FP16 support: cast to float and back
-
- attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
-
- # sum weighted sources
- x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
-
- x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))
- return x, attn_scores
-
-
-class LSTMDecoder(FairseqIncrementalDecoder):
- """LSTM decoder."""
-
- def __init__(
- self,
- dictionary,
- embed_dim=512,
- hidden_size=512,
- out_embed_dim=512,
- num_layers=1,
- dropout_in=0.1,
- dropout_out=0.1,
- attention=True,
- encoder_output_units=512,
- pretrained_embed=None,
- share_input_output_embed=False,
- adaptive_softmax_cutoff=None,
- max_target_positions=DEFAULT_MAX_TARGET_POSITIONS,
- residuals=False,
- ):
- super().__init__(dictionary)
- self.dropout_in_module = FairseqDropout(
- dropout_in*1.0, module_name=self.__class__.__name__
- )
- self.dropout_out_module = FairseqDropout(
- dropout_out*1.0, module_name=self.__class__.__name__
- )
- self.hidden_size = hidden_size
- self.share_input_output_embed = share_input_output_embed
- self.need_attn = True
- self.max_target_positions = max_target_positions
- self.residuals = residuals
- self.num_layers = num_layers
-
- self.adaptive_softmax = None
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- if pretrained_embed is None:
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- else:
- self.embed_tokens = pretrained_embed
-
- self.encoder_output_units = encoder_output_units
- if encoder_output_units != hidden_size and encoder_output_units != 0:
- self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)
- self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)
- else:
- self.encoder_hidden_proj = self.encoder_cell_proj = None
-
- # disable input feeding if there is no encoder
- # input feeding is described in arxiv.org/abs/1508.04025
- input_feed_size = 0 if encoder_output_units == 0 else hidden_size
- self.layers = nn.ModuleList(
- [
- LSTMCell(
- input_size=input_feed_size + embed_dim
- if layer == 0
- else hidden_size,
- hidden_size=hidden_size,
- )
- for layer in range(num_layers)
- ]
- )
-
- if attention:
- # TODO make bias configurable
- self.attention = AttentionLayer(
- hidden_size, encoder_output_units, hidden_size, bias=False
- )
- else:
- self.attention = None
-
- if hidden_size != out_embed_dim:
- self.additional_fc = Linear(hidden_size, out_embed_dim)
-
- if adaptive_softmax_cutoff is not None:
- # setting adaptive_softmax dropout to dropout_out for now but can be redefined
- self.adaptive_softmax = AdaptiveSoftmax(
- num_embeddings,
- hidden_size,
- adaptive_softmax_cutoff,
- dropout=dropout_out,
- )
- elif not self.share_input_output_embed:
- self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
-
- def forward(
- self,
- prev_output_tokens,
- encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- src_lengths: Optional[Tensor] = None,
- ):
- x, attn_scores = self.extract_features(
- prev_output_tokens, encoder_out, incremental_state
- )
- return self.output_layer(x), attn_scores
-
- def extract_features(
- self,
- prev_output_tokens,
- encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
- incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
- ):
- """
- Similar to *forward* but only return features.
- """
- # get outputs from encoder
- if encoder_out is not None:
- encoder_outs = encoder_out[0]
- encoder_hiddens = encoder_out[1]
- encoder_cells = encoder_out[2]
- encoder_padding_mask = encoder_out[3]
- else:
- encoder_outs = torch.empty(0)
- encoder_hiddens = torch.empty(0)
- encoder_cells = torch.empty(0)
- encoder_padding_mask = torch.empty(0)
- srclen = encoder_outs.size(0)
-
- if incremental_state is not None and len(incremental_state) > 0:
- prev_output_tokens = prev_output_tokens[:, -1:]
-
- bsz, seqlen = prev_output_tokens.size()
-
- # embed tokens
- x = self.embed_tokens(prev_output_tokens)
- x = self.dropout_in_module(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # initialize previous states (or get from cache during incremental generation)
- if incremental_state is not None and len(incremental_state) > 0:
- prev_hiddens, prev_cells, input_feed = self.get_cached_state(
- incremental_state
- )
- elif encoder_out is not None:
- # setup recurrent cells
- prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)]
- prev_cells = [encoder_cells[i] for i in range(self.num_layers)]
- if self.encoder_hidden_proj is not None:
- prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens]
- prev_cells = [self.encoder_cell_proj(y) for y in prev_cells]
- input_feed = x.new_zeros(bsz, self.hidden_size)
- else:
- # setup zero cells, since there is no encoder
- zero_state = x.new_zeros(bsz, self.hidden_size)
- prev_hiddens = [zero_state for i in range(self.num_layers)]
- prev_cells = [zero_state for i in range(self.num_layers)]
- input_feed = None
-
- assert (
- srclen > 0 or self.attention is None
- ), "attention is not supported if there are no encoder outputs"
- attn_scores: Optional[Tensor] = (
- x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None
- )
- outs = []
- for j in range(seqlen):
- # input feeding: concatenate context vector from previous time step
- if input_feed is not None:
- input = torch.cat((x[j, :, :], input_feed), dim=1)
- else:
- input = x[j]
-
- for i, rnn in enumerate(self.layers):
- # recurrent cell
- hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
-
- # hidden state becomes the input to the next layer
- input = self.dropout_out_module(hidden)
- if self.residuals:
- input = input + prev_hiddens[i]
-
- # save state for next time step
- prev_hiddens[i] = hidden
- prev_cells[i] = cell
-
- # apply attention using the last layer's hidden state
- if self.attention is not None:
- assert attn_scores is not None
- out, attn_scores[:, j, :] = self.attention(
- hidden, encoder_outs, encoder_padding_mask
- )
- else:
- out = hidden
- out = self.dropout_out_module(out)
-
- # input feeding
- if input_feed is not None:
- input_feed = out
-
- # save final output
- outs.append(out)
-
- # Stack all the necessary tensors together and store
- prev_hiddens_tensor = torch.stack(prev_hiddens)
- prev_cells_tensor = torch.stack(prev_cells)
- cache_state = torch.jit.annotate(
- Dict[str, Optional[Tensor]],
- {
- "prev_hiddens": prev_hiddens_tensor,
- "prev_cells": prev_cells_tensor,
- "input_feed": input_feed,
- },
- )
- self.set_incremental_state(incremental_state, "cached_state", cache_state)
-
- # collect outputs across time steps
- x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
-
- # T x B x C -> B x T x C
- x = x.transpose(1, 0)
-
- if hasattr(self, "additional_fc") and self.adaptive_softmax is None:
- x = self.additional_fc(x)
- x = self.dropout_out_module(x)
- # srclen x tgtlen x bsz -> bsz x tgtlen x srclen
- if not self.training and self.need_attn and self.attention is not None:
- assert attn_scores is not None
- attn_scores = attn_scores.transpose(0, 2)
- else:
- attn_scores = None
- return x, attn_scores
-
- def output_layer(self, x):
- """Project features to the vocabulary size."""
- if self.adaptive_softmax is None:
- if self.share_input_output_embed:
- x = F.linear(x, self.embed_tokens.weight)
- else:
- x = self.fc_out(x)
- return x
-
- def get_cached_state(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- ) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]:
- cached_state = self.get_incremental_state(incremental_state, "cached_state")
- assert cached_state is not None
- prev_hiddens_ = cached_state["prev_hiddens"]
- assert prev_hiddens_ is not None
- prev_cells_ = cached_state["prev_cells"]
- assert prev_cells_ is not None
- prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)]
- prev_cells = [prev_cells_[j] for j in range(self.num_layers)]
- input_feed = cached_state[
- "input_feed"
- ] # can be None for decoder-only language models
- return prev_hiddens, prev_cells, input_feed
-
- def reorder_incremental_state(
- self,
- incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
- new_order: Tensor,
- ):
- if incremental_state is None or len(incremental_state) == 0:
- return
- prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state)
- prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens]
- prev_cells = [p.index_select(0, new_order) for p in prev_cells]
- if input_feed is not None:
- input_feed = input_feed.index_select(0, new_order)
- cached_state_new = torch.jit.annotate(
- Dict[str, Optional[Tensor]],
- {
- "prev_hiddens": torch.stack(prev_hiddens),
- "prev_cells": torch.stack(prev_cells),
- "input_feed": input_feed,
- },
- )
-        self.set_incremental_state(incremental_state, "cached_state", cached_state_new)
- return
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- return self.max_target_positions
-
- def make_generation_fast_(self, need_attn=False, **kwargs):
- self.need_attn = need_attn
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- nn.init.uniform_(m.weight, -0.1, 0.1)
- nn.init.constant_(m.weight[padding_idx], 0)
- return m
-
-
-def LSTM(input_size, hidden_size, **kwargs):
- m = nn.LSTM(input_size, hidden_size, **kwargs)
- for name, param in m.named_parameters():
- if "weight" in name or "bias" in name:
- param.data.uniform_(-0.1, 0.1)
- return m
-
-
-def LSTMCell(input_size, hidden_size, **kwargs):
- m = nn.LSTMCell(input_size, hidden_size, **kwargs)
- for name, param in m.named_parameters():
- if "weight" in name or "bias" in name:
- param.data.uniform_(-0.1, 0.1)
- return m
-
-
-def Linear(in_features, out_features, bias=True, dropout=0.0):
- """Linear layer (input: N x T x C)"""
- m = nn.Linear(in_features, out_features, bias=bias)
- m.weight.data.uniform_(-0.1, 0.1)
- if bias:
- m.bias.data.uniform_(-0.1, 0.1)
- return m
-
-
-@register_model_architecture("lstm", "lstm")
-def base_architecture(args):
- args.dropout = getattr(args, "dropout", 0.1)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
- args.encoder_hidden_size = getattr(
- args, "encoder_hidden_size", args.encoder_embed_dim
- )
- args.encoder_layers = getattr(args, "encoder_layers", 1)
- args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
- args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
- args.decoder_hidden_size = getattr(
- args, "decoder_hidden_size", args.decoder_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 1)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
- args.decoder_attention = getattr(args, "decoder_attention", "1")
- args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.adaptive_softmax_cutoff = getattr(
- args, "adaptive_softmax_cutoff", "10000,50000,200000"
- )
-
-
-@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en")
-def lstm_wiseman_iwslt_de_en(args):
- args.dropout = getattr(args, "dropout", 0.1)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
- args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
- args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
- base_architecture(args)
-
-
-@register_model_architecture("lstm", "lstm_luong_wmt_en_de")
-def lstm_luong_wmt_en_de(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000)
- args.encoder_layers = getattr(args, "encoder_layers", 4)
- args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000)
- args.decoder_layers = getattr(args, "decoder_layers", 4)
- args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000)
- args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0)
- base_architecture(args)
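
The `AttentionLayer` above is standard Luong-style attention: project the decoder state into the encoder's space, dot it against every source position, mask out padding, softmax over the source axis, and take the weighted sum of encoder states. A compact standalone sketch of that score/mix step with toy shapes (it skips the final `output_proj` + tanh and does not use the fairseq modules):

```python
import torch
import torch.nn.functional as F

srclen, bsz, enc_dim, dec_dim = 7, 2, 16, 12
source_hids = torch.randn(srclen, bsz, enc_dim)      # encoder outputs (srclen x bsz x enc_dim)
dec_state = torch.randn(bsz, dec_dim)                # current decoder hidden state
input_proj = torch.nn.Linear(dec_dim, enc_dim, bias=False)

x = input_proj(dec_state)                            # bsz x enc_dim
scores = (source_hids * x.unsqueeze(0)).sum(dim=2)   # srclen x bsz, dot product per source position
pad_mask = torch.zeros(srclen, bsz, dtype=torch.bool)
pad_mask[-2:, 0] = True                              # pretend sentence 0 has two padded time steps
scores = scores.masked_fill(pad_mask, float("-inf"))
attn = F.softmax(scores, dim=0)                      # normalise over the source length
context = (attn.unsqueeze(2) * source_hids).sum(dim=0)   # bsz x enc_dim weighted sum of states
print(attn.sum(dim=0))                               # each column sums to 1
```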
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py
deleted file mode 100644
index af9646740a79ce720eeba513e2d994b39509ac49..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/transformer/transformer_legacy.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from fairseq.dataclass.utils import gen_parser_from_dataclass
-from fairseq.models import (
- register_model,
- register_model_architecture,
-)
-from fairseq.models.transformer.transformer_config import (
- TransformerConfig,
- DEFAULT_MAX_SOURCE_POSITIONS,
- DEFAULT_MAX_TARGET_POSITIONS,
- DEFAULT_MIN_PARAMS_TO_WRAP,
-)
-from fairseq.models.transformer.transformer_base import (
- TransformerModelBase,
-)
-
-
-@register_model("transformer")
-class TransformerModel(TransformerModelBase):
- """
- This is the legacy implementation of the transformer model that
- uses argparse for configuration.
- """
-
- @classmethod
- def hub_models(cls):
- # fmt: off
-
- def moses_subword(path):
- return {
- 'path': path,
- 'tokenizer': 'moses',
- 'bpe': 'subword_nmt',
- }
-
- def moses_fastbpe(path):
- return {
- 'path': path,
- 'tokenizer': 'moses',
- 'bpe': 'fastbpe',
- }
-
- def spm(path):
- return {
- 'path': path,
- 'bpe': 'sentencepiece',
- 'tokenizer': 'space',
- }
-
- return {
- 'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
- 'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
- 'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
- 'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
- 'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
- 'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
- 'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
- 'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
- 'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
- 'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
- 'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
- 'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
- 'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
- 'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
- 'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
- 'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
- 'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
- 'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'),
- 'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'),
- }
- # fmt: on
-
- def __init__(self, args, encoder, decoder):
- cfg = TransformerConfig.from_namespace(args)
- super().__init__(cfg, encoder, decoder)
- self.args = args
-
- @classmethod
- def add_args(cls, parser):
- """Add model-specific arguments to the parser."""
- # we want to build the args recursively in this case.
- # do not set defaults so that settings defaults from various architectures still works
- gen_parser_from_dataclass(
- parser, TransformerConfig(), delete_default=True, with_prefix=""
- )
-
- @classmethod
- def build_model(cls, args, task):
- """Build a new model instance."""
-
- # make sure all arguments are present in older models
- base_architecture(args)
-
- if args.encoder_layers_to_keep:
- args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
- if args.decoder_layers_to_keep:
- args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
-
- if getattr(args, "max_source_positions", None) is None:
- args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
- if getattr(args, "max_target_positions", None) is None:
- args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
-
- src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
-
- if args.share_all_embeddings:
- if src_dict != tgt_dict:
- raise ValueError("--share-all-embeddings requires a joined dictionary")
- if args.encoder_embed_dim != args.decoder_embed_dim:
- raise ValueError(
- "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
- )
- if args.decoder_embed_path and (
- args.decoder_embed_path != args.encoder_embed_path
- ):
- raise ValueError(
- "--share-all-embeddings not compatible with --decoder-embed-path"
- )
- args.share_decoder_input_output_embed = True
-
- if getattr(args, "offload_activations", False):
- args.checkpoint_activations = True # offloading implies checkpointing
-
- if not args.share_all_embeddings:
- args.min_params_to_wrap = getattr(
- args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
- )
- cfg = TransformerConfig.from_namespace(args)
- return super().build_model(cfg, task)
-
- @classmethod
- def build_embedding(cls, args, dictionary, embed_dim, path=None):
- return super().build_embedding(
- TransformerConfig.from_namespace(args), dictionary, embed_dim, path
- )
-
- @classmethod
- def build_encoder(cls, args, src_dict, embed_tokens):
- return super().build_encoder(
- TransformerConfig.from_namespace(args), src_dict, embed_tokens
- )
-
- @classmethod
- def build_decoder(cls, args, tgt_dict, embed_tokens):
- return super().build_decoder(
- TransformerConfig.from_namespace(args), tgt_dict, embed_tokens
- )
-
-
-# architectures
-
-
-@register_model_architecture("transformer", "transformer_tiny")
-def tiny_architecture(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
- args.encoder_layers = getattr(args, "encoder_layers", 2)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
- args.decoder_layers = getattr(args, "decoder_layers", 2)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
- return base_architecture(args)
-
-
-@register_model_architecture("transformer", "transformer")
-def base_architecture(args):
- args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
- args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
- args.decoder_ffn_embed_dim = getattr(
- args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
- )
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
- args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
- args.attention_dropout = getattr(args, "attention_dropout", 0.0)
- args.activation_dropout = getattr(args, "activation_dropout", 0.0)
- args.activation_fn = getattr(args, "activation_fn", "relu")
- args.dropout = getattr(args, "dropout", 0.1)
- args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
- args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
- args.share_decoder_input_output_embed = getattr(
- args, "share_decoder_input_output_embed", False
- )
- args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
- args.no_token_positional_embeddings = getattr(
- args, "no_token_positional_embeddings", False
- )
- args.adaptive_input = getattr(args, "adaptive_input", False)
- args.no_cross_attention = getattr(args, "no_cross_attention", False)
- args.cross_self_attention = getattr(args, "cross_self_attention", False)
-
- args.decoder_output_dim = getattr(
- args, "decoder_output_dim", args.decoder_embed_dim
- )
- args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
-
- args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
- args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
- args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
- args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
- args.offload_activations = getattr(args, "offload_activations", False)
- if args.offload_activations:
- args.checkpoint_activations = True
- args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
- args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
- args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
- args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
- args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
- args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
- args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
-
-
-@register_model_architecture("transformer", "transformer_iwslt_de_en")
-def transformer_iwslt_de_en(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
- args.encoder_layers = getattr(args, "encoder_layers", 6)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
- args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
- args.decoder_layers = getattr(args, "decoder_layers", 6)
- base_architecture(args)
-
-
-@register_model_architecture("transformer", "transformer_wmt_en_de")
-def transformer_wmt_en_de(args):
- base_architecture(args)
-
-
-# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
-@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
-def transformer_vaswani_wmt_en_de_big(args):
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
- args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
- args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
- args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
- args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
- args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
- args.dropout = getattr(args, "dropout", 0.3)
- base_architecture(args)
-
-
-@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
-def transformer_vaswani_wmt_en_fr_big(args):
- args.dropout = getattr(args, "dropout", 0.1)
- transformer_vaswani_wmt_en_de_big(args)
-
-
-@register_model_architecture("transformer", "transformer_wmt_en_de_big")
-def transformer_wmt_en_de_big(args):
- args.attention_dropout = getattr(args, "attention_dropout", 0.1)
- transformer_vaswani_wmt_en_de_big(args)
-
-
-# default parameters used in tensor2tensor implementation
-@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
-def transformer_wmt_en_de_big_t2t(args):
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
- args.attention_dropout = getattr(args, "attention_dropout", 0.1)
- args.activation_dropout = getattr(args, "activation_dropout", 0.1)
- transformer_vaswani_wmt_en_de_big(args)
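
The hub_models table near the top of this file wires each released WMT/FLORES checkpoint to its tokenizer and BPE settings, and the @register_model_architecture functions above expose the named presets to the --arch flag. A minimal sketch of consuming one of the listed checkpoints through fairseq's torch.hub interface; the keyword arguments mirror the moses_fastbpe() helper and are assumptions here, as is network access for the download::

    import torch

    # Download and load one of the checkpoints listed in hub_models();
    # 'tokenizer' and 'bpe' follow the moses_fastbpe() defaults above.
    en2de = torch.hub.load(
        "pytorch/fairseq",
        "transformer.wmt19.en-de.single_model",
        tokenizer="moses",
        bpe="fastbpe",
    )
    en2de.eval()
    print(en2de.translate("Machine learning is great!"))
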
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py
deleted file mode 100644
index 4dc73b8c7379970dc0bcc16fcb088a64a1bd7e3b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/speech_recognition/test_vggtransformer.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python3
-
-# import models/encoder/decoder to be tested
-from examples.speech_recognition.models.vggtransformer import (
- TransformerDecoder,
- VGGTransformerEncoder,
- VGGTransformerModel,
- vggtransformer_1,
- vggtransformer_2,
- vggtransformer_base,
-)
-
-# import base test class
-from .asr_test_base import (
- DEFAULT_TEST_VOCAB_SIZE,
- TestFairseqDecoderBase,
- TestFairseqEncoderBase,
- TestFairseqEncoderDecoderModelBase,
- get_dummy_dictionary,
- get_dummy_encoder_output,
- get_dummy_input,
-)
-
-
-class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
- def setUp(self):
- def override_config(args):
- """
-            vggtransformer_1 uses 14 transformer layers, which is too
-            expensive for testing purposes. For a fast turn-around test,
-            reduce the number of layers to 3.
- """
- args.transformer_enc_config = (
- "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
- )
-
- super().setUp()
- extra_args_setter = [vggtransformer_1, override_config]
-
- self.setUpModel(VGGTransformerModel, extra_args_setter)
- self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
-
-
-class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
- def setUp(self):
- def override_config(args):
- """
-            vggtransformer_2 uses 16 transformer layers, which is too
-            expensive for testing purposes. For a fast turn-around test,
-            reduce the number of layers to 3.
- """
- args.transformer_enc_config = (
- "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
- )
-
- super().setUp()
- extra_args_setter = [vggtransformer_2, override_config]
-
- self.setUpModel(VGGTransformerModel, extra_args_setter)
- self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
-
-
-class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
- def setUp(self):
- def override_config(args):
- """
-            vggtransformer_base uses 12 transformer layers, which is too
-            expensive for testing purposes. For a fast turn-around test,
-            reduce the number of layers to 3.
- """
- args.transformer_enc_config = (
- "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3"
- )
-
- super().setUp()
- extra_args_setter = [vggtransformer_base, override_config]
-
- self.setUpModel(VGGTransformerModel, extra_args_setter)
- self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
-
-
-class VGGTransformerEncoderTest(TestFairseqEncoderBase):
- def setUp(self):
- super().setUp()
-
- self.setUpInput(get_dummy_input(T=50, D=80, B=5))
-
- def test_forward(self):
- print("1. test standard vggtransformer")
- self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
- super().test_forward()
- print("2. test vggtransformer with limited right context")
- self.setUpEncoder(
- VGGTransformerEncoder(
- input_feat_per_channel=80, transformer_context=(-1, 5)
- )
- )
- super().test_forward()
- print("3. test vggtransformer with limited left context")
- self.setUpEncoder(
- VGGTransformerEncoder(
- input_feat_per_channel=80, transformer_context=(5, -1)
- )
- )
- super().test_forward()
- print("4. test vggtransformer with limited right context and sampling")
- self.setUpEncoder(
- VGGTransformerEncoder(
- input_feat_per_channel=80,
- transformer_context=(-1, 12),
- transformer_sampling=(2, 2),
- )
- )
- super().test_forward()
- print("5. test vggtransformer with windowed context and sampling")
- self.setUpEncoder(
- VGGTransformerEncoder(
- input_feat_per_channel=80,
- transformer_context=(12, 12),
- transformer_sampling=(2, 2),
- )
- )
-
-
-class TransformerDecoderTest(TestFairseqDecoderBase):
- def setUp(self):
- super().setUp()
-
- dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
- decoder = TransformerDecoder(dict)
- dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
-
- self.setUpDecoder(decoder)
- self.setUpInput(dummy_encoder_output)
- self.setUpPrevOutputTokens()
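
Each model test above shrinks transformer_enc_config by passing a Python-literal string that repeats one per-layer hyper-parameter tuple three times. A short sketch of what that string expands to; the field names in the comment are an assumption inferred from the values, and the model code is presumed to evaluate the string with eval()::

    cfg_str = "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"

    # The string is a trusted test fixture, so eval() is acceptable here.
    layers = eval(cfg_str)

    print(len(layers))  # 3 -- one tuple per transformer layer
    print(layers[0])    # (1024, 16, 4096, True, 0.15, 0.15, 0.15)
    # presumably (embed_dim, heads, ffn_dim, normalize_before,
    #             dropout, attention_dropout, relu_dropout)
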
diff --git a/spaces/OFA-Sys/expertllama/app.py b/spaces/OFA-Sys/expertllama/app.py
deleted file mode 100644
index a218786da57f59f4eda034bd5f452a5b4829d717..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/expertllama/app.py
+++ /dev/null
@@ -1,135 +0,0 @@
-'''
-CREDIT:
-script adapted from [alpaca](https://huggingface.co/spaces/tloen/alpaca-lora/blob/main/app.py).
-'''
-
-import gradio as gr
-import random
-import time
-import transformers
-import os
-import json
-import torch
-import argparse
-from tqdm import tqdm
-from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
-
-
-def apply_delta(base_model_path, target_model_path, delta_path):
- print(f"Loading the delta weights from {delta_path}")
- delta_tokenizer = LlamaTokenizer.from_pretrained(delta_path, use_fast=False)
- delta = LlamaForCausalLM.from_pretrained(
- delta_path, low_cpu_mem_usage=True, torch_dtype=torch.float16
- )
-
- print(f"Loading the base model from {base_model_path}")
- base_tokenizer = LlamaTokenizer.from_pretrained(base_model_path, use_fast=False)
- base = LlamaForCausalLM.from_pretrained(
- base_model_path, low_cpu_mem_usage=True, torch_dtype=torch.float16
- )
-
-    # following the alpaca training recipe, we add newly initialized special tokens
-    DEFAULT_PAD_TOKEN = "[PAD]"
-    DEFAULT_EOS_TOKEN = "</s>"
-    DEFAULT_BOS_TOKEN = "<s>"
-    DEFAULT_UNK_TOKEN = "<unk>"
- special_tokens_dict = {
- "pad_token": DEFAULT_PAD_TOKEN,
- "eos_token": DEFAULT_EOS_TOKEN,
- "bos_token": DEFAULT_BOS_TOKEN,
- "unk_token": DEFAULT_UNK_TOKEN,
- }
- num_new_tokens = base_tokenizer.add_special_tokens(special_tokens_dict)
- base.resize_token_embeddings(len(base_tokenizer))
- input_embeddings = base.get_input_embeddings().weight.data
- output_embeddings = base.get_output_embeddings().weight.data
-
- input_embeddings[-num_new_tokens:] = 0
- output_embeddings[-num_new_tokens:] = 0
-
- print("Applying the delta")
- target_weights = {}
- for name, param in tqdm(base.state_dict().items(), desc="Applying delta"):
- assert name in delta.state_dict()
- param.data += delta.state_dict()[name]
- target_weights[name] = param.data
-
- print(f"Saving the target model to {target_model_path}")
- base.load_state_dict(target_weights)
- # base.save_pretrained(target_model_path)
- # delta_tokenizer.save_pretrained(target_model_path)
-
- delta = None
-
- return base, delta_tokenizer
-
-
-base_weights = 'decapoda-research/llama-7b-hf'
-target_weights = 'expertllama' # local path
-delta_weights = 'OFA-Sys/expertllama-7b-delta'
-model, tokenizer = apply_delta(base_weights, target_weights, delta_weights)
-model = model.to(torch.float)
-
-if torch.__version__ >= "2":
- model = torch.compile(model)
-
-def respond(
- instruction,
- temperature=0.1,
- top_p=0.75,
- top_k=40,
- num_beams=4,
- max_new_tokens=128,
- **kwargs,
-):
- # prompt wrapper, only single-turn is allowed for now
- prompt = f"### Human:\n{instruction}\n\n### Assistant:\n"
- inputs = tokenizer(
- prompt,
- return_tensors="pt",
- add_special_tokens=False
- )
- generation_config = GenerationConfig(
- temperature=temperature,
- top_p=top_p,
- top_k=top_k,
- num_beams=num_beams,
- **kwargs,
- )
- with torch.no_grad():
- generation_output = model.generate(
- input_ids=inputs["input_ids"],
- generation_config=generation_config,
- return_dict_in_generate=True,
- output_scores=True,
- max_new_tokens=max_new_tokens,
- )
- response = tokenizer.decode(generation_output.sequences[0][:-2]).split("### Assistant:\n", 1)[1]
- return response
-
-
-g = gr.Interface(
- fn=respond,
- inputs=[
- gr.components.Textbox(
- lines=2, label="Instruction"
- ),
- gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
- gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
- gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
- gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
- gr.components.Slider(
- minimum=1, maximum=768, step=1, value=512, label="Max tokens"
- ),
- ],
- outputs=[
-        gr.components.Textbox(
- lines=8,
- label="Output",
- )
- ],
- title="ExpertLLaMA",
- description="ExpertLLaMA is an open-source chatbot trained on expert-like data produced with GPT-3.5, see our [project repo](https://github.com/OFA-Sys/ExpertLLaMA) for details.",
-)
-g.queue(concurrency_count=1)
-g.launch()
\ No newline at end of file
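
apply_delta() above recovers the chat model by adding the released delta tensors to the base LLaMA weights. Assuming the delta was produced by subtracting base weights from the fine-tuned weights (an assumption consistent with `param.data += delta.state_dict()[name]`), the inverse operation looks like this sketch; make_delta and the toy tensors are illustrative only::

    import torch

    def make_delta(base_state, target_state):
        # delta[name] = target - base, so that base + delta == target
        return {name: target_state[name] - base_state[name] for name in target_state}

    base = {"w": torch.tensor([1.0, 2.0])}
    target = {"w": torch.tensor([1.5, 1.0])}
    delta = make_delta(base, target)

    # Re-applying the delta recovers the fine-tuned weights.
    recovered = {name: base[name] + delta[name] for name in base}
    assert torch.allclose(recovered["w"], target["w"])
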
diff --git a/spaces/ORI-Muchim/RaidenTTS/modules.py b/spaces/ORI-Muchim/RaidenTTS/modules.py
deleted file mode 100644
index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000
--- a/spaces/ORI-Muchim/RaidenTTS/modules.py
+++ /dev/null
@@ -1,390 +0,0 @@
-import copy
-import math
-import numpy as np
-import scipy
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm
-
-import commons
-from commons import init_weights, get_padding
-from transforms import piecewise_rational_quadratic_transform
-
-
-LRELU_SLOPE = 0.1
-
-
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-class ConvReluNorm(nn.Module):
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
- super().__init__()
- self.in_channels = in_channels
- self.hidden_channels = hidden_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-        assert n_layers > 1, "Number of layers should be larger than 1."
-
- self.conv_layers = nn.ModuleList()
- self.norm_layers = nn.ModuleList()
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.relu_drop = nn.Sequential(
- nn.ReLU(),
- nn.Dropout(p_dropout))
- for _ in range(n_layers-1):
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
- self.norm_layers.append(LayerNorm(hidden_channels))
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask):
- x_org = x
- for i in range(self.n_layers):
- x = self.conv_layers[i](x * x_mask)
- x = self.norm_layers[i](x)
- x = self.relu_drop(x)
- x = x_org + self.proj(x)
- return x * x_mask
-
-
-class DDSConv(nn.Module):
- """
-    Dilated and Depth-Separable Convolution
- """
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
- super().__init__()
- self.channels = channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.p_dropout = p_dropout
-
- self.drop = nn.Dropout(p_dropout)
- self.convs_sep = nn.ModuleList()
- self.convs_1x1 = nn.ModuleList()
- self.norms_1 = nn.ModuleList()
- self.norms_2 = nn.ModuleList()
- for i in range(n_layers):
- dilation = kernel_size ** i
- padding = (kernel_size * dilation - dilation) // 2
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
- groups=channels, dilation=dilation, padding=padding
- ))
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
- self.norms_1.append(LayerNorm(channels))
- self.norms_2.append(LayerNorm(channels))
-
- def forward(self, x, x_mask, g=None):
- if g is not None:
- x = x + g
- for i in range(self.n_layers):
- y = self.convs_sep[i](x * x_mask)
- y = self.norms_1[i](y)
- y = F.gelu(y)
- y = self.convs_1x1[i](y)
- y = self.norms_2[i](y)
- y = F.gelu(y)
- y = self.drop(y)
- x = x + y
- return x * x_mask
-
-
-class WN(torch.nn.Module):
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
- super(WN, self).__init__()
- assert(kernel_size % 2 == 1)
-        self.hidden_channels = hidden_channels
-        self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.p_dropout = p_dropout
-
- self.in_layers = torch.nn.ModuleList()
- self.res_skip_layers = torch.nn.ModuleList()
- self.drop = nn.Dropout(p_dropout)
-
- if gin_channels != 0:
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
-
- for i in range(n_layers):
- dilation = dilation_rate ** i
- padding = int((kernel_size * dilation - dilation) / 2)
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
- dilation=dilation, padding=padding)
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
- self.in_layers.append(in_layer)
-
- # last one is not necessary
- if i < n_layers - 1:
- res_skip_channels = 2 * hidden_channels
- else:
- res_skip_channels = hidden_channels
-
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
- self.res_skip_layers.append(res_skip_layer)
-
- def forward(self, x, x_mask, g=None, **kwargs):
- output = torch.zeros_like(x)
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
-
- if g is not None:
- g = self.cond_layer(g)
-
- for i in range(self.n_layers):
- x_in = self.in_layers[i](x)
- if g is not None:
- cond_offset = i * 2 * self.hidden_channels
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
- else:
- g_l = torch.zeros_like(x_in)
-
- acts = commons.fused_add_tanh_sigmoid_multiply(
- x_in,
- g_l,
- n_channels_tensor)
- acts = self.drop(acts)
-
- res_skip_acts = self.res_skip_layers[i](acts)
- if i < self.n_layers - 1:
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
- x = (x + res_acts) * x_mask
- output = output + res_skip_acts[:,self.hidden_channels:,:]
- else:
- output = output + res_skip_acts
- return output * x_mask
-
- def remove_weight_norm(self):
- if self.gin_channels != 0:
- torch.nn.utils.remove_weight_norm(self.cond_layer)
- for l in self.in_layers:
- torch.nn.utils.remove_weight_norm(l)
- for l in self.res_skip_layers:
- torch.nn.utils.remove_weight_norm(l)
-
-
-class ResBlock1(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
- super(ResBlock1, self).__init__()
- self.convs1 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
- padding=get_padding(kernel_size, dilation[2])))
- ])
- self.convs1.apply(init_weights)
-
- self.convs2 = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
- padding=get_padding(kernel_size, 1)))
- ])
- self.convs2.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c1, c2 in zip(self.convs1, self.convs2):
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c1(xt)
- xt = F.leaky_relu(xt, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c2(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs1:
- remove_weight_norm(l)
- for l in self.convs2:
- remove_weight_norm(l)
-
-
-class ResBlock2(torch.nn.Module):
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
- super(ResBlock2, self).__init__()
- self.convs = nn.ModuleList([
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
- padding=get_padding(kernel_size, dilation[0]))),
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
- padding=get_padding(kernel_size, dilation[1])))
- ])
- self.convs.apply(init_weights)
-
- def forward(self, x, x_mask=None):
- for c in self.convs:
- xt = F.leaky_relu(x, LRELU_SLOPE)
- if x_mask is not None:
- xt = xt * x_mask
- xt = c(xt)
- x = xt + x
- if x_mask is not None:
- x = x * x_mask
- return x
-
- def remove_weight_norm(self):
- for l in self.convs:
- remove_weight_norm(l)
-
-
-class Log(nn.Module):
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
- logdet = torch.sum(-y, [1, 2])
- return y, logdet
- else:
- x = torch.exp(x) * x_mask
- return x
-
-
-class Flip(nn.Module):
- def forward(self, x, *args, reverse=False, **kwargs):
- x = torch.flip(x, [1])
- if not reverse:
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
- return x, logdet
- else:
- return x
-
-
-class ElementwiseAffine(nn.Module):
- def __init__(self, channels):
- super().__init__()
- self.channels = channels
- self.m = nn.Parameter(torch.zeros(channels,1))
- self.logs = nn.Parameter(torch.zeros(channels,1))
-
- def forward(self, x, x_mask, reverse=False, **kwargs):
- if not reverse:
- y = self.m + torch.exp(self.logs) * x
- y = y * x_mask
- logdet = torch.sum(self.logs * x_mask, [1,2])
- return y, logdet
- else:
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
- return x
-
-
-class ResidualCouplingLayer(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- p_dropout=0,
- gin_channels=0,
- mean_only=False):
- assert channels % 2 == 0, "channels should be divisible by 2"
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.half_channels = channels // 2
- self.mean_only = mean_only
-
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
- self.post.weight.data.zero_()
- self.post.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0) * x_mask
- h = self.enc(h, x_mask, g=g)
- stats = self.post(h) * x_mask
- if not self.mean_only:
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
- else:
- m = stats
- logs = torch.zeros_like(m)
-
- if not reverse:
- x1 = m + x1 * torch.exp(logs) * x_mask
- x = torch.cat([x0, x1], 1)
- logdet = torch.sum(logs, [1,2])
- return x, logdet
- else:
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
- x = torch.cat([x0, x1], 1)
- return x
-
-
-class ConvFlow(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
- super().__init__()
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.num_bins = num_bins
- self.tail_bound = tail_bound
- self.half_channels = in_channels // 2
-
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
- self.proj.weight.data.zero_()
- self.proj.bias.data.zero_()
-
- def forward(self, x, x_mask, g=None, reverse=False):
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
- h = self.pre(x0)
- h = self.convs(h, x_mask, g=g)
- h = self.proj(h) * x_mask
-
- b, c, t = x0.shape
-        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*(3*num_bins-1), t] -> [b, c, t, 3*num_bins-1]
-
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
-
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=reverse,
- tails='linear',
- tail_bound=self.tail_bound
- )
-
- x = torch.cat([x0, x1], 1) * x_mask
- logdet = torch.sum(logabsdet * x_mask, [1,2])
- if not reverse:
- return x, logdet
- else:
- return x
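
ResidualCouplingLayer above is an affine coupling flow: half of the channels predict a shift m and log-scale logs for the other half, and the reverse branch exactly undoes the forward branch. A self-contained sketch of that invertibility, with plain random tensors standing in for the WaveNet-conditioned statistics and the mask taken to be all ones::

    import torch

    x1 = torch.randn(2, 4, 8)            # the transformed half of the channels
    m = torch.randn(2, 4, 8)             # shift predicted from the other half
    logs = 0.1 * torch.randn(2, 4, 8)    # log-scale predicted from the other half

    y1 = m + x1 * torch.exp(logs)            # forward branch
    x1_rec = (y1 - m) * torch.exp(-logs)     # reverse branch
    assert torch.allclose(x1, x1_rec, atol=1e-5)

    # log-determinant of the transform, summed as in the module
    logdet = torch.sum(logs, [1, 2])
    print(logdet.shape)  # torch.Size([2])
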
diff --git a/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py b/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py
deleted file mode 100644
index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000
--- a/spaces/OgiKazus/vits-uma-genshin-honkai/attentions.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
-        # pad along the last (column) dimension
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
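
The proximal bias computed by MultiHeadAttention._attention_bias_proximal above is simply -log1p(|i - j|): zero on the diagonal and increasingly negative with distance, so self-attention is nudged toward nearby positions. A tiny worked example for length 4::

    import torch

    length = 4
    r = torch.arange(length, dtype=torch.float32)
    diff = r.unsqueeze(0) - r.unsqueeze(1)
    bias = -torch.log1p(torch.abs(diff))
    print(bias)
    # tensor([[ 0.0000, -0.6931, -1.0986, -1.3863],
    #         [-0.6931,  0.0000, -0.6931, -1.0986],
    #         [-1.0986, -0.6931,  0.0000, -0.6931],
    #         [-1.3863, -1.0986, -0.6931,  0.0000]])
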
diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/__init__.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/diffusionmodules/__init__.py b/spaces/OpenGVLab/InternGPT/iGPT/models/inpainting_src/ldm_inpainting/ldm/modules/diffusionmodules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh b/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh
deleted file mode 100644
index d01e0a39da620e38c1ebf28beead59f286437321..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/third-party/lama/docker/3_evaluate.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-
-if (( $# < 3 ))
-then
- echo "Usage: $0 original_dataset_dir predictions_dir output_dir [other arguments to evaluate_predicts.py]"
- exit 1
-fi
-
-CURDIR="$(dirname $0)"
-SRCDIR="$CURDIR/.."
-SRCDIR="$(realpath $SRCDIR)"
-
-ORIG_DATASET_LOCAL_DIR="$(realpath $1)"
-PREDICTIONS_LOCAL_DIR="$(realpath $2)"
-OUTPUT_LOCAL_DIR="$(realpath $3)"
-shift 3
-
-mkdir -p "$OUTPUT_LOCAL_DIR"
-
-docker run \
- -v "$SRCDIR":/home/user/project \
- -v "$ORIG_DATASET_LOCAL_DIR":/data/orig_dataset \
- -v "$PREDICTIONS_LOCAL_DIR":/data/predictions \
- -v "$OUTPUT_LOCAL_DIR":/data/output \
- -u $(id -u):$(id -g) \
- --name="lama-eval" \
- --rm \
- windj007/lama \
- /home/user/project/bin/evaluate_predicts.py \
- /home/user/project/configs/eval2_cpu.yaml \
- /data/orig_dataset \
- /data/predictions \
- /data/output/metrics.yaml \
-        "$@"
diff --git a/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py b/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py
deleted file mode 100644
index 8228d20ab4165515c2d1d09ae679473a53dbb6ed..0000000000000000000000000000000000000000
--- a/spaces/OptimalScale/Robin-7b/lmflow/datasets/dataset.py
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-"""This Python code defines a class Dataset with methods for initializing, loading,
-and manipulating datasets from different backends such as Hugging Face and JSON.
-
-The `Dataset` class includes methods for loading datasets from a dictionary and a Hugging
-Face dataset, mapping datasets, and retrieving the backend dataset and arguments.
-"""
-
-
-
-# Importing necessary libraries and modules
-import json
-from pathlib import Path
-from typing import Optional
-
-from datasets import load_dataset
-from datasets import Dataset as HFDataset
-
-from lmflow.args import DatasetArguments
-
-DATASET_TYPES = [
- "text_only",
- "text2text",
-]
-
-KEY_TYPE = "type"
-KEY_INSTANCES = "instances"
-
-class Dataset:
- r"""
- Initializes the Dataset object with the given parameters.
-
- Parameters
- ------------
- data_args : DatasetArguments object.
- Contains the arguments required to load the dataset.
-
- backend : str, default="huggingface"
- A string representing the dataset backend. Defaults to "huggingface".
-
- args : Optional.
- Positional arguments.
-
- kwargs : Optional.
- Keyword arguments.
- """
- def __init__(self, data_args=None, backend: str="huggingface", *args, **kwargs):
- self.data_args = data_args
- self.backend = backend
- self.backend_dataset = None
- self.type = None # Original type of the dataset
- self.dataset_path = data_args.dataset_path
-
- if data_args.dataset_path is None:
- return
-
- if backend == "huggingface":
- data_files = [
- x.absolute().as_posix()
- for x in Path(self.dataset_path).glob("*.json")
- ]
-
- # Iterate through all the files and ensure they have the same data type
- for single_file in data_files:
- with open(single_file) as fin:
- json_data = json.load(fin)
- if KEY_TYPE not in json_data.keys():
- raise ValueError(
- f'"{KEY_TYPE}" field must be specified for data, e.g.'
- '{\n'
- f' "{KEY_TYPE}: "text_only",\n'
- f' "{KEY_INSTANCES}": [\n'
- ' { "text": "Sentence 1: This is a sentence." }\n'
- ' { "text": "Sentence 2: This is another sentence." }\n'
- f' ]\n'
- '}'
- )
-
- if self.type is None:
- self.type = json_data[KEY_TYPE]
- elif self.type != json_data[KEY_TYPE]:
- raise ValueError(
-                        'All task files must have the same data type. Previous'
-                        f' files have type "{self.type}", but file'
-                        f' {single_file} has type "{json_data[KEY_TYPE]}".'
- )
-
- # Load the dataset using the HuggingFace dataset library
- extensions = "json"
- raw_dataset = load_dataset(
- extensions,
- data_files=data_files,
- field=KEY_INSTANCES,
- split="train",
- use_auth_token=None,
- )
- self.backend_dataset = raw_dataset
- elif backend == "json":
- # TODO (@Jiachun)
- pass
- else:
- raise NotImplementedError(f'Unsupported dataset backend "{backend}"')
-
-
- def _check_data_type(self):
- # TODO: check if data type and data structure matches, raise messages
- # with hints
- pass
-
-
- def from_dict(self, dict_obj: dict, *args, **kwargs):
- r"""
- Create a Dataset object from a dictionary.
-
- Return a Dataset given a dict with format:
- {
- "type": TYPE,
- "instances": [
- {
- "key_1": VALUE_1.1,
- "key_2": VALUE_1.2,
- ...
- },
- {
- "key_1": VALUE_2.1,
- "key_2": VALUE_2.2,
- ...
- },
- ...
- ]
- }
-
- Parameters
- -----------
-
- dict_obj : dict.
- A dictionary containing the dataset information.
-
- args : Optional.
- Positional arguments.
-
- kwargs : Optional.
- Keyword arguments.
-
- Returns
- ---------
-
- self : Dataset object.
- """
- if self.backend == "huggingface":
- if KEY_TYPE not in dict_obj:
- raise ValueError(
- f'"{KEY_TYPE}" must be provided to initialize a dataset'
- )
- if KEY_INSTANCES not in dict_obj:
- raise ValueError(
- f'"{KEY_INSTANCES}" must be provided to initialize a dataset'
- )
-
- self.type = dict_obj[KEY_TYPE]
-
- hf_dict = {}
- if len(dict_obj[KEY_INSTANCES]) > 0:
- for key in dict_obj[KEY_INSTANCES][0].keys():
- hf_dict[key] = [ instance[key] for instance in dict_obj[KEY_INSTANCES] ]
-
- self.backend_dataset = HFDataset.from_dict(hf_dict, *args, **kwargs)
- return self
- else:
- raise NotImplementedError(
-                f'Currently .from_dict is not supported for backend "{self.backend}"'
- )
-
-
- @classmethod
- def create_from_dict(cls, dict_obj, *args, **kwargs):
- r"""
- Returns
- --------
-
- Returns a Dataset object given a dict.
- """
- empty_data_args = DatasetArguments(dataset_path=None)
- dataset = Dataset(empty_data_args)
- return dataset.from_dict(dict_obj)
-
-
- def to_dict(self):
- r"""
- Returns
- ---------
-
- Return a dict represents the dataset:
- {
- "type": TYPE,
- "instances": [
- {
- "key_1": VALUE_1.1,
- "key_2": VALUE_1.2,
- ...
- },
- {
- "key_1": VALUE_2.1,
- "key_2": VALUE_2.2,
- ...
- },
- ...
- ]
- }
-
- A python dict object represents the content of this dataset.
- """
- if self.backend == "huggingface":
- dict_obj = {}
- dict_obj[KEY_TYPE] = self.get_type()
-
- hf_dict = self.backend_dataset.to_dict()
- dict_obj[KEY_INSTANCES] = []
-
- first_key = None
- for key in hf_dict.keys():
- first_key = key
- break
-
- if first_key is not None:
- num_instances = len(hf_dict[first_key])
- dict_obj[KEY_INSTANCES] = [
- {
- key: hf_dict[key][i] for key in hf_dict.keys()
- }
- for i in range(num_instances)
- ]
-
- return dict_obj
- else:
- raise NotImplementedError(
-                f'Currently .to_dict is not supported for backend "{self.backend}"'
- )
-
-
- def map(self, *args, **kwargs):
- r"""
- Parameters
- ------------
- args : Optional.
- Positional arguments.
-
- kwargs : Optional.
- Keyword arguments.
-
- Returns
- ---------
-
- self : Dataset object.
- """
- # If the dataset uses Hugging Face as the backend,
- # call the `map()` function of the Hugging Face backend dataset
- if self.backend == "huggingface":
- # Set the mapped dataset as the backend dataset of the current dataset
- mapped_backend_dataset = self.backend_dataset.map(*args, **kwargs)
- self.backend_dataset = mapped_backend_dataset
- return self
- else:
- # If the backend is not Hugging Face, raise a NotImplementedError
- raise NotImplementedError(
-                f'Currently .map is not supported for backend "{self.backend}"'
- )
-
-
- def get_backend(self) -> Optional[str]:
- r"""
- Returns
- ---------
-
- self.backend
- """
- return self.backend
-
-
- def get_backend_dataset(self):
- r"""
- Returns
- ---------
-
- self.backend_dataset
- """
- return self.backend_dataset
-
-
- def get_data_args(self):
- r"""
- Returns
- ---------
-
- self.data_args
- """
- return self.data_args
-
-
- def get_type(self):
- r"""
- Returns
- ---------
-
- self.type
- """
- return self.type
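
A hedged usage sketch for the Dataset class above, assuming the lmflow package is installed so that this module and DatasetArguments import as written; it builds a text_only dataset from a dict and round-trips it back::

    from lmflow.datasets.dataset import Dataset

    data_dict = {
        "type": "text_only",
        "instances": [
            {"text": "Sentence 1: This is a sentence."},
            {"text": "Sentence 2: This is another sentence."},
        ],
    }

    dataset = Dataset.create_from_dict(data_dict)
    print(dataset.get_type())              # "text_only"
    print(dataset.to_dict() == data_dict)  # True -- the dict round-trips
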
diff --git a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py b/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py
deleted file mode 100644
index 3090c9bc2f9a63156a4132e89c635613691eb350..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/annotator/OneFormer/datasets/prepare_coco_semantic_annos_from_panoptic_annos.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import functools
-import json
-import multiprocessing as mp
-import numpy as np
-import os
-import time
-from fvcore.common.download import download
-from panopticapi.utils import rgb2id
-from PIL import Image
-
-from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
-
-
-def _process_panoptic_to_semantic(input_panoptic, output_semantic, segments, id_map):
- panoptic = np.asarray(Image.open(input_panoptic), dtype=np.uint32)
- panoptic = rgb2id(panoptic)
- output = np.zeros_like(panoptic, dtype=np.uint8) + 255
- for seg in segments:
- cat_id = seg["category_id"]
- new_cat_id = id_map[cat_id]
- output[panoptic == seg["id"]] = new_cat_id
- Image.fromarray(output).save(output_semantic)
-
-
-def separate_coco_semantic_from_panoptic(panoptic_json, panoptic_root, sem_seg_root, categories):
- """
- Create semantic segmentation annotations from panoptic segmentation
- annotations, to be used by PanopticFPN.
- It maps all thing categories to class 0, and maps all unlabeled pixels to class 255.
- It maps all stuff categories to contiguous ids starting from 1.
- Args:
- panoptic_json (str): path to the panoptic json file, in COCO's format.
- panoptic_root (str): a directory with panoptic annotation files, in COCO's format.
- sem_seg_root (str): a directory to output semantic annotation files
- categories (list[dict]): category metadata. Each dict needs to have:
- "id": corresponds to the "category_id" in the json annotations
- "isthing": 0 or 1
- """
- os.makedirs(sem_seg_root, exist_ok=True)
-
- id_map = {} # map from category id to id in the output semantic annotation
- assert len(categories) <= 254
- for i, k in enumerate(categories):
- id_map[k["id"]] = i
- # what is id = 0?
- # id_map[0] = 255
- print(id_map)
-
- with open(panoptic_json) as f:
- obj = json.load(f)
-
- pool = mp.Pool(processes=max(mp.cpu_count() // 2, 4))
-
- def iter_annotations():
- for anno in obj["annotations"]:
- file_name = anno["file_name"]
- segments = anno["segments_info"]
- input = os.path.join(panoptic_root, file_name)
- output = os.path.join(sem_seg_root, file_name)
- yield input, output, segments
-
- print("Start writing to {} ...".format(sem_seg_root))
- start = time.time()
- pool.starmap(
- functools.partial(_process_panoptic_to_semantic, id_map=id_map),
- iter_annotations(),
- chunksize=100,
- )
- print("Finished. time: {:.2f}s".format(time.time() - start))
-
-
-if __name__ == "__main__":
- dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "coco")
- for s in ["val2017", "train2017"]:
- separate_coco_semantic_from_panoptic(
- os.path.join(dataset_dir, "annotations/panoptic_{}.json".format(s)),
- os.path.join(dataset_dir, "panoptic_{}".format(s)),
- os.path.join(dataset_dir, "panoptic_semseg_{}".format(s)),
- COCO_CATEGORIES,
- )
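
The per-image work in _process_panoptic_to_semantic above boils down to relabeling: every pixel belonging to a panoptic segment gets that segment's remapped category index, and everything else stays 255. A self-contained toy example of that remapping (the ids and id_map are made up for illustration)::

    import numpy as np

    panoptic = np.array([[7, 7, 0],
                         [7, 3, 3]], dtype=np.uint32)   # toy segment-id image
    segments = [{"id": 7, "category_id": 51}, {"id": 3, "category_id": 12}]
    id_map = {51: 0, 12: 1}                             # category id -> class index

    output = np.zeros_like(panoptic, dtype=np.uint8) + 255
    for seg in segments:
        output[panoptic == seg["id"]] = id_map[seg["category_id"]]

    print(output)
    # [[  0   0 255]
    #  [  0   1   1]]
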
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py
deleted file mode 100644
index a33d798b43d405e4c86bee4cd6389be21ca9c637..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/corner_pool.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-from torch import nn
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward',
- 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward',
- 'right_pool_forward', 'right_pool_backward'
-])
-
-_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3}
-
-
-class TopPoolFunction(Function):
-
- @staticmethod
- def symbolic(g, input):
- output = g.op(
- 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top']))
- return output
-
- @staticmethod
- def forward(ctx, input):
- output = ext_module.top_pool_forward(input)
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, = ctx.saved_tensors
- output = ext_module.top_pool_backward(input, grad_output)
- return output
-
-
-class BottomPoolFunction(Function):
-
- @staticmethod
- def symbolic(g, input):
- output = g.op(
- 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom']))
- return output
-
- @staticmethod
- def forward(ctx, input):
- output = ext_module.bottom_pool_forward(input)
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, = ctx.saved_tensors
- output = ext_module.bottom_pool_backward(input, grad_output)
- return output
-
-
-class LeftPoolFunction(Function):
-
- @staticmethod
- def symbolic(g, input):
- output = g.op(
- 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left']))
- return output
-
- @staticmethod
- def forward(ctx, input):
- output = ext_module.left_pool_forward(input)
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, = ctx.saved_tensors
- output = ext_module.left_pool_backward(input, grad_output)
- return output
-
-
-class RightPoolFunction(Function):
-
- @staticmethod
- def symbolic(g, input):
- output = g.op(
- 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right']))
- return output
-
- @staticmethod
- def forward(ctx, input):
- output = ext_module.right_pool_forward(input)
- ctx.save_for_backward(input)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, = ctx.saved_tensors
- output = ext_module.right_pool_backward(input, grad_output)
- return output
-
-
-class CornerPool(nn.Module):
- """Corner Pooling.
-
- Corner Pooling is a new type of pooling layer that helps a
- convolutional network better localize corners of bounding boxes.
-
- Please refer to https://arxiv.org/abs/1808.01244 for more details.
- Code is modified from https://github.com/princeton-vl/CornerNet-Lite.
-
- Args:
- mode(str): Pooling orientation for the pooling layer
-
- - 'bottom': Bottom Pooling
- - 'left': Left Pooling
- - 'right': Right Pooling
- - 'top': Top Pooling
-
- Returns:
- Feature map after pooling.
- """
-
- pool_functions = {
- 'bottom': BottomPoolFunction,
- 'left': LeftPoolFunction,
- 'right': RightPoolFunction,
- 'top': TopPoolFunction,
- }
-
- cummax_dim_flip = {
- 'bottom': (2, False),
- 'left': (3, True),
- 'right': (3, False),
- 'top': (2, True),
- }
-
- def __init__(self, mode):
- super(CornerPool, self).__init__()
- assert mode in self.pool_functions
- self.mode = mode
- self.corner_pool = self.pool_functions[mode]
-
- def forward(self, x):
- if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0':
- if torch.onnx.is_in_onnx_export():
- assert torch.__version__ >= '1.7.0', \
- 'When `cummax` serves as an intermediate component whose '\
- 'outputs is used as inputs for another modules, it\'s '\
- 'expected that pytorch version must be >= 1.7.0, '\
- 'otherwise Error appears like: `RuntimeError: tuple '\
- 'appears in op that does not forward tuples, unsupported '\
- 'kind: prim::PythonOp`.'
-
- dim, flip = self.cummax_dim_flip[self.mode]
- if flip:
- x = x.flip(dim)
- pool_tensor, _ = torch.cummax(x, dim=dim)
- if flip:
- pool_tensor = pool_tensor.flip(dim)
- return pool_tensor
- else:
- return self.corner_pool.apply(x)
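
On recent PyTorch versions CornerPool.forward above replaces the CUDA extension with a flip/cummax/flip pattern. Following the cummax_dim_flip table, mode='left' becomes a reversed cumulative max along the width dimension, so each position takes the max over itself and everything to its right; a minimal sketch::

    import torch

    x = torch.tensor([[[[1., 3., 2., 5., 4.]]]])   # [N, C, H, W]

    flipped, _ = torch.cummax(x.flip(3), dim=3)
    pooled = flipped.flip(3)
    print(pooled)  # tensor([[[[5., 5., 5., 5., 4.]]]])
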
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py
deleted file mode 100644
index ca3226a4fbcbfe58490fa2ea8e1c16b531214121..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/ops/voxelize.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-from torch import nn
-from torch.autograd import Function
-from torch.nn.modules.utils import _pair
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext(
- '_ext', ['dynamic_voxelize_forward', 'hard_voxelize_forward'])
-
-
-class _Voxelization(Function):
-
- @staticmethod
- def forward(ctx,
- points,
- voxel_size,
- coors_range,
- max_points=35,
- max_voxels=20000):
- """Convert kitti points(N, >=3) to voxels.
-
- Args:
- points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points
- and points[:, 3:] contain other information like reflectivity.
- voxel_size (tuple or float): The size of voxel with the shape of
- [3].
- coors_range (tuple or float): The coordinate range of voxel with
- the shape of [6].
-            max_points (int, optional): Maximum number of points contained in
-                a voxel. If max_points=-1, dynamic voxelization is used.
-                Default: 35.
-            max_voxels (int, optional): Maximum number of voxels this function
-                creates. For SECOND, 20000 is a good choice. Users should
-                shuffle points before calling this function because max_voxels
-                may drop points. Default: 20000.
-
- Returns:
-            voxels_out (torch.Tensor): Output voxels with the shape of [M,
-                max_points, ndim]. Only contains points and is only returned
-                when max_points != -1.
- coors_out (torch.Tensor): Output coordinates with the shape of
- [M, 3].
- num_points_per_voxel_out (torch.Tensor): Num points per voxel with
- the shape of [M]. Only returned when max_points != -1.
- """
- if max_points == -1 or max_voxels == -1:
- coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int)
- ext_module.dynamic_voxelize_forward(points, coors, voxel_size,
- coors_range, 3)
- return coors
- else:
- voxels = points.new_zeros(
- size=(max_voxels, max_points, points.size(1)))
- coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int)
- num_points_per_voxel = points.new_zeros(
- size=(max_voxels, ), dtype=torch.int)
- voxel_num = ext_module.hard_voxelize_forward(
- points, voxels, coors, num_points_per_voxel, voxel_size,
- coors_range, max_points, max_voxels, 3)
- # select the valid voxels
- voxels_out = voxels[:voxel_num]
- coors_out = coors[:voxel_num]
- num_points_per_voxel_out = num_points_per_voxel[:voxel_num]
- return voxels_out, coors_out, num_points_per_voxel_out
-
-
-voxelization = _Voxelization.apply
-
-
-class Voxelization(nn.Module):
- """Convert kitti points(N, >=3) to voxels.
-
- Please refer to `PVCNN `_ for more
- details.
-
- Args:
- voxel_size (tuple or float): The size of voxel with the shape of [3].
- point_cloud_range (tuple or float): The coordinate range of voxel with
- the shape of [6].
-        max_num_points (int): Maximum number of points contained in a voxel.
-            If max_num_points=-1, dynamic voxelization is used.
-        max_voxels (int or tuple[int], optional): Maximum number of voxels this
-            function creates. If a tuple is given, the first value is used
-            during training and the second during testing. For SECOND, 20000 is
-            a good choice. Users should shuffle points before calling this
-            function because max_voxels may drop points. Default: 20000.
- """
-
- def __init__(self,
- voxel_size,
- point_cloud_range,
- max_num_points,
- max_voxels=20000):
- super().__init__()
-
- self.voxel_size = voxel_size
- self.point_cloud_range = point_cloud_range
- self.max_num_points = max_num_points
- if isinstance(max_voxels, tuple):
- self.max_voxels = max_voxels
- else:
- self.max_voxels = _pair(max_voxels)
-
- point_cloud_range = torch.tensor(
- point_cloud_range, dtype=torch.float32)
- voxel_size = torch.tensor(voxel_size, dtype=torch.float32)
- grid_size = (point_cloud_range[3:] -
- point_cloud_range[:3]) / voxel_size
- grid_size = torch.round(grid_size).long()
- input_feat_shape = grid_size[:2]
- self.grid_size = grid_size
- # the origin shape is as [x-len, y-len, z-len]
- # [w, h, d] -> [d, h, w]
- self.pcd_shape = [*input_feat_shape, 1][::-1]
-
- def forward(self, input):
- if self.training:
- max_voxels = self.max_voxels[0]
- else:
- max_voxels = self.max_voxels[1]
-
- return voxelization(input, self.voxel_size, self.point_cloud_range,
- self.max_num_points, max_voxels)
-
- def __repr__(self):
- s = self.__class__.__name__ + '('
- s += 'voxel_size=' + str(self.voxel_size)
- s += ', point_cloud_range=' + str(self.point_cloud_range)
- s += ', max_num_points=' + str(self.max_num_points)
- s += ', max_voxels=' + str(self.max_voxels)
- s += ')'
- return s
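
A minimal usage sketch for the Voxelization layer above; the voxel size and point-cloud range are illustrative values, the import path assumes this vendored copy, and the compiled `_ext` ops must be available:

import torch
from annotator.uniformer.mmcv.ops.voxelize import Voxelization

voxel_layer = Voxelization(
    voxel_size=[0.16, 0.16, 4.0],
    point_cloud_range=[0.0, -40.0, -3.0, 70.0, 40.0, 1.0],
    max_num_points=32,
    max_voxels=(16000, 40000))    # (training, testing) voxel budgets
points = torch.rand(1000, 4)      # x, y, z plus one extra feature, e.g. reflectivity
voxels, coors, num_points = voxel_layer(points)   # hard-voxelization outputs
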
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py
deleted file mode 100644
index 85aec9f3045240c3de96a928324ae8f5c3aebe8b..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmseg/models/losses/utils.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import functools
-
-import annotator.uniformer.mmcv as mmcv
-import numpy as np
-import torch.nn.functional as F
-
-
-def get_class_weight(class_weight):
- """Get class weight for loss function.
-
- Args:
- class_weight (list[float] | str | None): If class_weight is a str,
- take it as a file name and read from it.
- """
- if isinstance(class_weight, str):
- # take it as a file path
- if class_weight.endswith('.npy'):
- class_weight = np.load(class_weight)
- else:
- # pkl, json or yaml
- class_weight = mmcv.load(class_weight)
-
- return class_weight
-
-
-def reduce_loss(loss, reduction):
- """Reduce loss as specified.
-
- Args:
- loss (Tensor): Elementwise loss tensor.
- reduction (str): Options are "none", "mean" and "sum".
-
-    Returns:
- Tensor: Reduced loss tensor.
- """
- reduction_enum = F._Reduction.get_enum(reduction)
- # none: 0, elementwise_mean:1, sum: 2
- if reduction_enum == 0:
- return loss
- elif reduction_enum == 1:
- return loss.mean()
- elif reduction_enum == 2:
- return loss.sum()
-
-
-def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
- """Apply element-wise weight and reduce loss.
-
- Args:
- loss (Tensor): Element-wise loss.
- weight (Tensor): Element-wise weights.
- reduction (str): Same as built-in losses of PyTorch.
-        avg_factor (float): Average factor when computing the mean of losses.
-
- Returns:
- Tensor: Processed loss values.
- """
- # if weight is specified, apply element-wise weight
- if weight is not None:
- assert weight.dim() == loss.dim()
- if weight.dim() > 1:
- assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
- loss = loss * weight
-
- # if avg_factor is not specified, just reduce the loss
- if avg_factor is None:
- loss = reduce_loss(loss, reduction)
- else:
- # if reduction is mean, then average the loss by avg_factor
- if reduction == 'mean':
- loss = loss.sum() / avg_factor
- # if reduction is 'none', then do nothing, otherwise raise an error
- elif reduction != 'none':
-            raise ValueError('avg_factor cannot be used with reduction="sum"')
- return loss
-
-
-def weighted_loss(loss_func):
- """Create a weighted version of a given loss function.
-
-    To use this decorator, the loss function must have a signature like
-    `loss_func(pred, target, **kwargs)`. The function only needs to compute
-    element-wise loss without any reduction. This decorator will add weight
-    and reduction arguments to the function. The decorated function will have
-    a signature like `loss_func(pred, target, weight=None, reduction='mean',
-    avg_factor=None, **kwargs)`.
-
- :Example:
-
- >>> import torch
- >>> @weighted_loss
- >>> def l1_loss(pred, target):
- >>> return (pred - target).abs()
-
- >>> pred = torch.Tensor([0, 2, 3])
- >>> target = torch.Tensor([1, 1, 1])
- >>> weight = torch.Tensor([1, 0, 1])
-
- >>> l1_loss(pred, target)
- tensor(1.3333)
- >>> l1_loss(pred, target, weight)
- tensor(1.)
- >>> l1_loss(pred, target, reduction='none')
- tensor([1., 1., 2.])
- >>> l1_loss(pred, target, weight, avg_factor=2)
- tensor(1.5000)
- """
-
- @functools.wraps(loss_func)
- def wrapper(pred,
- target,
- weight=None,
- reduction='mean',
- avg_factor=None,
- **kwargs):
- # get element-wise loss
- loss = loss_func(pred, target, **kwargs)
- loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
- return loss
-
- return wrapper
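
A small worked example of the weighting and `avg_factor` behaviour implemented in `weight_reduce_loss` above; the numbers are chosen purely for illustration and the import path assumes this vendored copy:

import torch
from annotator.uniformer.mmseg.models.losses.utils import weight_reduce_loss

loss = torch.tensor([1.0, 2.0, 3.0, 4.0])
weight = torch.tensor([1.0, 1.0, 0.0, 0.0])

# element-wise weighting keeps only the first two terms -> [1, 2, 0, 0];
# with reduction='mean' and avg_factor=2 the result is (1 + 2) / 2 = 1.5
out = weight_reduce_loss(loss, weight, reduction='mean', avg_factor=2)
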
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go
deleted file mode 100644
index a5eb40984342b818728ae710f73c39b23c36b590..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/runq.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/AutoGPT/tests/__init__.py b/spaces/PeepDaSlan9/AutoGPT/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py
deleted file mode 100644
index b45a837f395230029e9d4194ff9f7f2f8f7067b0..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/resnest.py
+++ /dev/null
@@ -1,314 +0,0 @@
-import math
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as cp
-from annotator.uniformer.mmcv.cnn import build_conv_layer, build_norm_layer
-
-from ..builder import BACKBONES
-from ..utils import ResLayer
-from .resnet import Bottleneck as _Bottleneck
-from .resnet import ResNetV1d
-
-
-class RSoftmax(nn.Module):
- """Radix Softmax module in ``SplitAttentionConv2d``.
-
- Args:
- radix (int): Radix of input.
- groups (int): Groups of input.
- """
-
- def __init__(self, radix, groups):
- super().__init__()
- self.radix = radix
- self.groups = groups
-
- def forward(self, x):
- batch = x.size(0)
- if self.radix > 1:
- x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
- x = F.softmax(x, dim=1)
- x = x.reshape(batch, -1)
- else:
- x = torch.sigmoid(x)
- return x
-
-
-class SplitAttentionConv2d(nn.Module):
- """Split-Attention Conv2d in ResNeSt.
-
- Args:
-        in_channels (int): Same as nn.Conv2d.
-        channels (int): Same as out_channels of nn.Conv2d.
- kernel_size (int | tuple[int]): Same as nn.Conv2d.
- stride (int | tuple[int]): Same as nn.Conv2d.
- padding (int | tuple[int]): Same as nn.Conv2d.
- dilation (int | tuple[int]): Same as nn.Conv2d.
- groups (int): Same as nn.Conv2d.
-        radix (int): Radix of SplitAttentionConv2d. Default: 2.
- reduction_factor (int): Reduction factor of inter_channels. Default: 4.
- conv_cfg (dict): Config dict for convolution layer. Default: None,
- which means using conv2d.
-        norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN').
- dcn (dict): Config dict for DCN. Default: None.
- """
-
- def __init__(self,
- in_channels,
- channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- radix=2,
- reduction_factor=4,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- dcn=None):
- super(SplitAttentionConv2d, self).__init__()
- inter_channels = max(in_channels * radix // reduction_factor, 32)
- self.radix = radix
- self.groups = groups
- self.channels = channels
- self.with_dcn = dcn is not None
- self.dcn = dcn
- fallback_on_stride = False
- if self.with_dcn:
- fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
- if self.with_dcn and not fallback_on_stride:
- assert conv_cfg is None, 'conv_cfg must be None for DCN'
- conv_cfg = dcn
- self.conv = build_conv_layer(
- conv_cfg,
- in_channels,
- channels * radix,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups * radix,
- bias=False)
- self.norm0_name, norm0 = build_norm_layer(
- norm_cfg, channels * radix, postfix=0)
- self.add_module(self.norm0_name, norm0)
- self.relu = nn.ReLU(inplace=True)
- self.fc1 = build_conv_layer(
- None, channels, inter_channels, 1, groups=self.groups)
- self.norm1_name, norm1 = build_norm_layer(
- norm_cfg, inter_channels, postfix=1)
- self.add_module(self.norm1_name, norm1)
- self.fc2 = build_conv_layer(
- None, inter_channels, channels * radix, 1, groups=self.groups)
- self.rsoftmax = RSoftmax(radix, groups)
-
- @property
- def norm0(self):
- """nn.Module: the normalization layer named "norm0" """
- return getattr(self, self.norm0_name)
-
- @property
- def norm1(self):
- """nn.Module: the normalization layer named "norm1" """
- return getattr(self, self.norm1_name)
-
- def forward(self, x):
- x = self.conv(x)
- x = self.norm0(x)
- x = self.relu(x)
-
-        batch = x.size(0)
- if self.radix > 1:
- splits = x.view(batch, self.radix, -1, *x.shape[2:])
- gap = splits.sum(dim=1)
- else:
- gap = x
- gap = F.adaptive_avg_pool2d(gap, 1)
- gap = self.fc1(gap)
-
- gap = self.norm1(gap)
- gap = self.relu(gap)
-
- atten = self.fc2(gap)
- atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
-
- if self.radix > 1:
- attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
- out = torch.sum(attens * splits, dim=1)
- else:
- out = atten * x
- return out.contiguous()
-
-
-class Bottleneck(_Bottleneck):
- """Bottleneck block for ResNeSt.
-
- Args:
-        inplanes (int): Input planes of this block.
-        planes (int): Middle planes of this block.
-        groups (int): Groups of conv2.
-        base_width (int): Base width per group of conv2. 64x4d indicates
-            ``groups=64, base_width=4`` and 32x8d indicates
-            ``groups=32, base_width=8``.
-        radix (int): Radix of SplitAttentionConv2d. Default: 2.
- reduction_factor (int): Reduction factor of inter_channels in
- SplitAttentionConv2d. Default: 4.
- avg_down_stride (bool): Whether to use average pool for stride in
- Bottleneck. Default: True.
- kwargs (dict): Key word arguments for base class.
- """
- expansion = 4
-
- def __init__(self,
- inplanes,
- planes,
- groups=1,
- base_width=4,
- base_channels=64,
- radix=2,
- reduction_factor=4,
- avg_down_stride=True,
- **kwargs):
- """Bottleneck block for ResNeSt."""
- super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
- if groups == 1:
- width = self.planes
- else:
- width = math.floor(self.planes *
- (base_width / base_channels)) * groups
-
- self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
-
- self.norm1_name, norm1 = build_norm_layer(
- self.norm_cfg, width, postfix=1)
- self.norm3_name, norm3 = build_norm_layer(
- self.norm_cfg, self.planes * self.expansion, postfix=3)
-
- self.conv1 = build_conv_layer(
- self.conv_cfg,
- self.inplanes,
- width,
- kernel_size=1,
- stride=self.conv1_stride,
- bias=False)
- self.add_module(self.norm1_name, norm1)
- self.with_modulated_dcn = False
- self.conv2 = SplitAttentionConv2d(
- width,
- width,
- kernel_size=3,
- stride=1 if self.avg_down_stride else self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- radix=radix,
- reduction_factor=reduction_factor,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- dcn=self.dcn)
- delattr(self, self.norm2_name)
-
- if self.avg_down_stride:
- self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
-
- self.conv3 = build_conv_layer(
- self.conv_cfg,
- width,
- self.planes * self.expansion,
- kernel_size=1,
- bias=False)
- self.add_module(self.norm3_name, norm3)
-
- def forward(self, x):
-
- def _inner_forward(x):
- identity = x
-
- out = self.conv1(x)
- out = self.norm1(out)
- out = self.relu(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv1_plugin_names)
-
- out = self.conv2(out)
-
- if self.avg_down_stride:
- out = self.avd_layer(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv2_plugin_names)
-
- out = self.conv3(out)
- out = self.norm3(out)
-
- if self.with_plugins:
- out = self.forward_plugin(out, self.after_conv3_plugin_names)
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
-
- return out
-
- if self.with_cp and x.requires_grad:
- out = cp.checkpoint(_inner_forward, x)
- else:
- out = _inner_forward(x)
-
- out = self.relu(out)
-
- return out
-
-
-@BACKBONES.register_module()
-class ResNeSt(ResNetV1d):
- """ResNeSt backbone.
-
- Args:
- groups (int): Number of groups of Bottleneck. Default: 1
- base_width (int): Base width of Bottleneck. Default: 4
-        radix (int): Radix of SplitAttentionConv2d. Default: 2.
- reduction_factor (int): Reduction factor of inter_channels in
- SplitAttentionConv2d. Default: 4.
- avg_down_stride (bool): Whether to use average pool for stride in
- Bottleneck. Default: True.
- kwargs (dict): Keyword arguments for ResNet.
- """
-
- arch_settings = {
- 50: (Bottleneck, (3, 4, 6, 3)),
- 101: (Bottleneck, (3, 4, 23, 3)),
- 152: (Bottleneck, (3, 8, 36, 3)),
- 200: (Bottleneck, (3, 24, 36, 3))
- }
-
- def __init__(self,
- groups=1,
- base_width=4,
- radix=2,
- reduction_factor=4,
- avg_down_stride=True,
- **kwargs):
- self.groups = groups
- self.base_width = base_width
- self.radix = radix
- self.reduction_factor = reduction_factor
- self.avg_down_stride = avg_down_stride
- super(ResNeSt, self).__init__(**kwargs)
-
- def make_res_layer(self, **kwargs):
- """Pack all blocks in a stage into a ``ResLayer``."""
- return ResLayer(
- groups=self.groups,
- base_width=self.base_width,
- base_channels=self.base_channels,
- radix=self.radix,
- reduction_factor=self.reduction_factor,
- avg_down_stride=self.avg_down_stride,
- **kwargs)
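
A minimal construction sketch for the ResNeSt backbone above; the depth and input shape are illustrative, the keyword arguments simply echo the class defaults, and the import path assumes this vendored copy:

import torch
from annotator.uniformer.mmseg.models.backbones.resnest import ResNeSt

backbone = ResNeSt(depth=50, groups=1, base_width=4, radix=2,
                   reduction_factor=4, avg_down_stride=True)
backbone.eval()
feats = backbone(torch.rand(1, 3, 224, 224))   # tuple of multi-stage feature maps
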
diff --git a/spaces/Pie31415/control-animation/annotator/util.py b/spaces/Pie31415/control-animation/annotator/util.py
deleted file mode 100644
index 90831643d19cc1b9b0940df3d4fd4d846ba74a05..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/util.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import numpy as np
-import cv2
-import os
-
-
-annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts')
-
-
-def HWC3(x):
- assert x.dtype == np.uint8
- if x.ndim == 2:
- x = x[:, :, None]
- assert x.ndim == 3
- H, W, C = x.shape
- assert C == 1 or C == 3 or C == 4
- if C == 3:
- return x
- if C == 1:
- return np.concatenate([x, x, x], axis=2)
- if C == 4:
- color = x[:, :, 0:3].astype(np.float32)
- alpha = x[:, :, 3:4].astype(np.float32) / 255.0
- y = color * alpha + 255.0 * (1.0 - alpha)
- y = y.clip(0, 255).astype(np.uint8)
- return y
-
-
-def resize_image(input_image, resolution):
- H, W, C = input_image.shape
- H = float(H)
- W = float(W)
- k = float(resolution) / min(H, W)
- H *= k
- W *= k
- H = int(np.round(H / 64.0)) * 64
- W = int(np.round(W / 64.0)) * 64
- img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
- return img
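
A short sketch of how `HWC3` and `resize_image` above are typically chained; the file name is a placeholder and the import path is assumed from this file's location:

import cv2
import numpy as np
from annotator.util import HWC3, resize_image   # assumed import path

img = cv2.imread('example.png')               # placeholder path; BGR uint8 image
img = HWC3(np.asarray(img, dtype=np.uint8))   # guarantee a 3-channel uint8 array
img = resize_image(img, 512)                  # shorter side ~512, snapped to a multiple of 64
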
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py
deleted file mode 100644
index 0106a059a565d77d9a52b8e0131c0c3db19c7b94..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/object365.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import torch
-import torchvision
-import torch.utils.data as data
-from maskrcnn_benchmark.data.datasets.coco_dt import CocoDetectionTSV
-
-
-class Object365DetectionTSV(CocoDetectionTSV):
- pass
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/__init__.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/models/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Plachta/VALL-E-X/modules/embedding.py b/spaces/Plachta/VALL-E-X/modules/embedding.py
deleted file mode 100644
index 17f6c316da3de6a432f4d43f9563800fdb6d58c4..0000000000000000000000000000000000000000
--- a/spaces/Plachta/VALL-E-X/modules/embedding.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2023 (authors: Feiteng Li)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import math
-
-import torch
-import torch.nn as nn
-
-
-class TokenEmbedding(nn.Module):
- def __init__(
- self,
- dim_model: int,
- vocab_size: int,
- dropout: float = 0.0,
- ):
- super().__init__()
-
- self.vocab_size = vocab_size
- self.dim_model = dim_model
-
- self.dropout = torch.nn.Dropout(p=dropout)
- self.word_embeddings = nn.Embedding(self.vocab_size, self.dim_model)
-
- @property
- def weight(self) -> torch.Tensor:
- return self.word_embeddings.weight
-
- def embedding(self, index: int) -> torch.Tensor:
- return self.word_embeddings.weight[index : index + 1]
-
- def forward(self, x: torch.Tensor):
- X = self.word_embeddings(x)
- X = self.dropout(X)
-
- return X
-
-
-class SinePositionalEmbedding(nn.Module):
- def __init__(
- self,
- dim_model: int,
- dropout: float = 0.0,
- scale: bool = False,
- alpha: bool = False,
- ):
- super().__init__()
- self.dim_model = dim_model
- self.x_scale = math.sqrt(dim_model) if scale else 1.0
- self.alpha = nn.Parameter(torch.ones(1), requires_grad=alpha)
- self.dropout = torch.nn.Dropout(p=dropout)
-
- self.reverse = False
- self.pe = None
- self.extend_pe(torch.tensor(0.0).expand(1, 4000))
-
- def extend_pe(self, x):
- """Reset the positional encodings."""
- if self.pe is not None:
- if self.pe.size(1) >= x.size(1):
- if self.pe.dtype != x.dtype or self.pe.device != x.device:
- self.pe = self.pe.to(dtype=x.dtype, device=x.device)
- return
- pe = torch.zeros(x.size(1), self.dim_model)
- if self.reverse:
- position = torch.arange(
- x.size(1) - 1, -1, -1.0, dtype=torch.float32
- ).unsqueeze(1)
- else:
- position = torch.arange(
- 0, x.size(1), dtype=torch.float32
- ).unsqueeze(1)
- div_term = torch.exp(
- torch.arange(0, self.dim_model, 2, dtype=torch.float32)
- * -(math.log(10000.0) / self.dim_model)
- )
- pe[:, 0::2] = torch.sin(position * div_term)
- pe[:, 1::2] = torch.cos(position * div_term)
- pe = pe.unsqueeze(0)
- self.pe = pe.to(device=x.device, dtype=x.dtype).detach()
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- self.extend_pe(x)
- output = x.unsqueeze(-1) if x.ndim == 2 else x
- output = output * self.x_scale + self.alpha * self.pe[:, : x.size(1)]
- return self.dropout(output)
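
A minimal usage sketch for the two embedding modules above; vocabulary size, model dimension and sequence length are illustrative, and the import path assumes this file's location:

import torch
from modules.embedding import TokenEmbedding, SinePositionalEmbedding

token_emb = TokenEmbedding(dim_model=512, vocab_size=1024, dropout=0.1)
pos_emb = SinePositionalEmbedding(dim_model=512, dropout=0.1, scale=True, alpha=True)

tokens = torch.randint(0, 1024, (2, 100))   # (batch, sequence length)
x = token_emb(tokens)                       # -> (2, 100, 512)
x = pos_emb(x)                              # adds scaled sinusoidal positions with a learnable alpha
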
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py
deleted file mode 100644
index b658f4779a369f9ec8dde692a61b7f0fe3485724..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_codebooks_patterns.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import pytest
-import torch
-
-from audiocraft.modules.codebooks_patterns import (
- DelayedPatternProvider,
- ParallelPatternProvider,
- Pattern,
- UnrolledPatternProvider,
-)
-
-
-class TestParallelPatternProvider:
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
- def test_get_pattern(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- # + 1 to account for 1st step
- assert len(pattern.layout) == timesteps + 1
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_content(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- for s, v in enumerate(pattern.layout):
- for i, code in enumerate(v):
- assert i == code.q
- assert code.t == s - 1 # account for the 1st empty step
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_max_delay(self, n_q: int, timesteps: int):
- provider = ParallelPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == 0
- assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
-
-
-class TestDelayedPatternProvider:
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [0, 1, 16, 100])
- def test_get_pattern(self, n_q: int, timesteps: int):
- delays = [
- list(range(n_q)),
- [0] + [1] * (n_q - 1),
- [0] + [4] * (n_q - 1),
- ]
- for delay in delays:
- provider = DelayedPatternProvider(n_q, delay)
- pattern = provider.get_pattern(timesteps)
- # + 1 to account for 1st step
- assert len(pattern.layout) == timesteps + max(delay) + 1
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- def test_pattern_content(self, n_q: int, timesteps: int):
- provider = DelayedPatternProvider(n_q)
- pattern = provider.get_pattern(timesteps)
- for s, v in enumerate(pattern.layout):
- for i, code in enumerate(v):
- assert i == code.q
- assert code.t == max(0, s - code.q - 1)
-
- @pytest.mark.parametrize("timesteps", [8, 16, 100])
- @pytest.mark.parametrize("delay", [[0, 1, 2, 3], [0, 1, 1, 1], [0, 3, 3, 3], [0, 3]])
- def test_pattern_max_delay(self, timesteps: int, delay: list):
- provider = DelayedPatternProvider(len(delay), delay)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == max(delay)
- assert len(pattern.valid_layout) == len(pattern.layout) - pattern.max_delay
-
-
-class TestUnrolledPatternProvider:
-
- @pytest.mark.parametrize("timesteps", [0, 1, 16])
- @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
- @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
- def test_get_pattern(self, timesteps: int, flattening: list, delays: list):
- n_q = len(flattening)
- max_delay = max(delays)
- provider = UnrolledPatternProvider(n_q, flattening, delays)
- pattern = provider.get_pattern(timesteps)
- assert len(pattern.layout) == provider.num_virtual_steps(timesteps) + max_delay
-
- @pytest.mark.parametrize("timesteps", [0, 1, 16])
- @pytest.mark.parametrize("flattening", [[0, 1, 2], [0, 1, 1]])
- @pytest.mark.parametrize("delays", [[0, 0, 0], [0, 5, 5]])
- def test_pattern_max_delay(self, timesteps: int, flattening: list, delays: list):
- n_q = len(flattening)
- max_delay = max(delays)
- provider = UnrolledPatternProvider(n_q, flattening, delays)
- pattern = provider.get_pattern(timesteps)
- assert pattern.max_delay == max_delay
-
-
-class TestPattern:
-
- def ref_build_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
- """Reference method to build the sequence from the pattern without using fancy scatter."""
- bs, n_q, T = z.shape
- z = z.cpu().numpy()
- assert n_q == pattern.n_q
- assert T <= pattern.timesteps
- inp = torch.full((bs, n_q, len(pattern.layout)), special_token, dtype=torch.long).numpy()
- inp[:] = special_token
- for s, v in enumerate(pattern.layout):
- for (t, q) in v:
- if t < T:
- inp[:, q, s] = z[:, q, t]
- return torch.from_numpy(inp)
-
- def ref_revert_pattern_sequence(self, z: torch.Tensor, pattern: Pattern, special_token: int):
- """Reference method to revert the sequence from the pattern without using fancy scatter."""
- z = z.cpu().numpy()
- bs, n_q, S = z.shape
- assert pattern.n_q == n_q
- inp = torch.full((bs, pattern.n_q, pattern.timesteps), special_token, dtype=torch.long).numpy()
- inp[:] = special_token
- for s, v in enumerate(pattern.layout):
- for (t, q) in v:
- if t < pattern.timesteps:
- inp[:, q, t] = z[:, q, s]
- return torch.from_numpy(inp)
-
- def ref_revert_pattern_logits(self, z: torch.Tensor, pattern: Pattern, special_token: float):
- """Reference method to revert the logits from the pattern without using fancy scatter."""
- z = z.cpu().numpy()
- bs, card, n_q, S = z.shape
- assert pattern.n_q == n_q
- ref_layout = pattern.layout
- inp = torch.full((bs, card, pattern.n_q, pattern.timesteps), special_token, dtype=torch.float).numpy()
- inp[:] = special_token
- for s, v in enumerate(ref_layout[1:]):
- if s < S:
- for (t, q) in v:
- if t < pattern.timesteps:
- inp[:, :, q, t] = z[:, :, q, s]
- return torch.from_numpy(inp)
-
- def _get_pattern_providers(self, n_q: int):
- pattern_provider_1 = ParallelPatternProvider(n_q)
- pattern_provider_2 = DelayedPatternProvider(n_q, list(range(n_q)))
- pattern_provider_3 = DelayedPatternProvider(n_q, [0] + [1] * (n_q - 1))
- pattern_provider_4 = UnrolledPatternProvider(
- n_q, flattening=list(range(n_q)), delays=[0] * n_q
- )
- pattern_provider_5 = UnrolledPatternProvider(
- n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] * n_q
- )
- pattern_provider_6 = UnrolledPatternProvider(
- n_q, flattening=[0] + [1] * (n_q - 1), delays=[0] + [5] * (n_q - 1)
- )
- return [
- pattern_provider_1,
- pattern_provider_2,
- pattern_provider_3,
- pattern_provider_4,
- pattern_provider_5,
- pattern_provider_6,
- ]
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- def test_build_pattern_sequence(self, n_q: int, timesteps: int):
- bs = 2
- card = 256
- special_token = card
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # we can correctly build the sequence from the pattern
- z = torch.randint(0, card, (bs, n_q, timesteps))
- ref_res = self.ref_build_pattern_sequence(z, pattern, special_token)
- res, indexes, mask = pattern.build_pattern_sequence(z, special_token)
- assert (res == ref_res).float().mean() == 1.0
-
- # expected assertion fails on the number of timesteps
- invalid_timesteps = [timesteps + 1]
- if pattern.num_sequence_steps != pattern.timesteps:
- invalid_timesteps.append(pattern.num_sequence_steps)
- for i_timesteps in invalid_timesteps:
- z2 = torch.randint(0, card, (bs, n_q, i_timesteps))
- with pytest.raises(AssertionError):
- pattern.build_pattern_sequence(z2, special_token)
-
- # expected assertion fails on the number of codebooks
- invalid_qs = [0, n_q - 1, n_q + 1]
- for i_q in invalid_qs:
- z3 = torch.randint(0, card, (bs, i_q, timesteps))
- with pytest.raises(AssertionError):
- pattern.build_pattern_sequence(z3, special_token)
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- def test_revert_pattern_sequence(self, n_q: int, timesteps: int):
- bs = 2
- card = 256
- special_token = card
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # this works assuming previous tests are successful
- z = torch.randint(0, card, (bs, n_q, timesteps))
- s = self.ref_build_pattern_sequence(z, pattern, special_token)
- ref_out = self.ref_revert_pattern_sequence(s, pattern, special_token)
- # ensure our reference script retrieve the original sequence
- assert z.shape == ref_out.shape
- assert (z == ref_out).float().mean() == 1.0
- # now we can test the scatter version
- out, indexes, mask = pattern.revert_pattern_sequence(s, special_token)
- assert out.shape == ref_out.shape
- assert (out == ref_out).float().mean() == 1.0
-
- @pytest.mark.parametrize("n_q", [1, 4, 32])
- @pytest.mark.parametrize("timesteps", [16, 72])
- @pytest.mark.parametrize("card", [1, 2, 256, 1024])
- def test_revert_pattern_logits(self, n_q: int, timesteps: int, card: int):
- bs = 2
- special_token = card
- logits_special_token = float('nan')
-
- pattern_providers = self._get_pattern_providers(n_q)
- for pattern_provider in pattern_providers:
- pattern = pattern_provider.get_pattern(timesteps)
- # this works assuming previous tests are successful
- z = torch.randint(0, card, (bs, n_q, timesteps))
- s = self.ref_build_pattern_sequence(z, pattern, special_token)
- logits = torch.randn((bs, card, n_q, s.shape[-1]))
- ref_out = self.ref_revert_pattern_logits(logits, pattern, logits_special_token)
- # ensure our reference script retrieve the original sequence
- assert ref_out.shape == torch.Size([bs, card, n_q, timesteps])
- # now we can test the scatter version
- out, indexes, mask = pattern.revert_pattern_logits(logits, logits_special_token)
- assert out.shape == ref_out.shape
- assert (out == ref_out).float().mean() == 1.0
diff --git a/spaces/ProteinDesignLab/protpardelle/output_helpers.py b/spaces/ProteinDesignLab/protpardelle/output_helpers.py
deleted file mode 100644
index ec42d57fd16bae401e1e0f5178694668e4bc75ef..0000000000000000000000000000000000000000
--- a/spaces/ProteinDesignLab/protpardelle/output_helpers.py
+++ /dev/null
@@ -1,1566 +0,0 @@
-
-import json
-import os
-
-local_url = "http://localhost:8888/frontend/"
-remote_url = "https://cdn.jsdelivr.net/gh/duerrsimon/vue_3dmol_gradio@v.02/"
-
-
-if os.environ.get("GRADIO_LOCAL") is not None:
- url = local_url
-else:
- url = remote_url
-
-def viewer_html(path_to_file, name="input", selectionStyle={"color": "greenCarbon",
- "representation": "cartoon",
- "multiple": True}, representations={}):
-
- ext = path_to_file.split(".")[-1]
- with open(path_to_file, "r") as f:
- data = f.read()
- moldata = {"moldata": [{"data": data, "name": name, "selectionStyle": selectionStyle,"format": ext,
- "selectable": True,
- "asFrames":False,
- "clickable": False}]
- }
- # dict to json
- moldata = json.dumps(moldata)
- representations = json.dumps(representations)
-
- return """
-
-
-
-
-
-
-
-
-
- Molecule View
-
-
-
-
-
-
-
-
Resampling the whole PDB file, select
- residues in the
- sequence view to resample only parts
- of the structure
-
Resampling the selected
- residues
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-"""
-import os
-import subprocess
-import shlex
-
-def get_color(i):
- colors= ["orange", "cyan", "blue", "yellow", "magenta"]
- return colors[i % len(colors)]+"Carbon"
-
-
-def output_html(path_to_file,path_to_designs,metrics, resample_idx="",mode="unconditional", selectionStyle={"color": "greenCarbon",
- "representation": "cartoon",
- "multiple": True}):
-
- if mode=="conditional":
- ext = path_to_file.split(".")[-1]
- with open(path_to_file, "r") as f:
- data = f.read()
- moldata = [{"data": data, "name": os.path.basename(path_to_file),
- "selectionStyle": selectionStyle,
- "format": ext,
- "selectable": True,
- "asFrames":False,
- "clickable": False}]
- representations = [{
- "model": 0,
- "chain": "",
- "resname": "",
- "style": "cartoon",
- "color": "whiteCarbon",
- "residue_range": "",
- "around": 0,
- "byres": False,
- "visible": False,
- }]
- if resample_idx!="":
- representations.append({
- "model": 0,
- "chain": "",
- "resname": "",
- "style": "cartoon",
- "color": "greenCarbon",
- "residue_range": resample_idx[1:-1], #remove leading and trailing quotes
- "around": 0,
- "byres": False,
- "visible": False,
- })
- # move file from temp to save dir
- subprocess.run(shlex.split(f"cp {path_to_file} {os.path.dirname(path_to_designs[0])}/template.pdb"))
- path_to_file = f"{os.path.dirname(path_to_designs[0])}/template.pdb"
- designs = [{
- "model":0,
- "name":"template.pdb",
- "fullpath": path_to_file,
- "len":76,
- "metric":{
- "resample idx": resample_idx[1:-1],
- },
- "visible":True,
- "color":"gray"
- }]
- add_index = 1
- else:
- designs = []
- moldata = []
- representations = []
- add_index = 0
-
- for i,d in enumerate(path_to_designs):
- ext = d.split(".")[-1]
- with open(d, "r") as f:
- data = f.read()
- moldata.append({"data": data, "name": os.path.basename(d),
- "selectionStyle": selectionStyle,
- "format": ext,
- "selectable": True,
- "asFrames":False,
- "clickable": False})
- representations.append({
- "model": i+add_index,
- "chain": "",
- "resname": "",
- "style": "cartoon",
- "color": get_color(i),
- "residue_range": "",
- "around": 0,
- "byres": False,
- "visible": False,
- })
- designs.append({
- "model":i+add_index,
- "fullpath": d,
- "name":os.path.basename(d),
- "metric":metrics[i],
- "visible":True,
- "color":""
- })
- # dict to json
- moldata = json.dumps(moldata)
- representations = json.dumps(representations)
- designs = json.dumps(designs)
- return """
-
-
-
-
-
-
-
- Molecule View
-
-
-
-
-
-
-
-
-
-
-
-"""
-
-load_js = """
-
-async () => {
- // create empty textarea with id selectedAtoms that is hidden
- // and append it to the body
- var selectedAtoms = document.createElement("textarea");
- selectedAtoms.id = "selectedAtoms";
- selectedAtoms.style.display = "none";
- document.body.appendChild(selectedAtoms);
-
-window.onmessage = function(e) {
-selectedAtoms.value = JSON.stringify(e.data);
-};
-}
-"""
-
-get_js = """
-async (resample_idxs) => {
-
- var selectedAtoms = document.getElementById("selectedAtoms");
- var selectedAtomsValue = selectedAtoms.value;
-
- var hasNumber = /\d/;
-
-
- if (hasNumber.test(selectedAtomsValue)==false) {
- selectedAtomsValue = resample_idxs.replace(/.$/,' "')
-
- }
- return selectedAtomsValue;
-
-}
-"""
diff --git a/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py b/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py
deleted file mode 100644
index 25faf1ceb95aaed9a3f7a7982d17a03dc6bc32b1..0000000000000000000000000000000000000000
--- a/spaces/PrussianBlue/White-box-Cartoonization/wbc/cartoonize.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import os
-import cv2
-import numpy as np
-import tensorflow as tf
-import wbc.network as network
-import wbc.guided_filter as guided_filter
-from tqdm import tqdm
-
-
-def resize_crop(image):
- h, w, c = np.shape(image)
- if min(h, w) > 720:
- if h > w:
- h, w = int(720 * h / w), 720
- else:
- h, w = 720, int(720 * w / h)
- image = cv2.resize(image, (w, h),
- interpolation=cv2.INTER_AREA)
- h, w = (h // 8) * 8, (w // 8) * 8
- image = image[:h, :w, :]
- return image
-
-
-def cartoonize(load_folder, save_folder, model_path):
- print(model_path)
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- network_out = network.unet_generator(input_photo)
- final_out = guided_filter.guided_filter(input_photo, network_out, r=1, eps=5e-3)
-
- all_vars = tf.trainable_variables()
- gene_vars = [var for var in all_vars if 'generator' in var.name]
- saver = tf.train.Saver(var_list=gene_vars)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- sess = tf.Session(config=config)
-
- sess.run(tf.global_variables_initializer())
- saver.restore(sess, tf.train.latest_checkpoint(model_path))
- name_list = os.listdir(load_folder)
- for name in tqdm(name_list):
- try:
- load_path = os.path.join(load_folder, name)
- save_path = os.path.join(save_folder, name)
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = sess.run(final_out, feed_dict={input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
-        except Exception:
- print('cartoonize {} failed'.format(load_path))
-
-
-class Cartoonize:
- def __init__(self, model_path):
- print(model_path)
- self.input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- network_out = network.unet_generator(self.input_photo)
- self.final_out = guided_filter.guided_filter(self.input_photo, network_out, r=1, eps=5e-3)
-
- all_vars = tf.trainable_variables()
- gene_vars = [var for var in all_vars if 'generator' in var.name]
- saver = tf.train.Saver(var_list=gene_vars)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- self.sess = tf.Session(config=config)
-
- self.sess.run(tf.global_variables_initializer())
- saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
-
- def run(self, load_folder, save_folder):
- name_list = os.listdir(load_folder)
- for name in tqdm(name_list):
- try:
- load_path = os.path.join(load_folder, name)
- save_path = os.path.join(save_folder, name)
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
-            except Exception:
- print('cartoonize {} failed'.format(load_path))
-
- def run_sigle(self, load_path, save_path):
- try:
- image = cv2.imread(load_path)
- image = resize_crop(image)
- batch_image = image.astype(np.float32) / 127.5 - 1
- batch_image = np.expand_dims(batch_image, axis=0)
- output = self.sess.run(self.final_out, feed_dict={self.input_photo: batch_image})
- output = (np.squeeze(output) + 1) * 127.5
- output = np.clip(output, 0, 255).astype(np.uint8)
- cv2.imwrite(save_path, output)
-        except Exception:
- print('cartoonize {} failed'.format(load_path))
-
-
-if __name__ == '__main__':
- model_path = 'saved_models'
- load_folder = 'test_images'
- save_folder = 'cartoonized_images'
- if not os.path.exists(save_folder):
- os.mkdir(save_folder)
- cartoonize(load_folder, save_folder, model_path)
diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py
deleted file mode 100644
index af000ecd943d7b8a85d7eb70195c9ecd10ab5edc..0000000000000000000000000000000000000000
--- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/annotated_objects_coco.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import json
-from itertools import chain
-from pathlib import Path
-from typing import Iterable, Dict, List, Callable, Any
-from collections import defaultdict
-
-from tqdm import tqdm
-
-from taming.data.annotated_objects_dataset import AnnotatedObjectsDataset
-from taming.data.helper_types import Annotation, ImageDescription, Category
-
-COCO_PATH_STRUCTURE = {
- 'train': {
- 'top_level': '',
- 'instances_annotations': 'annotations/instances_train2017.json',
- 'stuff_annotations': 'annotations/stuff_train2017.json',
- 'files': 'train2017'
- },
- 'validation': {
- 'top_level': '',
- 'instances_annotations': 'annotations/instances_val2017.json',
- 'stuff_annotations': 'annotations/stuff_val2017.json',
- 'files': 'val2017'
- }
-}
-
-
-def load_image_descriptions(description_json: List[Dict]) -> Dict[str, ImageDescription]:
- return {
- str(img['id']): ImageDescription(
- id=img['id'],
- license=img.get('license'),
- file_name=img['file_name'],
- coco_url=img['coco_url'],
- original_size=(img['width'], img['height']),
- date_captured=img.get('date_captured'),
- flickr_url=img.get('flickr_url')
- )
- for img in description_json
- }
-
-
-def load_categories(category_json: Iterable) -> Dict[str, Category]:
- return {str(cat['id']): Category(id=str(cat['id']), super_category=cat['supercategory'], name=cat['name'])
- for cat in category_json if cat['name'] != 'other'}
-
-
-def load_annotations(annotations_json: List[Dict], image_descriptions: Dict[str, ImageDescription],
- category_no_for_id: Callable[[str], int], split: str) -> Dict[str, List[Annotation]]:
- annotations = defaultdict(list)
- total = sum(len(a) for a in annotations_json)
- for ann in tqdm(chain(*annotations_json), f'Loading {split} annotations', total=total):
- image_id = str(ann['image_id'])
- if image_id not in image_descriptions:
- raise ValueError(f'image_id [{image_id}] has no image description.')
- category_id = ann['category_id']
- try:
- category_no = category_no_for_id(str(category_id))
- except KeyError:
- continue
-
- width, height = image_descriptions[image_id].original_size
- bbox = (ann['bbox'][0] / width, ann['bbox'][1] / height, ann['bbox'][2] / width, ann['bbox'][3] / height)
-
- annotations[image_id].append(
- Annotation(
- id=ann['id'],
- area=bbox[2]*bbox[3], # use bbox area
- is_group_of=ann['iscrowd'],
- image_id=ann['image_id'],
- bbox=bbox,
- category_id=str(category_id),
- category_no=category_no
- )
- )
- return dict(annotations)
-
-
-class AnnotatedObjectsCoco(AnnotatedObjectsDataset):
- def __init__(self, use_things: bool = True, use_stuff: bool = True, **kwargs):
- """
- @param data_path: is the path to the following folder structure:
- coco/
- ├── annotations
- │ ├── instances_train2017.json
- │ ├── instances_val2017.json
- │ ├── stuff_train2017.json
- │ └── stuff_val2017.json
- ├── train2017
- │ ├── 000000000009.jpg
- │ ├── 000000000025.jpg
- │ └── ...
- ├── val2017
- │ ├── 000000000139.jpg
- │ ├── 000000000285.jpg
- │ └── ...
- @param: split: one of 'train' or 'validation'
- @param: desired image size (give square images)
- """
- super().__init__(**kwargs)
- self.use_things = use_things
- self.use_stuff = use_stuff
-
- with open(self.paths['instances_annotations']) as f:
- inst_data_json = json.load(f)
- with open(self.paths['stuff_annotations']) as f:
- stuff_data_json = json.load(f)
-
- category_jsons = []
- annotation_jsons = []
- if self.use_things:
- category_jsons.append(inst_data_json['categories'])
- annotation_jsons.append(inst_data_json['annotations'])
- if self.use_stuff:
- category_jsons.append(stuff_data_json['categories'])
- annotation_jsons.append(stuff_data_json['annotations'])
-
- self.categories = load_categories(chain(*category_jsons))
- self.filter_categories()
- self.setup_category_id_and_number()
-
- self.image_descriptions = load_image_descriptions(inst_data_json['images'])
- annotations = load_annotations(annotation_jsons, self.image_descriptions, self.get_category_number, self.split)
- self.annotations = self.filter_object_number(annotations, self.min_object_area,
- self.min_objects_per_image, self.max_objects_per_image)
- self.image_ids = list(self.annotations.keys())
- self.clean_up_annotations_and_image_descriptions()
-
- def get_path_structure(self) -> Dict[str, str]:
- if self.split not in COCO_PATH_STRUCTURE:
-            raise ValueError(f'Split [{self.split}] does not exist for COCO data.')
- return COCO_PATH_STRUCTURE[self.split]
-
- def get_image_path(self, image_id: str) -> Path:
- return self.paths['files'].joinpath(self.image_descriptions[str(image_id)].file_name)
-
- def get_image_description(self, image_id: str) -> Dict[str, Any]:
- # noinspection PyProtectedMember
- return self.image_descriptions[image_id]._asdict()
diff --git a/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py b/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py
deleted file mode 100644
index fd019d145efc7f308cd96de90f4e7b648f6820b4..0000000000000000000000000000000000000000
--- a/spaces/QianFeng/White-box-Cartoonization2308/wbc/guided_filter.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import tensorflow as tf
-import numpy as np
-
-
-
-
-def tf_box_filter(x, r):
- k_size = int(2*r+1)
- ch = x.get_shape().as_list()[-1]
- weight = 1/(k_size**2)
- box_kernel = weight*np.ones((k_size, k_size, ch, 1))
- box_kernel = np.array(box_kernel).astype(np.float32)
- output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME')
- return output
-
-
-
-def guided_filter(x, y, r, eps=1e-2):
-
- x_shape = tf.shape(x)
- #y_shape = tf.shape(y)
-
- N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r)
-
- mean_x = tf_box_filter(x, r) / N
- mean_y = tf_box_filter(y, r) / N
- cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y
- var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x
-
- A = cov_xy / (var_x + eps)
- b = mean_y - A * mean_x
-
- mean_A = tf_box_filter(A, r) / N
- mean_b = tf_box_filter(b, r) / N
-
- output = mean_A * x + mean_b
-
- return output
-
-
-
-def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8):
-
- #assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and hr_x.shape.ndims == 4
-
- lr_x_shape = tf.shape(lr_x)
- #lr_y_shape = tf.shape(lr_y)
- hr_x_shape = tf.shape(hr_x)
-
- N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r)
-
- mean_x = tf_box_filter(lr_x, r) / N
- mean_y = tf_box_filter(lr_y, r) / N
- cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y
- var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x
-
- A = cov_xy / (var_x + eps)
- b = mean_y - A * mean_x
-
- mean_A = tf.image.resize_images(A, hr_x_shape[1: 3])
- mean_b = tf.image.resize_images(b, hr_x_shape[1: 3])
-
- output = mean_A * hr_x + mean_b
-
- return output
-
-
-if __name__ == '__main__':
- import cv2
- from tqdm import tqdm
-
- input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
- #input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3])
- output = guided_filter(input_photo, input_photo, 5, eps=1)
- image = cv2.imread('output_figure1/cartoon2.jpg')
- image = image/127.5 - 1
- image = np.expand_dims(image, axis=0)
-
- config = tf.ConfigProto()
- config.gpu_options.allow_growth = True
- sess = tf.Session(config=config)
- sess.run(tf.global_variables_initializer())
-
- out = sess.run(output, feed_dict={input_photo: image})
- out = (np.squeeze(out)+1)*127.5
- out = np.clip(out, 0, 255).astype(np.uint8)
- cv2.imwrite('output_figure1/cartoon2_filter.jpg', out)
diff --git a/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py b/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py
deleted file mode 100644
index 3b0f50b5599431173444c3f070787e695c4c0ef3..0000000000000000000000000000000000000000
--- a/spaces/RamAnanth1/T2I-Adapter/gradio_pose.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import gradio as gr
-
-def create_demo(process):
- block = gr.Blocks().queue()
- with block:
- with gr.Row():
- with gr.Column():
- input_img = gr.Image(source='upload', type="numpy")
- prompt = gr.Textbox(label="Prompt")
- neg_prompt = gr.Textbox(label="Negative Prompt",
- value='ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face')
- run_button = gr.Button(label="Run")
- with gr.Accordion("Advanced options", open=False):
-                    con_strength = gr.Slider(label="Controlling Strength (The guidance strength of the sketch to the result)", minimum=0, maximum=1, value=0.4, step=0.1)
- scale = gr.Slider(label="Guidance Scale (Classifier free guidance)", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
- fix_sample = gr.inputs.Radio(['True', 'False'], type="value", default='False', label='Fix Sampling\n (Fix the random seed)')
- base_model = gr.inputs.Radio(['sd-v1-4.ckpt', 'anything-v4.0-pruned.ckpt'], type="value", default='sd-v1-4.ckpt', label='The base model you want to use')
- with gr.Column():
- result = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
- ips = [input_img,prompt, neg_prompt, fix_sample, scale, con_strength, base_model]
- run_button.click(fn=process, inputs=ips, outputs=[result])
-
- examples_list = [["human.png", "beautiful girl",
- "ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face",
- 'True',
- 7.5,
- 0.4,
- 'anything-v4.0-pruned.ckpt']]
-
- examples = gr.Examples(examples=examples_list,inputs = [input_img, prompt,neg_prompt, fix_sample, scale, con_strength,base_model], outputs = [result], cache_examples = True, fn = process)
-
- return block
\ No newline at end of file
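
A usage sketch for `create_demo` above; `process` is a hypothetical stand-in with the same seven inputs as the real inference callable the app wires in:

def process(input_img, prompt, neg_prompt, fix_sample, scale, con_strength, base_model):
    # hypothetical stub; the real callable runs the T2I-Adapter pipeline and returns images
    return [input_img]

demo = create_demo(process)
demo.launch()
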
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py
deleted file mode 100644
index 33f394d265d5da17dd5b3c2467e2e4e71af1395d..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/requests/utils.py
+++ /dev/null
@@ -1,1086 +0,0 @@
-"""
-requests.utils
-~~~~~~~~~~~~~~
-
-This module provides utility functions that are used within Requests
-that are also useful for external consumption.
-"""
-
-import codecs
-import contextlib
-import io
-import os
-import re
-import socket
-import struct
-import sys
-import tempfile
-import warnings
-import zipfile
-from collections import OrderedDict
-
-from pip._vendor.urllib3.util import make_headers, parse_url
-
-from . import certs
-from .__version__ import __version__
-
-# to_native_string is unused here, but imported here for backwards compatibility
-from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401
-from .compat import (
- Mapping,
- basestring,
- bytes,
- getproxies,
- getproxies_environment,
- integer_types,
-)
-from .compat import parse_http_list as _parse_list_header
-from .compat import (
- proxy_bypass,
- proxy_bypass_environment,
- quote,
- str,
- unquote,
- urlparse,
- urlunparse,
-)
-from .cookies import cookiejar_from_dict
-from .exceptions import (
- FileModeWarning,
- InvalidHeader,
- InvalidURL,
- UnrewindableBodyError,
-)
-from .structures import CaseInsensitiveDict
-
-NETRC_FILES = (".netrc", "_netrc")
-
-DEFAULT_CA_BUNDLE_PATH = certs.where()
-
-DEFAULT_PORTS = {"http": 80, "https": 443}
-
-# Ensure that ', ' is used to preserve previous delimiter behavior.
-DEFAULT_ACCEPT_ENCODING = ", ".join(
- re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
-)
-
-
-if sys.platform == "win32":
- # provide a proxy_bypass version on Windows without DNS lookups
-
- def proxy_bypass_registry(host):
- try:
- import winreg
- except ImportError:
- return False
-
- try:
- internetSettings = winreg.OpenKey(
- winreg.HKEY_CURRENT_USER,
- r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
- )
- # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
- proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
- # ProxyOverride is almost always a string
- proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
- except (OSError, ValueError):
- return False
- if not proxyEnable or not proxyOverride:
- return False
-
- # make a check value list from the registry entry: replace the
- # '' string by the localhost entry and the corresponding
- # canonical entry.
- proxyOverride = proxyOverride.split(";")
- # now check if we match one of the registry values.
- for test in proxyOverride:
- if test == "":
- if "." not in host:
- return True
- test = test.replace(".", r"\.") # mask dots
- test = test.replace("*", r".*") # change glob sequence
- test = test.replace("?", r".") # change glob char
- if re.match(test, host, re.I):
- return True
- return False
-
- def proxy_bypass(host): # noqa
- """Return True, if the host should be bypassed.
-
- Checks proxy settings gathered from the environment, if specified,
- or the registry.
- """
- if getproxies_environment():
- return proxy_bypass_environment(host)
- else:
- return proxy_bypass_registry(host)
-
-
-def dict_to_sequence(d):
- """Returns an internal sequence dictionary update."""
-
- if hasattr(d, "items"):
- d = d.items()
-
- return d
-
-
-def super_len(o):
- total_length = None
- current_position = 0
-
- if hasattr(o, "__len__"):
- total_length = len(o)
-
- elif hasattr(o, "len"):
- total_length = o.len
-
- elif hasattr(o, "fileno"):
- try:
- fileno = o.fileno()
- except (io.UnsupportedOperation, AttributeError):
- # AttributeError is a surprising exception, seeing as how we've just checked
- # that `hasattr(o, 'fileno')`. It happens for objects obtained via
- # `Tarfile.extractfile()`, per issue 5229.
- pass
- else:
- total_length = os.fstat(fileno).st_size
-
- # Having used fstat to determine the file length, we need to
- # confirm that this file was opened up in binary mode.
- if "b" not in o.mode:
- warnings.warn(
- (
- "Requests has determined the content-length for this "
- "request using the binary size of the file: however, the "
- "file has been opened in text mode (i.e. without the 'b' "
- "flag in the mode). This may lead to an incorrect "
- "content-length. In Requests 3.0, support will be removed "
- "for files in text mode."
- ),
- FileModeWarning,
- )
-
- if hasattr(o, "tell"):
- try:
- current_position = o.tell()
- except OSError:
- # This can happen in some weird situations, such as when the file
- # is actually a special file descriptor like stdin. In this
- # instance, we don't know what the length is, so set it to zero and
- # let requests chunk it instead.
- if total_length is not None:
- current_position = total_length
- else:
- if hasattr(o, "seek") and total_length is None:
- # StringIO and BytesIO have seek but no usable fileno
- try:
- # seek to end of file
- o.seek(0, 2)
- total_length = o.tell()
-
- # seek back to current position to support
- # partially read file-like objects
- o.seek(current_position or 0)
- except OSError:
- total_length = 0
-
- if total_length is None:
- total_length = 0
-
- return max(0, total_length - current_position)
-
-
-def get_netrc_auth(url, raise_errors=False):
- """Returns the Requests tuple auth for a given url from netrc."""
-
- netrc_file = os.environ.get("NETRC")
- if netrc_file is not None:
- netrc_locations = (netrc_file,)
- else:
- netrc_locations = (f"~/{f}" for f in NETRC_FILES)
-
- try:
- from netrc import NetrcParseError, netrc
-
- netrc_path = None
-
- for f in netrc_locations:
- try:
- loc = os.path.expanduser(f)
- except KeyError:
- # os.path.expanduser can fail when $HOME is undefined and
- # getpwuid fails. See https://bugs.python.org/issue20164 &
- # https://github.com/psf/requests/issues/1846
- return
-
- if os.path.exists(loc):
- netrc_path = loc
- break
-
- # Abort early if there isn't one.
- if netrc_path is None:
- return
-
- ri = urlparse(url)
-
- # Strip port numbers from netloc. The `splitstr` dance keeps the
- # separator's type (bytes vs str) in line with `ri.netloc`, which
- # mirrors the type of the url passed in.
- splitstr = b":"
- if isinstance(url, str):
- splitstr = splitstr.decode("ascii")
- host = ri.netloc.split(splitstr)[0]
-
- try:
- _netrc = netrc(netrc_path).authenticators(host)
- if _netrc:
- # Return with login / password
- login_i = 0 if _netrc[0] else 1
- return (_netrc[login_i], _netrc[2])
- except (NetrcParseError, OSError):
- # If there was a parsing error or a permissions issue reading the file,
- # we'll just skip netrc auth unless explicitly asked to raise errors.
- if raise_errors:
- raise
-
- # App Engine hackiness.
- except (ImportError, AttributeError):
- pass
-
-
-def guess_filename(obj):
- """Tries to guess the filename of the given object."""
- name = getattr(obj, "name", None)
- if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
- return os.path.basename(name)
-
-
-def extract_zipped_paths(path):
- """Replace nonexistent paths that look like they refer to a member of a zip
- archive with the location of an extracted copy of the target, or else
- just return the provided path unchanged.
- """
- if os.path.exists(path):
- # this is already a valid path, no need to do anything further
- return path
-
- # find the first valid part of the provided path and treat that as a zip archive
- # assume the rest of the path is the name of a member in the archive
- archive, member = os.path.split(path)
- while archive and not os.path.exists(archive):
- archive, prefix = os.path.split(archive)
- if not prefix:
- # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
- # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
- break
- member = "/".join([prefix, member])
-
- if not zipfile.is_zipfile(archive):
- return path
-
- zip_file = zipfile.ZipFile(archive)
- if member not in zip_file.namelist():
- return path
-
- # we have a valid zip archive and a valid member of that archive
- tmp = tempfile.gettempdir()
- extracted_path = os.path.join(tmp, member.split("/")[-1])
- if not os.path.exists(extracted_path):
- # use read + write to avoid creating nested folders; we only want the file, and this also avoids an mkdir race condition
- with atomic_open(extracted_path) as file_handler:
- file_handler.write(zip_file.read(member))
- return extracted_path
-
-
-@contextlib.contextmanager
-def atomic_open(filename):
- """Write a file to the disk in an atomic fashion"""
- tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
- try:
- with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
- yield tmp_handler
- os.replace(tmp_name, filename)
- except BaseException:
- os.remove(tmp_name)
- raise
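-
-
-# Editor's note: illustrative sketch (not part of the original module); the
-# file name used here is arbitrary. atomic_open stages the write in a
-# temporary file and only renames it into place once the write succeeds.
-def _example_atomic_open():
-    import os
-    import tempfile
-
-    target = os.path.join(tempfile.gettempdir(), "atomic_open_demo.bin")
-    with atomic_open(target) as fh:
-        fh.write(b"payload")  # written to a temp file, then os.replace()d into place
-    with open(target, "rb") as fh:
-        assert fh.read() == b"payload"
-    os.remove(target)  # clean up the demo file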
-
-
-def from_key_val_list(value):
- """Take an object and test to see if it can be represented as a
- dictionary. Unless it can not be represented as such, return an
- OrderedDict, e.g.,
-
- ::
-
- >>> from_key_val_list([('key', 'val')])
- OrderedDict([('key', 'val')])
- >>> from_key_val_list('string')
- Traceback (most recent call last):
- ...
- ValueError: cannot encode objects that are not 2-tuples
- >>> from_key_val_list({'key': 'val'})
- OrderedDict([('key', 'val')])
-
- :rtype: OrderedDict
- """
- if value is None:
- return None
-
- if isinstance(value, (str, bytes, bool, int)):
- raise ValueError("cannot encode objects that are not 2-tuples")
-
- return OrderedDict(value)
-
-
-def to_key_val_list(value):
- """Take an object and test to see if it can be represented as a
- dictionary. If it can be, return a list of tuples, e.g.,
-
- ::
-
- >>> to_key_val_list([('key', 'val')])
- [('key', 'val')]
- >>> to_key_val_list({'key': 'val'})
- [('key', 'val')]
- >>> to_key_val_list('string')
- Traceback (most recent call last):
- ...
- ValueError: cannot encode objects that are not 2-tuples
-
- :rtype: list
- """
- if value is None:
- return None
-
- if isinstance(value, (str, bytes, bool, int)):
- raise ValueError("cannot encode objects that are not 2-tuples")
-
- if isinstance(value, Mapping):
- value = value.items()
-
- return list(value)
-
-
-# From mitsuhiko/werkzeug (used with permission).
-def parse_list_header(value):
- """Parse lists as described by RFC 2068 Section 2.
-
- In particular, parse comma-separated lists where the elements of
- the list may include quoted-strings. A quoted-string could
- contain a comma. A non-quoted string could have quotes in the
- middle. Quotes are removed automatically after parsing.
-
- It basically works like :func:`parse_set_header` just that items
- may appear multiple times and case sensitivity is preserved.
-
- The return value is a standard :class:`list`:
-
- >>> parse_list_header('token, "quoted value"')
- ['token', 'quoted value']
-
- To create a header from the :class:`list` again, use the
- :func:`dump_header` function.
-
- :param value: a string with a list header.
- :return: :class:`list`
- :rtype: list
- """
- result = []
- for item in _parse_list_header(value):
- if item[:1] == item[-1:] == '"':
- item = unquote_header_value(item[1:-1])
- result.append(item)
- return result
-
-
-# From mitsuhiko/werkzeug (used with permission).
-def parse_dict_header(value):
- """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
- convert them into a python dict:
-
- >>> d = parse_dict_header('foo="is a fish", bar="as well"')
- >>> type(d) is dict
- True
- >>> sorted(d.items())
- [('bar', 'as well'), ('foo', 'is a fish')]
-
- If there is no value for a key it will be `None`:
-
- >>> parse_dict_header('key_without_value')
- {'key_without_value': None}
-
- To create a header from the :class:`dict` again, use the
- :func:`dump_header` function.
-
- :param value: a string with a dict header.
- :return: :class:`dict`
- :rtype: dict
- """
- result = {}
- for item in _parse_list_header(value):
- if "=" not in item:
- result[item] = None
- continue
- name, value = item.split("=", 1)
- if value[:1] == value[-1:] == '"':
- value = unquote_header_value(value[1:-1])
- result[name] = value
- return result
-
-
-# From mitsuhiko/werkzeug (used with permission).
-def unquote_header_value(value, is_filename=False):
- r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
- This does not use the real unquoting but what browsers are actually
- using for quoting.
-
- :param value: the header value to unquote.
- :rtype: str
- """
- if value and value[0] == value[-1] == '"':
- # this is not the real unquoting, but fixing this so that the
- # RFC is met will result in bugs with internet explorer and
- # probably some other browsers as well. IE for example is
- # uploading files with "C:\foo\bar.txt" as filename
- value = value[1:-1]
-
- # if this is a filename and the starting characters look like
- # a UNC path, then just return the value without quotes. Using the
- # replace sequence below on a UNC path has the effect of turning
- # the leading double slash into a single slash and then
- # _fix_ie_filename() doesn't work correctly. See #458.
- if not is_filename or value[:2] != "\\\\":
- return value.replace("\\\\", "\\").replace('\\"', '"')
- return value
-
-
-def dict_from_cookiejar(cj):
- """Returns a key/value dictionary from a CookieJar.
-
- :param cj: CookieJar object to extract cookies from.
- :rtype: dict
- """
-
- cookie_dict = {}
-
- for cookie in cj:
- cookie_dict[cookie.name] = cookie.value
-
- return cookie_dict
-
-
-def add_dict_to_cookiejar(cj, cookie_dict):
- """Returns a CookieJar from a key/value dictionary.
-
- :param cj: CookieJar to insert cookies into.
- :param cookie_dict: Dict of key/values to insert into CookieJar.
- :rtype: CookieJar
- """
-
- return cookiejar_from_dict(cookie_dict, cj)
-
-
-def get_encodings_from_content(content):
- """Returns encodings from given content string.
-
- :param content: bytestring to extract encodings from.
- """
- warnings.warn(
- (
- "In requests 3.0, get_encodings_from_content will be removed. For "
- "more information, please see the discussion on issue #2266. (This"
- " warning should only appear once.)"
- ),
- DeprecationWarning,
- )
-
- charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
- pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
- xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
-
- return (
- charset_re.findall(content)
- + pragma_re.findall(content)
- + xml_re.findall(content)
- )
-
-
-def _parse_content_type_header(header):
- """Returns content type and parameters from given header
-
- :param header: string
- :return: tuple containing content type and dictionary of
- parameters
- """
-
- tokens = header.split(";")
- content_type, params = tokens[0].strip(), tokens[1:]
- params_dict = {}
- items_to_strip = "\"' "
-
- for param in params:
- param = param.strip()
- if param:
- key, value = param, True
- index_of_equals = param.find("=")
- if index_of_equals != -1:
- key = param[:index_of_equals].strip(items_to_strip)
- value = param[index_of_equals + 1 :].strip(items_to_strip)
- params_dict[key.lower()] = value
- return content_type, params_dict
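-
-
-# Editor's note: illustrative sketch (not part of the original module).
-# Parameter keys are lowercased with quotes/spaces stripped; a parameter
-# without "=" maps to True.
-def _example_parse_content_type_header():
-    ctype, params = _parse_content_type_header('text/html; charset="UTF-8"; downloadable')
-    assert ctype == "text/html"
-    assert params == {"charset": "UTF-8", "downloadable": True}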
-
-
-def get_encoding_from_headers(headers):
- """Returns encodings from given HTTP Header Dict.
-
- :param headers: dictionary to extract encoding from.
- :rtype: str
- """
-
- content_type = headers.get("content-type")
-
- if not content_type:
- return None
-
- content_type, params = _parse_content_type_header(content_type)
-
- if "charset" in params:
- return params["charset"].strip("'\"")
-
- if "text" in content_type:
- return "ISO-8859-1"
-
- if "application/json" in content_type:
- # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
- return "utf-8"
-
-
-def stream_decode_response_unicode(iterator, r):
- """Stream decodes an iterator."""
-
- if r.encoding is None:
- yield from iterator
- return
-
- decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
- for chunk in iterator:
- rv = decoder.decode(chunk)
- if rv:
- yield rv
- rv = decoder.decode(b"", final=True)
- if rv:
- yield rv
-
-
-def iter_slices(string, slice_length):
- """Iterate over slices of a string."""
- pos = 0
- if slice_length is None or slice_length <= 0:
- slice_length = len(string)
- while pos < len(string):
- yield string[pos : pos + slice_length]
- pos += slice_length
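-
-
-# Editor's note: illustrative sketch (not part of the original module).
-def _example_iter_slices():
-    assert list(iter_slices("abcdefg", 3)) == ["abc", "def", "g"]
-    # a missing or non-positive length yields the whole string in one slice
-    assert list(iter_slices("abcdefg", None)) == ["abcdefg"]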
-
-
-def get_unicode_from_response(r):
- """Returns the requested content back in unicode.
-
- :param r: Response object to get unicode content from.
-
- Encodings are tried in order:
-
- 1. the charset from the content-type header
- 2. fall back to decoding with undecodable bytes replaced
-
- :rtype: str
- """
- warnings.warn(
- (
- "In requests 3.0, get_unicode_from_response will be removed. For "
- "more information, please see the discussion on issue #2266. (This"
- " warning should only appear once.)"
- ),
- DeprecationWarning,
- )
-
- tried_encodings = []
-
- # Try charset from content-type
- encoding = get_encoding_from_headers(r.headers)
-
- if encoding:
- try:
- return str(r.content, encoding)
- except UnicodeError:
- tried_encodings.append(encoding)
-
- # Fall back:
- try:
- return str(r.content, encoding, errors="replace")
- except TypeError:
- return r.content
-
-
-# The unreserved URI characters (RFC 3986)
-UNRESERVED_SET = frozenset(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
-)
-
-
-def unquote_unreserved(uri):
- """Un-escape any percent-escape sequences in a URI that are unreserved
- characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
-
- :rtype: str
- """
- parts = uri.split("%")
- for i in range(1, len(parts)):
- h = parts[i][0:2]
- if len(h) == 2 and h.isalnum():
- try:
- c = chr(int(h, 16))
- except ValueError:
- raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
-
- if c in UNRESERVED_SET:
- parts[i] = c + parts[i][2:]
- else:
- parts[i] = f"%{parts[i]}"
- else:
- parts[i] = f"%{parts[i]}"
- return "".join(parts)
-
-
-def requote_uri(uri):
- """Re-quote the given URI.
-
- This function passes the given URI through an unquote/quote cycle to
- ensure that it is fully and consistently quoted.
-
- :rtype: str
- """
- safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
- safe_without_percent = "!#$&'()*+,/:;=?@[]~"
- try:
- # Unquote only the unreserved characters
- # Then quote only illegal characters (do not quote reserved,
- # unreserved, or '%')
- return quote(unquote_unreserved(uri), safe=safe_with_percent)
- except InvalidURL:
- # We couldn't unquote the given URI, so let's try quoting it, but
- # there may be unquoted '%'s in the URI. We need to make sure they're
- # properly quoted so they do not cause issues elsewhere.
- return quote(uri, safe=safe_without_percent)
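-
-
-# Editor's note: illustrative sketch (not part of the original module).
-# unquote_unreserved only unescapes unreserved characters; requote_uri then
-# quotes anything illegal while leaving existing valid escapes alone.
-def _example_requote_uri():
-    assert unquote_unreserved("http://example.com/%7Euser/%2Fpath") == "http://example.com/~user/%2Fpath"
-    assert requote_uri("http://example.com/a b") == "http://example.com/a%20b"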
-
-
-def address_in_network(ip, net):
- """This function allows you to check if an IP belongs to a network subnet
-
- Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
- returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
-
- :rtype: bool
- """
- ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
- netaddr, bits = net.split("/")
- netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
- network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
- return (ipaddr & netmask) == (network & netmask)
-
-
-def dotted_netmask(mask):
- """Converts mask from /xx format to xxx.xxx.xxx.xxx
-
- Example: if mask is 24 function returns 255.255.255.0
-
- :rtype: str
- """
- bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
- return socket.inet_ntoa(struct.pack(">I", bits))
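-
-
-# Editor's note: illustrative sketch (not part of the original module),
-# mirroring the docstring examples above.
-def _example_address_in_network():
-    assert dotted_netmask(24) == "255.255.255.0"
-    assert address_in_network("192.168.1.1", "192.168.1.0/24")
-    assert not address_in_network("192.168.1.1", "192.168.100.0/24")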
-
-
-def is_ipv4_address(string_ip):
- """
- :rtype: bool
- """
- try:
- socket.inet_aton(string_ip)
- except OSError:
- return False
- return True
-
-
-def is_valid_cidr(string_network):
- """
- Very simple check of the cidr format in no_proxy variable.
-
- :rtype: bool
- """
- if string_network.count("/") == 1:
- try:
- mask = int(string_network.split("/")[1])
- except ValueError:
- return False
-
- if mask < 1 or mask > 32:
- return False
-
- try:
- socket.inet_aton(string_network.split("/")[0])
- except OSError:
- return False
- else:
- return False
- return True
-
-
-@contextlib.contextmanager
-def set_environ(env_name, value):
- """Set the environment variable 'env_name' to 'value'
-
- Save previous value, yield, and then restore the previous value stored in
- the environment variable 'env_name'.
-
- If 'value' is None, do nothing"""
- value_changed = value is not None
- if value_changed:
- old_value = os.environ.get(env_name)
- os.environ[env_name] = value
- try:
- yield
- finally:
- if value_changed:
- if old_value is None:
- del os.environ[env_name]
- else:
- os.environ[env_name] = old_value
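-
-
-# Editor's note: illustrative sketch (not part of the original module); the
-# variable name is arbitrary.
-def _example_set_environ():
-    import os
-
-    os.environ["REQUESTS_DEMO_VAR"] = "original"
-    with set_environ("REQUESTS_DEMO_VAR", "temporary"):
-        assert os.environ["REQUESTS_DEMO_VAR"] == "temporary"
-    # the previous value is restored on exit
-    assert os.environ["REQUESTS_DEMO_VAR"] == "original"
-    del os.environ["REQUESTS_DEMO_VAR"]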
-
-
-def should_bypass_proxies(url, no_proxy):
- """
- Returns whether we should bypass proxies or not.
-
- :rtype: bool
- """
- # Prioritize lowercase environment variables over uppercase
- # to keep a consistent behaviour with other http projects (curl, wget).
- def get_proxy(key):
- return os.environ.get(key) or os.environ.get(key.upper())
-
- # First check whether no_proxy is defined. If it is, check that the URL
- # we're getting isn't in the no_proxy list.
- no_proxy_arg = no_proxy
- if no_proxy is None:
- no_proxy = get_proxy("no_proxy")
- parsed = urlparse(url)
-
- if parsed.hostname is None:
- # URLs don't always have hostnames, e.g. file:/// urls.
- return True
-
- if no_proxy:
- # We need to check whether we match here. We need to see if we match
- # the end of the hostname, both with and without the port.
- no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
-
- if is_ipv4_address(parsed.hostname):
- for proxy_ip in no_proxy:
- if is_valid_cidr(proxy_ip):
- if address_in_network(parsed.hostname, proxy_ip):
- return True
- elif parsed.hostname == proxy_ip:
- # If no_proxy ip was defined in plain IP notation instead of cidr notation &
- # matches the IP of the index
- return True
- else:
- host_with_port = parsed.hostname
- if parsed.port:
- host_with_port += f":{parsed.port}"
-
- for host in no_proxy:
- if parsed.hostname.endswith(host) or host_with_port.endswith(host):
- # The URL does match something in no_proxy, so we don't want
- # to apply the proxies on this URL.
- return True
-
- with set_environ("no_proxy", no_proxy_arg):
- # parsed.hostname can be `None` in cases such as a file URI.
- try:
- bypass = proxy_bypass(parsed.hostname)
- except (TypeError, socket.gaierror):
- bypass = False
-
- if bypass:
- return True
-
- return False
-
-
-def get_environ_proxies(url, no_proxy=None):
- """
- Return a dict of environment proxies.
-
- :rtype: dict
- """
- if should_bypass_proxies(url, no_proxy=no_proxy):
- return {}
- else:
- return getproxies()
-
-
-def select_proxy(url, proxies):
- """Select a proxy for the url, if applicable.
-
- :param url: The url for the request
- :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
- """
- proxies = proxies or {}
- urlparts = urlparse(url)
- if urlparts.hostname is None:
- return proxies.get(urlparts.scheme, proxies.get("all"))
-
- proxy_keys = [
- urlparts.scheme + "://" + urlparts.hostname,
- urlparts.scheme,
- "all://" + urlparts.hostname,
- "all",
- ]
- proxy = None
- for proxy_key in proxy_keys:
- if proxy_key in proxies:
- proxy = proxies[proxy_key]
- break
-
- return proxy
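-
-
-# Editor's note: illustrative sketch (not part of the original module); the
-# hosts and proxy URLs are made up. More specific keys (scheme://host) win
-# over scheme-only keys, which win over "all".
-def _example_select_proxy():
-    proxies = {
-        "http://example.com": "http://proxy-a:3128",
-        "http": "http://proxy-b:3128",
-        "all": "http://proxy-c:3128",
-    }
-    assert select_proxy("http://example.com/path", proxies) == "http://proxy-a:3128"
-    assert select_proxy("http://other.org/", proxies) == "http://proxy-b:3128"
-    assert select_proxy("ftp://other.org/", proxies) == "http://proxy-c:3128"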
-
-
-def resolve_proxies(request, proxies, trust_env=True):
- """This method takes proxy information from a request and configuration
- input to resolve a mapping of target proxies. This will consider settings
- such as NO_PROXY to strip proxy configurations.
-
- :param request: Request or PreparedRequest
- :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
- :param trust_env: Boolean declaring whether to trust environment configs
-
- :rtype: dict
- """
- proxies = proxies if proxies is not None else {}
- url = request.url
- scheme = urlparse(url).scheme
- no_proxy = proxies.get("no_proxy")
- new_proxies = proxies.copy()
-
- if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
- environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
-
- proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
-
- if proxy:
- new_proxies.setdefault(scheme, proxy)
- return new_proxies
-
-
-def default_user_agent(name="python-requests"):
- """
- Return a string representing the default user agent.
-
- :rtype: str
- """
- return f"{name}/{__version__}"
-
-
-def default_headers():
- """
- :rtype: requests.structures.CaseInsensitiveDict
- """
- return CaseInsensitiveDict(
- {
- "User-Agent": default_user_agent(),
- "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
- "Accept": "*/*",
- "Connection": "keep-alive",
- }
- )
-
-
-def parse_header_links(value):
- """Return a list of parsed link headers proxies.
-
- i.e. Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg"
-
- :rtype: list
- """
-
- links = []
-
- replace_chars = " '\""
-
- value = value.strip(replace_chars)
- if not value:
- return links
-
- for val in re.split(", *<", value):
- try:
- url, params = val.split(";", 1)
- except ValueError:
- url, params = val, ""
-
- link = {"url": url.strip("<> '\"")}
-
- for param in params.split(";"):
- try:
- key, value = param.split("=")
- except ValueError:
- break
-
- link[key.strip(replace_chars)] = value.strip(replace_chars)
-
- links.append(link)
-
- return links
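-
-
-# Editor's note: illustrative sketch (not part of the original module); the
-# URLs are made up. This is the shape of a typical paginated API Link header.
-def _example_parse_header_links():
-    header = '<https://api.example.com/?page=2>; rel="next", <https://api.example.com/?page=5>; rel="last"'
-    assert parse_header_links(header) == [
-        {"url": "https://api.example.com/?page=2", "rel": "next"},
-        {"url": "https://api.example.com/?page=5", "rel": "last"},
-    ]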
-
-
-# Null bytes; no need to recreate these on each call to guess_json_utf
-_null = "\x00".encode("ascii") # encoding to ASCII for Python 3
-_null2 = _null * 2
-_null3 = _null * 3
-
-
-def guess_json_utf(data):
- """
- :rtype: str
- """
- # JSON always starts with two ASCII characters, so detection is as
- # easy as counting the nulls and from their location and count
- # determine the encoding. Also detect a BOM, if present.
- sample = data[:4]
- if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
- return "utf-32" # BOM included
- if sample[:3] == codecs.BOM_UTF8:
- return "utf-8-sig" # BOM included, MS style (discouraged)
- if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
- return "utf-16" # BOM included
- nullcount = sample.count(_null)
- if nullcount == 0:
- return "utf-8"
- if nullcount == 2:
- if sample[::2] == _null2: # 1st and 3rd are null
- return "utf-16-be"
- if sample[1::2] == _null2: # 2nd and 4th are null
- return "utf-16-le"
- # Did not detect 2 valid UTF-16 ascii-range characters
- if nullcount == 3:
- if sample[:3] == _null3:
- return "utf-32-be"
- if sample[1:] == _null3:
- return "utf-32-le"
- # Did not detect a valid UTF-32 ascii-range character
- return None
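-
-
-# Editor's note: illustrative sketch (not part of the original module),
-# showing how the null-byte pattern of the first four bytes identifies the
-# encoding of a JSON document.
-def _example_guess_json_utf():
-    import codecs
-
-    assert guess_json_utf(b'{"a": 1}') == "utf-8"
-    assert guess_json_utf('{"a": 1}'.encode("utf-16-le")) == "utf-16-le"
-    assert guess_json_utf('{"a": 1}'.encode("utf-32-be")) == "utf-32-be"
-    assert guess_json_utf(codecs.BOM_UTF8 + b"{}") == "utf-8-sig"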
-
-
-def prepend_scheme_if_needed(url, new_scheme):
- """Given a URL that may or may not have a scheme, prepend the given scheme.
- Does not replace a present scheme with the one provided as an argument.
-
- :rtype: str
- """
- parsed = parse_url(url)
- scheme, auth, host, port, path, query, fragment = parsed
-
- # A defect in urlparse determines that there isn't a netloc present in some
- # urls. We previously assumed parsing was overly cautious, and swapped the
- # netloc and path. Due to a lack of tests on the original defect, this is
- # maintained with parse_url for backwards compatibility.
- netloc = parsed.netloc
- if not netloc:
- netloc, path = path, netloc
-
- if auth:
- # parse_url doesn't provide the netloc with auth
- # so we'll add it ourselves.
- netloc = "@".join([auth, netloc])
- if scheme is None:
- scheme = new_scheme
- if path is None:
- path = ""
-
- return urlunparse((scheme, netloc, path, "", query, fragment))
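-
-
-# Editor's note: illustrative sketch (not part of the original module).
-def _example_prepend_scheme_if_needed():
-    assert prepend_scheme_if_needed("example.com/pub", "http") == "http://example.com/pub"
-    # an existing scheme is never replaced
-    assert prepend_scheme_if_needed("https://example.com", "http") == "https://example.com"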
-
-
-def get_auth_from_url(url):
- """Given a url with authentication components, extract them into a tuple of
- username,password.
-
- :rtype: (str,str)
- """
- parsed = urlparse(url)
-
- try:
- auth = (unquote(parsed.username), unquote(parsed.password))
- except (AttributeError, TypeError):
- auth = ("", "")
-
- return auth
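-
-
-# Editor's note: illustrative sketch (not part of the original module); the
-# credentials are made up. Percent-encoded userinfo is decoded.
-def _example_get_auth_from_url():
-    assert get_auth_from_url("https://user%40corp:s3cret@example.com/") == ("user@corp", "s3cret")
-    # a URL without credentials yields empty strings rather than raising
-    assert get_auth_from_url("https://example.com/") == ("", "")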
-
-
-def check_header_validity(header):
- """Verifies that header parts don't contain leading whitespace
- reserved characters, or return characters.
-
- :param header: tuple, in the format (name, value).
- """
- name, value = header
-
- for part in header:
- if type(part) not in HEADER_VALIDATORS:
- raise InvalidHeader(
- f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
- f"of type str or bytes, not {type(part)}"
- )
-
- _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
- _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
-
-
-def _validate_header_part(header_part, header_kind, validator):
- if not validator.match(header_part):
- raise InvalidHeader(
- f"Invalid leading whitespace, reserved character(s), or return"
- f"character(s) in header {header_kind}: {header_part!r}"
- )
-
-
-def urldefragauth(url):
- """
- Given a url remove the fragment and the authentication part.
-
- :rtype: str
- """
- scheme, netloc, path, params, query, fragment = urlparse(url)
-
- # see func:`prepend_scheme_if_needed`
- if not netloc:
- netloc, path = path, netloc
-
- netloc = netloc.rsplit("@", 1)[-1]
-
- return urlunparse((scheme, netloc, path, params, query, ""))
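-
-
-# Editor's note: illustrative sketch (not part of the original module).
-def _example_urldefragauth():
-    assert (
-        urldefragauth("https://user:pass@example.com/repo.git#readme")
-        == "https://example.com/repo.git"
-    )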
-
-
-def rewind_body(prepared_request):
- """Move file pointer back to its recorded starting position
- so it can be read again on redirect.
- """
- body_seek = getattr(prepared_request.body, "seek", None)
- if body_seek is not None and isinstance(
- prepared_request._body_position, integer_types
- ):
- try:
- body_seek(prepared_request._body_position)
- except OSError:
- raise UnrewindableBodyError(
- "An error occurred when rewinding request body for redirect."
- )
- else:
- raise UnrewindableBodyError("Unable to rewind request body for redirect.")
diff --git a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py b/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py
deleted file mode 100644
index 5447a37dabba339183f4e50ef44381ebc7a34998..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/DarkFeat/trainer_single_norel.py
+++ /dev/null
@@ -1,336 +0,0 @@
-import os
-import cv2
-import time
-import yaml
-import torch
-import datetime
-from tensorboardX import SummaryWriter
-import torchvision.transforms as tvf
-import torch.nn as nn
-import torch.nn.functional as F
-import numpy as np
-
-from nets.l2net import Quad_L2Net
-from nets.geom import getK, getWarp, _grid_positions
-from nets.loss import make_detector_loss
-from nets.score import extract_kpts
-from datasets.noise_simulator import NoiseSimulator
-
-
-class SingleTrainerNoRel:
- def __init__(self, config, device, loader, job_name, start_cnt):
- self.config = config
- self.device = device
- self.loader = loader
-
- # tensorboard writer construction
- os.makedirs("./runs/", exist_ok=True)
- if job_name != "":
- self.log_dir = f"runs/{job_name}"
- else:
- self.log_dir = f'runs/{datetime.datetime.now().strftime("%m-%d-%H%M%S")}'
-
- self.writer = SummaryWriter(self.log_dir)
- with open(f"{self.log_dir}/config.yaml", "w") as f:
- yaml.dump(config, f)
-
- if (
- config["network"]["input_type"] == "gray"
- or config["network"]["input_type"] == "raw-gray"
- ):
- self.model = eval(f'{config["network"]["model"]}(inchan=1)').to(device)
- elif (
- config["network"]["input_type"] == "rgb"
- or config["network"]["input_type"] == "raw-demosaic"
- ):
- self.model = eval(f'{config["network"]["model"]}(inchan=3)').to(device)
- elif config["network"]["input_type"] == "raw":
- self.model = eval(f'{config["network"]["model"]}(inchan=4)').to(device)
- else:
- raise NotImplementedError()
-
- # noise maker
- self.noise_maker = NoiseSimulator(device)
-
- # load model
- self.cnt = 0
- if start_cnt != 0:
- self.model.load_state_dict(
- torch.load(f"{self.log_dir}/model_{start_cnt:06d}.pth")
- )
- self.cnt = start_cnt + 1
-
- # optimizer and scheduler
- if self.config["training"]["optimizer"] == "SGD":
- self.optimizer = torch.optim.SGD(
- [
- {
- "params": self.model.parameters(),
- "initial_lr": self.config["training"]["lr"],
- }
- ],
- lr=self.config["training"]["lr"],
- momentum=self.config["training"]["momentum"],
- weight_decay=self.config["training"]["weight_decay"],
- )
- elif self.config["training"]["optimizer"] == "Adam":
- self.optimizer = torch.optim.Adam(
- [
- {
- "params": self.model.parameters(),
- "initial_lr": self.config["training"]["lr"],
- }
- ],
- lr=self.config["training"]["lr"],
- weight_decay=self.config["training"]["weight_decay"],
- )
- else:
- raise NotImplementedError()
-
- self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
- self.optimizer,
- step_size=self.config["training"]["lr_step"],
- gamma=self.config["training"]["lr_gamma"],
- last_epoch=start_cnt,
- )
- for param_tensor in self.model.state_dict():
- print(param_tensor, "\t", self.model.state_dict()[param_tensor].size())
-
- def save(self, iter_num):
- torch.save(self.model.state_dict(), f"{self.log_dir}/model_{iter_num:06d}.pth")
-
- def load(self, path):
- self.model.load_state_dict(torch.load(path))
-
- def train(self):
- self.model.train()
-
- for epoch in range(2):
- for batch_idx, inputs in enumerate(self.loader):
- self.optimizer.zero_grad()
- t = time.time()
-
- # preprocess and add noise
- img0_ori, noise_img0_ori = self.preprocess_noise_pair(
- inputs["img0"], self.cnt
- )
- img1_ori, noise_img1_ori = self.preprocess_noise_pair(
- inputs["img1"], self.cnt
- )
-
- img0 = img0_ori.permute(0, 3, 1, 2).float().to(self.device)
- img1 = img1_ori.permute(0, 3, 1, 2).float().to(self.device)
- noise_img0 = noise_img0_ori.permute(0, 3, 1, 2).float().to(self.device)  # referenced in the rgb/gray branches below
- noise_img1 = noise_img1_ori.permute(0, 3, 1, 2).float().to(self.device)
-
- if self.config["network"]["input_type"] == "rgb":
- # 3-channel rgb
- RGB_mean = [0.485, 0.456, 0.406]
- RGB_std = [0.229, 0.224, 0.225]
- norm_RGB = tvf.Normalize(mean=RGB_mean, std=RGB_std)
- img0 = norm_RGB(img0)
- img1 = norm_RGB(img1)
- noise_img0 = norm_RGB(noise_img0)
- noise_img1 = norm_RGB(noise_img1)
-
- elif self.config["network"]["input_type"] == "gray":
- # 1-channel
- img0 = torch.mean(img0, dim=1, keepdim=True)
- img1 = torch.mean(img1, dim=1, keepdim=True)
- noise_img0 = torch.mean(noise_img0, dim=1, keepdim=True)
- noise_img1 = torch.mean(noise_img1, dim=1, keepdim=True)
- norm_gray0 = tvf.Normalize(mean=img0.mean(), std=img0.std())
- norm_gray1 = tvf.Normalize(mean=img1.mean(), std=img1.std())
- img0 = norm_gray0(img0)
- img1 = norm_gray1(img1)
- noise_img0 = norm_gray0(noise_img0)
- noise_img1 = norm_gray1(noise_img1)
-
- elif self.config["network"]["input_type"] == "raw":
- # 4-channel
- pass
-
- elif self.config["network"]["input_type"] == "raw-demosaic":
- # 3-channel
- pass
-
- else:
- raise NotImplementedError()
-
- desc0, score_map0, _, _ = self.model(img0)
- desc1, score_map1, _, _ = self.model(img1)
-
- cur_feat_size0 = torch.tensor(score_map0.shape[2:])
- cur_feat_size1 = torch.tensor(score_map1.shape[2:])
-
- desc0 = desc0.permute(0, 2, 3, 1)
- desc1 = desc1.permute(0, 2, 3, 1)
- score_map0 = score_map0.permute(0, 2, 3, 1)
- score_map1 = score_map1.permute(0, 2, 3, 1)
-
- r_K0 = getK(inputs["ori_img_size0"], cur_feat_size0, inputs["K0"]).to(
- self.device
- )
- r_K1 = getK(inputs["ori_img_size1"], cur_feat_size1, inputs["K1"]).to(
- self.device
- )
-
- pos0 = _grid_positions(
- cur_feat_size0[0], cur_feat_size0[1], img0.shape[0]
- ).to(self.device)
-
- pos0, pos1, _ = getWarp(
- pos0,
- inputs["rel_pose"].to(self.device),
- inputs["depth0"].to(self.device),
- r_K0,
- inputs["depth1"].to(self.device),
- r_K1,
- img0.shape[0],
- )
-
- det_structured_loss, det_accuracy = make_detector_loss(
- pos0,
- pos1,
- desc0,
- desc1,
- score_map0,
- score_map1,
- img0.shape[0],
- self.config["network"]["use_corr_n"],
- self.config["network"]["loss_type"],
- self.config,
- )
-
- total_loss = det_structured_loss
-
- self.writer.add_scalar("acc/normal_acc", det_accuracy, self.cnt)
- self.writer.add_scalar("loss/total_loss", total_loss, self.cnt)
- self.writer.add_scalar(
- "loss/det_loss_normal", det_structured_loss, self.cnt
- )
- print(
- "iter={},\tloss={:.4f},\tacc={:.4f},\t{:.4f}s/iter".format(
- self.cnt, total_loss, det_accuracy, time.time() - t
- )
- )
-
- if det_structured_loss != 0:
- total_loss.backward()
- self.optimizer.step()
- self.lr_scheduler.step()
-
- if self.cnt % 100 == 0:
- indices0, scores0 = extract_kpts(
- score_map0.permute(0, 3, 1, 2),
- k=self.config["network"]["det"]["kpt_n"],
- score_thld=self.config["network"]["det"]["score_thld"],
- nms_size=self.config["network"]["det"]["nms_size"],
- eof_size=self.config["network"]["det"]["eof_size"],
- edge_thld=self.config["network"]["det"]["edge_thld"],
- )
- indices1, scores1 = extract_kpts(
- score_map1.permute(0, 3, 1, 2),
- k=self.config["network"]["det"]["kpt_n"],
- score_thld=self.config["network"]["det"]["score_thld"],
- nms_size=self.config["network"]["det"]["nms_size"],
- eof_size=self.config["network"]["det"]["eof_size"],
- edge_thld=self.config["network"]["det"]["edge_thld"],
- )
-
- if self.config["network"]["input_type"] == "raw":
- kpt_img0 = self.showKeyPoints(
- img0_ori[0][..., :3] * 255.0, indices0[0]
- )
- kpt_img1 = self.showKeyPoints(
- img1_ori[0][..., :3] * 255.0, indices1[0]
- )
- else:
- kpt_img0 = self.showKeyPoints(img0_ori[0] * 255.0, indices0[0])
- kpt_img1 = self.showKeyPoints(img1_ori[0] * 255.0, indices1[0])
-
- self.writer.add_image(
- "img0/kpts", kpt_img0, self.cnt, dataformats="HWC"
- )
- self.writer.add_image(
- "img1/kpts", kpt_img1, self.cnt, dataformats="HWC"
- )
- self.writer.add_image(
- "img0/score_map", score_map0[0], self.cnt, dataformats="HWC"
- )
- self.writer.add_image(
- "img1/score_map", score_map1[0], self.cnt, dataformats="HWC"
- )
-
- if self.cnt % 10000 == 0:
- self.save(self.cnt)
-
- self.cnt += 1
-
- def showKeyPoints(self, img, indices):
- key_points = cv2.KeyPoint_convert(indices.cpu().float().numpy()[:, ::-1])
- img = img.numpy().astype("uint8")
- img = cv2.drawKeypoints(img, key_points, None, color=(0, 255, 0))
- return img
-
- def preprocess(self, img, iter_idx):
- if (
- not self.config["network"]["noise"]
- and "raw" not in self.config["network"]["input_type"]
- ):
- return img
-
- raw = self.noise_maker.rgb2raw(img, batched=True)
-
- if self.config["network"]["noise"]:
- ratio_dec = (
- min(self.config["network"]["noise_maxstep"], iter_idx)
- / self.config["network"]["noise_maxstep"]
- )
- raw = self.noise_maker.raw2noisyRaw(raw, ratio_dec=ratio_dec, batched=True)
-
- if self.config["network"]["input_type"] == "raw":
- return torch.tensor(self.noise_maker.raw2packedRaw(raw, batched=True))
-
- if self.config["network"]["input_type"] == "raw-demosaic":
- return torch.tensor(self.noise_maker.raw2demosaicRaw(raw, batched=True))
-
- rgb = self.noise_maker.raw2rgb(raw, batched=True)
- if (
- self.config["network"]["input_type"] == "rgb"
- or self.config["network"]["input_type"] == "gray"
- ):
- return torch.tensor(rgb)
-
- raise NotImplementedError()
-
- def preprocess_noise_pair(self, img, iter_idx):
- assert self.config["network"]["noise"]
-
- raw = self.noise_maker.rgb2raw(img, batched=True)
-
- ratio_dec = (
- min(self.config["network"]["noise_maxstep"], iter_idx)
- / self.config["network"]["noise_maxstep"]
- )
- noise_raw = self.noise_maker.raw2noisyRaw(
- raw, ratio_dec=ratio_dec, batched=True
- )
-
- if self.config["network"]["input_type"] == "raw":
- return torch.tensor(
- self.noise_maker.raw2packedRaw(raw, batched=True)
- ), torch.tensor(self.noise_maker.raw2packedRaw(noise_raw, batched=True))
-
- if self.config["network"]["input_type"] == "raw-demosaic":
- return torch.tensor(
- self.noise_maker.raw2demosaicRaw(raw, batched=True)
- ), torch.tensor(self.noise_maker.raw2demosaicRaw(noise_raw, batched=True))
-
- noise_rgb = self.noise_maker.raw2rgb(noise_raw, batched=True)
- if (
- self.config["network"]["input_type"] == "rgb"
- or self.config["network"]["input_type"] == "gray"
- ):
- return img, torch.tensor(noise_rgb)
-
- raise NotImplementedError()
diff --git a/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py b/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py
deleted file mode 100644
index 23b003d9003c47c0095ea00b02a0f6e1c987a789..0000000000000000000000000000000000000000
--- a/spaces/Reeve/Ohayou_Face/training/stylegan2_multi.py
+++ /dev/null
@@ -1,414 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import numpy as np
-import torch
-from torch_utils import misc
-from torch_utils import persistence
-from torch_utils.ops import conv2d_resample
-from torch_utils.ops import upfirdn2d
-from torch_utils.ops import bias_act
-from torch_utils.ops import fma
-
-from .networks import FullyConnectedLayer, Conv2dLayer, ToRGBLayer, MappingNetwork
-
-from util.utilgan import hw_scales, fix_size, multimask
-
-@misc.profiled_function
-def modulated_conv2d(
- x, # Input tensor of shape [batch_size, in_channels, in_height, in_width].
- weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
- styles, # Modulation coefficients of shape [batch_size, in_channels].
-# !!! custom
- # latmask, # mask for split-frame latents blending
- countHW = [1,1], # frame split count by height,width
- splitfine = 0., # frame split edge fineness (float from 0+)
- size = None, # custom size
- scale_type = None, # scaling way: fit, centr, side, pad, padside
- noise = None, # Optional noise tensor to add to the output activations.
- up = 1, # Integer upsampling factor.
- down = 1, # Integer downsampling factor.
- padding = 0, # Padding with respect to the upsampled image.
- resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
- demodulate = True, # Apply weight demodulation?
- flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
- fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation?
-):
- batch_size = x.shape[0]
- out_channels, in_channels, kh, kw = weight.shape
- misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
- misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
-
- # Pre-normalize inputs to avoid FP16 overflow.
- if x.dtype == torch.float16 and demodulate:
- weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
- styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
-
- # Calculate per-sample weights and demodulation coefficients.
- w = None
- dcoefs = None
- if demodulate or fused_modconv:
- w = weight.unsqueeze(0) # [NOIkk]
- w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
- if demodulate:
- dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
- if demodulate and fused_modconv:
- w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
-
- # Execute by scaling the activations before and after the convolution.
- if not fused_modconv:
- x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
- x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
-# !!! custom size & multi latent blending
- if size is not None and up==2:
- x = fix_size(x, size, scale_type)
- # x = multimask(x, size, latmask, countHW, splitfine)
- if demodulate and noise is not None:
- x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
- elif demodulate:
- x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
- elif noise is not None:
- x = x.add_(noise.to(x.dtype))
- return x
-
- # Execute as one fused op using grouped convolution.
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- batch_size = int(batch_size)
- misc.assert_shape(x, [batch_size, in_channels, None, None])
- x = x.reshape(1, -1, *x.shape[2:])
- w = w.reshape(-1, in_channels, kh, kw)
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
- x = x.reshape(batch_size, -1, *x.shape[2:])
-# !!! custom size & multi latent blending
- if size is not None and up==2:
- x = fix_size(x, size, scale_type)
- # x = multimask(x, size, latmask, countHW, splitfine)
- if noise is not None:
- x = x.add_(noise)
- return x
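-
-
-# Editor's addition: a minimal, standalone sketch of the modulate/demodulate
-# step used above, written with plain torch (no custom ops) so the per-sample
-# weight scaling is explicit. Shapes and names are illustrative assumptions.
-def _example_modulation_demodulation():
-    import torch
-
-    batch, out_ch, in_ch, k = 2, 8, 4, 3
-    weight = torch.randn(out_ch, in_ch, k, k)
-    styles = torch.randn(batch, in_ch)
-    w = weight.unsqueeze(0) * styles.reshape(batch, 1, -1, 1, 1)   # modulate per sample [NOIkk]
-    dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()        # demodulation coeffs [NO]
-    w = w * dcoefs.reshape(batch, -1, 1, 1, 1)                     # demodulate
-    # after demodulation each per-sample output filter has ~unit L2 norm
-    norms = w.square().sum(dim=[2, 3, 4]).sqrt()
-    assert torch.allclose(norms, torch.ones_like(norms), atol=1e-4)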
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class SynthesisLayer(torch.nn.Module):
- def __init__(self,
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- w_dim, # Intermediate latent (W) dimensionality.
- resolution, # Resolution of this layer.
-# !!! custom
- countHW = [1,1], # frame split count by height,width
- splitfine = 0., # frame split edge fineness (float from 0+)
- size = None, # custom size
- scale_type = None, # scaling way: fit, centr, side, pad, padside
- init_res = [4,4], # Initial (minimal) resolution for progressive training
- kernel_size = 3, # Convolution kernel size.
- up = 1, # Integer upsampling factor.
- use_noise = True, # Enable noise input?
- activation = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
- conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
- channels_last = False, # Use channels_last format for the weights?
- ):
- super().__init__()
- self.resolution = resolution
- self.countHW = countHW # !!! custom
- self.splitfine = splitfine # !!! custom
- self.size = size # !!! custom
- self.scale_type = scale_type # !!! custom
- self.init_res = init_res # !!! custom
- self.up = up
- self.use_noise = use_noise
- self.activation = activation
- self.conv_clamp = conv_clamp
- self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
- self.padding = kernel_size // 2
- self.act_gain = bias_act.activation_funcs[activation].def_gain
-
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
- self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
- if use_noise:
-# !!! custom
- self.register_buffer('noise_const', torch.randn([resolution * init_res[0]//4, resolution * init_res[1]//4]))
- # self.register_buffer('noise_const', torch.randn([resolution, resolution]))
- self.noise_strength = torch.nn.Parameter(torch.zeros([]))
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
-
-# !!! custom
- # def forward(self, x, latmask, w, noise_mode='random', fused_modconv=True, gain=1):
- def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
- assert noise_mode in ['random', 'const', 'none']
- in_resolution = self.resolution // self.up
- # misc.assert_shape(x, [None, self.weight.shape[1], in_resolution, in_resolution])
- styles = self.affine(w)
-
- noise = None
- if self.use_noise and noise_mode == 'random':
-# !!! custom
- sz = self.size if self.up==2 and self.size is not None else x.shape[2:]
- noise = torch.randn([x.shape[0], 1, *sz], device=x.device) * self.noise_strength
- # noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
- if self.use_noise and noise_mode == 'const':
- noise = self.noise_const * self.noise_strength
-# !!! custom noise size
- noise_size = self.size if self.up==2 and self.size is not None and self.resolution > 4 else x.shape[2:]
- noise = fix_size(noise.unsqueeze(0).unsqueeze(0), noise_size, scale_type=self.scale_type)[0][0]
-
- # print(x.shape, noise.shape, self.size, self.up)
-
- flip_weight = (self.up == 1) # slightly faster
- # x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
- # latmask=latmask, countHW=self.countHW, splitfine=self.splitfine, size=self.size, scale_type=self.scale_type, # !!! custom
- # padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
-
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
- countHW=self.countHW, splitfine=self.splitfine, size=self.size, scale_type=self.scale_type, # !!! custom
- padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
-
-
- act_gain = self.act_gain * gain
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
- x = bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
- return x
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class SynthesisBlock(torch.nn.Module):
- def __init__(self,
- in_channels, # Number of input channels, 0 = first block.
- out_channels, # Number of output channels.
- w_dim, # Intermediate latent (W) dimensionality.
- resolution, # Resolution of this block.
- img_channels, # Number of output color channels.
- is_last, # Is this the last block?
-# !!! custom
- size = None, # custom size
- scale_type = None, # scaling way: fit, centr, side, pad, padside
- init_res = [4,4], # Initial (minimal) resolution for progressive training
- architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
- resample_filter = [1,3,3,1], # Low-pass filter to apply when resampling activations.
- conv_clamp = None, # Clamp the output of convolution layers to +-X, None = disable clamping.
- use_fp16 = False, # Use FP16 for this block?
- fp16_channels_last = False, # Use channels-last memory format with FP16?
- **layer_kwargs, # Arguments for SynthesisLayer.
- ):
- assert architecture in ['orig', 'skip', 'resnet']
- super().__init__()
- self.in_channels = in_channels
- self.w_dim = w_dim
- self.resolution = resolution
- self.size = size # !!! custom
- self.scale_type = scale_type # !!! custom
- self.init_res = init_res # !!! custom
- self.img_channels = img_channels
- self.is_last = is_last
- self.architecture = architecture
- self.use_fp16 = use_fp16
- self.channels_last = (use_fp16 and fp16_channels_last)
- self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
- self.num_conv = 0
- self.num_torgb = 0
-
- if in_channels == 0:
-# !!! custom
- self.const = torch.nn.Parameter(torch.randn([out_channels, *init_res]))
- # self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))
-
- if in_channels != 0:
- self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
- init_res=init_res, scale_type=scale_type, size=size, # !!! custom
- resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
- self.num_conv += 1
-
- self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
- init_res=init_res, scale_type=scale_type, size=size, # !!! custom
- conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
- self.num_conv += 1
-
- if is_last or architecture == 'skip':
- self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
- conv_clamp=conv_clamp, channels_last=self.channels_last)
- self.num_torgb += 1
-
- if in_channels != 0 and architecture == 'resnet':
- self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
- resample_filter=resample_filter, channels_last=self.channels_last)
-
-# !!! custom
- # def forward(self, x, img, ws, latmask, dconst, force_fp32=False, fused_modconv=None, **layer_kwargs):
- def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, **layer_kwargs):
- misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
- w_iter = iter(ws.unbind(dim=1))
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
- if fused_modconv is None:
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- fused_modconv = (not self.training) and (dtype == torch.float32 or int(x.shape[0]) == 1)
-
- # Input.
- if self.in_channels == 0:
- x = self.const.to(dtype=dtype, memory_format=memory_format)
- x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
-# !!! custom const size
- if 'side' in self.scale_type and 'symm' in self.scale_type: # looks better
- const_size = self.init_res if self.size is None else self.size
- x = fix_size(x, const_size, self.scale_type)
-# distortion technique from Aydao
- # x += dconst
- else:
- # misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
- x = x.to(dtype=dtype, memory_format=memory_format)
-
- # Main layers.
- if self.in_channels == 0:
-# !!! custom latmask
- # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- elif self.architecture == 'resnet':
- y = self.skip(x, gain=np.sqrt(0.5))
-# !!! custom latmask
- # x = self.conv0(x, latmask, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
- x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
- x = y.add_(x)
- else:
-# !!! custom latmask
- # x = self.conv0(x, latmask, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- # x = self.conv1(x, None, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
-
- # ToRGB.
- if img is not None:
-# !!! custom img size
- # misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
- img = upfirdn2d.upsample2d(img, self.resample_filter)
- img = fix_size(img, self.size, scale_type=self.scale_type)
-
- if self.is_last or self.architecture == 'skip':
- y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
- y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
- img = img.add_(y) if img is not None else y
-
- assert x.dtype == dtype
- assert img is None or img.dtype == torch.float32
- return x, img
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class SynthesisNetwork(torch.nn.Module):
- def __init__(self,
- w_dim, # Intermediate latent (W) dimensionality.
- img_resolution, # Output image resolution.
- img_channels, # Number of color channels.
-# !!! custom
- init_res = [4,4], # Initial (minimal) resolution for progressive training
- size = None, # Output size
- scale_type = None, # scaling way: fit, centr, side, pad, padside
- channel_base = 32768, # Overall multiplier for the number of channels.
- channel_max = 512, # Maximum number of channels in any layer.
- num_fp16_res = 0, # Use FP16 for the N highest resolutions.
- verbose = False, #
- **block_kwargs, # Arguments for SynthesisBlock.
- ):
- assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
- super().__init__()
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.res_log2 = int(np.log2(img_resolution))
- self.img_channels = img_channels
- self.fmap_base = channel_base
- self.block_resolutions = [2 ** i for i in range(2, self.res_log2 + 1)]
- channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
- fp16_resolution = max(2 ** (self.res_log2 + 1 - num_fp16_res), 8)
-
- # calculate intermediate layers sizes for arbitrary output resolution
- custom_res = (img_resolution * init_res[0] // 4, img_resolution * init_res[1] // 4)
- if size is None: size = custom_res
- if init_res != [4,4] and verbose:
- print(' .. init res', init_res, size)
- keep_first_layers = 2 if scale_type == 'fit' else None
- hws = hw_scales(size, custom_res, self.res_log2 - 2, keep_first_layers, verbose)
- if verbose: print(hws, '..', custom_res, self.res_log2-1)
-
- self.num_ws = 0
- for i, res in enumerate(self.block_resolutions):
- in_channels = channels_dict[res // 2] if res > 4 else 0
- out_channels = channels_dict[res]
- use_fp16 = (res >= fp16_resolution)
- is_last = (res == self.img_resolution)
- block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
- init_res=init_res, scale_type=scale_type, size=hws[i], # !!! custom
- img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
- self.num_ws += block.num_conv
- if is_last:
- self.num_ws += block.num_torgb
- setattr(self, f'b{res}', block)
-
- # def forward(self, ws, latmask, dconst, **block_kwargs):
- def forward(self, ws, **block_kwargs):
- block_ws = []
- with torch.autograd.profiler.record_function('split_ws'):
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
- ws = ws.to(torch.float32)
- w_idx = 0
- for res in self.block_resolutions:
- block = getattr(self, f'b{res}')
- block_ws.append(ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
- w_idx += block.num_conv
-
- x = img = None
- for res, cur_ws in zip(self.block_resolutions, block_ws):
- block = getattr(self, f'b{res}')
-# !!! custom
- # x, img = block(x, img, cur_ws, latmask, dconst, **block_kwargs)
- x, img = block(x, img, cur_ws, **block_kwargs)
- return img
-
-#----------------------------------------------------------------------------
-
-@persistence.persistent_class
-class Generator(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- c_dim, # Conditioning label (C) dimensionality.
- w_dim, # Intermediate latent (W) dimensionality.
- img_resolution, # Output resolution.
- img_channels, # Number of output color channels.
-# !!! custom
- init_res = [4,4], # Initial (minimal) resolution for progressive training
- mapping_kwargs = {}, # Arguments for MappingNetwork.
- synthesis_kwargs = {}, # Arguments for SynthesisNetwork.
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.init_res = init_res # !!! custom
- self.img_channels = img_channels
-# !!! custom
- self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, init_res=init_res, img_channels=img_channels, **synthesis_kwargs) # !!! custom
- self.num_ws = self.synthesis.num_ws
- self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
-# !!! custom
- self.output_shape = [1, img_channels, img_resolution * init_res[0] // 4, img_resolution * init_res[1] // 4]
-
-# !!! custom
- # def forward(self, z, c, latmask, dconst, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
- # def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, **synthesis_kwargs):
- ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
- # img = self.synthesis(ws, latmask, dconst, **synthesis_kwargs) # !!! custom
- img = self.synthesis(ws, **synthesis_kwargs) # !!! custom
- return img
diff --git a/spaces/Rifd/Sdallmodels/app.py b/spaces/Rifd/Sdallmodels/app.py
deleted file mode 100644
index f397cd755aebfbccab9ee9d1a0491461aec2766d..0000000000000000000000000000000000000000
--- a/spaces/Rifd/Sdallmodels/app.py
+++ /dev/null
@@ -1,1280 +0,0 @@
-import gradio as gr
-import os
-import sys
-from pathlib import Path
-
-models = [
- "Yntec/Reanimate",
- "Yntec/Deliberate2",
- "Yntec/AgarthaChadstyle",
- "Yntec/526",
- "Yntec/526Mix",
- "Yntec/UberRealisticLegacy",
- "Yntec/fennPhoto",
- "Yntec/makeitdoubleplz",
- "Yntec/ChiliConCarne",
- "Yntec/m0nst3rfy3",
- "Yntec/DucHaitenLofi",
- "Yntec/DreamWorks",
- "Yntec/SillySymphonies",
- "Yntec/AnythingV3-768",
- "Yntec/MeinaAlter",
- "Yntec/YiffyMix",
- "Yntec/LuckyStrike",
- "Yntec/Crayon",
- "Yntec/Yuzu",
- "Yntec/WoopWoopAnime",
- "Yntec/PotaytoPotahto",
- "Yntec/Protogen",
- "Yntec/Deliberate", #1K
- "Yntec/DeliberateRealisticWoop", #1K
- "Yntec/EstheticRetroAnime", #1K
- "Yntec/DucHaiten-GoldenLife",
- "Yntec/3DCuteWave",
- "Yntec/GoldenEra", #1K
- "Yntec/ClassicEra", #1K
- "Yntec/GoodLife", #1K
- "Yntec/Hassanim", #1K
- "Yntec/DeliberateRemix", #1K
- "Yntec/3DCute", #2K
- "Yntec/SuperCuteRemix", #2K
- "Yntec/Trending", #2K
- "Yntec/DreamWorld", #3K
- "Yntec/MGM", #3K
- "Yntec/3DKX/", #3K
- "Yntec/3DKXv11", #3K
- "Yntec/Cute", #3K
- "Yntec/DreamFulV2", #3K
- "Yntec/DucHaitenDarkside4", #3K
- "Yntec/Citrus", #3K
- "Yntec/Classic", #3K
- "Yntec/BasilRemix", #3K
- "Yntec/BeautyFool", #4K
- "Yntec/CyberRealistic", #4K
- "Yntec/Lyriel", #4K
- "Yntec/3DRendering", #4K
- "Yntec/aMovieTrend", #2K
- "Yntec/Dreamscape", #2K
- "Yntec/elldrethSVividMix", #2K
- "Yntec/elldrethSLucidMix", #2K
- "Yntec/CitrineDreamMix", #2K
- "Yntec/elldrethsImagination", #2K
- "Yntec/ReVAnimated768", #2K
- "Yntec/OpenNijiRemix", #2K
- "Yntec/Dreamful3", #5K
- "Yntec/BrandiMilne", #6K
- "Yntec/dosmixVAE", #3K
- "Yntec/aPhotographicTrend", #3K
- "Yntec/BeenYou", #3K
- "Yntec/level4", #3K
- "Yntec/AbsoluteRemix", #7K
- "Yntec/mistoonAnime2", #7K
- "Yntec/DucHaiten-FANCYxFANCY",#7K
- "Yntec/3Danimation", #4K
- "Yntec/DucHaitenNiji", #4K
- "Yntec/Darkside", #4K
- "Yntec/animeTEN", #4K
- "Yntec/Dreamscapes_n_Dragonfire_v2", #4K
- "Yntec/Cetus", #4K
- "Yntec/lamettaRemix", #5K
- "Yntec/lametta", #2K
- "Yntec/RadiantCinemagic", #5K
- "Yntec/RadiantVibes", #3K
- "Yntec/NeverEndingDream768", #3K
- "Yntec/Dreamlike", #3K
- "Yntec/LAMEanime", #10K
- "Yntec/Dreamshaper8", #12K
- "Yntec/Oiran", #6K
- "Yntec/RealCartoon3D", #6K
- "Yntec/animeTWO", #6K
- "Yntec/lamettaNightly", #6K
- "Yntec/REV", #6K
- "Yntec/Thriller", #13K
- "Yntec/Splash", #7K
- "Yntec/OpenGenDiffusers", #7K
- "Yntec/epiCRealismVAE", #8K
- "Yntec/LehinaModel", #8K
- "Yntec/NaughtyChildren", #9K
- "Yntec/vividicAnime", #9K
- "Yntec/HassanBlend12", #10
- "Yntec/HassanBlend1512VAE", #9K
- "Yntec/animeSEXTILLION/", #15K
- "Yntec/AbsoluteReality", #15K
- "Yntec/CetusRemix", #16K
- "Yntec/edgeOfRealism", #25K
- "Yntec/aMovieX/", #28K
- "Yntec/photoMovieXFinal", #31K
- "Yntec/nuipenimix2", #34K
- "Yntec/epiCPhotoGasm", #40K
- "Yntec/HitenDiffusion", #2K
- "Yntec/DreamShaperRemix",
- "Yntec/DeliShaper",
- "Yntec/dreamlike-photoreal-remix",
- "Yntec/epiCVision",
- "Yntec/realistic-vision-v12",
- "Yntec/MangledMerge3_768",
- "Yntec/OpenLexica",
- "Yntec/DreamLikeRemix",
- "Yntec/humu",
- "Linaqruf/animagine-xl",
- "nerijs/pixel-art-xl",
- "Yntec/MapleSyrup",
- "Yntec/WoopWoopRemix",
- "Yntec/ArcticFowl",
- "Yntec/iComixRemix",
- "Yntec/SamaritanDoesArt",
- "Yntec/samaritan3dCartoon2MVAE",
- "Yntec/CartoonStyleClassic",
- "Yntec/CultClassic",
- "Yntec/CinemaE",
- "Yntec/GalenaVAE",
- "Yntec/a-ZovyaRemix",
- "Yntec/a-ZovyaRPGV3VAE",
- "Yntec/Infinite80s",
- "Yntec/a-ZoviaRPGArtistV2VAE",
- "Yntec/GameAssetsDigitalUnitsCreationKit",
- "Yntec/QToriReloaded",
- "Yntec/Toonify2",
- "Yntec/LunarLuma",
- "Yntec/Lunar",
- "Yntec/Chik2",
- "Yntec/photoMovieRealistic",
- "Yntec/DucHaiten-StyleLikeMeVAE",
- "Yntec/InsaneRealisticCVAE",
- "Yntec/Noosphere_v3_CVAE",
- "Yntec/RealRainbows",
- "Yntec/InsaneM3U",
- "Yntec/ChildrenStoriesAnime",
- "Yntec/theallysMixIV-verisimilar",
- "Yntec/DucHaitenAnime768",
- "Yntec/RainbowClassicAnime",
- "Yntec/DucHaitenClassicAnime768",
- "Yntec/Luma",
- "Yntec/WesternAnimation",
- "Yntec/NeverExisted",
- "Yntec/Rainbowsphere",
- "Yntec/Ninja-Diffusers",
- "Yntec/GOLDFish",
- "Yntec/DreamAnything",
- "Yntec/Dreamsphere",
- "Yntec/Photosphere",
- "Yntec/yabalMixTrue25D_v2_VAE",
- "dreamlike-art/dreamlike-anime-1.0",
- "Yntec/RainbowDreams",
- "Yntec/rainbowpatch",
- "Yntec/DucHaiten-Retro-Diffusers",
- "Yntec/ElldrethsRetroMix_Diffusers",
- "Yntec/sexyToons",
- "Yntec/photoMovieX/",
- "dreamlike-art/dreamlike-photoreal-2.0",
- "dreamlike-art/dreamlike-diffusion-1.0",
- "Yntec/CuteYuki2",
- "Yntec/KIDSILLUSTRATIONS",
- "Yntec/COOLKIDSV2",
- "Yntec/Pavo-Mix-Diffusers",
- "Yntec/RPG_Remix",
- "Yntec/OrangeRemix",
- "Yntec/PeachMix3",
- "Yntec/DucHaitenAIart-beta",
- "Yntec/samdoesartsUlt",
- "Yntec/NovelAI",
- "Yntec/NovelAIRemix",
- "Yntec/Hiten",
- "AIARTCHAN/AbyssHellHero",
- "digiplay/VersaMix_base_diffusers",
- "digiplay/OldFish_fix1.1.997_diffusers",
- "digiplay/VoidnoiseCore_R0829",
- "digiplay/OldFish_v1.1",
- "digiplay/AI-infinity-V1-fp16",
- "digiplay/wantan25D_prototype",
- "digiplay/PotoPhotoRealism_v1",
- "digiplay/LunarDiffusion_v1.27",
- "digiplay/insaneRealistic_v1",
- "digiplay/OLDFish_2348_diffusers",
- "digiplay/OldFish_v1.1_diffusers_recover",
- "digiplay/OldFish_v1.1mix_hello",
- "digiplay/OldFish_v1.1_personal_HDmix",
- "digiplay/FishMix_v1",
- "DucHaiten/DucHaitenDreamWorld",
- "digiplay/LemonteaMixPainterly2_v1",
- "digiplay/SweetMuse_diffusers",
- "digiplay/Realisian_v1",
- "Hius/DreamFul-V2",
- "digiplay/m3u", #263
- "digiplay/RMHF_2.5D_v2",
- "digiplay/FishMix_v1.1",
- "stablediffusionapi/icomix-2",
- "digiplay/Remedy",
- "Hemlok/QuinceMix",
- "digiplay/K-main",
- "digiplay/LusterMix_v1.5_safetensors", #256
- "digiplay/perfectLewdFantasy_v1.01",
- "digiplay/Opiate_v2",
- "digiplay/PhotoSomnia_vFinal",
- "digiplay/polla_mix_2.5D",
- "stablediffusionapi/all-526-animated",
- "AstraliteHeart/pony-diffusion",
- "stablediffusionapi/chilloutmixsf",
- "Masagin/Deliberate", #235
- "DucHaiten/DucHaitenSuperCute",
- "stablediffusionapi/all-526",
- "theintuitiveye/HARDblend",
- "stablediffusionapi/cyberrealistic",
- "stablediffusionapi/cusp-of-serenity",
- "SG161222/Realistic_Vision_V1.4",
- "digiplay/paulEberSRealismMix_v1",
- "Ojimi/anime-kawai-diffusion",
- "hassanblend/hassanblend1.4",
- "digiplay/zodiac_eclipse_DAY1",
- "claudfuen/photorealistic-fuen-v1",
- "stablediffusionapi/chillout-app-factory",
- "DucHaiten/DucHaitenJourney",
- "robotjung/SemiRealMix",
- "Joeythemonster/anything-midjourney-v-4-1",
- "prompthero/midjourney-v4-diffusion",
- "prompthero/openjourney-v4",
- "x67/shortjourney",
- "FredZhang7/paint-journey-v2",
- "digiplay/PersonaStyleCheckpoint",
- "darkstorm2150/Protogen_Infinity_Official_Release",
- "PeggyWang/openjourney-v2",
- "darkstorm2150/Protogen_x3.4_Official_Release",
- "stablediffusionapi/deliberateappfactory", #236
- "digiplay/CrossoverMix_v2",
- "stablediffusionapi/spybg",
- "stablediffusionapi/dreamshaper-v6", #239
- "stablediffusionapi/the-ally",
- "darkstorm2150/Protogen_x5.8_Official_Release",
- "coreco/seek.art_MEGA",
- "digiplay/BlankCanvas_v1", #07.11
- "digiplay/OnlyAnime_v2.3",
- "Korakoe/OpenNiji",
- "digiplay/Photon_v1",
- "digiplay/Pika_v2",
- "digiplay/RealCartoon3D_F16full_v3.1", #254
- "digiplay/realidefmix_3.5VAE",
- "digiplay/realmixUnrealjourney_v1",
- "digiplay/SyncMix_v1.5",
- "digiplay/TWingshadow_v1.2",
- "digiplay/V3_by_Hans_Asian",
- "digiplay/whatamix_v1",
-
- "digiplay/2K", #216
- "digiplay/AIGEN_v1.4_diffusers",
- "digiplay/asyncsMIX_v2",
- "digiplay/BrickAndMortarMix_v2.0_diffusers", #224
- "digiplay/BeautyFool_v1.2VAE_pruned",
- "digiplay/breakdomainrealistic_R2333",
- "digiplay/CCTV2.5d_v1", #219
- "digiplay/ChikMix_V3", #253
- "stablediffusionapi/chilledremixsazyou-r", #195
- "digiplay/CityEdge_StyleMix_v1.44",
- "stablediffusionapi/dalcefopainting2", #199
- "digiplay/EdisonNilMix_v1", #07.10
- "digiplay/DiamondCoalMix_v2_pruned_diffusers",
- "digiplay/DreamShaper_7", #259
- "digiplay/elegantEntropy_v1.1", #221
- "digiplay/EtherRealMix_LUX2",
- "digiplay/KawaiiRealisticAnimeMix_A0.3",
- "digiplay/highQualityCGMIX_v1",
- "digiplay/HIMAWARI_v1",
- "digiplay/Hodgepodge_v2.1", #217
- "digiplay/illustro1stEdition_illustroV1", #214
- "digiplay/Juggernaut_final", #07.11
- "digiplay/Landscape_PhotoReal_v1",
- "digiplay/LuckyStrikeMix0.2Realistic", #07.10
- "digiplay/Matrix_Stellar_VAE_v1",
- "digiplay/PrefixRealisticMix_v1",
- "digiplay/RealEpicMajicRevolution_v1", #07.11
- "digiplay/ShampooMix_4", #252
- "digiplay/ShowmakerMix_v1",
- "digiplay/SoapMix2.5D_v1",
- "digiplay/ZemiHR_v2_diffusers",
-
- "Redamancy2299/dreambooth",
- "Lykon/DreamShaper", #240
- "trysem/DreamShaper-3.3",
- "HusseinHE/hussein-deliberate-1000steps", #237
- "stablediffusionapi/majicmixfantasy",
- "stablediffusionapi/majicmixsombre", #247
- "wavymulder/modelshoot",
- "digiplay/ChillyMix_v1", #215
- "stablediffusionapi/foto-assisted-diffusion", #197
- "wavymulder/portraitplus",
- "stablediffusionapi/chilloutmix-4264",
- "stablediffusionapi/product-design", #194
- "kandinsky-community/kandinsky-2-1", #251
-
- "digiplay/2.5DSET_diffusers", #227
- "digiplay/2-KWI", #213
- "digiplay/alstroemeriaMix_v1",
- "wavymulder/Analog-Diffusion",
- "digiplay/AniRealityMix_v1", #257
- "digiplay/ARRealVX1.1",
- "digiplay/BadAnime_v1",
- "digiplay/BasilKorea_v2", #07.11
- "digiplay/bluePencilRealistic_v01",
- "digiplay/bra_v40_diffusers",
- "digiplay/Burger_Mix_semiR2Lite", #222
- "digiplay/calicomixreal_v2.0_diffusers",
- "digiplay/CampurSari_Gen1",
- "digiplay/cocotifacute_v1", #07.10
- "digiplay/cosfMix_v1", #223
- "digiplay/CounterMix_v2", #211
- "digiplay/CuriousMerge2.5D_v5",
- "digiplay/dosmix",
- "digiplay/epi_2.5Dphotogodess_diffusers",
- "stablediffusionapi/droodlyrielv15",
- "digiplay/fantexi_v0.7",
- "digiplay/fishmix_other_v1",
- "digiplay/FormCleansingMix_v1", #228
- "digiplay/FumizukiMix_v1",
- "digiplay/helloworld_v3",
- "digiplay/HenmixArt_v1",
- "digiplay/ISOmix_v3.22",
- "digiplay/JF-Cu_v1",
- "digiplay/kencanmix_v2.0beta",
- "wavymulder/lomo-diffusion",
- "stablediffusionapi/majicmixv5", #192
- "digiplay/mecha_musume_vivid_soft",
- "digiplay/MGM",
- "digiplay/MiracleMixGlitter_v1",
- "digiplay/MixTape_RocknRoll_v3punk_bake_fp16",
- "digiplay/NextPhoto_v1",
- "digiplay/Noosphere_v3",
- "digiplay/nk15_diffusers", #230
- "digiplay/PeachMixsRelistic_R0", #262
- "wavymulder/timeless-diffusion",
- "digiplay/WhiteDreamyHillMix_v1", #220
- "digiplay/ya3p_VAE", #258
-
- "DucHaiten/DucHaitenAnime",
- "DucHaiten/DucHaitenAIart",
- "digiplay/BeenYouLiteL11_diffusers",
- "Manseo/Colorful-v4.5-Plus", #244
- "Guizmus/SDArt_ChaosAndOrder",
- "DucHaiten/DH_ClassicAnime",
- "stablediffusionapi/disneypixar",
- "johnslegers/epic-diffusion-v1.1",
- "emilianJR/epiCRealism",
- "johnslegers/epic-diffusion",
- "digiplay/endlessMixRenatus_v1.1", #07.10
- "digiplay/fantasticAnime_diffusers",
- "stablediffusionapi/ghostmix",
- "Duskfallcrew/EpicMix_Realism",
- "nitrosocke/Nitro-Diffusion",
- "prompthero/openjourney",
- "Guizmus/SDArt_something",
- "DucHaiten/DucHaiten-StyleLikeMe",
- "ddPn08/subtly", #250
- "22h/vintedois-diffusion-v0-1",
-
- "circulus/sd-anireal-v2.7",
- "0xJustin/Dungeons-and-Diffusion",
- "darkstorm2150/Protogen_v2.2_Official_Release",
- "Guizmus/SDArt_AliceInDiffusionLand",
- "stablediffusionapi/realistic-vision-v20-2047",
- "redstonehero/RPG-v5-itr17_A10T",
-
- "stablediffusionapi/camelliamix25d",
- "Guizmus/SDArt_cosmichorrors",
- "DGSpitzer/DGSpitzer-Art-Diffusion",
- "stablediffusionapi/emotion-puppeteer-v2",
- "stablediffusionapi/fengjing",
- "stablediffusionapi/fuwafuwamix",
- "Fred99774/girlnew1",
- "stablediffusionapi/majicmixrealistic",
- "badmonk/nxka",
- "ItsJayQz/SynthwavePunk-v2",
- "zhyemmmm/ToonYou",
- "stablediffusionapi/uber-realistic-merge",
- "stablediffusionapi/vne732h9dh4",
- "stablediffusionapi/wand-magic2",
- "stablediffusionapi/waifu-journey-2",
- "stablediffusionapi/zovya",
-
- "Guizmus/SDArt_cosmichorrors768",
- "stablediffusionapi/counterfeit-v30",
- "stablediffusionapi/amireal",
- #"JamesFlare/pastel-mix", #"andite/pastel-mix",
- "stablediffusionapi/rev-anim",
- "aipicasso/picasso-diffusion-1-1",
- "xiaolxl/Gf_style2",
- "circulus/sd-semireal-v2.8",
- "Crosstyan/BPModel", #07.11
-
- "digiplay/Dusk-1",
- "ogkalu/Comic-Diffusion",
- "Guizmus/SDArt_ChaosAndOrder768",
- "gsdf/Counterfeit-V2.0",
- "dwancin/memoji", #07.11
- "nousr/robo-diffusion-2-base",
-
- ##"hakurei/waifu-diffusion",
- "WarriorMama777/AbyssOrangeMix2",
- "stablediffusionapi/abyssorangemix2nsfw", #200
- "cag/anything-v3-1",
- "iZELX1/Anything-V3-X",
- "xyn-ai/anything-v4.0", #"andite/anything-v4.0",
- "D1b4l4p/AsianMix",
- #"Fred99774/chilloutvlara",
- "aipicasso/cool-japan-diffusion-2-1-2",
- "stablediffusionapi/corneos-7th-heaven-m", #196
- "DGSpitzer/Cyberpunk-Anime-Diffusion",
- "stablediffusionapi/dark-sushi-mix",
- "joachimsallstrom/Double-Exposure-Diffusion",
- "eimiss/EimisAnimeDiffusion_1.0v",
- "prompthero/funko-diffusion",
- "nitrosocke/Ghibli-Diffusion",
- ###"iZELX1/Grapefruit",
- "xiaolxl/GuoFeng3",
- "stablediffusionapi/tmnd-mix",
- "coder119/Vectorartz_Diffusion", #203
-
- "WarriorMama777/AbyssOrangeMix",
- "AIARTCHAN/7pa",
- "JosephusCheung/ACertainModel",
- "JosephusCheung/ACertainThing",
- "JosephusCheung/ACertainty",
- "AIARTCHAN/AbyssHellVer3",
- "AIARTCHAN/AbyssMapleVer3",
- "stablediffusionapi/abyssorangemixsfw",
- "AIARTCHAN/anidosmixV2",
- "stablediffusionapi/anime-model-v2",
- "kubanemil/AnyLORA",
- "stablediffusionapi/hc-anything-v3-vae", #231
- "mm00/anything-v3.0-light",
- "stablediffusionapi/anythingelse-v4",
- "stablediffusionapi/anything-v45-fixed",
- "stablediffusionapi/anything-v5",
- "nitrosocke/Arcane-Diffusion",
- "nitrosocke/archer-diffusion",
- "stablediffusionapi/architecture-tuned-model",
- "WarriorMama777/BloodOrangeMix",
- "wavymulder/collage-diffusion",
- "stablediffusionapi/camelliamixline",
- "digiplay/chrysanthemumMix_v1",
- "digiplay/CiderMix_ciderR", #260
- "Johnhex/Clam", #243
- "stablediffusionapi/cosmic-babes",
- "digiplay/CoffeeDonut_v1",
- "stablediffusionapi/dark-sushi-25d",
- "digiplay/Defacta_v1_diffusers", #226
- ## "WarriorMama777/EerieOrangeMix",
- "digiplay/DuelAnimeMix_v1", #225
- "Envvi/Inkpunk-Diffusion",
- "digiplay/kotosmix_diffusers", #229
- "stablediffusionapi/meinaalter",
- "Nacholmo/meinamixv7-diffusers",
- "stablediffusionapi/meinapastel",
- "AIARTCHAN/MIX-Pro-V4",
- "stablediffusionapi/shirataki-mix", #191
- "NoCrypt/SomethingV2_2",
- "NoCrypt/SomethingV2",
- "badmonk/sxzumi",
- ## "stablediffusionapi/three-delicacy",
- ## "stablediffusionapi/three-delicacy-wonto",
- "etherealxx/systemy-csrmodel-cutesexyrobutts", #"andite/cutesexyrobutts-diffusion",
- "sd-dreambooth-library/true-guweiz-style", # "andite/guweiz-diffusion",
- "stablediffusionapi/vector-art", #198
- "digiplay/xxMix_4",
- ###"mio/hiten", #"andite/hiten-diffusion",
- ### "andite/mashuu-diffusion",
- ### "andite/mignon-diffusion",
- ### "andite/mikapikazo-diffusion",
- ### "andite/piromizu-diffusion",
- "digiplay/Zevinemix_v1.0/",
-
- "digiplay/AnaMix_v2", #07.11
- "stablediffusionapi/animetestmodelv3",
- "yulet1de/anything", #232
- "hakurei/artstation-diffusion", #07.11
- "Fictiverse/Stable_Diffusion_BalloonArt_Model",
- "stablediffusionapi/bg-dream-irl",
- "stablediffusionapi/bg-dream-model-b", #193
- "Rardilit/Ciffusion_v0.1",
- "circulus/sd-anireal-2d-v2",
- "circulus/sd-photoreal-v2.7",
- "circulus/sd-photoreal-photo-v2",
- "circulus/sd-anireal-2.5d-v2",
- "circulus/sd-anireal-v2.5",
- "circulus/sd-photoreal-semi-v2",
- "circulus/sd-photoreal-real-v2",
- "circulus/sd-photoreal-v2.5",
- "circulus/sd-anireal-3d-v2",
- "circulus/sd-anireal-v2.8",
- "nitrosocke/classic-anim-diffusion",
- "Conflictx/Complex-Lineart", #245
- "sayakpaul/da-vinci-sd-pokemon",
- "nitrosocke/elden-ring-diffusion",
- "digiplay/EtherBluMix_1", #07.11
- "digiplay/fantasticmix_v40_test", #261
- "theintuitiveye/FantasyMix",
- "Fictiverse/Stable_Diffusion_FluidArt_Model",
- "nitrosocke/Future-Diffusion",
- "ItsJayQz/GTA5_Artwork_Diffusion", #205
- "digiplay/hellopure_v2.23",
- "TheLastBen/hrrzg-style-768px", #246
- "nevernotsean/IllustratedPaperMini", #242
- "dallinmackay/JWST-Deep-Space-diffusion",
- "prompthero/linkedin-diffusion",
- "mann-e/mann-e_4_rev-0-1", #210
- "ItsJayQz/Marvel_WhatIf_Diffusion", #206
- "yuanbit/max-15-1e-6-1500",
- "MyneFactory/MF-Base", #248
- "Fictiverse/Stable_Diffusion_Microscopic_model", #249
- "nitrosocke/mo-di-diffusion",
- "luongphamit/NeverEnding-Dream2", #241
- "lambdalabs/sd-naruto-diffusers", #201
- "Vernon-2/output_test",
- "Fictiverse/Stable_Diffusion_PaperCut_Model",
- "bsuutari/path_to_saved_model",
- "bsuutari/path_to_saved_model_rafa",
- "digiplay/PlanetBumix_v1",
- "lambdalabs/sd-pokemon-diffusers", #202
- "prompthero/poolsuite-diffusion",
- "digiplay/RealismEngine_v1",
- "nitrosocke/redshift-diffusion",
- "nitrosocke/redshift-diffusion-768",
- "nousr/robo-diffusion",
- "digiplay/SDVN1-Real_v1", #255
- "nitrosocke/spider-verse-diffusion",
- #"runwayml/stable-diffusion-v1-5",
- "nicky007/stable-diffusion-logo-fine-tuned",
- "stablediffusionapi/three-delicacy", #233
- "stablediffusionapi/three-delicacy-wonto", #234
- "naclbit/trinart_stable_diffusion_v2",
- "dallinmackay/Tron-Legacy-diffusion",
- "digiplay/unstableDiffusersYamerMIX_v3",
- "dallinmackay/Van-Gogh-diffusion",
- "ItsJayQz/Valorant_Diffusion",
- "Fictiverse/Stable_Diffusion_VoxelArt_Model", #204
- "wavymulder/wavyfusion",
- "Yntec/HassanRemix",
- "Yntec/Reddit",
- "Yntec/CinematicReality",
- "Yntec/3DKX2",
- "CompVis/stable-diffusion-v1-4", #530
- "CompVis/stable-diffusion-v1-3", #207
- "CompVis/stable-diffusion-v1-2", #208
- "CompVis/stable-diffusion-v1-1", #209
-]
-current_model = models[0]
-
-text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
-#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
-
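-# models2 mirrors the models list above: each entry wraps one hosted checkpoint as a live
-# Gradio interface, loaded eagerly at startup with preprocessing disabled so the raw prompt
-# string is passed straight through. The long literal below is equivalent to a sketch like:
-#   models2 = [gr.Interface.load(f"models/{m}", live=True, preprocess=False) for m in models]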
-models2=[
- gr.Interface.load(f"models/{models[0]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[1]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[2]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[3]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[4]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[5]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[6]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[7]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[8]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[9]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[10]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[11]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[12]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[13]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[14]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[15]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[16]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[17]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[18]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[19]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[20]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[21]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[22]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[23]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[24]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[25]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[26]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[27]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[28]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[29]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[30]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[31]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[32]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[33]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[34]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[35]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[36]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[37]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[38]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[39]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[40]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[41]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[42]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[43]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[44]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[45]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[46]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[47]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[48]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[49]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[50]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[51]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[52]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[53]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[54]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[55]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[56]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[57]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[58]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[59]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[60]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[61]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[62]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[63]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[64]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[65]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[66]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[67]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[68]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[69]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[70]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[71]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[72]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[73]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[74]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[75]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[76]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[77]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[78]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[79]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[80]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[81]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[82]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[83]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[84]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[85]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[86]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[87]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[88]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[89]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[90]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[91]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[92]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[93]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[94]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[95]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[96]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[97]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[98]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[99]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[100]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[101]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[102]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[103]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[104]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[105]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[106]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[107]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[108]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[109]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[110]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[111]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[112]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[113]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[114]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[115]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[116]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[117]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[118]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[119]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[120]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[121]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[122]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[123]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[124]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[125]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[126]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[127]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[128]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[129]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[130]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[131]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[132]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[133]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[134]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[135]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[136]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[137]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[138]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[139]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[140]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[141]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[142]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[143]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[144]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[145]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[146]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[147]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[148]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[149]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[150]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[151]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[152]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[153]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[154]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[155]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[156]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[157]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[158]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[159]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[160]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[161]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[162]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[163]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[164]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[165]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[166]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[167]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[168]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[169]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[170]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[171]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[172]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[173]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[174]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[175]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[176]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[177]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[178]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[179]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[180]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[181]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[182]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[183]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[184]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[185]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[186]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[187]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[188]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[189]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[190]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[191]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[192]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[193]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[194]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[195]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[196]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[197]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[198]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[199]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[200]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[201]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[202]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[203]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[204]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[205]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[206]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[207]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[208]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[209]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[210]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[211]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[212]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[213]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[214]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[215]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[216]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[217]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[218]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[219]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[220]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[221]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[222]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[223]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[224]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[225]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[226]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[227]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[228]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[229]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[230]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[231]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[232]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[233]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[234]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[235]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[236]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[237]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[238]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[239]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[240]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[241]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[242]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[243]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[244]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[245]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[246]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[247]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[248]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[249]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[250]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[251]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[252]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[253]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[254]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[255]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[256]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[257]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[258]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[259]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[260]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[261]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[262]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[263]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[264]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[265]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[266]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[267]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[268]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[269]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[270]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[271]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[272]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[273]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[274]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[275]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[276]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[277]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[278]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[279]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[280]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[281]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[282]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[283]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[284]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[285]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[286]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[287]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[288]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[289]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[290]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[291]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[292]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[293]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[294]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[295]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[296]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[297]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[298]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[299]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[300]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[301]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[302]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[303]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[304]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[305]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[306]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[307]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[308]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[309]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[310]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[311]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[312]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[313]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[314]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[315]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[316]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[317]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[318]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[319]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[320]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[321]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[322]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[323]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[324]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[325]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[326]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[327]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[328]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[329]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[330]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[331]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[332]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[333]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[334]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[335]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[336]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[337]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[338]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[339]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[340]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[341]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[342]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[343]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[344]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[345]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[346]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[347]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[348]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[349]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[350]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[351]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[352]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[353]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[354]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[355]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[356]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[357]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[358]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[359]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[360]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[361]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[362]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[363]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[364]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[365]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[366]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[367]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[368]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[369]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[370]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[371]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[372]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[373]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[374]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[375]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[376]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[377]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[378]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[379]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[380]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[381]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[382]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[383]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[384]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[385]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[386]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[387]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[388]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[389]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[390]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[391]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[392]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[393]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[394]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[395]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[396]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[397]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[398]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[399]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[400]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[401]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[402]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[403]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[404]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[405]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[406]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[407]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[408]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[409]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[410]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[411]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[412]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[413]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[414]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[415]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[416]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[417]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[418]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[419]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[420]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[421]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[422]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[423]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[424]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[425]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[426]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[427]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[428]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[429]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[430]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[431]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[432]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[433]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[434]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[435]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[436]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[437]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[438]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[439]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[440]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[441]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[442]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[443]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[444]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[445]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[446]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[447]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[448]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[449]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[450]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[451]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[452]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[453]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[454]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[455]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[456]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[457]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[458]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[459]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[460]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[461]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[462]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[463]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[464]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[465]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[466]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[467]}",live=True,preprocess=False),
-    gr.Interface.load(f"models/{models[468]}",live=True,preprocess=False),
-    gr.Interface.load(f"models/{models[469]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[470]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[471]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[472]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[473]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[474]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[475]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[476]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[477]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[478]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[479]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[480]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[481]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[482]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[483]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[484]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[485]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[486]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[487]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[488]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[489]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[490]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[491]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[492]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[493]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[494]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[495]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[496]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[497]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[498]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[499]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[500]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[501]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[502]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[503]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[504]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[505]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[506]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[507]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[508]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[509]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[510]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[511]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[512]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[513]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[514]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[515]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[516]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[517]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[518]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[519]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[520]}",live=True,preprocess=False),
-
- gr.Interface.load(f"models/{models[521]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[522]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[523]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[524]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[525]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[526]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[527]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[528]}",live=True,preprocess=False),
- gr.Interface.load(f"models/{models[529]}",live=True,preprocess=False),
-    # Indexing starts at model 0, so the total number of models is the highest index n in models[n] plus 1.
-
-]
-
-def text_it1(inputs,text_gen1=text_gen1):
- go_t1=text_gen1(inputs)
- return(go_t1)
-
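-# The model dropdown below is declared with type="index", so set_model receives an integer
-# position into the models list rather than the model name itself.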
-def set_model(current_model):
- current_model = models[current_model]
- return gr.update(label=(f"{current_model}"))
-
-
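-# send_it1 uses that same integer index to pick the matching wrapped interface from models2,
-# which is why models and models2 must stay the same length and in the same order.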
-def send_it1(inputs, model_choice): #negative_prompt,
- proc1=models2[model_choice]
- output1=proc1(inputs)
- #negative_prompt=negative_prompt
- return(output1)
-css=""""""
-
-
-with gr.Blocks(css=css) as myface:
-    gr.HTML("""
-        Blitz Diffusion - 530 Stable Diffusion models, but why? For your enjoyment!
-        2023.11.5 - Toys to play with: The models AgarthaChadstyle, Deliberate2 and Reanimate have been added!
-        2023.11.3 - The models m0nst3rfy3, ChiliConCarne, makeitdoubleplz, fennPhoto, UberRealisticLegacy, 526Mix and 526 have been added!
-        If a model is already loaded, each new image takes less than 20 seconds to generate!
-        If you get an ERROR, it's because that model ran out of memory; try again, or wait a minute and try again. Have fun!
-    """)
- with gr.Row():
- with gr.Column(scale=100):
- #Model selection dropdown
- model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
- with gr.Row():
- with gr.Column(scale=100):
- magic1=gr.Textbox(label="Your Prompt", lines=4) #Positive
- #with gr.Column(scale=100):
- #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
- gr.HTML("""""")
- run=gr.Button("Generate Image")
- with gr.Row():
- with gr.Column(style="width=800px"):
- output1=gr.Image(label=(f"{current_model}"))
-
-
- with gr.Row():
- with gr.Column(scale=50):
- input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea",lines=2)
-            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the 'Your Prompt' box above")
-            use_short=gr.Button("Copy the contents of this box to the 'Your Prompt' box above")
- def short_prompt(inputs):
- return(inputs)
-
- model_name1.change(set_model,inputs=model_name1,outputs=[output1])
-
- run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
-
- use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
-
- see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
-
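-# queue() buffers incoming requests (concurrency_count worker slots run in parallel) and
-# launch() serves the app; show_api=False hides the auto-generated API docs page.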
-myface.queue(concurrency_count=200)
-myface.launch(inline=True, show_api=False, max_threads=400)
\ No newline at end of file
diff --git a/spaces/RinInori/Vicuna_ChatBot/README.md b/spaces/RinInori/Vicuna_ChatBot/README.md
deleted file mode 100644
index 99683e542aa5e9ccf5034932ece7ba7e5142437d..0000000000000000000000000000000000000000
--- a/spaces/RinInori/Vicuna_ChatBot/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Vicuna ChatBot
-emoji: 😻
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Ritori/play_with_baby_llama2/run.c b/spaces/Ritori/play_with_baby_llama2/run.c
deleted file mode 100644
index 15352aebab3ff890f776d1ae9ab6c3ab2253b373..0000000000000000000000000000000000000000
--- a/spaces/Ritori/play_with_baby_llama2/run.c
+++ /dev/null
@@ -1,490 +0,0 @@
-/*
-Inference for Llama-2 Transformer model in pure C.
-
-Example compile: (see README for more details)
-$ gcc -O3 -o run run.c -lm
-
-Then run with:
-$ ./run
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <math.h>
-#include <string.h>
-#include <sys/time.h>
-
-// ----------------------------------------------------------------------------
-// Transformer and RunState structs, and related memory management
-
-typedef struct {
- int dim; // transformer dimension
- int hidden_dim; // for ffn layers
- int n_layers; // number of layers
- int n_heads; // number of query heads
- int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery)
- int vocab_size; // vocabulary size, usually 256 (byte-level)
- int seq_len; // max sequence length
-} Config;
-
-typedef struct {
- // token embedding table
- float* token_embedding_table; // (vocab_size, dim)
- // weights for rmsnorms
- float* rms_att_weight; // (layer, dim) rmsnorm weights
- float* rms_ffn_weight; // (layer, dim)
- // weights for matmuls
- float* wq; // (layer, dim, dim)
- float* wk; // (layer, dim, dim)
- float* wv; // (layer, dim, dim)
- float* wo; // (layer, dim, dim)
- // weights for ffn
- float* w1; // (layer, hidden_dim, dim)
- float* w2; // (layer, dim, hidden_dim)
- float* w3; // (layer, hidden_dim, dim)
- // final rmsnorm
- float* rms_final_weight; // (dim,)
- // freq_cis for RoPE relative positional embeddings
- float* freq_cis_real; // (seq_len, dim/2)
- float* freq_cis_imag; // (seq_len, dim/2)
-} TransformerWeights;
-
-typedef struct {
- // current wave of activations
- float *x; // activation at current time stamp (dim,)
- float *xb; // same, but inside a residual branch (dim,)
- float *xb2; // an additional buffer just for convenience (dim,)
- float *hb; // buffer for hidden dimension in the ffn (hidden_dim,)
- float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,)
- float *q; // query (dim,)
- float *k; // key (dim,)
- float *v; // value (dim,)
- float *att; // buffer for scores/attention values (seq_len,)
- float *logits; // output logits
- // kv cache
- float* key_cache; // (layer, seq_len, dim)
- float* value_cache; // (layer, seq_len, dim)
-} RunState;
-
-void malloc_run_state(RunState* s, Config* p) {
- // we calloc instead of malloc to keep valgrind happy
- s->x = calloc(p->dim, sizeof(float));
- s->xb = calloc(p->dim, sizeof(float));
- s->xb2 = calloc(p->dim, sizeof(float));
- s->hb = calloc(p->hidden_dim, sizeof(float));
- s->hb2 = calloc(p->hidden_dim, sizeof(float));
- s->q = calloc(p->dim, sizeof(float));
- s->k = calloc(p->dim, sizeof(float));
- s->v = calloc(p->dim, sizeof(float));
- s->att = calloc(p->seq_len, sizeof(float));
- s->logits = calloc(p->vocab_size, sizeof(float));
- s->key_cache = calloc(p->n_layers * p->seq_len * p->dim, sizeof(float));
- s->value_cache = calloc(p->n_layers * p->seq_len * p->dim, sizeof(float));
- // ensure all mallocs went fine
- if (!s->x || !s->xb || !s->xb2 || !s->hb || !s->hb2 || !s->q
- || !s->k || !s->v || !s->att || !s->logits || !s->key_cache
- || !s->value_cache) {
- printf("malloc failed!\n");
- exit(1);
- }
-}
-
-void free_run_state(RunState* s) {
- free(s->x);
- free(s->xb);
- free(s->xb2);
- free(s->hb);
- free(s->hb2);
- free(s->q);
- free(s->k);
- free(s->v);
- free(s->att);
- free(s->logits);
- free(s->key_cache);
- free(s->value_cache);
-}
-
-void malloc_weights(TransformerWeights* w, Config* p) {
- // we calloc instead of malloc to keep valgrind happy
- w->token_embedding_table = calloc(p->vocab_size * p->dim, sizeof(float));
- w->rms_att_weight = calloc(p->n_layers * p->dim, sizeof(float));
- w->rms_ffn_weight = calloc(p->n_layers * p->dim, sizeof(float));
- w->wq = calloc(p->n_layers * p->dim * p->dim, sizeof(float));
- w->wk = calloc(p->n_layers * p->dim * p->dim, sizeof(float));
- w->wv = calloc(p->n_layers * p->dim * p->dim, sizeof(float));
- w->wo = calloc(p->n_layers * p->dim * p->dim, sizeof(float));
- w->w1 = calloc(p->n_layers * p->hidden_dim * p->dim, sizeof(float));
- w->w2 = calloc(p->n_layers * p->dim * p->hidden_dim, sizeof(float));
- w->w3 = calloc(p->n_layers * p->hidden_dim * p->dim, sizeof(float));
- w->rms_final_weight = calloc(p->dim, sizeof(float));
- w->freq_cis_real = calloc(p->seq_len * p->dim / 2, sizeof(float));
- w->freq_cis_imag = calloc(p->seq_len * p->dim / 2, sizeof(float));
- // ensure all mallocs went fine
- if (!w->token_embedding_table || !w->rms_att_weight || !w->rms_ffn_weight
- || !w->wq || !w->wk || !w->wv || !w->wo || !w->w1 || !w->w2 || !w->w3 ||
- !w->rms_final_weight || !w->freq_cis_real || !w->freq_cis_imag) {
- printf("malloc failed!\n");
- exit(1);
- }
-}
-
-void free_weights(TransformerWeights* w) {
- free(w->token_embedding_table);
- free(w->rms_att_weight);
- free(w->rms_ffn_weight);
- free(w->wq);
- free(w->wk);
- free(w->wv);
- free(w->wo);
- free(w->w1);
- free(w->w2);
- free(w->w3);
- free(w->rms_final_weight);
- free(w->freq_cis_real);
- free(w->freq_cis_imag);
-}
-
-// ----------------------------------------------------------------------------
-// initialization: read from checkpoint
-
-int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) {
- if (fread(w->token_embedding_table, sizeof(float), p->vocab_size * p->dim, f) != p->vocab_size * p->dim) return 1;
- if (fread(w->rms_att_weight, sizeof(float), p->n_layers * p->dim, f) != p->n_layers * p->dim) return 1;
- if (fread(w->wq, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1;
- if (fread(w->wk, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1;
- if (fread(w->wv, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1;
- if (fread(w->wo, sizeof(float), p->n_layers * p->dim * p->dim, f) != p->n_layers * p->dim * p->dim) return 1;
- if (fread(w->rms_ffn_weight, sizeof(float), p->n_layers * p->dim, f) != p->n_layers * p->dim) return 1;
- if (fread(w->w1, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != p->n_layers * p->dim * p->hidden_dim) return 1;
- if (fread(w->w2, sizeof(float), p->n_layers * p->hidden_dim * p->dim, f) != p->n_layers * p->hidden_dim * p->dim) return 1;
- if (fread(w->w3, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != p->n_layers * p->dim * p->hidden_dim) return 1;
- if (fread(w->rms_final_weight, sizeof(float), p->dim, f) != p->dim) return 1;
- int head_size = p->dim / p->n_heads;
- if (fread(w->freq_cis_real, sizeof(float), p->seq_len * head_size / 2, f) != p->seq_len * head_size / 2) return 1;
- if (fread(w->freq_cis_imag, sizeof(float), p->seq_len * head_size / 2, f) != p->seq_len * head_size / 2) return 1;
- return 0;
-}
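The fread sequence above fixes the on-disk checkpoint layout: a Config header of seven int32 fields followed by float32 weight blocks in exactly this order. For illustration, a rough NumPy equivalent of the same reader (a sketch only; the function name and dict keys are mine, and the layout is inferred from the C code above):

    import numpy as np

    def read_llama2c_checkpoint(path):
        # Sketch: mirror the fread order of checkpoint_init_weights above.
        with open(path, "rb") as f:
            dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len = \
                np.fromfile(f, dtype=np.int32, count=7)
            head_size = dim // n_heads

            def block(*shape):
                count = int(np.prod(shape))
                return np.fromfile(f, dtype=np.float32, count=count).reshape(shape)

            return {
                "token_embedding_table": block(vocab_size, dim),
                "rms_att_weight": block(n_layers, dim),
                "wq": block(n_layers, dim, dim),
                "wk": block(n_layers, dim, dim),
                "wv": block(n_layers, dim, dim),
                "wo": block(n_layers, dim, dim),
                "rms_ffn_weight": block(n_layers, dim),
                "w1": block(n_layers, hidden_dim, dim),
                "w2": block(n_layers, dim, hidden_dim),
                "w3": block(n_layers, hidden_dim, dim),
                "rms_final_weight": block(dim),
                "freq_cis_real": block(seq_len, head_size // 2),
                "freq_cis_imag": block(seq_len, head_size // 2),
            }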
-
-
-// ----------------------------------------------------------------------------
-// neural net blocks
-
-void accum(float *a, float *b, int size) {
- for (int i = 0; i < size; i++) {
- a[i] += b[i];
- }
-}
-
-void rmsnorm(float* o, float* x, float* weight, int size) {
- // calculate sum of squares
- float ss = 0.0f;
- for (int j = 0; j < size; j++) {
- ss += x[j] * x[j];
- }
- ss /= size;
- ss += 1e-5f;
- ss = 1.0f / sqrt(ss);
- // normalize and scale
- for (int j = 0; j < size; j++) {
- o[j] = weight[j] * (ss * x[j]);
- }
-}
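For reference, the rmsnorm above (mean of squares, reciprocal square root, then elementwise scale by the learned weight) in a few lines of NumPy; this is a sketch, not part of the original file:

    import numpy as np

    def rmsnorm(x, weight, eps=1e-5):
        ss = np.mean(x * x) + eps          # mean of squares plus epsilon
        return weight * (x / np.sqrt(ss))  # scale by 1/sqrt(ss), then by weight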
-
-void softmax(float* x, int size) {
- // find max value (for numerical stability)
- float max_val = x[0];
- for (int i = 1; i < size; i++) {
- if (x[i] > max_val) {
- max_val = x[i];
- }
- }
- // exp and sum
- float sum = 0.0f;
- for (int i = 0; i < size; i++) {
- x[i] = exp(x[i] - max_val);
- sum += x[i];
- }
- // normalize
- for (int i = 0; i < size; i++) {
- x[i] /= sum;
- }
-}
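The max subtraction above is the standard trick to keep exp() from overflowing; the same computation as a NumPy sketch:

    import numpy as np

    def softmax(x):
        e = np.exp(x - np.max(x))  # shift by the max for numerical stability
        return e / e.sum()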
-
-void matmul(float* xout, float* x, float* w, int n, int d) {
- // W (d,n) @ x (n,) -> xout (d,)
- #pragma omp parallel for
- for (int i = 0; i < d; i++) {
- float val = 0.0f;
- for (int j = 0; j < n; j++) {
- val += w[i * n + j] * x[j];
- }
- xout[i] = val;
- }
-}
-
-void transformer(int token, int pos, Config* p, RunState* s, TransformerWeights* w) {
-
- // a few convenience variables
- float *x = s->x;
- int dim = p->dim;
- int hidden_dim = p->hidden_dim;
- int head_size = dim / p->n_heads;
-
- // copy the token embedding into x
- float* content_row = &(w->token_embedding_table[token * dim]);
- memcpy(x, content_row, dim*sizeof(*x));
-
- // pluck out the "pos" row of freq_cis_real and freq_cis_imag
- float* freq_cis_real_row = w->freq_cis_real + pos * head_size / 2;
- float* freq_cis_imag_row = w->freq_cis_imag + pos * head_size / 2;
-
- // forward all the layers
- for(int l = 0; l < p->n_layers; l++) {
-
- // attention rmsnorm
- rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim);
-
- // qkv matmuls for this position
- matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim);
- matmul(s->k, s->xb, w->wk + l*dim*dim, dim, dim);
- matmul(s->v, s->xb, w->wv + l*dim*dim, dim, dim);
-
- // apply RoPE rotation to the q and k vectors for each head
- for (int h = 0; h < p->n_heads; h++) {
- // get the q and k vectors for this head
- float* q = s->q + h * head_size;
- float* k = s->k + h * head_size;
- // rotate q and k by the freq_cis_real and freq_cis_imag
- for (int i = 0; i < head_size; i+=2) {
- float q0 = q[i];
- float q1 = q[i+1];
- float k0 = k[i];
- float k1 = k[i+1];
- float fcr = freq_cis_real_row[i/2];
- float fci = freq_cis_imag_row[i/2];
- q[i] = q0 * fcr - q1 * fci;
- q[i+1] = q0 * fci + q1 * fcr;
- k[i] = k0 * fcr - k1 * fci;
- k[i+1] = k0 * fci + k1 * fcr;
- }
- }
-
- // save key,value at this time step (pos) to our kv cache
- int loff = l * p->seq_len * dim; // kv cache layer offset for convenience
- float* key_cache_row = s->key_cache + loff + pos * dim;
- float* value_cache_row = s->value_cache + loff + pos * dim;
- memcpy(key_cache_row, s->k, dim*sizeof(*key_cache_row));
- memcpy(value_cache_row, s->v, dim*sizeof(*value_cache_row));
-
- // multihead attention. iterate over all heads
- for (int h = 0; h < p->n_heads; h++) {
- // get the query vector for this head
- float* q = s->q + h * head_size;
- // iterate over all timesteps, including the current one
- for (int t = 0; t <= pos; t++) {
- // get the key vector for this head and at this timestep
- float* k = s->key_cache + loff + t * dim + h * head_size;
- // calculate the attention score as the dot product of q and k
- float score = 0.0f;
- for (int i = 0; i < head_size; i++) {
- score += q[i] * k[i];
- }
- score /= sqrtf(head_size);
- // save the score to the attention buffer
- s->att[t] = score;
- }
-
- // softmax the scores to get attention weights, from 0..pos inclusively
- softmax(s->att, pos + 1);
-
- // weighted sum of the values, store back into xb
- for (int i = 0; i < head_size; i++) {
- float val = 0.0f;
- for (int t = 0; t <= pos; t++) {
- val += s->att[t] * s->value_cache[loff + t * dim + h * head_size + i]; // note bad locality
- }
- s->xb[h * head_size + i] = val;
- }
- }
-
- // final matmul to get the output of the attention
- matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim);
-
- // residual connection back into x
- accum(x, s->xb2, dim);
-
- // ffn rmsnorm
- rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim);
-
- // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x))
- // first calculate self.w1(x) and self.w3(x)
- matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim);
- matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim);
-
- // F.silu; silu(x)=x*σ(x),where σ(x) is the logistic sigmoid
- for (int i = 0; i < hidden_dim; i++) {
- s->hb[i] = s->hb[i] * (1.0f / (1.0f + expf(-s->hb[i])));
- }
-
- // elementwise multiply with w3(x)
- for (int i = 0; i < hidden_dim; i++) {
- s->hb[i] = s->hb[i] * s->hb2[i];
- }
-
- // final matmul to get the output of the ffn
- matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim);
-
- // residual connection
- accum(x, s->xb, dim);
- }
-
- // final rmsnorm
- rmsnorm(x, x, w->rms_final_weight, dim);
-
- // classifier into logits
- matmul(s->logits, x, w->token_embedding_table, p->dim, p->vocab_size);
-}
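Inside the layer loop, the RoPE block rotates each (even, odd) pair of q and k by the angle stored as (freq_cis_real, freq_cis_imag); treating each pair as a complex number, the whole inner loop is a single complex multiplication. A per-head NumPy sketch of that equivalence (illustrative only):

    import numpy as np

    def rope_rotate(vec, freq_cis_real_row, freq_cis_imag_row):
        # pair up even/odd elements as complex numbers and rotate by the
        # precomputed unit complex numbers (cos + i*sin)
        pairs = vec[0::2] + 1j * vec[1::2]
        rot = freq_cis_real_row + 1j * freq_cis_imag_row
        out = pairs * rot
        rotated = np.empty_like(vec)
        rotated[0::2] = out.real
        rotated[1::2] = out.imag
        return rotated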
-
-int sample(float* probabilities, int n) {
- // sample index from probabilities, they must sum to 1
- float r = (float)rand() / (float)RAND_MAX;
- float cdf = 0.0f;
- for (int i = 0; i < n; i++) {
- cdf += probabilities[i];
- if (r < cdf) {
- return i;
- }
- }
- return n - 1; // in case of rounding errors
-}
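sample() assumes its input already sums to 1 and walks the CDF; combined with the temperature division in main() below, this is ordinary temperature sampling. A NumPy sketch over raw logits (the names are mine):

    import numpy as np

    def sample_with_temperature(logits, temperature=0.9, rng=None):
        rng = rng or np.random.default_rng()
        if temperature == 0.0:
            return int(np.argmax(logits))   # greedy decoding
        scaled = logits / temperature
        scaled -= scaled.max()              # stability shift
        probs = np.exp(scaled)
        probs /= probs.sum()                # softmax of the scaled logits
        return int(rng.choice(len(probs), p=probs))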
-
-int argmax(float* v, int n) {
- // return argmax of v in elements 0..n
- int max_i = 0;
- float max_p = v[0];
- for (int i = 1; i < n; i++) {
- if (v[i] > max_p) {
- max_i = i;
- max_p = v[i];
- }
- }
- return max_i;
-}
-
-// ----------------------------------------------------------------------------
-
-long time_in_ms() {
- struct timeval time;
- gettimeofday(&time, NULL);
- return time.tv_sec * 1000 + time.tv_usec / 1000;
-}
-
-int main(int argc, char *argv[]) {
-
- // poor man's C argparse
- char *checkpoint = NULL;
- float temperature = 0.9f;
- // 'checkpoint' is necessary arg
- if (argc < 2) {
- printf("Usage: %s [temperature] [seed]\n", argv[0]);
- return 1;
- }
- checkpoint = argv[1];
- // temperature is optional
- if (argc >= 3) {
- temperature = atof(argv[2]);
- }
- // seed is optional
- if (argc >= 4) {
- unsigned int seed = atoi(argv[3]);
- srand(seed);
- } else {
- time_t current_time;
- time(&current_time);
- srand((unsigned int)current_time);
- }
-
- // read in the model.bin file
- Config config;
- TransformerWeights weights;
- {
- FILE *file = fopen(checkpoint, "rb");
- if (!file) {
- printf("Unable to open the checkpoint file %s!\n", checkpoint);
- return 1;
- }
- // read in the config header
- if(fread(&config, sizeof(Config), 1, file) != 1) { return 1; }
- // read in the Transformer weights
- malloc_weights(&weights, &config);
- if(checkpoint_init_weights(&weights, &config, file)) { return 1; }
- fclose(file);
- }
-
- // read in the tokenizer.bin file
- char** vocab = (char**)malloc(config.vocab_size * sizeof(char*));
- {
- FILE *file = fopen("tokenizer.bin", "rb");
- if (!file) {
- printf("Unable to open the tokenizer file tokenizer.bin! Run "
- "python tokenizer.py to convert tokenizer.model -> tokenizer.bin\n");
- return 1;
- }
- int len;
- for (int i = 0; i < config.vocab_size; i++) {
- if(fread(&len, sizeof(int), 1, file) != 1) { return 1; }
- vocab[i] = (char *)malloc(len + 1);
- if(fread(vocab[i], len, 1, file) != 1) { return 1; }
- vocab[i][len] = '\0'; // add the string terminating token
- }
- fclose(file);
- }
-
- // create and init the application RunState
- RunState state;
- malloc_run_state(&state, &config);
-
- // the current position we are in
- long start = time_in_ms();
-
- int next;
- int token = 1; // 1 = BOS token in Llama-2 sentencepiece
- int pos = 0;
- while (pos < config.seq_len) {
-
- // forward the transformer to get logits for the next token
- transformer(token, pos, &config, &state, &weights);
-
- // sample the next token
- if(temperature == 0.0f) {
- // greedy argmax sampling
- next = argmax(state.logits, config.vocab_size);
- } else {
- // apply the temperature to the logits
- for (int q=0; q<config.vocab_size; q++) { state.logits[q] /= temperature; }
- between the backward pass(es) and :meth:`step`.
- If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
- Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
- ...
- scaler.scale(loss).backward()
- scaler.unscale_(optimizer)
- torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
- scaler.step(optimizer)
- scaler.update()
- Args:
- optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
- .. warning::
- :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
- and only after all gradients for that optimizer's assigned parameters have been accumulated.
- Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
- .. warning::
- :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
- """
- if not self._enabled:
- return
-
- self._check_scale_growth_tracker("unscale_")
-
- optimizer_state = self._per_optimizer_states[id(optimizer)]
-
- if optimizer_state["stage"] is OptState.UNSCALED: # pylint: disable=no-else-raise
- raise RuntimeError(
- "unscale_() has already been called on this optimizer since the last update()."
- )
- elif optimizer_state["stage"] is OptState.STEPPED:
- raise RuntimeError("unscale_() is being called after step().")
-
- # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
- assert self._scale is not None
- inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
- found_inf = torch.full(
- (1,), 0.0, dtype=torch.float32, device=self._scale.device
- )
-
- optimizer_state["found_inf_per_device"] = self._unscale_grads_(
- optimizer, inv_scale, found_inf, False
- )
- optimizer_state["stage"] = OptState.UNSCALED
-
-def update(self, new_scale=None):
- """
- Updates the scale factor.
- If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
- to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
- the scale is multiplied by ``growth_factor`` to increase it.
- Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
- used directly, it's used to fill GradScaler's internal scale tensor. So if
- ``new_scale`` was a tensor, later in-place changes to that tensor will not further
- affect the scale GradScaler uses internally.)
- Args:
- new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor.
- .. warning::
- :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
- been invoked for all optimizers used this iteration.
- """
- if not self._enabled:
- return
-
- _scale, _growth_tracker = self._check_scale_growth_tracker("update")
-
- if new_scale is not None:
- # Accept a new user-defined scale.
- if isinstance(new_scale, float):
- self._scale.fill_(new_scale) # type: ignore[union-attr]
- else:
- reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False."
- assert isinstance(new_scale, torch.FloatTensor), reason # type: ignore[attr-defined]
- assert new_scale.numel() == 1, reason
- assert new_scale.requires_grad is False, reason
- self._scale.copy_(new_scale) # type: ignore[union-attr]
- else:
- # Consume shared inf/nan data collected from optimizers to update the scale.
- # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
- found_infs = [
- found_inf.to(device="cpu", non_blocking=True)
- for state in self._per_optimizer_states.values()
- for found_inf in state["found_inf_per_device"].values()
- ]
-
- assert len(found_infs) > 0, "No inf checks were recorded prior to update."
-
- found_inf_combined = found_infs[0]
- if len(found_infs) > 1:
- for i in range(1, len(found_infs)):
- found_inf_combined += found_infs[i]
-
- to_device = _scale.device
- _scale = _scale.to("cpu")
- _growth_tracker = _growth_tracker.to("cpu")
-
- core._amp_update_scale_(
- _scale,
- _growth_tracker,
- found_inf_combined,
- self._growth_factor,
- self._backoff_factor,
- self._growth_interval,
- )
-
- _scale = _scale.to(to_device)
- _growth_tracker = _growth_tracker.to(to_device)
- # To prepare for next iteration, clear the data collected from optimizers this iteration.
- self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
-
-def gradscaler_init():
- torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
- torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_
- torch.xpu.amp.GradScaler.unscale_ = unscale_
- torch.xpu.amp.GradScaler.update = update
- return torch.xpu.amp.GradScaler
\ No newline at end of file
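Taken together, the docstrings above describe the usual mixed-precision training pattern: scale the loss, optionally unscale_ before gradient clipping, then step and update (update backs the scale off after skipped steps and grows it after growth_interval clean steps). A hedged usage sketch with the patched scaler; model, optimizer, loss_fn and data_loader are placeholders, not part of this file:

    import torch

    GradScaler = gradscaler_init()        # patched torch.xpu.amp.GradScaler from above
    scaler = GradScaler()

    for inputs, targets in data_loader:   # placeholder training loop
        optimizer.zero_grad()
        loss = loss_fn(model(inputs), targets)
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)        # optional: expose unscaled grads for clipping
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        scaler.step(optimizer)            # skipped if inf/nan gradients were found
        scaler.update()                   # grow or back off the scale factor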
diff --git a/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md b/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md
deleted file mode 100644
index 9fcae7a089eafd33f643d819d143a169db604218..0000000000000000000000000000000000000000
--- a/spaces/SeyedAli/Persian-Visual-Question-Answering-1/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Persian Visual Question Answering (Version 1)
-emoji: 🖼️❓
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Shad0ws/Chat-with-Files/README.md b/spaces/Shad0ws/Chat-with-Files/README.md
deleted file mode 100644
index ac34757d7c091ded8b52dd34d6eb0b5c78e58c28..0000000000000000000000000000000000000000
--- a/spaces/Shad0ws/Chat-with-Files/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Chat With Files
-emoji: ⚡
-colorFrom: blue
-colorTo: yellow
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py
deleted file mode 100644
index 15271bd480fc069e9ae06a8e12409b876f87435d..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/test/db/test_system.py
+++ /dev/null
@@ -1,317 +0,0 @@
-import pytest
-from typing import Generator, List, Callable, Dict, Union
-from chromadb.types import Collection, Segment, SegmentScope
-from chromadb.db.impl.sqlite import SqliteDB
-from chromadb.config import System, Settings
-from chromadb.db.system import SysDB
-from chromadb.db.base import NotFoundError, UniqueConstraintError
-from pytest import FixtureRequest
-import uuid
-
-
-def sqlite() -> Generator[SysDB, None, None]:
- """Fixture generator for sqlite DB"""
- db = SqliteDB(System(Settings(sqlite_database=":memory:", allow_reset=True)))
- db.start()
- yield db
- db.stop()
-
-
-def db_fixtures() -> List[Callable[[], Generator[SysDB, None, None]]]:
- return [sqlite]
-
-
-@pytest.fixture(scope="module", params=db_fixtures())
-def sysdb(request: FixtureRequest) -> Generator[SysDB, None, None]:
- yield next(request.param())
-
-
-sample_collections = [
- Collection(
- id=uuid.uuid4(),
- name="test_collection_1",
- topic="test_topic_1",
- metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3},
- ),
- Collection(
- id=uuid.uuid4(),
- name="test_collection_2",
- topic="test_topic_2",
- metadata={"test_str": "str2", "test_int": 2, "test_float": 2.3},
- ),
- Collection(
- id=uuid.uuid4(),
- name="test_collection_3",
- topic="test_topic_3",
- metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3},
- ),
-]
-
-
-def test_create_get_delete_collections(sysdb: SysDB) -> None:
- sysdb.reset()
-
- for collection in sample_collections:
- sysdb.create_collection(collection)
-
- results = sysdb.get_collections()
- results = sorted(results, key=lambda c: c["name"])
-
- assert sorted(results, key=lambda c: c["name"]) == sample_collections
-
- # Duplicate create fails
- with pytest.raises(UniqueConstraintError):
- sysdb.create_collection(sample_collections[0])
-
- # Find by name
- for collection in sample_collections:
- result = sysdb.get_collections(name=collection["name"])
- assert result == [collection]
-
- # Find by topic
- for collection in sample_collections:
- result = sysdb.get_collections(topic=collection["topic"])
- assert result == [collection]
-
- # Find by id
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"])
- assert result == [collection]
-
- # Find by id and topic (positive case)
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"], topic=collection["topic"])
- assert result == [collection]
-
- # find by id and topic (negative case)
- for collection in sample_collections:
- result = sysdb.get_collections(id=collection["id"], topic="other_topic")
- assert result == []
-
- # Delete
- c1 = sample_collections[0]
- sysdb.delete_collection(c1["id"])
-
- results = sysdb.get_collections()
- assert c1 not in results
- assert len(results) == len(sample_collections) - 1
- assert sorted(results, key=lambda c: c["name"]) == sample_collections[1:]
-
- by_id_result = sysdb.get_collections(id=c1["id"])
- assert by_id_result == []
-
- # Duplicate delete throws an exception
- with pytest.raises(NotFoundError):
- sysdb.delete_collection(c1["id"])
-
-
-def test_update_collections(sysdb: SysDB) -> None:
- metadata: Dict[str, Union[str, int, float]] = {
- "test_str": "str1",
- "test_int": 1,
- "test_float": 1.3,
- }
- coll = Collection(
- id=uuid.uuid4(),
- name="test_collection_1",
- topic="test_topic_1",
- metadata=metadata,
- )
-
- sysdb.reset()
-
- sysdb.create_collection(coll)
-
- # Update name
- coll["name"] = "new_name"
- sysdb.update_collection(coll["id"], name=coll["name"])
- result = sysdb.get_collections(name=coll["name"])
- assert result == [coll]
-
- # Update topic
- coll["topic"] = "new_topic"
- sysdb.update_collection(coll["id"], topic=coll["topic"])
- result = sysdb.get_collections(topic=coll["topic"])
- assert result == [coll]
-
- # Add a new metadata key
- metadata["test_str2"] = "str2"
- sysdb.update_collection(coll["id"], metadata={"test_str2": "str2"})
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
- # Update a metadata key
- metadata["test_str"] = "str3"
- sysdb.update_collection(coll["id"], metadata={"test_str": "str3"})
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
- # Delete a metadata key
- del metadata["test_str"]
- sysdb.update_collection(coll["id"], metadata={"test_str": None})
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
- # Delete all metadata keys
- coll["metadata"] = None
- sysdb.update_collection(coll["id"], metadata=None)
- result = sysdb.get_collections(id=coll["id"])
- assert result == [coll]
-
-
-sample_segments = [
- Segment(
- id=uuid.UUID("00000000-d7d7-413b-92e1-731098a6e492"),
- type="test_type_a",
- scope=SegmentScope.VECTOR,
- topic=None,
- collection=sample_collections[0]["id"],
- metadata={"test_str": "str1", "test_int": 1, "test_float": 1.3},
- ),
- Segment(
- id=uuid.UUID("11111111-d7d7-413b-92e1-731098a6e492"),
- type="test_type_b",
- topic="test_topic_2",
- scope=SegmentScope.VECTOR,
- collection=sample_collections[1]["id"],
- metadata={"test_str": "str2", "test_int": 2, "test_float": 2.3},
- ),
- Segment(
- id=uuid.UUID("22222222-d7d7-413b-92e1-731098a6e492"),
- type="test_type_b",
- topic="test_topic_3",
- scope=SegmentScope.METADATA,
- collection=None,
- metadata={"test_str": "str3", "test_int": 3, "test_float": 3.3},
- ),
-]
-
-
-def test_create_get_delete_segments(sysdb: SysDB) -> None:
- sysdb.reset()
-
- for collection in sample_collections:
- sysdb.create_collection(collection)
-
- for segment in sample_segments:
- sysdb.create_segment(segment)
-
- results = sysdb.get_segments()
- results = sorted(results, key=lambda c: c["id"])
-
- assert results == sample_segments
-
- # Duplicate create fails
- with pytest.raises(UniqueConstraintError):
- sysdb.create_segment(sample_segments[0])
-
- # Find by id
- for segment in sample_segments:
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Find by type
- result = sysdb.get_segments(type="test_type_a")
- assert result == sample_segments[:1]
-
- result = sysdb.get_segments(type="test_type_b")
- assert result == sample_segments[1:]
-
- # Find by collection ID
- result = sysdb.get_segments(collection=sample_collections[0]["id"])
- assert result == sample_segments[:1]
-
- # Find by type and collection ID (positive case)
- result = sysdb.get_segments(
- type="test_type_a", collection=sample_collections[0]["id"]
- )
- assert result == sample_segments[:1]
-
- # Find by type and collection ID (negative case)
- result = sysdb.get_segments(
- type="test_type_b", collection=sample_collections[0]["id"]
- )
- assert result == []
-
- # Delete
- s1 = sample_segments[0]
- sysdb.delete_segment(s1["id"])
-
- results = sysdb.get_segments()
- assert s1 not in results
- assert len(results) == len(sample_segments) - 1
- assert sorted(results, key=lambda c: c["type"]) == sample_segments[1:]
-
- # Duplicate delete throws an exception
- with pytest.raises(NotFoundError):
- sysdb.delete_segment(s1["id"])
-
-
-def test_update_segment(sysdb: SysDB) -> None:
- metadata: Dict[str, Union[str, int, float]] = {
- "test_str": "str1",
- "test_int": 1,
- "test_float": 1.3,
- }
- segment = Segment(
- id=uuid.uuid4(),
- type="test_type_a",
- scope=SegmentScope.VECTOR,
- topic="test_topic_a",
- collection=sample_collections[0]["id"],
- metadata=metadata,
- )
-
- sysdb.reset()
- for c in sample_collections:
- sysdb.create_collection(c)
-
- sysdb.create_segment(segment)
-
- # Update topic to new value
- segment["topic"] = "new_topic"
- sysdb.update_segment(segment["id"], topic=segment["topic"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update topic to None
- segment["topic"] = None
- sysdb.update_segment(segment["id"], topic=segment["topic"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update collection to new value
- segment["collection"] = sample_collections[1]["id"]
- sysdb.update_segment(segment["id"], collection=segment["collection"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update collection to None
- segment["collection"] = None
- sysdb.update_segment(segment["id"], collection=segment["collection"])
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Add a new metadata key
- metadata["test_str2"] = "str2"
- sysdb.update_segment(segment["id"], metadata={"test_str2": "str2"})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Update a metadata key
- metadata["test_str"] = "str3"
- sysdb.update_segment(segment["id"], metadata={"test_str": "str3"})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Delete a metadata key
- del metadata["test_str"]
- sysdb.update_segment(segment["id"], metadata={"test_str": None})
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
-
- # Delete all metadata keys
- segment["metadata"] = None
- sysdb.update_segment(segment["id"], metadata=None)
- result = sysdb.get_segments(id=segment["id"])
- assert result == [segment]
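Condensed from the fixture and tests above, a minimal SysDB round trip looks like this (a sketch; the in-memory settings mirror the sqlite() fixture):

    import uuid

    from chromadb.config import Settings, System
    from chromadb.db.impl.sqlite import SqliteDB
    from chromadb.types import Collection

    db = SqliteDB(System(Settings(sqlite_database=":memory:", allow_reset=True)))
    db.start()
    db.reset()

    coll = Collection(id=uuid.uuid4(), name="demo", topic="demo_topic",
                      metadata={"key": "value"})
    db.create_collection(coll)
    assert db.get_collections(name="demo") == [coll]

    db.delete_collection(coll["id"])
    db.stop()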
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py
deleted file mode 100644
index a242e559b9463cbcef3c67e8fa883aed93db04ec..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/constants.py
+++ /dev/null
@@ -1,2 +0,0 @@
-PROTOCOL_VERSION_WITH_LOW_CARD = 54405
-CH_VERSION_WITH_PROTOCOL = '23.2.1.2537'
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py
deleted file mode 100644
index 5b395ddcf09d9e3538a85ad11a25a317925f5543..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/setup_pydevd_cython.py
+++ /dev/null
@@ -1,250 +0,0 @@
-'''
-A simpler setup version just to compile the speedup module.
-
-It should be used as:
-
-python setup_pydevd_cython build_ext --inplace
-
-Note: the .c file and other generated files are regenerated from
-the .pyx file by running "python build_tools/build.py"
-'''
-
-import os
-import sys
-from setuptools import setup
-
-os.chdir(os.path.dirname(os.path.abspath(__file__)))
-
-IS_PY36_OR_GREATER = sys.version_info > (3, 6)
-TODO_PY311 = sys.version_info > (3, 11)
-
-
-def process_args():
- extension_folder = None
- target_pydevd_name = None
- target_frame_eval = None
- force_cython = False
-
- for i, arg in enumerate(sys.argv[:]):
- if arg == '--build-lib':
- extension_folder = sys.argv[i + 1]
- # It shouldn't be removed from sys.argv (along with --build-temp) because they're passed further to setup()
- if arg.startswith('--target-pyd-name='):
- sys.argv.remove(arg)
- target_pydevd_name = arg[len('--target-pyd-name='):]
- if arg.startswith('--target-pyd-frame-eval='):
- sys.argv.remove(arg)
- target_frame_eval = arg[len('--target-pyd-frame-eval='):]
- if arg == '--force-cython':
- sys.argv.remove(arg)
- force_cython = True
-
- return extension_folder, target_pydevd_name, target_frame_eval, force_cython
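process_args() strips a few custom flags before the remaining arguments reach setup(); a hypothetical invocation (the pyd name here is made up) can be simulated like this:

    import sys

    # --target-pyd-name and --force-cython are consumed by process_args();
    # build_ext/--inplace are left for setuptools.
    sys.argv = [
        "setup_pydevd_cython.py", "build_ext", "--inplace",
        "--target-pyd-name=pydevd_cython_custom", "--force-cython",
    ]
    # process_args() would then return:
    #   (None, "pydevd_cython_custom", None, True)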
-
-
-def process_template_lines(template_lines):
- # Create 2 versions of the template, one for Python 3.8 and another for Python 3.9
- for version in ('38', '39'):
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
-
- for line in template_lines:
- if version == '38':
- line = line.replace('get_bytecode_while_frame_eval(PyFrameObject * frame_obj, int exc)', 'get_bytecode_while_frame_eval_38(PyFrameObject * frame_obj, int exc)')
- line = line.replace('CALL_EvalFrameDefault', 'CALL_EvalFrameDefault_38(frame_obj, exc)')
- else: # 3.9
- line = line.replace('get_bytecode_while_frame_eval(PyFrameObject * frame_obj, int exc)', 'get_bytecode_while_frame_eval_39(PyThreadState* tstate, PyFrameObject * frame_obj, int exc)')
- line = line.replace('CALL_EvalFrameDefault', 'CALL_EvalFrameDefault_39(tstate, frame_obj, exc)')
-
- yield line
-
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
- yield '### WARNING: GENERATED CODE, DO NOT EDIT!'
- yield ''
- yield ''
-
-
-def process_template_file(contents):
- ret = []
- template_lines = []
-
- append_to = ret
- for line in contents.splitlines(keepends=False):
- if line.strip() == '### TEMPLATE_START':
- append_to = template_lines
- elif line.strip() == '### TEMPLATE_END':
- append_to = ret
- for line in process_template_lines(template_lines):
- ret.append(line)
- else:
- append_to.append(line)
-
- return '\n'.join(ret)
-
-
-def build_extension(dir_name, extension_name, target_pydevd_name, force_cython, extended=False, has_pxd=False, template=False):
- pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (extension_name,))
-
- if template:
- pyx_template_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.template.pyx" % (extension_name,))
- with open(pyx_template_file, 'r') as stream:
- contents = stream.read()
-
- contents = process_template_file(contents)
-
- with open(pyx_file, 'w') as stream:
- stream.write(contents)
-
- if target_pydevd_name != extension_name:
- # It MUST be there in this case!
- # (otherwise we'll have unresolved externals because the .c file had another name initially).
- import shutil
-
- # We must force cython in this case (but only in this case -- for the regular setup in the user machine, we
- # should always compile the .c file).
- force_cython = True
-
- new_pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (target_pydevd_name,))
- new_c_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.c" % (target_pydevd_name,))
- shutil.copy(pyx_file, new_pyx_file)
- pyx_file = new_pyx_file
- if has_pxd:
- pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (extension_name,))
- new_pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (target_pydevd_name,))
- shutil.copy(pxd_file, new_pxd_file)
- assert os.path.exists(pyx_file)
-
- try:
- c_files = [os.path.join(dir_name, "%s.c" % target_pydevd_name), ]
- if force_cython:
- for c_file in c_files:
- try:
- os.remove(c_file)
- except:
- pass
- from Cython.Build import cythonize # @UnusedImport
- # Generate the .c files in cythonize (will not compile at this point).
-
- target = "%s/%s.pyx" % (dir_name, target_pydevd_name,)
- cythonize([target])
-
- # Workarounds needed in CPython 3.8 and 3.9 to access PyInterpreterState.eval_frame.
- for c_file in c_files:
- with open(c_file, 'r') as stream:
- c_file_contents = stream.read()
-
- if '#include "internal/pycore_gc.h"' not in c_file_contents:
- c_file_contents = c_file_contents.replace('#include "Python.h"', '''#include "Python.h"
-#if PY_VERSION_HEX >= 0x03090000
-#include "internal/pycore_gc.h"
-#include "internal/pycore_interp.h"
-#endif
-''')
-
- if '#include "internal/pycore_pystate.h"' not in c_file_contents:
- c_file_contents = c_file_contents.replace('#include "pystate.h"', '''#include "pystate.h"
-#if PY_VERSION_HEX >= 0x03080000
-#include "internal/pycore_pystate.h"
-#endif
-''')
-
- # We want the same output on Windows and Linux.
- c_file_contents = c_file_contents.replace('\r\n', '\n').replace('\r', '\n')
- c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\release_mem.h', '_pydevd_frame_eval/release_mem.h')
- c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\pydevd_frame_evaluator.pyx', '_pydevd_frame_eval/pydevd_frame_evaluator.pyx')
- c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pxd', '_pydevd_bundle/pydevd_cython.pxd')
- c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pyx', '_pydevd_bundle/pydevd_cython.pyx')
-
- with open(c_file, 'w') as stream:
- stream.write(c_file_contents)
-
- # Always compile the .c (and not the .pyx) file (which we should keep up-to-date by running build_tools/build.py).
- from distutils.extension import Extension
- extra_compile_args = []
- extra_link_args = []
-
- if 'linux' in sys.platform:
- # Enabling -flto brings executable from 4MB to 0.56MB and -Os to 0.41MB
- # Profiling shows an execution around 3-5% slower with -Os vs -O3,
- # so, kept only -flto.
- extra_compile_args = ["-flto", "-O3"]
- extra_link_args = extra_compile_args[:]
-
- # Note: also experimented with profile-guided optimization. The executable
- # size became a bit smaller (from 0.56MB to 0.5MB) but this would add an
- # extra step to run the debugger to obtain the optimizations
- # so, skipped it for now (note: the actual benchmarks time was in the
- # margin of a 0-1% improvement, which is probably not worth it for
- # speed increments).
- # extra_compile_args = ["-flto", "-fprofile-generate"]
- # ... Run benchmarks ...
- # extra_compile_args = ["-flto", "-fprofile-use", "-fprofile-correction"]
- elif 'win32' in sys.platform:
- pass
- # uncomment to generate pdbs for visual studio.
- # extra_compile_args=["-Zi", "/Od"]
- # extra_link_args=["-debug"]
-
- kwargs = {}
- if extra_link_args:
- kwargs['extra_link_args'] = extra_link_args
- if extra_compile_args:
- kwargs['extra_compile_args'] = extra_compile_args
-
- ext_modules = [
- Extension(
- "%s%s.%s" % (dir_name, "_ext" if extended else "", target_pydevd_name,),
- c_files,
- **kwargs
- )]
-
- # This is needed in CPython 3.8 to be able to include internal/pycore_pystate.h
- # (needed to set PyInterpreterState.eval_frame).
- for module in ext_modules:
- module.define_macros = [('Py_BUILD_CORE_MODULE', '1')]
- setup(
- name='Cythonize',
- ext_modules=ext_modules
- )
- finally:
- if target_pydevd_name != extension_name:
- try:
- os.remove(new_pyx_file)
- except:
- import traceback
- traceback.print_exc()
- try:
- os.remove(new_c_file)
- except:
- import traceback
- traceback.print_exc()
- if has_pxd:
- try:
- os.remove(new_pxd_file)
- except:
- import traceback
- traceback.print_exc()
-
-
-extension_folder, target_pydevd_name, target_frame_eval, force_cython = process_args()
-
-extension_name = "pydevd_cython"
-if target_pydevd_name is None:
- target_pydevd_name = extension_name
-build_extension("_pydevd_bundle", extension_name, target_pydevd_name, force_cython, extension_folder, True)
-
-if IS_PY36_OR_GREATER and not TODO_PY311:
- extension_name = "pydevd_frame_evaluator"
- if target_frame_eval is None:
- target_frame_eval = extension_name
- build_extension("_pydevd_frame_eval", extension_name, target_frame_eval, force_cython, extension_folder, True, template=True)
-
-if extension_folder:
- os.chdir(extension_folder)
- for folder in [file for file in os.listdir(extension_folder) if
- file != 'build' and os.path.isdir(os.path.join(extension_folder, file))]:
- file = os.path.join(folder, "__init__.py")
- if not os.path.exists(file):
- open(file, 'a').close()
diff --git a/spaces/TH5314/newbing/src/components/chat-suggestions.tsx b/spaces/TH5314/newbing/src/components/chat-suggestions.tsx
deleted file mode 100644
index 00c2fee295c9e010946046eb71705a5e131f7a5a..0000000000000000000000000000000000000000
--- a/spaces/TH5314/newbing/src/components/chat-suggestions.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import React, { useMemo } from 'react'
-import Image from 'next/image'
-import HelpIcon from '@/assets/images/help.svg'
-import { SuggestedResponse } from '@/lib/bots/bing/types'
-import { useBing } from '@/lib/hooks/use-bing'
-import { atom, useAtom } from 'jotai'
-
-type Suggestions = SuggestedResponse[]
-const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text }))
-const suggestionsAtom = atom<Suggestions>([])
-
-type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
-
-export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
- const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
- const toggleSuggestions = (() => {
- if (currentSuggestions === helpSuggestions) {
- setSuggestions(suggestions)
- } else {
- setSuggestions(helpSuggestions)
- }
- })
-
- useMemo(() => {
- setSuggestions(suggestions)
- window.scrollBy(0, 2000)
- }, [suggestions.length])
-
- return currentSuggestions?.length ? (
-
-
-
-
-
- {
- currentSuggestions.map(suggestion => (
- setInput(suggestion.text)}>
- {suggestion.text}
-
- ))
- }
-
-
- ) : null
-}
diff --git a/spaces/TNR-5/semantic-image-search.img/src/app/utils.js b/spaces/TNR-5/semantic-image-search.img/src/app/utils.js
deleted file mode 100644
index f0401723a8079fda923d524eabe7ab23fe3a166f..0000000000000000000000000000000000000000
--- a/spaces/TNR-5/semantic-image-search.img/src/app/utils.js
+++ /dev/null
@@ -1,52 +0,0 @@
-
-import { decode } from "blurhash"
-
-const SIZE = 32;
-
-export function blurHashToDataURL(hash) {
- if (!hash) return undefined
-
- const pixels = decode(hash, SIZE, SIZE)
-
- const canvas = document.createElement("canvas");
- canvas.width = SIZE;
- canvas.height = SIZE;
-
- const ctx = canvas.getContext("2d");
- const imageData = ctx.createImageData(SIZE, SIZE);
- imageData.data.set(pixels);
- ctx.putImageData(imageData, 0, 0);
-
- return canvas.toDataURL();
-}
-
-function downloadData(url, filename) {
-
- // Create an anchor element with the data URL as the href attribute
- const downloadLink = document.createElement('a');
- downloadLink.href = url;
-
- // Set the download attribute to specify the desired filename for the downloaded image
- downloadLink.download = filename;
-
- // Trigger the download
- downloadLink.click();
-
- // Clean up: remove the anchor element from the DOM
- downloadLink.remove();
-}
-
-export function downloadImage(url, filename) {
- fetch(url, {
- headers: new Headers({
- Origin: location.origin,
- }),
- mode: 'cors',
- })
- .then((response) => response.blob())
- .then((blob) => {
- let blobUrl = window.URL.createObjectURL(blob)
- downloadData(blobUrl, filename)
- })
- .catch((e) => console.error(e))
-}
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py
deleted file mode 100644
index e125798463512ce4322a2cc139b4e5c1515e5c05..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/segment.py
+++ /dev/null
@@ -1,739 +0,0 @@
-from enum import IntEnum
-from functools import lru_cache
-from itertools import filterfalse
-from logging import getLogger
-from operator import attrgetter
-from typing import (
- TYPE_CHECKING,
- Dict,
- Iterable,
- List,
- NamedTuple,
- Optional,
- Sequence,
- Tuple,
- Type,
- Union,
-)
-
-from .cells import (
- _is_single_cell_widths,
- cached_cell_len,
- cell_len,
- get_character_cell_size,
- set_cell_size,
-)
-from .repr import Result, rich_repr
-from .style import Style
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult
-
-log = getLogger("rich")
-
-
-class ControlType(IntEnum):
- """Non-printable control codes which typically translate to ANSI codes."""
-
- BELL = 1
- CARRIAGE_RETURN = 2
- HOME = 3
- CLEAR = 4
- SHOW_CURSOR = 5
- HIDE_CURSOR = 6
- ENABLE_ALT_SCREEN = 7
- DISABLE_ALT_SCREEN = 8
- CURSOR_UP = 9
- CURSOR_DOWN = 10
- CURSOR_FORWARD = 11
- CURSOR_BACKWARD = 12
- CURSOR_MOVE_TO_COLUMN = 13
- CURSOR_MOVE_TO = 14
- ERASE_IN_LINE = 15
- SET_WINDOW_TITLE = 16
-
-
-ControlCode = Union[
- Tuple[ControlType],
- Tuple[ControlType, Union[int, str]],
- Tuple[ControlType, int, int],
-]
-
-
-@rich_repr()
-class Segment(NamedTuple):
- """A piece of text with associated style. Segments are produced by the Console render process and
- are ultimately converted in to strings to be written to the terminal.
-
- Args:
- text (str): A piece of text.
- style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
- control (Tuple[ControlCode], optional): Optional sequence of control codes.
-
- Attributes:
- cell_length (int): The cell length of this Segment.
- """
-
- text: str
- style: Optional[Style] = None
- control: Optional[Sequence[ControlCode]] = None
-
- @property
- def cell_length(self) -> int:
- """The number of terminal cells required to display self.text.
-
- Returns:
- int: A number of cells.
- """
- text, _style, control = self
- return 0 if control else cell_len(text)
-
- def __rich_repr__(self) -> Result:
- yield self.text
- if self.control is None:
- if self.style is not None:
- yield self.style
- else:
- yield self.style
- yield self.control
-
- def __bool__(self) -> bool:
- """Check if the segment contains text."""
- return bool(self.text)
-
- @property
- def is_control(self) -> bool:
- """Check if the segment contains control codes."""
- return self.control is not None
-
- @classmethod
- @lru_cache(1024 * 16)
- def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
-
- text, style, control = segment
- _Segment = Segment
-
- cell_length = segment.cell_length
- if cut >= cell_length:
- return segment, _Segment("", style, control)
-
- cell_size = get_character_cell_size
-
- pos = int((cut / cell_length) * (len(text) - 1))
-
- before = text[:pos]
- cell_pos = cell_len(before)
- if cell_pos == cut:
- return (
- _Segment(before, style, control),
- _Segment(text[pos:], style, control),
- )
- while pos < len(text):
- char = text[pos]
- pos += 1
- cell_pos += cell_size(char)
- before = text[:pos]
- if cell_pos == cut:
- return (
- _Segment(before, style, control),
- _Segment(text[pos:], style, control),
- )
- if cell_pos > cut:
- return (
- _Segment(before[: pos - 1] + " ", style, control),
- _Segment(" " + text[pos:], style, control),
- )
-
- raise AssertionError("Will never reach here")
-
- def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
- """Split segment in to two segments at the specified column.
-
- If the cut point falls in the middle of a 2-cell wide character then it is replaced
- by two spaces, to preserve the display width of the parent segment.
-
- Returns:
- Tuple[Segment, Segment]: Two segments.
- """
- text, style, control = self
-
- if _is_single_cell_widths(text):
- # Fast path with all 1 cell characters
- if cut >= len(text):
- return self, Segment("", style, control)
- return (
- Segment(text[:cut], style, control),
- Segment(text[cut:], style, control),
- )
-
- return self._split_cells(self, cut)
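A short usage sketch of split_cells (using the public rich package, which ships this same Segment class); the second call shows the wide-character case where the cut lands inside a 2-cell glyph and both halves are padded with spaces:

    from rich.segment import Segment

    # fast path: every character is 1 cell wide
    left, right = Segment("hello").split_cells(2)
    assert (left.text, right.text) == ("he", "llo")

    # wide-character path: cutting the 2-cell glyph "你" at column 1
    # replaces it with two spaces, preserving the 2-cell display width
    left, right = Segment("你").split_cells(1)
    assert (left.text, right.text) == (" ", " ")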
-
- @classmethod
- def line(cls) -> "Segment":
- """Make a new line segment."""
- return cls("\n")
-
- @classmethod
- def apply_style(
- cls,
- segments: Iterable["Segment"],
- style: Optional[Style] = None,
- post_style: Optional[Style] = None,
- ) -> Iterable["Segment"]:
- """Apply style(s) to an iterable of segments.
-
- Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
-
- Args:
- segments (Iterable[Segment]): Segments to process.
- style (Style, optional): Base style. Defaults to None.
- post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
-
- Returns:
- Iterable[Segment]: A new iterable of segments (possibly the same iterable).
- """
- result_segments = segments
- if style:
- apply = style.__add__
- result_segments = (
- cls(text, None if control else apply(_style), control)
- for text, _style, control in result_segments
- )
- if post_style:
- result_segments = (
- cls(
- text,
- (
- None
- if control
- else (_style + post_style if _style else post_style)
- ),
- control,
- )
- for text, _style, control in result_segments
- )
- return result_segments
-
- @classmethod
- def filter_control(
- cls, segments: Iterable["Segment"], is_control: bool = False
- ) -> Iterable["Segment"]:
- """Filter segments by ``is_control`` attribute.
-
- Args:
- segments (Iterable[Segment]): An iterable of Segment instances.
- is_control (bool, optional): is_control flag to match in search.
-
- Returns:
- Iterable[Segment]: And iterable of Segment instances.
-
- """
- if is_control:
- return filter(attrgetter("control"), segments)
- else:
- return filterfalse(attrgetter("control"), segments)
-
- @classmethod
- def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
- """Split a sequence of segments in to a list of lines.
-
- Args:
- segments (Iterable[Segment]): Segments potentially containing line feeds.
-
- Yields:
- Iterable[List[Segment]]: Iterable of segment lists, one per line.
- """
- line: List[Segment] = []
- append = line.append
-
- for segment in segments:
- if "\n" in segment.text and not segment.control:
- text, style, _ = segment
- while text:
- _text, new_line, text = text.partition("\n")
- if _text:
- append(cls(_text, style))
- if new_line:
- yield line
- line = []
- append = line.append
- else:
- append(segment)
- if line:
- yield line
-
- @classmethod
- def split_and_crop_lines(
- cls,
- segments: Iterable["Segment"],
- length: int,
- style: Optional[Style] = None,
- pad: bool = True,
- include_new_lines: bool = True,
- ) -> Iterable[List["Segment"]]:
- """Split segments in to lines, and crop lines greater than a given length.
-
- Args:
- segments (Iterable[Segment]): An iterable of segments, probably
- generated from console.render.
- length (int): Desired line length.
- style (Style, optional): Style to use for any padding.
- pad (bool): Enable padding of lines that are less than `length`.
-
- Returns:
- Iterable[List[Segment]]: An iterable of lines of segments.
- """
- line: List[Segment] = []
- append = line.append
-
- adjust_line_length = cls.adjust_line_length
- new_line_segment = cls("\n")
-
- for segment in segments:
- if "\n" in segment.text and not segment.control:
- text, segment_style, _ = segment
- while text:
- _text, new_line, text = text.partition("\n")
- if _text:
- append(cls(_text, segment_style))
- if new_line:
- cropped_line = adjust_line_length(
- line, length, style=style, pad=pad
- )
- if include_new_lines:
- cropped_line.append(new_line_segment)
- yield cropped_line
- line.clear()
- else:
- append(segment)
- if line:
- yield adjust_line_length(line, length, style=style, pad=pad)
-
- @classmethod
- def adjust_line_length(
- cls,
- line: List["Segment"],
- length: int,
- style: Optional[Style] = None,
- pad: bool = True,
- ) -> List["Segment"]:
- """Adjust a line to a given width (cropping or padding as required).
-
- Args:
- segments (Iterable[Segment]): A list of segments in a single line.
- length (int): The desired width of the line.
- style (Style, optional): The style of padding if used (space on the end). Defaults to None.
- pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
-
- Returns:
- List[Segment]: A line of segments with the desired length.
- """
- line_length = sum(segment.cell_length for segment in line)
- new_line: List[Segment]
-
- if line_length < length:
- if pad:
- new_line = line + [cls(" " * (length - line_length), style)]
- else:
- new_line = line[:]
- elif line_length > length:
- new_line = []
- append = new_line.append
- line_length = 0
- for segment in line:
- segment_length = segment.cell_length
- if line_length + segment_length < length or segment.control:
- append(segment)
- line_length += segment_length
- else:
- text, segment_style, _ = segment
- text = set_cell_size(text, length - line_length)
- append(cls(text, segment_style))
- break
- else:
- new_line = line[:]
- return new_line
-
- @classmethod
- def get_line_length(cls, line: List["Segment"]) -> int:
- """Get the length of list of segments.
-
- Args:
- line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters),
-
- Returns:
- int: The length of the line.
- """
- _cell_len = cell_len
- return sum(_cell_len(text) for text, style, control in line if not control)
-
- @classmethod
- def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
- """Get the shape (enclosing rectangle) of a list of lines.
-
- Args:
- lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
-
- Returns:
- Tuple[int, int]: Width and height in characters.
- """
- get_line_length = cls.get_line_length
- max_width = max(get_line_length(line) for line in lines) if lines else 0
- return (max_width, len(lines))
-
- @classmethod
- def set_shape(
- cls,
- lines: List[List["Segment"]],
- width: int,
- height: Optional[int] = None,
- style: Optional[Style] = None,
- new_lines: bool = False,
- ) -> List[List["Segment"]]:
- """Set the shape of a list of lines (enclosing rectangle).
-
- Args:
- lines (List[List[Segment]]): A list of lines.
- width (int): Desired width.
- height (int, optional): Desired height or None for no change.
- style (Style, optional): Style of any padding added.
- new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
-
- Returns:
- List[List[Segment]]: New list of lines.
- """
- _height = height or len(lines)
-
- blank = (
- [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
- )
-
- adjust_line_length = cls.adjust_line_length
- shaped_lines = lines[:_height]
- shaped_lines[:] = [
- adjust_line_length(line, width, style=style) for line in lines
- ]
- if len(shaped_lines) < _height:
- shaped_lines.extend([blank] * (_height - len(shaped_lines)))
- return shaped_lines
-
- @classmethod
- def align_top(
- cls: Type["Segment"],
- lines: List[List["Segment"]],
- width: int,
- height: int,
- style: Style,
- new_lines: bool = False,
- ) -> List[List["Segment"]]:
- """Aligns lines to top (adds extra lines to bottom as required).
-
- Args:
- lines (List[List[Segment]]): A list of lines.
- width (int): Desired width.
- height (int, optional): Desired height or None for no change.
- style (Style): Style of any padding added.
- new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
-
- Returns:
- List[List[Segment]]: New list of lines.
- """
- extra_lines = height - len(lines)
- if not extra_lines:
- return lines[:]
- lines = lines[:height]
- blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
- lines = lines + [[blank]] * extra_lines
- return lines
-
- @classmethod
- def align_bottom(
- cls: Type["Segment"],
- lines: List[List["Segment"]],
- width: int,
- height: int,
- style: Style,
- new_lines: bool = False,
- ) -> List[List["Segment"]]:
- """Aligns render to bottom (adds extra lines above as required).
-
- Args:
- lines (List[List[Segment]]): A list of lines.
- width (int): Desired width.
- height (int, optional): Desired height or None for no change.
- style (Style): Style of any padding added. Defaults to None.
- new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
-
- Returns:
- List[List[Segment]]: New list of lines.
- """
- extra_lines = height - len(lines)
- if not extra_lines:
- return lines[:]
- lines = lines[:height]
- blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
- lines = [[blank]] * extra_lines + lines
- return lines
-
- @classmethod
- def align_middle(
- cls: Type["Segment"],
- lines: List[List["Segment"]],
- width: int,
- height: int,
- style: Style,
- new_lines: bool = False,
- ) -> List[List["Segment"]]:
- """Aligns lines to middle (adds extra lines to above and below as required).
-
- Args:
- lines (List[List[Segment]]): A list of lines.
- width (int): Desired width.
- height (int, optional): Desired height or None for no change.
- style (Style): Style of any padding added.
- new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
-
- Returns:
- List[List[Segment]]: New list of lines.
- """
- extra_lines = height - len(lines)
- if not extra_lines:
- return lines[:]
- lines = lines[:height]
- blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
- top_lines = extra_lines // 2
- bottom_lines = extra_lines - top_lines
- lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
- return lines
-
- @classmethod
- def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
- """Simplify an iterable of segments by combining contiguous segments with the same style.
-
- Args:
- segments (Iterable[Segment]): An iterable of segments.
-
- Returns:
- Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
- """
- iter_segments = iter(segments)
- try:
- last_segment = next(iter_segments)
- except StopIteration:
- return
-
- _Segment = Segment
- for segment in iter_segments:
- if last_segment.style == segment.style and not segment.control:
- last_segment = _Segment(
- last_segment.text + segment.text, last_segment.style
- )
- else:
- yield last_segment
- last_segment = segment
- yield last_segment
-
- @classmethod
- def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
- """Remove all links from an iterable of styles.
-
- Args:
- segments (Iterable[Segment]): An iterable segments.
-
- Yields:
- Segment: Segments with link removed.
- """
- for segment in segments:
- if segment.control or segment.style is None:
- yield segment
- else:
- text, style, _control = segment
- yield cls(text, style.update_link(None) if style else None)
-
- @classmethod
- def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
- """Remove all styles from an iterable of segments.
-
- Args:
- segments (Iterable[Segment]): An iterable of segments.
-
- Yields:
- Segment: Segments with styles replaced with None.
- """
- for text, _style, control in segments:
- yield cls(text, None, control)
-
- @classmethod
- def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
- """Remove all color from an iterable of segments.
-
- Args:
- segments (Iterable[Segment]): An iterable of segments.
-
- Yields:
- Segment: Segments with colorless style.
- """
-
- cache: Dict[Style, Style] = {}
- for text, style, control in segments:
- if style:
- colorless_style = cache.get(style)
- if colorless_style is None:
- colorless_style = style.without_color
- cache[style] = colorless_style
- yield cls(text, colorless_style, control)
- else:
- yield cls(text, None, control)
-
- @classmethod
- def divide(
- cls, segments: Iterable["Segment"], cuts: Iterable[int]
- ) -> Iterable[List["Segment"]]:
- """Divides an iterable of segments in to portions.
-
- Args:
- cuts (Iterable[int]): Cell positions where to divide.
-
- Yields:
- [Iterable[List[Segment]]]: An iterable of Segments in List.
- """
- split_segments: List["Segment"] = []
- add_segment = split_segments.append
-
- iter_cuts = iter(cuts)
-
- while True:
- cut = next(iter_cuts, -1)
- if cut == -1:
- return []
- if cut != 0:
- break
- yield []
- pos = 0
-
- segments_clear = split_segments.clear
- segments_copy = split_segments.copy
-
- _cell_len = cached_cell_len
- for segment in segments:
- text, _style, control = segment
- while text:
- end_pos = pos if control else pos + _cell_len(text)
- if end_pos < cut:
- add_segment(segment)
- pos = end_pos
- break
-
- if end_pos == cut:
- add_segment(segment)
- yield segments_copy()
- segments_clear()
- pos = end_pos
-
- cut = next(iter_cuts, -1)
- if cut == -1:
- if split_segments:
- yield segments_copy()
- return
-
- break
-
- else:
- before, segment = segment.split_cells(cut - pos)
- text, _style, control = segment
- add_segment(before)
- yield segments_copy()
- segments_clear()
- pos = cut
-
- cut = next(iter_cuts, -1)
- if cut == -1:
- if split_segments:
- yield segments_copy()
- return
-
- yield segments_copy()
-
-
-class Segments:
- """A simple renderable to render an iterable of segments. This class may be useful if
- you want to print segments outside of a __rich_console__ method.
-
- Args:
- segments (Iterable[Segment]): An iterable of segments.
- new_lines (bool, optional): Add new lines between segments. Defaults to False.
- """
-
- def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
- self.segments = list(segments)
- self.new_lines = new_lines
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- if self.new_lines:
- line = Segment.line()
- for segment in self.segments:
- yield segment
- yield line
- else:
- yield from self.segments
-
-
-class SegmentLines:
- def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
- """A simple renderable containing a number of lines of segments. May be used as an intermediate
- in rendering process.
-
- Args:
- lines (Iterable[List[Segment]]): Lists of segments forming lines.
- new_lines (bool, optional): Insert new lines after each line. Defaults to False.
- """
- self.lines = list(lines)
- self.new_lines = new_lines
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- if self.new_lines:
- new_line = Segment.line()
- for line in self.lines:
- yield from line
- yield new_line
- else:
- for line in self.lines:
- yield from line
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich.console import Console
- from pip._vendor.rich.syntax import Syntax
- from pip._vendor.rich.text import Text
-
- code = """from rich.console import Console
-console = Console()
-text = Text.from_markup("Hello, [bold magenta]World[/]!")
-console.print(text)"""
-
- text = Text.from_markup("Hello, [bold magenta]World[/]!")
-
- console = Console()
-
- console.rule("rich.Segment")
- console.print(
- "A Segment is the last step in the Rich render process before generating text with ANSI codes."
- )
- console.print("\nConsider the following code:\n")
- console.print(Syntax(code, "python", line_numbers=True))
- console.print()
- console.print(
- "When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
- )
- fragments = list(console.render(text))
- console.print(fragments)
- console.print()
- console.print("The Segments are then processed to produce the following output:\n")
- console.print(text)
- console.print(
- "\nYou will only need to know this if you are implementing your own Rich renderables."
- )
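The deleted segment.py above ends with a self-demo; for quicker orientation, here is a minimal usage sketch of the helpers it defines (not part of the original file, and it assumes the public `rich` package, which exposes the same `Segment` API as this vendored copy):

from rich.console import Console
from rich.segment import Segment, Segments
from rich.style import Style

bold = Style(bold=True)
line = [Segment("Hello, ", bold), Segment("World", bold), Segment("!", bold)]

# simplify() merges contiguous segments that share the same style.
merged = list(Segment.simplify(line))         # [Segment("Hello, World!", bold)]

# divide() cuts a line of segments at absolute cell offsets.
chunks = list(Segment.divide(line, [5, 10]))  # cells 0-4, then cells 5-9

# Segments is a small renderable for printing raw segments directly.
Console().print(Segments(merged))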
diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py
deleted file mode 100644
index e310e6039c816644b9b25b165ee226fe4e1c8e0e..0000000000000000000000000000000000000000
--- a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/flax_impl/flax_unet_pseudo3d_blocks.py
+++ /dev/null
@@ -1,254 +0,0 @@
-
-from typing import Tuple
-
-import jax
-import jax.numpy as jnp
-import flax.linen as nn
-
-from .flax_attention_pseudo3d import TransformerPseudo3DModel
-from .flax_resnet_pseudo3d import ResnetBlockPseudo3D, DownsamplePseudo3D, UpsamplePseudo3D
-
-
-class UNetMidBlockPseudo3DCrossAttn(nn.Module):
- in_channels: int
- num_layers: int = 1
- attn_num_head_channels: int = 1
- use_memory_efficient_attention: bool = False
- dtype: jnp.dtype = jnp.float32
-
- def setup(self) -> None:
- resnets = [
- ResnetBlockPseudo3D(
- in_channels = self.in_channels,
- out_channels = self.in_channels,
- dtype = self.dtype
- )
- ]
- attentions = []
- for _ in range(self.num_layers):
- attn_block = TransformerPseudo3DModel(
- in_channels = self.in_channels,
- num_attention_heads = self.attn_num_head_channels,
- attention_head_dim = self.in_channels // self.attn_num_head_channels,
- num_layers = 1,
- use_memory_efficient_attention = self.use_memory_efficient_attention,
- dtype = self.dtype
- )
- attentions.append(attn_block)
- res_block = ResnetBlockPseudo3D(
- in_channels = self.in_channels,
- out_channels = self.in_channels,
- dtype = self.dtype
- )
- resnets.append(res_block)
- self.attentions = attentions
- self.resnets = resnets
-
- def __call__(self,
- hidden_states: jax.Array,
- temb: jax.Array,
- encoder_hidden_states: jax.Array
- ) -> jax.Array:
- hidden_states = self.resnets[0](hidden_states, temb)
- for attn, resnet in zip(self.attentions, self.resnets[1:]):
- hidden_states = attn(hidden_states, encoder_hidden_states)
- hidden_states = resnet(hidden_states, temb)
- return hidden_states
-
-
-class CrossAttnDownBlockPseudo3D(nn.Module):
- in_channels: int
- out_channels: int
- num_layers: int = 1
- attn_num_head_channels: int = 1
- add_downsample: bool = True
- use_memory_efficient_attention: bool = False
- dtype: jnp.dtype = jnp.float32
-
- def setup(self) -> None:
- attentions = []
- resnets = []
- for i in range(self.num_layers):
- in_channels = self.in_channels if i == 0 else self.out_channels
- res_block = ResnetBlockPseudo3D(
- in_channels = in_channels,
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- resnets.append(res_block)
- attn_block = TransformerPseudo3DModel(
- in_channels = self.out_channels,
- num_attention_heads = self.attn_num_head_channels,
- attention_head_dim = self.out_channels // self.attn_num_head_channels,
- num_layers = 1,
- use_memory_efficient_attention = self.use_memory_efficient_attention,
- dtype = self.dtype
- )
- attentions.append(attn_block)
- self.resnets = resnets
- self.attentions = attentions
-
- if self.add_downsample:
- self.downsamplers_0 = DownsamplePseudo3D(
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- else:
- self.downsamplers_0 = None
-
- def __call__(self,
- hidden_states: jax.Array,
- temb: jax.Array,
- encoder_hidden_states: jax.Array
- ) -> Tuple[jax.Array, jax.Array]:
- output_states = ()
- for resnet, attn in zip(self.resnets, self.attentions):
- hidden_states = resnet(hidden_states, temb)
- hidden_states = attn(hidden_states, encoder_hidden_states)
- output_states += (hidden_states, )
- if self.add_downsample:
- hidden_states = self.downsamplers_0(hidden_states)
- output_states += (hidden_states, )
- return hidden_states, output_states
-
-
-class DownBlockPseudo3D(nn.Module):
- in_channels: int
- out_channels: int
- num_layers: int = 1
- add_downsample: bool = True
- dtype: jnp.dtype = jnp.float32
-
- def setup(self) -> None:
- resnets = []
- for i in range(self.num_layers):
- in_channels = self.in_channels if i == 0 else self.out_channels
- res_block = ResnetBlockPseudo3D(
- in_channels = in_channels,
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- resnets.append(res_block)
- self.resnets = resnets
- if self.add_downsample:
- self.downsamplers_0 = DownsamplePseudo3D(
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- else:
- self.downsamplers_0 = None
-
- def __call__(self,
- hidden_states: jax.Array,
- temb: jax.Array
- ) -> Tuple[jax.Array, jax.Array]:
- output_states = ()
- for resnet in self.resnets:
- hidden_states = resnet(hidden_states, temb)
- output_states += (hidden_states, )
- if self.add_downsample:
- hidden_states = self.downsamplers_0(hidden_states)
- output_states += (hidden_states, )
- return hidden_states, output_states
-
-
-class CrossAttnUpBlockPseudo3D(nn.Module):
- in_channels: int
- out_channels: int
- prev_output_channels: int
- num_layers: int = 1
- attn_num_head_channels: int = 1
- add_upsample: bool = True
- use_memory_efficient_attention: bool = False
- dtype: jnp.dtype = jnp.float32
-
- def setup(self) -> None:
- resnets = []
- attentions = []
- for i in range(self.num_layers):
- res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
- resnet_in_channels = self.prev_output_channels if i == 0 else self.out_channels
- res_block = ResnetBlockPseudo3D(
- in_channels = resnet_in_channels + res_skip_channels,
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- resnets.append(res_block)
- attn_block = TransformerPseudo3DModel(
- in_channels = self.out_channels,
- num_attention_heads = self.attn_num_head_channels,
- attention_head_dim = self.out_channels // self.attn_num_head_channels,
- num_layers = 1,
- use_memory_efficient_attention = self.use_memory_efficient_attention,
- dtype = self.dtype
- )
- attentions.append(attn_block)
- self.resnets = resnets
- self.attentions = attentions
- if self.add_upsample:
- self.upsamplers_0 = UpsamplePseudo3D(
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- else:
- self.upsamplers_0 = None
-
- def __call__(self,
- hidden_states: jax.Array,
- res_hidden_states_tuple: Tuple[jax.Array, ...],
- temb: jax.Array,
- encoder_hidden_states: jax.Array
- ) -> jax.Array:
- for resnet, attn in zip(self.resnets, self.attentions):
- res_hidden_states = res_hidden_states_tuple[-1]
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
- hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis = -1)
- hidden_states = resnet(hidden_states, temb)
- hidden_states = attn(hidden_states, encoder_hidden_states)
- if self.add_upsample:
- hidden_states = self.upsamplers_0(hidden_states)
- return hidden_states
-
-
-class UpBlockPseudo3D(nn.Module):
- in_channels: int
- out_channels: int
- prev_output_channels: int
- num_layers: int = 1
- add_upsample: bool = True
- dtype: jnp.dtype = jnp.float32
-
- def setup(self) -> None:
- resnets = []
- for i in range(self.num_layers):
- res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
- resnet_in_channels = self.prev_output_channels if i == 0 else self.out_channels
- res_block = ResnetBlockPseudo3D(
- in_channels = resnet_in_channels + res_skip_channels,
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- resnets.append(res_block)
- self.resnets = resnets
- if self.add_upsample:
- self.upsamplers_0 = UpsamplePseudo3D(
- out_channels = self.out_channels,
- dtype = self.dtype
- )
- else:
- self.upsamplers_0 = None
-
- def __call__(self,
- hidden_states: jax.Array,
- res_hidden_states_tuple: Tuple[jax.Array, ...],
- temb: jax.Array
- ) -> jax.Array:
- for resnet in self.resnets:
- res_hidden_states = res_hidden_states_tuple[-1]
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
- hidden_states = jnp.concatenate([hidden_states, res_hidden_states], axis = -1)
- hidden_states = resnet(hidden_states, temb)
- if self.add_upsample:
- hidden_states = self.upsamplers_0(hidden_states)
- return hidden_states
-
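For orientation, a schematic and purely hypothetical sketch of how a UNet typically threads the `output_states` returned by the down blocks above into the `res_hidden_states_tuple` consumed by the up blocks; the real wiring lives in the UNet model that instantiates these blocks, the names below are placeholders, and the plain (non-cross-attention) blocks take no `context` argument:

def unet_skip_wiring(down_blocks, mid_block, up_blocks, sample, temb, context):
    # Collect skip activations: the input sample plus every state a down
    # block emits (one per resnet, plus one after its optional downsample).
    skips = (sample,)
    for down in down_blocks:
        sample, states = down(sample, temb, context)
        skips += states

    sample = mid_block(sample, temb, context)

    # Each up block pops as many skips as it has resnets and consumes them
    # from the end (most recent first), concatenating each onto its input
    # along the channel axis.
    for up in up_blocks:
        n = len(up.resnets)
        res_tuple, skips = skips[-n:], skips[:-n]
        sample = up(sample, res_tuple, temb, context)
    return sample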
diff --git a/spaces/Tester002/Claudette/README.md b/spaces/Tester002/Claudette/README.md
deleted file mode 100644
index 4d66fac0a952a138355eb698001b73c7758333e2..0000000000000000000000000000000000000000
--- a/spaces/Tester002/Claudette/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Claudette
-emoji: 📚
-colorFrom: indigo
-colorTo: pink
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/TomLemsky/this_skin_does_not_exist/app.py b/spaces/TomLemsky/this_skin_does_not_exist/app.py
deleted file mode 100644
index 920f8cdd746080c252d602cdc0f79b97249de090..0000000000000000000000000000000000000000
--- a/spaces/TomLemsky/this_skin_does_not_exist/app.py
+++ /dev/null
@@ -1,119 +0,0 @@
-from denoising_diffusion_pytorch import Unet, GaussianDiffusion, Trainer
-import gradio as gr
-import numpy as np
-import torch
-from PIL import Image
-
-HTML_TEMPLATE="""
-
-
-
-
-
-
-
-
-
-
-
-"""
-
-def generate_image(diffusion_model, hide_layer=True, num_steps=200, num_img=1):
- diffusion_model.sampling_timesteps = num_steps
- images = diffusion_model.sample(num_img)
- images = images.cpu().permute((0,2,3,1)).numpy()
- return show_image(images, hide_layer=hide_layer)
-
-def show_image(images, hide_layer=True):
- num_img = len(images)
- masked_images = images.copy()
- if hide_layer:
- layer_mask = np.ones((1,64,64,4))
- layer_mask[:,:16,32:,:] = 0 # second layer head
- layer_mask[:,32:, :,:] = 0 # second layer body
- layer_mask[:,-16:,16:-16,:] = 1 # left arm and leg
- masked_images = masked_images * layer_mask
- masked_list = [masked_images[i] for i in range(num_img)]
- output_list = [ images[i] for i in range(num_img)]
- b64_img = [gr.processing_utils.encode_array_to_base64(i) for i in masked_list]
- html = [HTML_TEMPLATE.replace("{BASE64_PLACEHOLDER}",i) for i in b64_img]
- iframes= [f"""""" for h in html]
- return iframes + output_list
-
-def show_defaults():
- default_img_paths = ["person.png", "jacket_man.png", "pink_woman.png", "violet_woman.png"]
- images = []
- for p in default_img_paths:
- img = np.array(Image.open(p))/255
- images.append(img)
- stacked_images = np.stack(images)
- return show_image(stacked_images, hide_layer=True)
-
-
-if __name__ == '__main__':
-
- # define model and diffusion process
- n_channels = 4
- num_img = 4
-
- model = Unet(
- dim = 64,
- dim_mults = (1, 2, 4, 8),
- channels = n_channels
- )
-
- diffusion = GaussianDiffusion(
- model,
- image_size = 64,
- timesteps = 1000, # number of steps
- sampling_timesteps = 400 # 400
- )
-
- # dummy trainer instantiated to load model
- trainer = Trainer(diffusion, ".", num_samples=num_img, results_folder=".")
- trainer.load(160)
-
- with gr.Blocks(css=".gr-block {image-rendering: pixelated}") as demo:
- gr.Markdown("""# This skin does not exist
-
- A simple diffusion model trained from scratch on 200 000 Minecraft skins for a day on just my GTX 1660Ti 6GB.
- ([Write-up on how I made this](https://tomlemsky.github.io/2022/11/13/Minecraft-Skin-Generation-using-Diffusion.html))
- """)
- with gr.Row():
- step_slider = gr.Slider(minimum=1, maximum=200, value=40, label="Diffusion steps (values above 50 will take more than a minute)")
- hide_layer_checkbox = gr.Checkbox(True, label="Hide second skin layer (helmets, hair, outerwear, ...), often noisy due to sparse training data")
- generate_btn = gr.Button("Generate new Minecraft skins!")
-
- with gr.Row():
- #image_box = gr.Image(shape=(64,64), image_mode="RGBA"
- image_html = [gr.HTML() for i in range(num_img)]
- with gr.Row():
- image_blocks = [gr.Image(shape=(64,64), image_mode="RGBA") for i in range(num_img)]
-
- gr.Markdown("""
- Acknowledgements:
- - denoising_diffusion_pytorch (for the diffusion model): [https://github.com/lucidrains/denoising-diffusion-pytorch](https://github.com/lucidrains/denoising-diffusion-pytorch)
- - skinview3d (for the 3D Minecraft skin viewer): [https://github.com/bs-community/skinview3d](https://github.com/bs-community/skinview3d)
- - 3dmoljs (for the inspiration on how to use JavaScript 3D viewers with gradio): [https://huggingface.co/blog/spaces_3dmoljs](https://huggingface.co/blog/spaces_3dmoljs)
- """)
- # assign the skin generating function to the button
- wrapper = lambda num_steps, hide_layer:generate_image(diffusion, hide_layer=hide_layer, num_steps=num_steps, num_img=num_img)
- generate_btn.click(fn=wrapper, inputs=[step_slider, hide_layer_checkbox], outputs=image_html+image_blocks)
- # display default images at page load
- demo.load(show_defaults, inputs=None, outputs=image_html+image_blocks)
- demo.launch()
diff --git a/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py b/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py
deleted file mode 100644
index 3ad346141340e03bdbaa20121e1ed435bb3da57a..0000000000000000000000000000000000000000
--- a/spaces/Toritto/Genshin-impact-IA-project-v1/rmvpe.py
+++ /dev/null
@@ -1,432 +0,0 @@
-import sys, torch, numpy as np, traceback, pdb
-import torch.nn as nn
-from time import time as ttime
-import torch.nn.functional as F
-
-
-class BiGRU(nn.Module):
- def __init__(self, input_features, hidden_features, num_layers):
- super(BiGRU, self).__init__()
- self.gru = nn.GRU(
- input_features,
- hidden_features,
- num_layers=num_layers,
- batch_first=True,
- bidirectional=True,
- )
-
- def forward(self, x):
- return self.gru(x)[0]
-
-
-class ConvBlockRes(nn.Module):
- def __init__(self, in_channels, out_channels, momentum=0.01):
- super(ConvBlockRes, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=(1, 1),
- padding=(1, 1),
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- nn.Conv2d(
- in_channels=out_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=(1, 1),
- padding=(1, 1),
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- )
- if in_channels != out_channels:
- self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
- self.is_shortcut = True
- else:
- self.is_shortcut = False
-
- def forward(self, x):
- if self.is_shortcut:
- return self.conv(x) + self.shortcut(x)
- else:
- return self.conv(x) + x
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- in_channels,
- in_size,
- n_encoders,
- kernel_size,
- n_blocks,
- out_channels=16,
- momentum=0.01,
- ):
- super(Encoder, self).__init__()
- self.n_encoders = n_encoders
- self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
- self.layers = nn.ModuleList()
- self.latent_channels = []
- for i in range(self.n_encoders):
- self.layers.append(
- ResEncoderBlock(
- in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
- )
- )
- self.latent_channels.append([out_channels, in_size])
- in_channels = out_channels
- out_channels *= 2
- in_size //= 2
- self.out_size = in_size
- self.out_channel = out_channels
-
- def forward(self, x):
- concat_tensors = []
- x = self.bn(x)
- for i in range(self.n_encoders):
- _, x = self.layers[i](x)
- concat_tensors.append(_)
- return x, concat_tensors
-
-
-class ResEncoderBlock(nn.Module):
- def __init__(
- self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
- ):
- super(ResEncoderBlock, self).__init__()
- self.n_blocks = n_blocks
- self.conv = nn.ModuleList()
- self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
- for i in range(n_blocks - 1):
- self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
- self.kernel_size = kernel_size
- if self.kernel_size is not None:
- self.pool = nn.AvgPool2d(kernel_size=kernel_size)
-
- def forward(self, x):
- for i in range(self.n_blocks):
- x = self.conv[i](x)
- if self.kernel_size is not None:
- return x, self.pool(x)
- else:
- return x
-
-
-class Intermediate(nn.Module): #
- def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
- super(Intermediate, self).__init__()
- self.n_inters = n_inters
- self.layers = nn.ModuleList()
- self.layers.append(
- ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
- )
- for i in range(self.n_inters - 1):
- self.layers.append(
- ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
- )
-
- def forward(self, x):
- for i in range(self.n_inters):
- x = self.layers[i](x)
- return x
-
-
-class ResDecoderBlock(nn.Module):
- def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
- super(ResDecoderBlock, self).__init__()
- out_padding = (0, 1) if stride == (1, 2) else (1, 1)
- self.n_blocks = n_blocks
- self.conv1 = nn.Sequential(
- nn.ConvTranspose2d(
- in_channels=in_channels,
- out_channels=out_channels,
- kernel_size=(3, 3),
- stride=stride,
- padding=(1, 1),
- output_padding=out_padding,
- bias=False,
- ),
- nn.BatchNorm2d(out_channels, momentum=momentum),
- nn.ReLU(),
- )
- self.conv2 = nn.ModuleList()
- self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
- for i in range(n_blocks - 1):
- self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
-
- def forward(self, x, concat_tensor):
- x = self.conv1(x)
- x = torch.cat((x, concat_tensor), dim=1)
- for i in range(self.n_blocks):
- x = self.conv2[i](x)
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
- super(Decoder, self).__init__()
- self.layers = nn.ModuleList()
- self.n_decoders = n_decoders
- for i in range(self.n_decoders):
- out_channels = in_channels // 2
- self.layers.append(
- ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
- )
- in_channels = out_channels
-
- def forward(self, x, concat_tensors):
- for i in range(self.n_decoders):
- x = self.layers[i](x, concat_tensors[-1 - i])
- return x
-
-
-class DeepUnet(nn.Module):
- def __init__(
- self,
- kernel_size,
- n_blocks,
- en_de_layers=5,
- inter_layers=4,
- in_channels=1,
- en_out_channels=16,
- ):
- super(DeepUnet, self).__init__()
- self.encoder = Encoder(
- in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
- )
- self.intermediate = Intermediate(
- self.encoder.out_channel // 2,
- self.encoder.out_channel,
- inter_layers,
- n_blocks,
- )
- self.decoder = Decoder(
- self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
- )
-
- def forward(self, x):
- x, concat_tensors = self.encoder(x)
- x = self.intermediate(x)
- x = self.decoder(x, concat_tensors)
- return x
-
-
-class E2E(nn.Module):
- def __init__(
- self,
- n_blocks,
- n_gru,
- kernel_size,
- en_de_layers=5,
- inter_layers=4,
- in_channels=1,
- en_out_channels=16,
- ):
- super(E2E, self).__init__()
- self.unet = DeepUnet(
- kernel_size,
- n_blocks,
- en_de_layers,
- inter_layers,
- in_channels,
- en_out_channels,
- )
- self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
- if n_gru:
- self.fc = nn.Sequential(
- BiGRU(3 * 128, 256, n_gru),
- nn.Linear(512, 360),
- nn.Dropout(0.25),
- nn.Sigmoid(),
- )
- else:
- self.fc = nn.Sequential(
- nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()  # NOTE: N_MELS and N_CLASS are not defined in this file; this branch only works if they are provided elsewhere
- )
-
- def forward(self, mel):
- mel = mel.transpose(-1, -2).unsqueeze(1)
- x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
- x = self.fc(x)
- return x
-
-
-from librosa.filters import mel
-
-
-class MelSpectrogram(torch.nn.Module):
- def __init__(
- self,
- is_half,
- n_mel_channels,
- sampling_rate,
- win_length,
- hop_length,
- n_fft=None,
- mel_fmin=0,
- mel_fmax=None,
- clamp=1e-5,
- ):
- super().__init__()
- n_fft = win_length if n_fft is None else n_fft
- self.hann_window = {}
- mel_basis = mel(
- sr=sampling_rate,
- n_fft=n_fft,
- n_mels=n_mel_channels,
- fmin=mel_fmin,
- fmax=mel_fmax,
- htk=True,
- )
- mel_basis = torch.from_numpy(mel_basis).float()
- self.register_buffer("mel_basis", mel_basis)
- self.n_fft = win_length if n_fft is None else n_fft
- self.hop_length = hop_length
- self.win_length = win_length
- self.sampling_rate = sampling_rate
- self.n_mel_channels = n_mel_channels
- self.clamp = clamp
- self.is_half = is_half
-
- def forward(self, audio, keyshift=0, speed=1, center=True):
- factor = 2 ** (keyshift / 12)
- n_fft_new = int(np.round(self.n_fft * factor))
- win_length_new = int(np.round(self.win_length * factor))
- hop_length_new = int(np.round(self.hop_length * speed))
- keyshift_key = str(keyshift) + "_" + str(audio.device)
- if keyshift_key not in self.hann_window:
- self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
- audio.device
- )
- fft = torch.stft(
- audio,
- n_fft=n_fft_new,
- hop_length=hop_length_new,
- win_length=win_length_new,
- window=self.hann_window[keyshift_key],
- center=center,
- return_complex=True,
- )
- magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
- if keyshift != 0:
- size = self.n_fft // 2 + 1
- resize = magnitude.size(1)
- if resize < size:
- magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
- magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
- mel_output = torch.matmul(self.mel_basis, magnitude)
- if self.is_half:
- mel_output = mel_output.half()
- log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
- return log_mel_spec
-
-
-class RMVPE:
- def __init__(self, model_path, is_half, device=None):
- self.resample_kernel = {}
- model = E2E(4, 1, (2, 2))
- ckpt = torch.load(model_path, map_location="cpu")
- model.load_state_dict(ckpt)
- model.eval()
- if is_half:
- model = model.half()
- self.model = model
- self.resample_kernel = {}
- self.is_half = is_half
- if device is None:
- device = "cuda" if torch.cuda.is_available() else "cpu"
- self.device = device
- self.mel_extractor = MelSpectrogram(
- is_half, 128, 16000, 1024, 160, None, 30, 8000
- ).to(device)
- self.model = self.model.to(device)
- cents_mapping = 20 * np.arange(360) + 1997.3794084376191
- self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
-
- def mel2hidden(self, mel):
- with torch.no_grad():
- n_frames = mel.shape[-1]
- mel = F.pad(
- mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
- )
- hidden = self.model(mel)
- return hidden[:, :n_frames]
-
- def decode(self, hidden, thred=0.03):
- cents_pred = self.to_local_average_cents(hidden, thred=thred)
- f0 = 10 * (2 ** (cents_pred / 1200))
- f0[f0 == 10] = 0
- # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
- return f0
-
- def infer_from_audio(self, audio, thred=0.03):
- audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
- # torch.cuda.synchronize()
- # t0=ttime()
- mel = self.mel_extractor(audio, center=True)
- # torch.cuda.synchronize()
- # t1=ttime()
- hidden = self.mel2hidden(mel)
- # torch.cuda.synchronize()
- # t2=ttime()
- hidden = hidden.squeeze(0).cpu().numpy()
- if self.is_half:
- hidden = hidden.astype("float32")
- f0 = self.decode(hidden, thred=thred)
- # torch.cuda.synchronize()
- # t3=ttime()
- # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
- return f0
-
- def to_local_average_cents(self, salience, thred=0.05):
- # t0 = ttime()
- center = np.argmax(salience, axis=1)  # (n_frames,) index of the strongest bin per frame
- salience = np.pad(salience, ((0, 0), (4, 4)))  # pad to (n_frames, 368)
- # t1 = ttime()
- center += 4
- todo_salience = []
- todo_cents_mapping = []
- starts = center - 4
- ends = center + 5
- for idx in range(salience.shape[0]):
- todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
- todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
- # t2 = ttime()
- todo_salience = np.array(todo_salience)  # (n_frames, 9)
- todo_cents_mapping = np.array(todo_cents_mapping)  # (n_frames, 9)
- product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
- weight_sum = np.sum(todo_salience, 1)  # (n_frames,)
- divided = product_sum / weight_sum  # (n_frames,) weighted local average in cents
- # t3 = ttime()
- maxx = np.max(salience, axis=1)  # (n_frames,)
- divided[maxx <= thred] = 0
- # t4 = ttime()
- # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
- return divided
-
-
-# if __name__ == '__main__':
-# audio, sampling_rate = sf.read("卢本伟语录~1.wav")
-# if len(audio.shape) > 1:
-# audio = librosa.to_mono(audio.transpose(1, 0))
-# audio_bak = audio.copy()
-# if sampling_rate != 16000:
-# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt"
-# thred = 0.03 # 0.01
-# device = 'cuda' if torch.cuda.is_available() else 'cpu'
-# rmvpe = RMVPE(model_path,is_half=False, device=device)
-# t0=ttime()
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# f0 = rmvpe.infer_from_audio(audio, thred=thred)
-# t1=ttime()
-# print(f0.shape,t1-t0)
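As a small worked example (not part of the original file), the cents-to-frequency mapping that `decode` and `to_local_average_cents` rely on: the 360 salience bins sit 20 cents apart, offset by about 1997.38 cents above a 10 Hz reference.

import numpy as np

def cents_to_hz(cents):
    # same mapping as decode(): f0 = 10 * 2 ** (cents / 1200)
    return 10.0 * (2.0 ** (np.asarray(cents, dtype=np.float64) / 1200.0))

cents_mapping = 20 * np.arange(360) + 1997.3794084376191
print(cents_to_hz(cents_mapping[0]))   # ~31.7 Hz  (lowest bin)
print(cents_to_hz(cents_mapping[-1]))  # ~2005.6 Hz (highest bin)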
diff --git a/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py b/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py
deleted file mode 100644
index e77f0805e30d3967265ed458dd7357e65a20c24f..0000000000000000000000000000000000000000
--- a/spaces/VIPLab/Track-Anything/tracker/inference/inference_core.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from inference.memory_manager import MemoryManager
-from model.network import XMem
-from model.aggregate import aggregate
-
-from tracker.util.tensor_util import pad_divide_by, unpad
-
-
-class InferenceCore:
- def __init__(self, network:XMem, config):
- self.config = config
- self.network = network
- self.mem_every = config['mem_every']
- self.deep_update_every = config['deep_update_every']
- self.enable_long_term = config['enable_long_term']
-
- # if deep_update_every < 0, synchronize deep update with memory frame
- self.deep_update_sync = (self.deep_update_every < 0)
-
- self.clear_memory()
- self.all_labels = None
-
- def clear_memory(self):
- self.curr_ti = -1
- self.last_mem_ti = 0
- if not self.deep_update_sync:
- self.last_deep_update_ti = -self.deep_update_every
- self.memory = MemoryManager(config=self.config)
-
- def update_config(self, config):
- self.mem_every = config['mem_every']
- self.deep_update_every = config['deep_update_every']
- self.enable_long_term = config['enable_long_term']
-
- # if deep_update_every < 0, synchronize deep update with memory frame
- self.deep_update_sync = (self.deep_update_every < 0)
- self.memory.update_config(config)
-
- def set_all_labels(self, all_labels):
- # self.all_labels = [l.item() for l in all_labels]
- self.all_labels = all_labels
-
- def step(self, image, mask=None, valid_labels=None, end=False):
- # image: 3*H*W
- # mask: num_objects*H*W or None
- self.curr_ti += 1
- image, self.pad = pad_divide_by(image, 16)
- image = image.unsqueeze(0) # add the batch dimension
-
- is_mem_frame = ((self.curr_ti-self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end)
- need_segment = (self.curr_ti > 0) and ((valid_labels is None) or (len(self.all_labels) != len(valid_labels)))
- is_deep_update = (
- (self.deep_update_sync and is_mem_frame) or # synchronized
- (not self.deep_update_sync and self.curr_ti-self.last_deep_update_ti >= self.deep_update_every) # no-sync
- ) and (not end)
- is_normal_update = (not self.deep_update_sync or not is_deep_update) and (not end)
-
- key, shrinkage, selection, f16, f8, f4 = self.network.encode_key(image,
- need_ek=(self.enable_long_term or need_segment),
- need_sk=is_mem_frame)
- multi_scale_features = (f16, f8, f4)
-
- # segment the current frame if needed
- if need_segment:
- memory_readout = self.memory.match_memory(key, selection).unsqueeze(0)
-
- hidden, pred_logits_with_bg, pred_prob_with_bg = self.network.segment(multi_scale_features, memory_readout,
- self.memory.get_hidden(), h_out=is_normal_update, strip_bg=False)
- # remove batch dim
- pred_prob_with_bg = pred_prob_with_bg[0]
- pred_prob_no_bg = pred_prob_with_bg[1:]
-
- pred_logits_with_bg = pred_logits_with_bg[0]
- pred_logits_no_bg = pred_logits_with_bg[1:]
-
- if is_normal_update:
- self.memory.set_hidden(hidden)
- else:
- pred_prob_no_bg = pred_prob_with_bg = pred_logits_with_bg = pred_logits_no_bg = None
-
- # use the input mask if any
- if mask is not None:
- mask, _ = pad_divide_by(mask, 16)
-
- if pred_prob_no_bg is not None:
- # if we have a predicted mask, we work on it
- # make pred_prob_no_bg consistent with the input mask
- mask_regions = (mask.sum(0) > 0.5)
- pred_prob_no_bg[:, mask_regions] = 0
- # shift by 1 because mask/pred_prob_no_bg do not contain background
- mask = mask.type_as(pred_prob_no_bg)
- if valid_labels is not None:
- shift_by_one_non_labels = [i for i in range(pred_prob_no_bg.shape[0]) if (i+1) not in valid_labels]
- # non-labelled objects are copied from the predicted mask
- mask[shift_by_one_non_labels] = pred_prob_no_bg[shift_by_one_non_labels]
- pred_prob_with_bg = aggregate(mask, dim=0)
-
- # also create new hidden states
- self.memory.create_hidden_state(len(self.all_labels), key)
-
- # save as memory if needed
- if is_mem_frame:
- value, hidden = self.network.encode_value(image, f16, self.memory.get_hidden(),
- pred_prob_with_bg[1:].unsqueeze(0), is_deep_update=is_deep_update)
- self.memory.add_memory(key, shrinkage, value, self.all_labels,
- selection=selection if self.enable_long_term else None)
- self.last_mem_ti = self.curr_ti
-
- if is_deep_update:
- self.memory.set_hidden(hidden)
- self.last_deep_update_ti = self.curr_ti
-
- if pred_logits_with_bg is None:
- return unpad(pred_prob_with_bg, self.pad), None
- else:
- return unpad(pred_prob_with_bg, self.pad), unpad(pred_logits_with_bg, self.pad)
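A hypothetical driver loop (not part of the original file) showing how InferenceCore is typically stepped through a clip. `network`, `frames`, and `first_mask` are placeholders, and the config shown lists only the keys read directly above; a real run passes the full XMem config, since MemoryManager reads further keys.

import torch

config = {"mem_every": 5, "deep_update_every": -1, "enable_long_term": True}
processor = InferenceCore(network, config)        # network: a loaded XMem model
processor.set_all_labels([1, 2])                  # object ids present in the clip

with torch.no_grad():
    for ti, frame in enumerate(frames):           # frame: 3*H*W tensor
        mask = first_mask if ti == 0 else None    # num_objects*H*W, first frame only
        prob, _ = processor.step(frame, mask, valid_labels=[1, 2] if ti == 0 else None)
        prediction = torch.argmax(prob, dim=0)    # channel 0 is background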
diff --git a/spaces/Visgift/nyami/app.py b/spaces/Visgift/nyami/app.py
deleted file mode 100644
index eec8e91f6d20fa0211e98f8053f295a26e1f0622..0000000000000000000000000000000000000000
--- a/spaces/Visgift/nyami/app.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from engine import SentimentAnalyzer
-import streamlit as st
-
-
-# Load the sentiment analysis model from Hugging Face
-sentiment_analysis = SentimentAnalyzer()
-
-# Define the Streamlit app interface
-st.title("User Sentiment Analysis")
-
-sentence = st.text_input("Enter a sentence:")
-
-# Perform sentiment analysis on the input sentence
-if sentence:
- label = sentiment_analysis.get_sentiment(sentence)
- # Display the sentiment analysis result to the user
- st.write(f"Sentiment analysis result: {label}")
\ No newline at end of file
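The local `engine` module is not included in this dump, so its behaviour is unknown; a minimal, hypothetical `SentimentAnalyzer` with the interface the app expects could wrap a transformers pipeline like this (the model name is an assumption):

from transformers import pipeline

class SentimentAnalyzer:
    """Hypothetical stand-in for the missing engine.SentimentAnalyzer."""

    def __init__(self, model_name="distilbert-base-uncased-finetuned-sst-2-english"):
        self._pipe = pipeline("sentiment-analysis", model=model_name)

    def get_sentiment(self, text):
        result = self._pipe(text)[0]    # e.g. {"label": "POSITIVE", "score": 0.99}
        return f"{result['label']} ({result['score']:.2f})"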
diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py
deleted file mode 100644
index 7ceee96bdf520f8d730651e815defd83b7ecfebb..0000000000000000000000000000000000000000
--- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/base_task.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import logging
-import os
-
-import torch
-import torch.distributed as dist
-from minigpt4.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized
-from minigpt4.common.logger import MetricLogger, SmoothedValue
-from minigpt4.common.registry import registry
-from minigpt4.datasets.data_utils import prepare_sample
-
-
-class BaseTask:
- def __init__(self, **kwargs):
- super().__init__()
-
- self.inst_id_key = "instance_id"
-
- @classmethod
- def setup_task(cls, **kwargs):
- return cls()
-
- def build_model(self, cfg):
- model_config = cfg.model_cfg
-
- model_cls = registry.get_model_class(model_config.arch)
- return model_cls.from_config(model_config)
-
- def build_datasets(self, cfg):
- """
- Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'.
- Download dataset and annotations automatically if not exist.
-
- Args:
- cfg (common.config.Config): Config object whose datasets_cfg section lists the datasets to build.
-
- Returns:
- dict: Dictionary of torch.utils.data.Dataset objects by split.
- """
-
- datasets = dict()
-
- datasets_config = cfg.datasets_cfg
-
- assert len(datasets_config) > 0, "At least one dataset has to be specified."
-
- for name in datasets_config:
- dataset_config = datasets_config[name]
-
- builder = registry.get_builder_class(name)(dataset_config)
- dataset = builder.build_datasets()
-
- dataset['train'].name = name
- if 'sample_ratio' in dataset_config:
- dataset['train'].sample_ratio = dataset_config.sample_ratio
-
- datasets[name] = dataset
-
- return datasets
-
- def train_step(self, model, samples):
- loss = model(samples)["loss"]
- return loss
-
- def valid_step(self, model, samples):
- raise NotImplementedError
-
- def before_evaluation(self, model, dataset, **kwargs):
- model.before_evaluation(dataset=dataset, task_type=type(self))
-
- def after_evaluation(self, **kwargs):
- pass
-
- def inference_step(self):
- raise NotImplementedError
-
- def evaluation(self, model, data_loader, cuda_enabled=True):
- metric_logger = MetricLogger(delimiter=" ")
- header = "Evaluation"
- # TODO make it configurable
- print_freq = 10
-
- results = []
-
- for samples in metric_logger.log_every(data_loader, print_freq, header):
- samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
-
- eval_output = self.valid_step(model=model, samples=samples)
- results.extend(eval_output)
-
- if is_dist_avail_and_initialized():
- dist.barrier()
-
- return results
-
- def train_epoch(
- self,
- epoch,
- model,
- data_loader,
- optimizer,
- lr_scheduler,
- scaler=None,
- cuda_enabled=False,
- log_freq=50,
- accum_grad_iters=1,
- ):
- return self._train_inner_loop(
- epoch=epoch,
- iters_per_epoch=lr_scheduler.iters_per_epoch,
- model=model,
- data_loader=data_loader,
- optimizer=optimizer,
- scaler=scaler,
- lr_scheduler=lr_scheduler,
- log_freq=log_freq,
- cuda_enabled=cuda_enabled,
- accum_grad_iters=accum_grad_iters,
- )
-
- def train_iters(
- self,
- epoch,
- start_iters,
- iters_per_inner_epoch,
- model,
- data_loader,
- optimizer,
- lr_scheduler,
- scaler=None,
- cuda_enabled=False,
- log_freq=50,
- accum_grad_iters=1,
- ):
- return self._train_inner_loop(
- epoch=epoch,
- start_iters=start_iters,
- iters_per_epoch=iters_per_inner_epoch,
- model=model,
- data_loader=data_loader,
- optimizer=optimizer,
- scaler=scaler,
- lr_scheduler=lr_scheduler,
- log_freq=log_freq,
- cuda_enabled=cuda_enabled,
- accum_grad_iters=accum_grad_iters,
- )
-
- def _train_inner_loop(
- self,
- epoch,
- iters_per_epoch,
- model,
- data_loader,
- optimizer,
- lr_scheduler,
- scaler=None,
- start_iters=None,
- log_freq=50,
- cuda_enabled=False,
- accum_grad_iters=1,
- ):
- """
- An inner training loop compatible with both epoch-based and iter-based training.
-
- When using epoch-based, training stops after one epoch; when using iter-based,
- training stops after #iters_per_epoch iterations.
- """
- use_amp = scaler is not None
-
- if not hasattr(data_loader, "__next__"):
- # convert to iterator if not already
- data_loader = iter(data_loader)
-
- metric_logger = MetricLogger(delimiter=" ")
- metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
- metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}"))
-
- # if iter-based runner, schedule lr based on inner epoch.
- logging.info(
- "Start training epoch {}, {} iters per inner epoch.".format(
- epoch, iters_per_epoch
- )
- )
- header = "Train: data epoch: [{}]".format(epoch)
- if start_iters is None:
- # epoch-based runner
- inner_epoch = epoch
- else:
- # In iter-based runner, we schedule the learning rate based on iterations.
- inner_epoch = start_iters // iters_per_epoch
- header = header + "; inner epoch [{}]".format(inner_epoch)
-
- for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header):
- # if using iter-based runner, we stop after iters_per_epoch iterations.
- if i >= iters_per_epoch:
- break
-
- samples = next(data_loader)
-
- samples = prepare_sample(samples, cuda_enabled=cuda_enabled)
- samples.update(
- {
- "epoch": inner_epoch,
- "num_iters_per_epoch": iters_per_epoch,
- "iters": i,
- }
- )
-
- lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i)
-
- with torch.cuda.amp.autocast(enabled=use_amp):
- loss = self.train_step(model=model, samples=samples)
-
- # after_train_step()
- if use_amp:
- scaler.scale(loss).backward()
- else:
- loss.backward()
-
- # update gradients every accum_grad_iters iterations
- if (i + 1) % accum_grad_iters == 0:
- if use_amp:
- scaler.step(optimizer)
- scaler.update()
- else:
- optimizer.step()
- optimizer.zero_grad()
-
- metric_logger.update(loss=loss.item())
- metric_logger.update(lr=optimizer.param_groups[0]["lr"])
-
- # after train_epoch()
- # gather the stats from all processes
- metric_logger.synchronize_between_processes()
- logging.info("Averaged stats: " + str(metric_logger.global_avg()))
- return {
- k: "{:.3f}".format(meter.global_avg)
- for k, meter in metric_logger.meters.items()
- }
-
- @staticmethod
- def save_result(result, result_dir, filename, remove_duplicate=""):
- import json
-
- result_file = os.path.join(
- result_dir, "%s_rank%d.json" % (filename, get_rank())
- )
- final_result_file = os.path.join(result_dir, "%s.json" % filename)
-
- json.dump(result, open(result_file, "w"))
-
- if is_dist_avail_and_initialized():
- dist.barrier()
-
- if is_main_process():
- logging.warning("rank %d starts merging results." % get_rank())
- # combine results from all processes
- result = []
-
- for rank in range(get_world_size()):
- result_file = os.path.join(
- result_dir, "%s_rank%d.json" % (filename, rank)
- )
- res = json.load(open(result_file, "r"))
- result += res
-
- if remove_duplicate:
- result_new = []
- id_list = []
- for res in result:
- if res[remove_duplicate] not in id_list:
- id_list.append(res[remove_duplicate])
- result_new.append(res)
- result = result_new
-
- json.dump(result, open(final_result_file, "w"))
- print("result file saved to %s" % final_result_file)
-
- return final_result_file
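Stripped of the logging and runner plumbing, the AMP plus gradient-accumulation update inside `_train_inner_loop` reduces to the sketch below (not part of the original file; `model`, `loader`, and `optimizer` are placeholders). Note that the loop above accumulates the unscaled loss, whereas dividing by `accum_grad_iters`, as done here, is a common variant that keeps the effective step size independent of the accumulation factor.

import torch

scaler = torch.cuda.amp.GradScaler()
accum_grad_iters = 4

for i, samples in enumerate(loader):
    with torch.cuda.amp.autocast():
        loss = model(samples)["loss"] / accum_grad_iters   # scale for accumulation
    scaler.scale(loss).backward()                          # grads accumulate across iters
    if (i + 1) % accum_grad_iters == 0:
        scaler.step(optimizer)                             # unscales grads, then steps
        scaler.update()
        optimizer.zero_grad()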
diff --git a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py b/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py
deleted file mode 100644
index bbe8ec83a5dc95ee26a36e457feb394d18b7cd17..0000000000000000000000000000000000000000
--- a/spaces/Vision-CAIR/MiniGPT-v2/minigpt4/tasks/image_text_pretrain.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-from minigpt4.common.registry import registry
-from minigpt4.tasks.base_task import BaseTask
-
-
-@registry.register_task("image_text_pretrain")
-class ImageTextPretrainTask(BaseTask):
- def __init__(self):
- super().__init__()
-
- def evaluation(self, model, data_loader, cuda_enabled=True):
- pass
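A brief lookup sketch (not part of the original file): the `register_task` decorator above makes the class resolvable by name, so a runner can build it without referring to the class directly (assuming the registry exposes `get_task_class`, as the LAVIS-style registry used here does):

from minigpt4.common.registry import registry
import minigpt4.tasks.image_text_pretrain  # noqa: F401  (runs the decorator)

task_cls = registry.get_task_class("image_text_pretrain")
task = task_cls.setup_task()                # equivalent to ImageTextPretrainTask()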
diff --git a/spaces/Vrk/SeeFood/FoodNoFood.py b/spaces/Vrk/SeeFood/FoodNoFood.py
deleted file mode 100644
index 8b41b3a834ec74715ce6ba346ec98408ffde289e..0000000000000000000000000000000000000000
--- a/spaces/Vrk/SeeFood/FoodNoFood.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from PIL import Image
-import requests
-
-from transformers import CLIPProcessor, CLIPModel
-
-def food_not_food(input_image):
- model = CLIPModel.from_pretrained("flax-community/clip-rsicd-v2")
- processor = CLIPProcessor.from_pretrained("flax-community/clip-rsicd-v2")
-
- labels = ["food", "not food"]
- inputs = processor(text=[f"a photo of a {l}" for l in labels], images=input_image, return_tensors="pt", padding=True)
-
- outputs = model(**inputs)
- logits_per_image = outputs.logits_per_image
- prob = logits_per_image.softmax(dim=1).detach().cpu().numpy().argmax(axis=1)
- return labels[prob[0]]
\ No newline at end of file
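A short usage sketch (not part of the original file); `pizza.jpg` is a placeholder path. Since the function reloads the CLIP weights on every call, callers classifying many images may want to hoist the two `from_pretrained` calls out of the loop.

from PIL import Image

image = Image.open("pizza.jpg")    # any RGB image
print(food_not_food(image))        # -> "food" or "not food"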
diff --git a/spaces/Vrk/SkimLit/setup.sh b/spaces/Vrk/SkimLit/setup.sh
deleted file mode 100644
index d8f97044be8894928d03fa6fe7e79af09be7edba..0000000000000000000000000000000000000000
--- a/spaces/Vrk/SkimLit/setup.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-mkdir -p ~/.streamlit/
-echo "\
-[general]\n\
-email = \"your-email@domain.com\"\n\
-" > ~/.streamlit/credentials.toml
-echo "\
-[server]\n\
-headless = true\n\
-enableCORS=false\n\
-port = $PORT\n\
-" > ~/.streamlit/config.toml
diff --git a/spaces/WZUN666/vits-uma-genshin-honkai/utils.py b/spaces/WZUN666/vits-uma-genshin-honkai/utils.py
deleted file mode 100644
index ee4b01ddfbe8173965371b29f770f3e87615fe71..0000000000000000000000000000000000000000
--- a/spaces/WZUN666/vits-uma-genshin-honkai/utils.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-import librosa
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
- except KeyError:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
- data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
- audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
- return torch.FloatTensor(audio.astype(np.float32))
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
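A small usage sketch (not part of the original file) of the `HParams` wrapper above, which turns a nested config dict into attribute-style access as used by `get_hparams_from_file`:

config = {"train": {"batch_size": 16, "learning_rate": 2e-4},
          "model": {"hidden_channels": 192}}

hps = HParams(**config)
print(hps.train.batch_size)       # 16
print(hps.model.hidden_channels)  # 192
print("train" in hps)             # True
print(len(hps))                   # 2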
diff --git a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py b/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py
deleted file mode 100644
index 30c224807a70faa9df9c9eb75f8e80c8c867b16b..0000000000000000000000000000000000000000
--- a/spaces/XlalalaX/VITS-Umamusume-voice-synthesizer/monotonic_align/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from distutils.core import setup
-from Cython.Build import cythonize
-import numpy
-
-setup(
- name = 'monotonic_align',
- ext_modules = cythonize("core.pyx"),
- include_dirs=[numpy.get_include()]
-)
diff --git a/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py b/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py
deleted file mode 100644
index f0192698fbe39f463e21a3092230258565cc7e0f..0000000000000000000000000000000000000000
--- a/spaces/XuebaoDingZhen/YOLOv50.0.1/hubconf.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
-
-Usage:
- import torch
- model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model
- model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch
- model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model
- model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo
-"""
-
-import torch
-
-
-def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
- """Creates or loads a YOLOv5 model
-
- Arguments:
- name (str): model name 'yolov5s' or path 'path/to/best.pt'
- pretrained (bool): load pretrained weights into the model
- channels (int): number of input channels
- classes (int): number of model classes
- autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
- verbose (bool): print all information to screen
- device (str, torch.device, None): device to use for model parameters
-
- Returns:
- YOLOv5 model
- """
- from pathlib import Path
-
- from models.common import AutoShape, DetectMultiBackend
- from models.experimental import attempt_load
- from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
- from utils.downloads import attempt_download
- from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
- from utils.torch_utils import select_device
-
- if not verbose:
- LOGGER.setLevel(logging.WARNING)
- check_requirements(ROOT / 'requirements.txt', exclude=('opencv-python', 'tensorboard', 'thop'))
- name = Path(name)
- path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path
- try:
- device = select_device(device)
- if pretrained and channels == 3 and classes == 80:
- try:
- model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model
- if autoshape:
- if model.pt and isinstance(model.model, ClassificationModel):
- LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. '
- 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
- elif model.pt and isinstance(model.model, SegmentationModel):
- LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. '
- 'You will not be able to run inference with this model.')
- else:
- model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
- except Exception:
- model = attempt_load(path, device=device, fuse=False) # arbitrary model
- else:
- cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
- model = DetectionModel(cfg, channels, classes) # create model
- if pretrained:
- ckpt = torch.load(attempt_download(path), map_location=device) # load
- csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
- csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect
- model.load_state_dict(csd, strict=False) # load
- if len(ckpt['model'].names) == classes:
- model.names = ckpt['model'].names # set class names attribute
- if not verbose:
- LOGGER.setLevel(logging.INFO) # reset to default
- return model.to(device)
-
- except Exception as e:
- help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading'
- s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
- raise Exception(s) from e
-
-
-def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
- # YOLOv5 custom or local model
- return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
-
-
-def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-nano model https://github.com/ultralytics/yolov5
- return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-small model https://github.com/ultralytics/yolov5
- return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-medium model https://github.com/ultralytics/yolov5
- return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-large model https://github.com/ultralytics/yolov5
- return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
- return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
- # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
- return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)
-
-
-if __name__ == '__main__':
- import argparse
- from pathlib import Path
-
- import numpy as np
- from PIL import Image
-
- from utils.general import cv2, print_args
-
- # Argparser
- parser = argparse.ArgumentParser()
- parser.add_argument('--model', type=str, default='yolov5s', help='model name')
- opt = parser.parse_args()
- print_args(vars(opt))
-
- # Model
- model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
- # model = custom(path='path/to/model.pt') # custom
-
- # Images
- imgs = [
- 'data/images/zidane.jpg', # filename
- Path('data/images/zidane.jpg'), # Path
- 'https://ultralytics.com/images/zidane.jpg', # URI
- cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
- Image.open('data/images/bus.jpg'), # PIL
- np.zeros((320, 640, 3))] # numpy
-
- # Inference
- results = model(imgs, size=320) # batched inference
-
- # Results
- results.print()
- results.save()
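For reference, a minimal sketch of consuming the `custom` entry point above through torch.hub with a locally trained checkpoint (the repo path and weights file are placeholders):

    import torch

    model = torch.hub.load('.', 'custom', 'runs/train/exp/weights/best.pt', source='local')
    results = model('data/images/bus.jpg', size=640)  # AutoShape handles file/URI/PIL/np inputs
    results.print()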
diff --git a/spaces/XzJosh/Ava-Bert-VITS2/resample.py b/spaces/XzJosh/Ava-Bert-VITS2/resample.py
deleted file mode 100644
index 2ed1685654a371c5722168e9987809b05b1cb224..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Ava-Bert-VITS2/resample.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-
-import soundfile
-from scipy.io import wavfile
-from tqdm import tqdm
-
-
-def process(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=args.sr)
- soundfile.write(
- os.path.join(args.out_dir, speaker, wav_name),
- wav,
- sr
- )
-
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./raw", help="path to source dir")
- parser.add_argument("--out_dir", type=str, default="./dataset", help="path to target dir")
- args = parser.parse_args()
-    # num_processes = 8
-    num_processes = cpu_count() - 2 if cpu_count() > 4 else 1
-    pool = Pool(processes=num_processes)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
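A typical invocation of this resampling script, assuming one sub-directory per speaker under the input directory (the paths shown are simply the argparse defaults above):

    python resample.py --sr 44100 --in_dir ./raw --out_dir ./dataset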
diff --git a/spaces/XzJosh/otto-Bert-VITS2/utils.py b/spaces/XzJosh/otto-Bert-VITS2/utils.py
deleted file mode 100644
index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/otto-Bert-VITS2/utils.py
+++ /dev/null
@@ -1,293 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- elif optimizer is None and not skip_optimizer:
-        # else:  # disable this branch when doing inference and enable the branch above
- new_opt_dict = optimizer.state_dict()
- new_opt_dict_params = new_opt_dict['param_groups'][0]['params']
- new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups']
- new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params
- optimizer.load_state_dict(new_opt_dict)
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- #assert "emb_g" not in k
- # print("load", k)
- new_state_dict[k] = saved_state_dict[k]
- assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape)
- except:
- print("error, %s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict, strict=False)
- else:
- model.load_state_dict(new_state_dict, strict=False)
- print("load ")
- logger.info("Loaded checkpoint '{}' (iteration {})".format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL",
- help='Model name')
- parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint")
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- hparams.cont = args.cont
- return hparams
-
-
-def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True):
- """Freeing up space by deleting saved ckpts
-
- Arguments:
- path_to_models -- Path to the model directory
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
- sort_by_time -- True -> chronologically delete ckpts
- False -> lexicographically delete ckpts
- """
- import re
- ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))]
-    name_key = (lambda _f: int(re.compile(r'._(\d+)\.pth').match(_f).group(1)))
- time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f)))
- sort_key = time_key if sort_by_time else name_key
- x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')],
- key=sort_key)
- to_del = [os.path.join(path_to_models, fn) for fn in
- (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])]
- del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}")
- del_routine = lambda x: [os.remove(x), del_info(x)]
- rs = [del_routine(fn) for fn in to_del]
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
-            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
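A small sketch of how the HParams container above behaves with a hypothetical config dict: nested dicts are wrapped recursively, and values are reachable both as attributes and as keys:

    config = {"train": {"batch_size": 16, "epochs": 100}, "model_dir": "./logs/demo"}
    hps = HParams(**config)
    print(hps.train.batch_size)   # 16
    print(hps["model_dir"])       # ./logs/demo
    print("train" in hps)         # True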
diff --git a/spaces/Yuliang/ECON/lib/dataset/NormalModule.py b/spaces/Yuliang/ECON/lib/dataset/NormalModule.py
deleted file mode 100644
index 16dd02ec26789d40715b24b67f371da45aff2f8f..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ECON/lib/dataset/NormalModule.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
-# holder of all proprietary rights on this computer program.
-# You can only use this computer program if you have closed
-# a license agreement with MPG or you get the right to use the computer
-# program from someone who is authorized to grant you that right.
-# Any use of the computer program without a valid license is prohibited and
-# liable to prosecution.
-#
-# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
-# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
-# for Intelligent Systems. All rights reserved.
-#
-# Contact: ps-license@tuebingen.mpg.de
-
-# pytorch lightning related libs
-import pytorch_lightning as pl
-from torch.utils.data import DataLoader
-
-from lib.dataset.NormalDataset import NormalDataset
-
-
-class NormalModule(pl.LightningDataModule):
- def __init__(self, cfg):
- super(NormalModule, self).__init__()
- self.cfg = cfg
-
- self.batch_size = self.cfg.batch_size
-
- self.data_size = {}
-
- def prepare_data(self):
-
- pass
-
- def setup(self, stage):
-
- self.train_dataset = NormalDataset(cfg=self.cfg, split="train")
- self.val_dataset = NormalDataset(cfg=self.cfg, split="val")
- self.test_dataset = NormalDataset(cfg=self.cfg, split="test")
-
- self.data_size = {
- "train": len(self.train_dataset),
- "val": len(self.val_dataset),
- }
-
- def train_dataloader(self):
-
- train_data_loader = DataLoader(
- self.train_dataset,
- batch_size=self.batch_size,
- shuffle=True,
- num_workers=self.cfg.num_threads,
- pin_memory=True,
- )
-
- return train_data_loader
-
- def val_dataloader(self):
-
- val_data_loader = DataLoader(
- self.val_dataset,
- batch_size=self.batch_size,
- shuffle=False,
- num_workers=self.cfg.num_threads,
- pin_memory=True,
- )
-
- return val_data_loader
-
-    def test_dataloader(self):
-
- test_data_loader = DataLoader(
- self.test_dataset,
- batch_size=1,
- shuffle=False,
- num_workers=self.cfg.num_threads,
- pin_memory=True,
- )
-
- return test_data_loader
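A minimal sketch of wiring this data module into a training run; the trainer settings and the `model` object are placeholders, and `cfg` is assumed to provide the `batch_size` and `num_threads` fields used above:

    import pytorch_lightning as pl

    datamodule = NormalModule(cfg)
    trainer = pl.Trainer(max_epochs=10)
    trainer.fit(model, datamodule=datamodule)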
diff --git a/spaces/Yusin/docker_test/main.py b/spaces/Yusin/docker_test/main.py
deleted file mode 100644
index 11d1e04c3c9836d2d17adcfc79e0f8d560b89b14..0000000000000000000000000000000000000000
--- a/spaces/Yusin/docker_test/main.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import requests
-'''
-import os
-session_token = os.environ.get('SessionToken')
-conversation_id = os.environ.get('conversation_id')
-from revChatGPT.ChatGPT import Chatbot
-chatbot = Chatbot({"session_token": session_token}) # You can start a custom conversation
-
-
-import undetected_chromedriver.v2 as uc
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.common.by import By
-
-
-def get_element_or_none(driver, xpath, wait=None):
- try:
- if wait is None:
- return driver.find_element(By.XPATH, xpath)
- else:
- return WebDriverWait(driver, wait).until(
- EC.presence_of_element_located((By.XPATH, xpath)))
- except:
- return None
-
-
-def run():
- print("Welcome to game of Tom and Jerry. Here Cloudflare is the cat, Jerry is the Programmer. Our Goal as a good Jerry is to trick Cloudflare.")
-
- options = uc.ChromeOptions()
- options.arguments.extend(
- ["--no-sandbox", "--disable-setuid-sandbox"])
- print("Creating Driver...")
- driver = uc.Chrome(
- options=options
- )
- print("Created Driver...")
-
- driver.get('https://nowsecure.nl')
-
- element = get_element_or_none(driver, "/html/body/div[2]/div/main/h1", 20)
- if element is not None:
- print("We defeated Cloudflare, 🎉🥳 :)")
- else:
- print("Cloudflare defeated us :(, No woory we will try again. ")
- driver.quit()
-'''
-
-if __name__ == "__main__":
- #run()
- headers = {'Authorization': 'yusin'}
- data = {"content": 'am I stupid'}
- response = requests.post('http://93.56.204.222:7788/api/ask', headers=headers, json=data)
- print('this is my answear', response.text)
\ No newline at end of file
diff --git a/spaces/abdvl/datahub_qa_bot/docs/townhalls.md b/spaces/abdvl/datahub_qa_bot/docs/townhalls.md
deleted file mode 100644
index f9c3bb16150cd8b9cd52510d5e2180540fc857d8..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/townhalls.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# DataHub Town Halls
-
-We hold regular virtual town hall meetings to meet with the DataHub community.
-Currently it's held on the fourth Thursday of every month (with some exceptions such as holiday weekends).
-It's the perfect venue to meet the team behind DataHub and other users, as well as to ask higher-level questions, such as roadmap and product direction.
-From time to time we also use the opportunity to showcase upcoming features.
-
-## Meeting Invite & Agenda
-
-You can join with this link https://zoom.datahubproject.io, or [RSVP](https://rsvp.datahubproject.io/) to get a calendar invite - this will always have the most up-to-date agenda for upcoming sessions.
-
-## Past Meetings
-
-See [Town Hall History](townhall-history.md) for recordings of past town halls.
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py
deleted file mode 100644
index fa9df39bc9f3d8d568361e7250ab35468f2b74e0..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/registry.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import inspect
-import warnings
-from functools import partial
-
-from .misc import is_seq_of
-
-
-def build_from_cfg(cfg, registry, default_args=None):
- """Build a module from config dict.
-
- Args:
- cfg (dict): Config dict. It should at least contain the key "type".
- registry (:obj:`Registry`): The registry to search the type from.
- default_args (dict, optional): Default initialization arguments.
-
- Returns:
- object: The constructed object.
- """
- if not isinstance(cfg, dict):
- raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
- if 'type' not in cfg:
- if default_args is None or 'type' not in default_args:
- raise KeyError(
- '`cfg` or `default_args` must contain the key "type", '
- f'but got {cfg}\n{default_args}')
- if not isinstance(registry, Registry):
- raise TypeError('registry must be an mmcv.Registry object, '
- f'but got {type(registry)}')
- if not (isinstance(default_args, dict) or default_args is None):
- raise TypeError('default_args must be a dict or None, '
- f'but got {type(default_args)}')
-
- args = cfg.copy()
-
- if default_args is not None:
- for name, value in default_args.items():
- args.setdefault(name, value)
-
- obj_type = args.pop('type')
- if isinstance(obj_type, str):
- obj_cls = registry.get(obj_type)
- if obj_cls is None:
- raise KeyError(
- f'{obj_type} is not in the {registry.name} registry')
- elif inspect.isclass(obj_type):
- obj_cls = obj_type
- else:
- raise TypeError(
- f'type must be a str or valid type, but got {type(obj_type)}')
- try:
- return obj_cls(**args)
- except Exception as e:
- # Normal TypeError does not print class name.
- raise type(e)(f'{obj_cls.__name__}: {e}')
-
-
-class Registry:
- """A registry to map strings to classes.
-
- Registered object could be built from registry.
- Example:
- >>> MODELS = Registry('models')
- >>> @MODELS.register_module()
- >>> class ResNet:
- >>> pass
- >>> resnet = MODELS.build(dict(type='ResNet'))
-
- Please refer to
- https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
- advanced usage.
-
- Args:
- name (str): Registry name.
-        build_func(func, optional): Build function to construct instance from
-            Registry. :func:`build_from_cfg` is used if neither ``parent`` nor
-            ``build_func`` is specified. If ``parent`` is specified and
- ``build_func`` is not given, ``build_func`` will be inherited
- from ``parent``. Default: None.
- parent (Registry, optional): Parent registry. The class registered in
- children registry could be built from parent. Default: None.
- scope (str, optional): The scope of registry. It is the key to search
- for children registry. If not specified, scope will be the name of
- the package where class is defined, e.g. mmdet, mmcls, mmseg.
- Default: None.
- """
-
- def __init__(self, name, build_func=None, parent=None, scope=None):
- self._name = name
- self._module_dict = dict()
- self._children = dict()
- self._scope = self.infer_scope() if scope is None else scope
-
- # self.build_func will be set with the following priority:
- # 1. build_func
- # 2. parent.build_func
- # 3. build_from_cfg
- if build_func is None:
- if parent is not None:
- self.build_func = parent.build_func
- else:
- self.build_func = build_from_cfg
- else:
- self.build_func = build_func
- if parent is not None:
- assert isinstance(parent, Registry)
- parent._add_children(self)
- self.parent = parent
- else:
- self.parent = None
-
- def __len__(self):
- return len(self._module_dict)
-
- def __contains__(self, key):
- return self.get(key) is not None
-
- def __repr__(self):
- format_str = self.__class__.__name__ + \
- f'(name={self._name}, ' \
- f'items={self._module_dict})'
- return format_str
-
- @staticmethod
- def infer_scope():
- """Infer the scope of registry.
-
- The name of the package where registry is defined will be returned.
-
- Example:
- # in mmdet/models/backbone/resnet.py
- >>> MODELS = Registry('models')
- >>> @MODELS.register_module()
- >>> class ResNet:
- >>> pass
- The scope of ``ResNet`` will be ``mmdet``.
-
-
- Returns:
- scope (str): The inferred scope name.
- """
-        # inspect.stack() traces where this function is called; index 2
-        # indicates the frame where `infer_scope()` is called
- filename = inspect.getmodule(inspect.stack()[2][0]).__name__
- split_filename = filename.split('.')
- return split_filename[0]
-
- @staticmethod
- def split_scope_key(key):
- """Split scope and key.
-
- The first scope will be split from key.
-
- Examples:
- >>> Registry.split_scope_key('mmdet.ResNet')
- 'mmdet', 'ResNet'
- >>> Registry.split_scope_key('ResNet')
- None, 'ResNet'
-
- Return:
- scope (str, None): The first scope.
- key (str): The remaining key.
- """
- split_index = key.find('.')
- if split_index != -1:
- return key[:split_index], key[split_index + 1:]
- else:
- return None, key
-
- @property
- def name(self):
- return self._name
-
- @property
- def scope(self):
- return self._scope
-
- @property
- def module_dict(self):
- return self._module_dict
-
- @property
- def children(self):
- return self._children
-
- def get(self, key):
- """Get the registry record.
-
- Args:
- key (str): The class name in string format.
-
- Returns:
- class: The corresponding class.
- """
- scope, real_key = self.split_scope_key(key)
- if scope is None or scope == self._scope:
- # get from self
- if real_key in self._module_dict:
- return self._module_dict[real_key]
- else:
- # get from self._children
- if scope in self._children:
- return self._children[scope].get(real_key)
- else:
- # goto root
- parent = self.parent
- while parent.parent is not None:
- parent = parent.parent
- return parent.get(key)
-
- def build(self, *args, **kwargs):
- return self.build_func(*args, **kwargs, registry=self)
-
- def _add_children(self, registry):
- """Add children for a registry.
-
- The ``registry`` will be added as children based on its scope.
- The parent registry could build objects from children registry.
-
- Example:
- >>> models = Registry('models')
- >>> mmdet_models = Registry('models', parent=models)
- >>> @mmdet_models.register_module()
- >>> class ResNet:
- >>> pass
- >>> resnet = models.build(dict(type='mmdet.ResNet'))
- """
-
- assert isinstance(registry, Registry)
- assert registry.scope is not None
- assert registry.scope not in self.children, \
- f'scope {registry.scope} exists in {self.name} registry'
- self.children[registry.scope] = registry
-
- def _register_module(self, module_class, module_name=None, force=False):
- if not inspect.isclass(module_class):
- raise TypeError('module must be a class, '
- f'but got {type(module_class)}')
-
- if module_name is None:
- module_name = module_class.__name__
- if isinstance(module_name, str):
- module_name = [module_name]
- for name in module_name:
- if not force and name in self._module_dict:
- raise KeyError(f'{name} is already registered '
- f'in {self.name}')
- self._module_dict[name] = module_class
-
- def deprecated_register_module(self, cls=None, force=False):
- warnings.warn(
- 'The old API of register_module(module, force=False) '
- 'is deprecated and will be removed, please use the new API '
- 'register_module(name=None, force=False, module=None) instead.')
- if cls is None:
- return partial(self.deprecated_register_module, force=force)
- self._register_module(cls, force=force)
- return cls
-
- def register_module(self, name=None, force=False, module=None):
- """Register a module.
-
- A record will be added to `self._module_dict`, whose key is the class
- name or the specified name, and value is the class itself.
- It can be used as a decorator or a normal function.
-
- Example:
- >>> backbones = Registry('backbone')
- >>> @backbones.register_module()
- >>> class ResNet:
- >>> pass
-
- >>> backbones = Registry('backbone')
- >>> @backbones.register_module(name='mnet')
- >>> class MobileNet:
- >>> pass
-
- >>> backbones = Registry('backbone')
- >>> class ResNet:
- >>> pass
- >>> backbones.register_module(ResNet)
-
- Args:
- name (str | None): The module name to be registered. If not
- specified, the class name will be used.
- force (bool, optional): Whether to override an existing class with
- the same name. Default: False.
- module (type): Module class to be registered.
- """
- if not isinstance(force, bool):
- raise TypeError(f'force must be a boolean, but got {type(force)}')
-        # NOTE: This is a workaround to stay compatible with the old API,
- # while it may introduce unexpected bugs.
- if isinstance(name, type):
- return self.deprecated_register_module(name, force=force)
-
- # raise the error ahead of time
- if not (name is None or isinstance(name, str) or is_seq_of(name, str)):
- raise TypeError(
- 'name must be either of None, an instance of str or a sequence'
- f' of str, but got {type(name)}')
-
- # use it as a normal method: x.register_module(module=SomeClass)
- if module is not None:
- self._register_module(
- module_class=module, module_name=name, force=force)
- return module
-
- # use it as a decorator: @x.register_module()
- def _register(cls):
- self._register_module(
- module_class=cls, module_name=name, force=force)
- return cls
-
- return _register
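Pulling the pieces above together, a short sketch of the registry round trip (the Backbone class and its arguments are illustrative only):

    BACKBONES = Registry('backbone')

    @BACKBONES.register_module()
    class Backbone:
        def __init__(self, depth, in_channels=3):
            self.depth = depth
            self.in_channels = in_channels

    net = BACKBONES.build(dict(type='Backbone', depth=50))       # via the default build_from_cfg
    net2 = build_from_cfg(dict(type='Backbone', depth=18), BACKBONES,
                          default_args=dict(in_channels=1))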
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py
deleted file mode 100644
index 540bf23c5c34ce8bed08eefb944f7c022ee5d3c6..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/media/codecs/ffmpeg_lib/libavutil.py
+++ /dev/null
@@ -1,240 +0,0 @@
-"""Wrapper for include/libavutil/avutil.h
-"""
-from ctypes import c_char_p, c_void_p, POINTER, Structure
-from ctypes import c_int, c_int64, c_uint64
-from ctypes import c_uint8, c_int8, c_uint, c_size_t
-
-import pyglet.lib
-from pyglet.util import debug_print
-from . import compat
-
-_debug = debug_print('debug_media')
-
-avutil = pyglet.lib.load_library(
- 'avutil',
- win32=('avutil-57', 'avutil-56'),
- darwin=('avutil.57', 'avutil-56')
-)
-
-avutil.avutil_version.restype = c_int
-compat.set_version('avutil', avutil.avutil_version() >> 16)
-
-AVMEDIA_TYPE_UNKNOWN = -1
-AVMEDIA_TYPE_VIDEO = 0
-AVMEDIA_TYPE_AUDIO = 1
-AVMEDIA_TYPE_DATA = 2
-AVMEDIA_TYPE_SUBTITLE = 3
-AVMEDIA_TYPE_ATTACHMENT = 4
-AVMEDIA_TYPE_NB = 5
-
-AV_SAMPLE_FMT_U8 = 0
-AV_SAMPLE_FMT_S16 = 1
-AV_SAMPLE_FMT_S32 = 2
-AV_SAMPLE_FMT_FLT = 3
-AV_SAMPLE_FORMAT_DOUBLE = 4
-AV_SAMPLE_FMT_U8P = 5
-AV_SAMPLE_FMT_S16P = 6
-AV_SAMPLE_FMT_S32P = 7
-AV_SAMPLE_FMT_FLTP = 8
-AV_SAMPLE_FMT_DBLP = 9
-AV_SAMPLE_FMT_S64 = 10
-AV_SAMPLE_FMT_S64P = 11
-
-AV_NUM_DATA_POINTERS = 8
-
-AV_PIX_FMT_RGB24 = 2
-AV_PIX_FMT_ARGB = 25
-AV_PIX_FMT_RGBA = 26
-
-AVChannelOrder = c_int
-class AVChannelLayout(Structure):
- _fields_ = [
- ('order', c_int),
- ('nb_channels', c_int),
- # .. more
- ]
-class AVBuffer(Structure):
- _fields_ = [
- ('data', POINTER(c_uint8)),
- ('size', c_int),
- # .. more
- ]
-
-
-class AVBufferRef(Structure):
- _fields_ = [
- ('buffer', POINTER(AVBuffer)),
- ('data', POINTER(c_uint8)),
- ('size', c_int)
- ]
-
-
-class AVDictionaryEntry(Structure):
- _fields_ = [
- ('key', c_char_p),
- ('value', c_char_p)
- ]
-
-
-class AVDictionary(Structure):
- _fields_ = [
- ('count', c_int),
- ('elems', POINTER(AVDictionaryEntry))
- ]
-
-
-class AVClass(Structure):
- pass
-
-
-class AVRational(Structure):
- _fields_ = [
- ('num', c_int),
- ('den', c_int)
- ]
-
- def __repr__(self):
- return f"AVRational({self.num}/{self.den})"
-
-
-class AVFrameSideData(Structure):
- pass
-
-
-class AVFrame(Structure):
- pass
-
-AVFrame_Fields = [
- ('data', POINTER(c_uint8) * AV_NUM_DATA_POINTERS),
- ('linesize', c_int * AV_NUM_DATA_POINTERS),
- ('extended_data', POINTER(POINTER(c_uint8))),
- ('width', c_int),
- ('height', c_int),
- ('nb_samples', c_int),
- ('format', c_int),
- ('key_frame', c_int),
- ('pict_type', c_int),
- ('sample_aspect_ratio', AVRational),
- ('pts', c_int64),
- ('pkt_pts', c_int64), # Deprecated. Removed in 57.
- ('pkt_dts', c_int64),
- ('time_base', AVRational), # (5.x)
- ('coded_picture_number', c_int),
- ('display_picture_number', c_int),
- ('quality', c_int),
- ('opaque', c_void_p),
- ('error', c_uint64 * AV_NUM_DATA_POINTERS), # Deprecated. Removed in 57.
- ('repeat_pict', c_int),
- ('interlaced_frame', c_int),
- ('top_field_first', c_int),
- ('palette_has_changed', c_int),
- ('reordered_opaque', c_int64),
- ('sample_rate', c_int),
- ('channel_layout', c_uint64),
- ('buf', POINTER(AVBufferRef) * AV_NUM_DATA_POINTERS),
- ('extended_buf', POINTER(POINTER(AVBufferRef))),
- ('nb_extended_buf', c_int),
- ('side_data', POINTER(POINTER(AVFrameSideData))),
- ('nb_side_data', c_int),
- ('flags', c_int),
- ('color_range', c_int),
- ('color_primaries', c_int),
- ('color_trc', c_int),
- ('colorspace', c_int),
- ('chroma_location', c_int),
- ('best_effort_timestamp', c_int64),
- ('pkt_pos', c_int64),
- ('pkt_duration', c_int64),
- # !
- ('metadata', POINTER(AVDictionary)),
- ('decode_error_flags', c_int),
- ('channels', c_int),
- ('pkt_size', c_int),
- ('qscale_table', POINTER(c_int8)), # Deprecated. Removed in 57.
- ('qstride', c_int), # Deprecated. Removed in 57.
- ('qscale_type', c_int), # Deprecated. Removed in 57.
- ('qp_table_buf', POINTER(AVBufferRef)), # Deprecated. Removed in 57.
- ('hw_frames_ctx', POINTER(AVBufferRef)),
- ('opaque_ref', POINTER(AVBufferRef)),
- ('crop_top', c_size_t), # video frames only
- ('crop_bottom', c_size_t), # video frames only
- ('crop_left', c_size_t), # video frames only
- ('crop_right', c_size_t), # video frames only
- ('private_ref', POINTER(AVBufferRef)),
-]
-
-compat.add_version_changes('avutil', 56, AVFrame, AVFrame_Fields,
- removals=('time_base',))
-
-compat.add_version_changes('avutil', 57, AVFrame, AVFrame_Fields,
- removals=('pkt_pts', 'error', 'qscale_table', 'qstride', 'qscale_type', 'qp_table_buf'))
-
-AV_NOPTS_VALUE = -0x8000000000000000
-AV_TIME_BASE = 1000000
-AV_TIME_BASE_Q = AVRational(1, AV_TIME_BASE)
-
-avutil.av_version_info.restype = c_char_p
-avutil.av_dict_get.restype = POINTER(AVDictionaryEntry)
-avutil.av_dict_get.argtypes = [POINTER(AVDictionary),
- c_char_p, POINTER(AVDictionaryEntry),
- c_int]
-avutil.av_rescale_q.restype = c_int64
-avutil.av_rescale_q.argtypes = [c_int64, AVRational, AVRational]
-avutil.av_samples_get_buffer_size.restype = c_int
-avutil.av_samples_get_buffer_size.argtypes = [POINTER(c_int),
- c_int, c_int, c_int]
-avutil.av_frame_alloc.restype = POINTER(AVFrame)
-avutil.av_frame_free.argtypes = [POINTER(POINTER(AVFrame))]
-avutil.av_get_default_channel_layout.restype = c_int64
-avutil.av_get_default_channel_layout.argtypes = [c_int]
-avutil.av_get_bytes_per_sample.restype = c_int
-avutil.av_get_bytes_per_sample.argtypes = [c_int]
-avutil.av_strerror.restype = c_int
-avutil.av_strerror.argtypes = [c_int, c_char_p, c_size_t]
-
-avutil.av_image_fill_arrays.restype = c_int
-avutil.av_image_fill_arrays.argtypes = [POINTER(c_uint8) * 4, c_int * 4,
- POINTER(c_uint8), c_int, c_int, c_int, c_int]
-avutil.av_dict_set.restype = c_int
-avutil.av_dict_set.argtypes = [POINTER(POINTER(AVDictionary)),
- c_char_p, c_char_p, c_int]
-avutil.av_dict_free.argtypes = [POINTER(POINTER(AVDictionary))]
-avutil.av_log_set_level.restype = c_int
-avutil.av_log_set_level.argtypes = [c_uint]
-avutil.av_malloc.restype = c_void_p
-avutil.av_malloc.argtypes = [c_int]
-avutil.av_freep.restype = c_void_p
-avutil.av_freep.argtypes = [c_void_p]
-
-__all__ = [
- 'avutil',
- 'AVMEDIA_TYPE_UNKNOWN',
- 'AVMEDIA_TYPE_VIDEO',
- 'AVMEDIA_TYPE_AUDIO',
- 'AVMEDIA_TYPE_DATA',
- 'AVMEDIA_TYPE_SUBTITLE',
- 'AVMEDIA_TYPE_ATTACHMENT',
- 'AVMEDIA_TYPE_NB',
- 'AV_SAMPLE_FMT_U8',
- 'AV_SAMPLE_FMT_S16',
- 'AV_SAMPLE_FMT_S32',
- 'AV_SAMPLE_FMT_FLT',
- 'AV_SAMPLE_FORMAT_DOUBLE',
- 'AV_SAMPLE_FMT_U8P',
- 'AV_SAMPLE_FMT_S16P',
- 'AV_SAMPLE_FMT_S32P',
- 'AV_SAMPLE_FMT_FLTP',
- 'AV_SAMPLE_FMT_DBLP',
- 'AV_SAMPLE_FMT_S64',
- 'AV_SAMPLE_FMT_S64P',
- 'AV_NUM_DATA_POINTERS',
- 'AV_PIX_FMT_RGB24',
- 'AV_PIX_FMT_ARGB',
- 'AV_PIX_FMT_RGBA',
- 'AV_NOPTS_VALUE',
- 'AV_TIME_BASE',
- 'AV_TIME_BASE_Q',
- 'AVFrame',
- 'AVRational',
- 'AVDictionary',
-]
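As a usage sketch, the prototypes declared above are typically exercised like this when rescaling a stream timestamp into AV_TIME_BASE units (the numbers are purely illustrative):

    pts = 90000                       # timestamp in the stream's own time base
    stream_tb = AVRational(1, 90000)
    usec = avutil.av_rescale_q(pts, stream_tb, AV_TIME_BASE_Q)
    seconds = usec / AV_TIME_BASE     # 1.0 for this example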
diff --git a/spaces/active-learning/webhook/main.py b/spaces/active-learning/webhook/main.py
deleted file mode 100644
index 70c965ce2c01d7231cba861ff662ad69dae8ac04..0000000000000000000000000000000000000000
--- a/spaces/active-learning/webhook/main.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import os
-
-from fastapi import FastAPI, Request, Response
-import numpy as np
-from tensorflow import keras
-from tensorflow.keras import layers
-import tensorflow as tf
-
-from datasets import load_dataset
-from huggingface_hub import push_to_hub_keras, from_pretrained_keras
-
-KEY = os.environ.get("WEBHOOK_SECRET")
-
-app = FastAPI()
-
-def to_numpy(examples):
- examples["pixel_values"] = [np.array(image.convert('1')) for image in examples["image"]]
- return examples
-
-def preprocess():
- train_dataset = load_dataset("active-learning/labeled_samples")["train"]
- train_dataset = train_dataset.map(to_numpy, batched=True)
-
- test_dataset = load_dataset("active-learning/test_mnist")["test"]
- test_dataset = test_dataset.map(to_numpy, batched=True)
-
- x_train = train_dataset["pixel_values"]
- y_train = train_dataset["label"]
-
- x_test = test_dataset["pixel_values"]
- y_test = test_dataset["label"]
-
- x_train = np.expand_dims(x_train, -1)
- x_test = np.expand_dims(x_test, -1)
-
- num_classes = 10
-
- y_train = keras.utils.to_categorical(y_train, num_classes)
- y_test = keras.utils.to_categorical(y_test, num_classes)
-
- return x_train, y_train, x_test, y_test
-
-def train():
- input_shape = (28, 28, 1)
- x_train, y_train, x_test, y_test = preprocess()
- num_classes = 10
-
- model = keras.Sequential(
- [
- keras.Input(shape=input_shape),
- layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
- layers.MaxPooling2D(pool_size=(2, 2)),
- layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
- layers.MaxPooling2D(pool_size=(2, 2)),
- layers.Flatten(),
- layers.Dropout(0.5),
- layers.Dense(num_classes, activation="softmax"),
- ]
- )
-
- model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
- model.fit(x_train, y_train, batch_size=128, epochs=15, validation_split=0.1)
-
- score = model.evaluate(x_test, y_test, verbose=0)
- print("Test loss:", score[0])
- print("Test accuracy:", score[1])
-
- push_to_hub_keras(model, "active-learning/mnist_classifier")
-
-def find_samples_to_label():
- loaded_model = from_pretrained_keras("active-learning/mnist_classifier")
- loaded_model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
-
- unlabeled_data = load_dataset("active-learning/unlabeled_samples")["train"]
- processed_data = unlabeled_data.map(to_numpy, batched=True)
- processed_data = processed_data["pixel_values"]
- processed_data = tf.expand_dims(processed_data, -1)
-
- # Get all predictions
- # And then get the 5 samples with the lowest prediction score
- preds = loaded_model.predict(processed_data)
- top_pred_confs = 1 - np.max(preds, axis=1)
- idx_to_label = np.argpartition(top_pred_confs, -5)[-5:]
-
- # Upload samples to the dataset to label
- to_label_data = unlabeled_data.select(idx_to_label)
- to_label_data.push_to_hub("active-learning/to_label_samples")
-
- # Remove from the pool of samples
- unlabeled_data = unlabeled_data.select(
- (
- i for i in range(len(unlabeled_data))
- if i not in set(idx_to_label)
- )
- )
- unlabeled_data.push_to_hub("active-learning/unlabeled_samples")
-
-@app.get("/")
-def read_root():
- data = """
- Active Learning Trainer
-    This is a demo app showing how to use webhooks to do Active Learning.
- """
- return Response(content=data, media_type="text/html")
-
-@app.post("/webhook")
-async def webhook(request: Request):
- print("Received request")
- if request.headers.get("X-Webhook-Secret") is None:
- return Response("No secret", status_code=401)
- if request.headers.get("X-Webhook-Secret") != KEY:
- return Response("Invalid secret", status_code=401)
- data = await request.json()
- print("Webhook received!")
- train()
- find_samples_to_label()
- return "Model trained!"
-
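A sketch of exercising this endpoint manually while testing; the host, port, secret and payload are placeholders, but the header name matches the check in the handler above:

    import requests

    resp = requests.post(
        "http://localhost:7860/webhook",
        headers={"X-Webhook-Secret": "my-secret"},
        json={"event": {"action": "update"}},
    )
    print(resp.status_code, resp.text)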
diff --git a/spaces/adirik/ChangeIt/header.html b/spaces/adirik/ChangeIt/header.html
deleted file mode 100644
index 90d8fa50f1438180dcdfdc6a341028a2486ec53e..0000000000000000000000000000000000000000
--- a/spaces/adirik/ChangeIt/header.html
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
- Change It!
-
-
-
-
- Change it! Upload a source image, input which clothing item/s you would like to change with text (e.g. "t-shirt") and upload an example image of what you'd like to replace it with.
-
-
- This demo is built using CLIPSeg and Paint by example .
-
-
You can skip the queue by duplicating this space and upgrading to gpu in settings:
-
-
\ No newline at end of file
diff --git a/spaces/ajayhk/colorize/README.md b/spaces/ajayhk/colorize/README.md
deleted file mode 100644
index 2d6800aff5aa36082337b42b6d4b78b8ebd9e987..0000000000000000000000000000000000000000
--- a/spaces/ajayhk/colorize/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Colorize
-emoji: 🌟
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 3.4
-app_file: start.py
-pinned: false
-license: mit
----
\ No newline at end of file
diff --git a/spaces/akhaliq/GPEN/face_detect/utils/nms/__init__.py b/spaces/akhaliq/GPEN/face_detect/utils/nms/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py b/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py
deleted file mode 100644
index 1a8fb27161f9c1fd3e37b14654dfe05eaadf619c..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/SwinIR/utils/util_calculate_psnr_ssim.py
+++ /dev/null
@@ -1,346 +0,0 @@
-import cv2
-import numpy as np
-import torch
-
-
-def calculate_psnr(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
- """Calculate PSNR (Peak Signal-to-Noise Ratio).
-
- Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
-
- Args:
- img1 (ndarray): Images with range [0, 255].
- img2 (ndarray): Images with range [0, 255].
- crop_border (int): Cropped pixels in each edge of an image. These
- pixels are not involved in the PSNR calculation.
- input_order (str): Whether the input order is 'HWC' or 'CHW'.
- Default: 'HWC'.
- test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
-
- Returns:
- float: psnr result.
- """
-
-    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
- if input_order not in ['HWC', 'CHW']:
- raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
- img1 = reorder_image(img1, input_order=input_order)
- img2 = reorder_image(img2, input_order=input_order)
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
-
- if crop_border != 0:
- img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
- img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
-
- if test_y_channel:
- img1 = to_y_channel(img1)
- img2 = to_y_channel(img2)
-
- mse = np.mean((img1 - img2) ** 2)
- if mse == 0:
- return float('inf')
- return 20. * np.log10(255. / np.sqrt(mse))
-
-
-def _ssim(img1, img2):
- """Calculate SSIM (structural similarity) for one channel images.
-
- It is called by func:`calculate_ssim`.
-
- Args:
- img1 (ndarray): Images with range [0, 255] with order 'HWC'.
- img2 (ndarray): Images with range [0, 255] with order 'HWC'.
-
- Returns:
- float: ssim result.
- """
-
- C1 = (0.01 * 255) ** 2
- C2 = (0.03 * 255) ** 2
-
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
- kernel = cv2.getGaussianKernel(11, 1.5)
- window = np.outer(kernel, kernel.transpose())
-
- mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
- mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
- mu1_sq = mu1 ** 2
- mu2_sq = mu2 ** 2
- mu1_mu2 = mu1 * mu2
- sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
- sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
- sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
-
- ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
- return ssim_map.mean()
-
-
-def calculate_ssim(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
- """Calculate SSIM (structural similarity).
-
- Ref:
- Image quality assessment: From error visibility to structural similarity
-
- The results are the same as that of the official released MATLAB code in
- https://ece.uwaterloo.ca/~z70wang/research/ssim/.
-
- For three-channel images, SSIM is calculated for each channel and then
- averaged.
-
- Args:
- img1 (ndarray): Images with range [0, 255].
- img2 (ndarray): Images with range [0, 255].
- crop_border (int): Cropped pixels in each edge of an image. These
- pixels are not involved in the SSIM calculation.
- input_order (str): Whether the input order is 'HWC' or 'CHW'.
- Default: 'HWC'.
- test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
-
- Returns:
- float: ssim result.
- """
-
-    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
- if input_order not in ['HWC', 'CHW']:
- raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
- img1 = reorder_image(img1, input_order=input_order)
- img2 = reorder_image(img2, input_order=input_order)
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
-
- if crop_border != 0:
- img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
- img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
-
- if test_y_channel:
- img1 = to_y_channel(img1)
- img2 = to_y_channel(img2)
-
- ssims = []
- for i in range(img1.shape[2]):
- ssims.append(_ssim(img1[..., i], img2[..., i]))
- return np.array(ssims).mean()
-
-
-def _blocking_effect_factor(im):
- block_size = 8
-
- block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)
- block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)
-
- horizontal_block_difference = (
- (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(
- 3).sum(2).sum(1)
- vertical_block_difference = (
- (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(
- 2).sum(1)
-
- nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)
- nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)
-
- horizontal_nonblock_difference = (
- (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(
- 3).sum(2).sum(1)
- vertical_nonblock_difference = (
- (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(
- 3).sum(2).sum(1)
-
- n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)
- n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)
- boundary_difference = (horizontal_block_difference + vertical_block_difference) / (
- n_boundary_horiz + n_boundary_vert)
-
- n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz
- n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert
- nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (
- n_nonboundary_horiz + n_nonboundary_vert)
-
- scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))
- bef = scaler * (boundary_difference - nonboundary_difference)
-
- bef[boundary_difference <= nonboundary_difference] = 0
- return bef
-
-
-def calculate_psnrb(img1, img2, crop_border, input_order='HWC', test_y_channel=False):
- """Calculate PSNR-B (Peak Signal-to-Noise Ratio).
-
- Ref: Quality assessment of deblocked images, for JPEG image deblocking evaluation
- # https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
-
- Args:
- img1 (ndarray): Images with range [0, 255].
- img2 (ndarray): Images with range [0, 255].
- crop_border (int): Cropped pixels in each edge of an image. These
- pixels are not involved in the PSNR calculation.
- input_order (str): Whether the input order is 'HWC' or 'CHW'.
- Default: 'HWC'.
- test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
-
- Returns:
- float: psnr result.
- """
-
-    assert img1.shape == img2.shape, (f'Image shapes are different: {img1.shape}, {img2.shape}.')
- if input_order not in ['HWC', 'CHW']:
- raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' '"HWC" and "CHW"')
- img1 = reorder_image(img1, input_order=input_order)
- img2 = reorder_image(img2, input_order=input_order)
- img1 = img1.astype(np.float64)
- img2 = img2.astype(np.float64)
-
- if crop_border != 0:
- img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
- img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
-
- if test_y_channel:
- img1 = to_y_channel(img1)
- img2 = to_y_channel(img2)
-
- # follow https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py
- img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.
- img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.
-
- total = 0
- for c in range(img1.shape[1]):
- mse = torch.nn.functional.mse_loss(img1[:, c:c + 1, :, :], img2[:, c:c + 1, :, :], reduction='none')
- bef = _blocking_effect_factor(img1[:, c:c + 1, :, :])
-
- mse = mse.view(mse.shape[0], -1).mean(1)
- total += 10 * torch.log10(1 / (mse + bef))
-
- return float(total) / img1.shape[1]
-
-
-def reorder_image(img, input_order='HWC'):
- """Reorder images to 'HWC' order.
-
- If the input_order is (h, w), return (h, w, 1);
- If the input_order is (c, h, w), return (h, w, c);
- If the input_order is (h, w, c), return as it is.
-
- Args:
- img (ndarray): Input image.
- input_order (str): Whether the input order is 'HWC' or 'CHW'.
- If the input image shape is (h, w), input_order will not have
- effects. Default: 'HWC'.
-
- Returns:
- ndarray: reordered image.
- """
-
- if input_order not in ['HWC', 'CHW']:
- raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are ' "'HWC' and 'CHW'")
- if len(img.shape) == 2:
- img = img[..., None]
- if input_order == 'CHW':
- img = img.transpose(1, 2, 0)
- return img
-
-
-def to_y_channel(img):
- """Change to Y channel of YCbCr.
-
- Args:
- img (ndarray): Images with range [0, 255].
-
- Returns:
- (ndarray): Images with range [0, 255] (float type) without round.
- """
- img = img.astype(np.float32) / 255.
- if img.ndim == 3 and img.shape[2] == 3:
- img = bgr2ycbcr(img, y_only=True)
- img = img[..., None]
- return img * 255.
-
-
-def _convert_input_type_range(img):
- """Convert the type and range of the input image.
-
- It converts the input image to np.float32 type and range of [0, 1].
- It is mainly used for pre-processing the input image in colorspace
-    conversion functions such as rgb2ycbcr and ycbcr2rgb.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
-
- Returns:
- (ndarray): The converted image with type of np.float32 and range of
- [0, 1].
- """
- img_type = img.dtype
- img = img.astype(np.float32)
- if img_type == np.float32:
- pass
- elif img_type == np.uint8:
- img /= 255.
- else:
- raise TypeError('The img type should be np.float32 or np.uint8, ' f'but got {img_type}')
- return img
-
-
-def _convert_output_type_range(img, dst_type):
- """Convert the type and range of the image according to dst_type.
-
- It converts the image to desired type and range. If `dst_type` is np.uint8,
- images will be converted to np.uint8 type with range [0, 255]. If
- `dst_type` is np.float32, it converts the image to np.float32 type with
- range [0, 1].
-    It is mainly used for post-processing images in colorspace conversion
- functions such as rgb2ycbcr and ycbcr2rgb.
-
- Args:
- img (ndarray): The image to be converted with np.float32 type and
- range [0, 255].
- dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
- converts the image to np.uint8 type with range [0, 255]. If
- dst_type is np.float32, it converts the image to np.float32 type
- with range [0, 1].
-
- Returns:
- (ndarray): The converted image with desired type and range.
- """
- if dst_type not in (np.uint8, np.float32):
- raise TypeError('The dst_type should be np.float32 or np.uint8, ' f'but got {dst_type}')
- if dst_type == np.uint8:
- img = img.round()
- else:
- img /= 255.
- return img.astype(dst_type)
-
-
-def bgr2ycbcr(img, y_only=False):
- """Convert a BGR image to YCbCr image.
-
- The bgr version of rgb2ycbcr.
- It implements the ITU-R BT.601 conversion for standard-definition
- television. See more details in
- https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
-
- It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
- In OpenCV, it implements a JPEG conversion. See more details in
- https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
-
- Args:
- img (ndarray): The input image. It accepts:
- 1. np.uint8 type with range [0, 255];
- 2. np.float32 type with range [0, 1].
- y_only (bool): Whether to only return Y channel. Default: False.
-
- Returns:
- ndarray: The converted YCbCr image. The output image has the same type
- and range as input image.
- """
- img_type = img.dtype
- img = _convert_input_type_range(img)
- if y_only:
- out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
- else:
- out_img = np.matmul(
- img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], [65.481, -37.797, 112.0]]) + [16, 128, 128]
- out_img = _convert_output_type_range(out_img, img_type)
- return out_img
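A quick sanity check of the metrics above on synthetic HWC images in the [0, 255] range (identical inputs give PSNR = inf and SSIM = 1; the noise level is arbitrary):

    import numpy as np

    img = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)
    noisy = np.clip(img + np.random.normal(0, 5, img.shape), 0, 255)

    print(calculate_psnr(img, noisy, crop_border=4))
    print(calculate_ssim(img, noisy, crop_border=4, test_y_channel=True))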
diff --git a/spaces/algomuffin/jojo_fork/e4e/criteria/lpips/__init__.py b/spaces/algomuffin/jojo_fork/e4e/criteria/lpips/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py b/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py
deleted file mode 100644
index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000
--- a/spaces/aliabid94/AutoGPT/autogpt/commands/image_gen.py
+++ /dev/null
@@ -1,163 +0,0 @@
-""" Image Generation Module for AutoGPT."""
-import io
-import os.path
-import uuid
-from base64 import b64decode
-
-import openai
-import requests
-from PIL import Image
-
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-CFG = Config()
-
-
-def generate_image(prompt: str, size: int = 256) -> str:
- """Generate an image from a prompt.
-
- Args:
- prompt (str): The prompt to use
- size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
-
- Returns:
- str: The filename of the image
- """
- filename = f"{str(uuid.uuid4())}.jpg"
-
- # DALL-E
- if CFG.image_provider == "dalle":
- return generate_image_with_dalle(prompt, filename, size)
- # HuggingFace
- elif CFG.image_provider == "huggingface":
- return generate_image_with_hf(prompt, filename)
- # SD WebUI
- elif CFG.image_provider == "sdwebui":
- return generate_image_with_sd_webui(prompt, filename, size)
- return "No Image Provider Set"
-
-
-def generate_image_with_hf(prompt: str, filename: str) -> str:
- """Generate an image with HuggingFace's API.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
-
- Returns:
- str: The filename of the image
- """
- API_URL = (
- f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
- )
- if CFG.huggingface_api_token is None:
- raise ValueError(
- "You need to set your Hugging Face API token in the config file."
- )
- headers = {
- "Authorization": f"Bearer {CFG.huggingface_api_token}",
- "X-Use-Cache": "false",
- }
-
- response = requests.post(
- API_URL,
- headers=headers,
- json={
- "inputs": prompt,
- },
- )
-
- image = Image.open(io.BytesIO(response.content))
- print(f"Image Generated for prompt:{prompt}")
-
- image.save(path_in_workspace(filename))
-
- return f"Saved to disk:{filename}"
-
-
-def generate_image_with_dalle(prompt: str, filename: str, size: int = 256) -> str:
- """Generate an image with DALL-E.
-
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
- size (int, optional): The size of the image. Defaults to 256.
-
- Returns:
- str: The filename of the image
- """
- openai.api_key = CFG.openai_api_key
-
- # Check for supported image sizes
- if size not in [256, 512, 1024]:
- closest = min([256, 512, 1024], key=lambda x: abs(x - size))
- print(
- f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
- )
- size = closest
-
- response = openai.Image.create(
- prompt=prompt,
- n=1,
- size=f"{size}x{size}",
- response_format="b64_json",
- )
-
- print(f"Image Generated for prompt:{prompt}")
-
- image_data = b64decode(response["data"][0]["b64_json"])
-
- with open(path_in_workspace(filename), mode="wb") as png:
- png.write(image_data)
-
- return f"Saved to disk:{filename}"
-
-
-def generate_image_with_sd_webui(
- prompt: str,
- filename: str,
- size: int = 512,
- negative_prompt: str = "",
- extra: dict = {},
-) -> str:
- """Generate an image with Stable Diffusion webui.
- Args:
- prompt (str): The prompt to use
- filename (str): The filename to save the image to
- size (int, optional): The size of the image. Defaults to 512.
- negative_prompt (str, optional): The negative prompt to use. Defaults to "".
- extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
- Returns:
- str: The filename of the image
- """
- # Create a session and set the basic auth if needed
- s = requests.Session()
- if CFG.sd_webui_auth:
- username, password = CFG.sd_webui_auth.split(":")
- s.auth = (username, password or "")
-
- # Generate the images
- response = requests.post(
- f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
- json={
- "prompt": prompt,
- "negative_prompt": negative_prompt,
- "sampler_index": "DDIM",
- "steps": 20,
- "cfg_scale": 7.0,
- "width": size,
- "height": size,
- "n_iter": 1,
- **extra,
- },
- )
-
- print(f"Image Generated for prompt:{prompt}")
-
- # Save the image to disk
- response = response.json()
- b64 = b64decode(response["images"][0].split(",", 1)[0])
- image = Image.open(io.BytesIO(b64))
- image.save(path_in_workspace(filename))
-
- return f"Saved to disk:{filename}"
diff --git a/spaces/amagastya/SPARK/chainlit.md b/spaces/amagastya/SPARK/chainlit.md
deleted file mode 100644
index 12bdaa897ed271bb3c827c118ed6bf33a1428d9a..0000000000000000000000000000000000000000
--- a/spaces/amagastya/SPARK/chainlit.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# Welcome to SPARK! ⚡
-
-Hi there! 👋 SPARK is your Smart Prompt Assistant and Resource Knowledgebase. I'm here to help you navigate the exciting world of prompt engineering 💻😊
-Whether you need help setting the context, refining your desired outcome, or encouraging detailed responses, I've got you covered.
-### Data Sources 📚
-SPARK has access to the following sources:
-- **Brex's Prompt Engineering Guide:** [Brex's introduction to language models and prompt engineering](https://github.com/brexhq/prompt-engineering)
-- **promptingguide.ai:** [A prompt engineering guide that demonstrates many techniques](https://www.promptingguide.ai)
-- **OpenAI Cookbook:** [Techniques to improve reliability: A slightly dated (Sep 2022) review of techniques for prompting language models.](https://github.com/openai/openai-cookbook/blob/main/techniques_to_improve_reliability.md)
-- **learnprompting.org:** [An introductory course to prompt engineering](https://learnprompting.org/)
-- **Lil'Log Prompt Engineering:** [An OpenAI researcher's review of the prompt engineering literature (as of March 2023)](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/)
-
-Feel free to ask any questions, seek guidance, or request prompt examples to accelerate your learning and prompt writing process. Let's dive into the fascinating world of prompt engineering and unlock the full potential of AI models together!
\ No newline at end of file
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c b/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c
deleted file mode 100644
index 0436a399ba564993204afd9bef29c11864cc8488..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/win/pa_win_waveformat.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * PortAudio Portable Real-Time Audio Library
- * Windows WAVEFORMAT* data structure utilities
- * portaudio.h should be included before this file.
- *
- * Copyright (c) 2007 Ross Bencina
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-#include <windows.h>
-#include <mmsystem.h>
-#if defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_APP)
-    #include <mmreg.h>    /* for WAVEFORMATEX */
-#endif
-
-#include "portaudio.h"
-#include "pa_win_waveformat.h"
-
-
-#if !defined(WAVE_FORMAT_EXTENSIBLE)
-#define WAVE_FORMAT_EXTENSIBLE 0xFFFE
-#endif
-
-
-static GUID pawin_ksDataFormatSubtypeGuidBase =
- { (USHORT)(WAVE_FORMAT_PCM), 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 };
-
-
-int PaWin_SampleFormatToLinearWaveFormatTag( PaSampleFormat sampleFormat )
-{
- if( sampleFormat == paFloat32 )
- return PAWIN_WAVE_FORMAT_IEEE_FLOAT;
-
- return PAWIN_WAVE_FORMAT_PCM;
-}
-
-
-void PaWin_InitializeWaveFormatEx( PaWinWaveFormat *waveFormat,
- int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate )
-{
- WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat;
- int bytesPerSample = Pa_GetSampleSize(sampleFormat);
- unsigned long bytesPerFrame = numChannels * bytesPerSample;
-
- waveFormatEx->wFormatTag = waveFormatTag;
- waveFormatEx->nChannels = (WORD)numChannels;
- waveFormatEx->nSamplesPerSec = (DWORD)sampleRate;
- waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame;
- waveFormatEx->nBlockAlign = (WORD)bytesPerFrame;
- waveFormatEx->wBitsPerSample = bytesPerSample * 8;
- waveFormatEx->cbSize = 0;
-}
-
-
-void PaWin_InitializeWaveFormatExtensible( PaWinWaveFormat *waveFormat,
- int numChannels, PaSampleFormat sampleFormat, int waveFormatTag, double sampleRate,
- PaWinWaveFormatChannelMask channelMask )
-{
- WAVEFORMATEX *waveFormatEx = (WAVEFORMATEX*)waveFormat;
- int bytesPerSample = Pa_GetSampleSize(sampleFormat);
- unsigned long bytesPerFrame = numChannels * bytesPerSample;
- GUID guid;
-
- waveFormatEx->wFormatTag = WAVE_FORMAT_EXTENSIBLE;
- waveFormatEx->nChannels = (WORD)numChannels;
- waveFormatEx->nSamplesPerSec = (DWORD)sampleRate;
- waveFormatEx->nAvgBytesPerSec = waveFormatEx->nSamplesPerSec * bytesPerFrame;
- waveFormatEx->nBlockAlign = (WORD)bytesPerFrame;
- waveFormatEx->wBitsPerSample = bytesPerSample * 8;
- waveFormatEx->cbSize = 22;
-
- memcpy(&waveFormat->fields[PAWIN_INDEXOF_WVALIDBITSPERSAMPLE],
- &waveFormatEx->wBitsPerSample, sizeof(WORD));
-
- memcpy(&waveFormat->fields[PAWIN_INDEXOF_DWCHANNELMASK],
- &channelMask, sizeof(DWORD));
-
- guid = pawin_ksDataFormatSubtypeGuidBase;
- guid.Data1 = (USHORT)waveFormatTag;
- memcpy(&waveFormat->fields[PAWIN_INDEXOF_SUBFORMAT], &guid, sizeof(GUID));
-}
-
-PaWinWaveFormatChannelMask PaWin_DefaultChannelMask( int numChannels )
-{
- switch( numChannels ){
- case 1:
- return PAWIN_SPEAKER_MONO;
- case 2:
- return PAWIN_SPEAKER_STEREO;
- case 3:
- return PAWIN_SPEAKER_FRONT_LEFT | PAWIN_SPEAKER_FRONT_CENTER | PAWIN_SPEAKER_FRONT_RIGHT;
- case 4:
- return PAWIN_SPEAKER_QUAD;
- case 5:
- return PAWIN_SPEAKER_QUAD | PAWIN_SPEAKER_FRONT_CENTER;
- case 6:
- /* The meaning of the PAWIN_SPEAKER_5POINT1 flag has changed over time:
- http://msdn2.microsoft.com/en-us/library/aa474707.aspx
- We use PAWIN_SPEAKER_5POINT1 (not PAWIN_SPEAKER_5POINT1_SURROUND)
- because on some cards (eg Audigy) PAWIN_SPEAKER_5POINT1_SURROUND
- results in a virtual mixdown placing the rear output in the
- front _and_ rear speakers.
- */
- return PAWIN_SPEAKER_5POINT1;
- /* case 7: */
- case 8:
- /* RoBi: PAWIN_SPEAKER_7POINT1_SURROUND fits normal surround sound setups better than PAWIN_SPEAKER_7POINT1, f.i. NVidia HDMI Audio
- output is silent on channels 5&6 with NVidia drivers, and channel 7&8 with Microsoft HD Audio driver using PAWIN_SPEAKER_7POINT1.
- With PAWIN_SPEAKER_7POINT1_SURROUND both setups work OK. */
- return PAWIN_SPEAKER_7POINT1_SURROUND;
- }
-
- /* Apparently some Audigy drivers will output silence
- if the direct-out constant (0) is used. So this is not ideal.
-
- RoBi 2012-12-19: Also, NVidia driver seem to output garbage instead. Again not very ideal.
- */
- return PAWIN_SPEAKER_DIRECTOUT;
-
- /* Note that Alec Rogers proposed the following as an alternate method to
- generate the default channel mask, however it doesn't seem to be an improvement
- over the above, since some drivers will matrix outputs mapping to non-present
- speakers across multiple physical speakers.
-
- if(nChannels==1) {
- pwfFormat->dwChannelMask = SPEAKER_FRONT_CENTER;
- }
- else {
- pwfFormat->dwChannelMask = 0;
- for(i=0; i<nChannels; i++)
- pwfFormat->dwChannelMask = (pwfFormat->dwChannelMask << 1) | 0x1;
- }
- */
-}
diff --git a/spaces/anakin87/who-killed-laura-palmer/presentations/README.md b/spaces/anakin87/who-killed-laura-palmer/presentations/README.md
deleted file mode 100644
index 422f41d55711723701b7d61f7a8a5744f2ac24b4..0000000000000000000000000000000000000000
--- a/spaces/anakin87/who-killed-laura-palmer/presentations/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# 🧑🏫 Presentations
-*PyCon Italy 2022*
-
-- [Video presentation](https://www.youtube.com/watch?v=V-c-qmDEJVg)
-- [Slides](./presentations/wklp_pycon.pdf)
diff --git a/spaces/aodianyun/ChatGLM-6B/README.md b/spaces/aodianyun/ChatGLM-6B/README.md
deleted file mode 100644
index 27b28672dd3482aa29f40bf55926adaa071c186f..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/ChatGLM-6B/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChatGLM 6B
-emoji: 🏃
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: multimodalart/ChatGLM-6B
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/aphenx/bingo/src/components/chat-image.tsx b/spaces/aphenx/bingo/src/components/chat-image.tsx
deleted file mode 100644
index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000
--- a/spaces/aphenx/bingo/src/components/chat-image.tsx
+++ /dev/null
@@ -1,170 +0,0 @@
-import {
- useEffect,
- useState,
- useCallback,
- ChangeEvent,
- ClipboardEvent,
- MouseEventHandler,
- FormEvent,
- useRef
-} from "react"
-import Image from 'next/image'
-import PasteIcon from '@/assets/images/paste.svg'
-import UploadIcon from '@/assets/images/upload.svg'
-import CameraIcon from '@/assets/images/camera.svg'
-import { useBing } from '@/lib/hooks/use-bing'
-import { cn } from '@/lib/utils'
-
-interface ChatImageProps extends Pick<ReturnType<typeof useBing>, 'uploadImage'> {}
-
-const preventDefault: MouseEventHandler = (event) => {
- event.nativeEvent.stopImmediatePropagation()
-}
-
-const toBase64 = (file: File): Promise<string> => new Promise((resolve, reject) => {
- const reader = new FileReader()
- reader.readAsDataURL(file)
- reader.onload = () => resolve(reader.result as string)
- reader.onerror = reject
-})
-
-export function ChatImage({ children, uploadImage }: React.PropsWithChildren<ChatImageProps>) {
- const videoRef = useRef<HTMLVideoElement>(null)
- const canvasRef = useRef<HTMLCanvasElement>(null)
- const mediaStream = useRef<MediaStream>()
- const [panel, setPanel] = useState('none')
-
- const upload = useCallback((url: string) => {
- if (url) {
- uploadImage(url)
- }
- setPanel('none')
- }, [panel])
-
- const onUpload = useCallback(async (event: ChangeEvent) => {
- const file = event.target.files?.[0]
- if (file) {
- const fileDataUrl = await toBase64(file)
- if (fileDataUrl) {
- upload(fileDataUrl)
- }
- }
- }, [])
-
- const onPaste = useCallback((event: ClipboardEvent) => {
- const pasteUrl = event.clipboardData.getData('text') ?? ''
- upload(pasteUrl)
- }, [])
-
- const onEnter = useCallback((event: FormEvent) => {
- event.preventDefault()
- event.stopPropagation()
- // @ts-ignore
- const inputUrl = event.target.elements.image.value
- if (inputUrl) {
- upload(inputUrl)
- }
- }, [])
-
- const openVideo: MouseEventHandler = async (event) => {
- event.stopPropagation()
- setPanel('camera-mode')
- }
-
- const onCapture = () => {
- if (canvasRef.current && videoRef.current) {
- const canvas = canvasRef.current
- canvas.width = videoRef.current!.videoWidth
- canvas.height = videoRef.current!.videoHeight
- canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height)
- const cameraUrl = canvas.toDataURL('image/jpeg')
- upload(cameraUrl)
- }
- }
-
- useEffect(() => {
- const handleBlur = () => {
- if (panel !== 'none') {
- setPanel('none')
- }
- }
- document.addEventListener('click', handleBlur)
- return () => {
- document.removeEventListener('click', handleBlur)
- }
- }, [panel])
-
- useEffect(() => {
- if (panel === 'camera-mode') {
- navigator.mediaDevices.getUserMedia({ video: true, audio: false })
- .then(videoStream => {
- mediaStream.current = videoStream
- if (videoRef.current) {
- videoRef.current.srcObject = videoStream
- }
- })
- } else {
- if (mediaStream.current) {
- mediaStream.current.getTracks().forEach(function(track) {
- track.stop()
- })
- mediaStream.current = undefined
- }
- }
- }, [panel])
-
- return (
-
-
panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
-
-
-
-
添加图像
-
-
-
-
-
-
-
-
-
- 从此设备上传
-
-
-
- 拍照
-
-
-
- {panel === 'camera-mode' &&
}
-
-
- )
-}
diff --git a/spaces/aphenx/bingo/tailwind.config.js b/spaces/aphenx/bingo/tailwind.config.js
deleted file mode 100644
index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000
--- a/spaces/aphenx/bingo/tailwind.config.js
+++ /dev/null
@@ -1,48 +0,0 @@
-/** @type {import('tailwindcss').Config} */
-module.exports = {
- content: [
- './src/pages/**/*.{js,ts,jsx,tsx,mdx}',
- './src/components/**/*.{js,ts,jsx,tsx,mdx}',
- './src/app/**/*.{js,ts,jsx,tsx,mdx}',
- './src/ui/**/*.{js,ts,jsx,tsx,mdx}',
- ],
- "darkMode": "class",
- theme: {
- extend: {
- colors: {
- 'primary-blue': 'rgb(var(--color-primary-blue) / <alpha-value>)',
- secondary: 'rgb(var(--color-secondary) / <alpha-value>)',
- 'primary-background': 'rgb(var(--primary-background) / <alpha-value>)',
- 'primary-text': 'rgb(var(--primary-text) / <alpha-value>)',
- 'secondary-text': 'rgb(var(--secondary-text) / <alpha-value>)',
- 'light-text': 'rgb(var(--light-text) / <alpha-value>)',
- 'primary-border': 'rgb(var(--primary-border) / <alpha-value>)',
- },
- keyframes: {
- slideDownAndFade: {
- from: { opacity: 0, transform: 'translateY(-2px)' },
- to: { opacity: 1, transform: 'translateY(0)' },
- },
- slideLeftAndFade: {
- from: { opacity: 0, transform: 'translateX(2px)' },
- to: { opacity: 1, transform: 'translateX(0)' },
- },
- slideUpAndFade: {
- from: { opacity: 0, transform: 'translateY(2px)' },
- to: { opacity: 1, transform: 'translateY(0)' },
- },
- slideRightAndFade: {
- from: { opacity: 0, transform: 'translateX(2px)' },
- to: { opacity: 1, transform: 'translateX(0)' },
- },
- },
- animation: {
- slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)',
- },
- },
- },
- plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')],
-}
diff --git a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py b/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py
deleted file mode 100644
index 5d8f154887a43a5c5f67cf6340f74268398e32d5..0000000000000000000000000000000000000000
--- a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/models.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import attentions
-import commons
-import modules
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from commons import init_weights, get_padding
-from vdecoder.hifigan.models import Generator
-from utils import f0_to_coarse
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class Encoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- # print(x.shape,x_lengths.shape)
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- filter_channels=None,
- n_heads=None,
- p_dropout=None):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
- self.f0_emb = nn.Embedding(256, hidden_channels)
-
- self.enc_ = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout)
-
- def forward(self, x, x_lengths, f0=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = x + self.f0_emb(f0).transpose(1,2)
- x = self.enc_(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
-
- return z, m, logs, x_mask
-
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2,3,5,7,11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SpeakerEncoder(torch.nn.Module):
- def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256):
- super(SpeakerEncoder, self).__init__()
- self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True)
- self.linear = nn.Linear(model_hidden_size, model_embedding_size)
- self.relu = nn.ReLU()
-
- def forward(self, mels):
- self.lstm.flatten_parameters()
- _, (hidden, _) = self.lstm(mels)
- embeds_raw = self.relu(self.linear(hidden[-1]))
- return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True)
-
- def compute_partial_slices(self, total_frames, partial_frames, partial_hop):
- mel_slices = []
- for i in range(0, total_frames-partial_frames, partial_hop):
- mel_range = torch.arange(i, i+partial_frames)
- mel_slices.append(mel_range)
-
- return mel_slices
-
- def embed_utterance(self, mel, partial_frames=128, partial_hop=64):
- mel_len = mel.size(1)
- last_mel = mel[:,-partial_frames:]
-
- if mel_len > partial_frames:
- mel_slices = self.compute_partial_slices(mel_len, partial_frames, partial_hop)
- mels = list(mel[:,s] for s in mel_slices)
- mels.append(last_mel)
- mels = torch.stack(tuple(mels), 0).squeeze(1)
-
- with torch.no_grad():
- partial_embeds = self(mels)
- embed = torch.mean(partial_embeds, axis=0).unsqueeze(0)
- #embed = embed / torch.linalg.norm(embed, 2)
- else:
- with torch.no_grad():
- embed = self(last_mel)
-
- return embed
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- ssl_dim,
- n_speakers,
- **kwargs):
-
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- self.ssl_dim = ssl_dim
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout)
- hps = {
- "sampling_rate": 48000,
- "inter_channels": 192,
- "resblock": "1",
- "resblock_kernel_sizes": [3, 7, 11],
- "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
- "upsample_rates": [10, 8, 2, 2],
- "upsample_initial_channel": 512,
- "upsample_kernel_sizes": [16, 16, 4, 4],
- "gin_channels": 256,
- }
- self.dec = Generator(h=hps)
- self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
-
- def forward(self, c, f0, spec, g=None, mel=None, c_lengths=None, spec_lengths=None):
- if c_lengths is None:
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- if spec_lengths is None:
- spec_lengths = (torch.ones(spec.size(0)) * spec.size(-1)).to(spec.device)
-
- g = self.emb_g(g).transpose(1,2)
-
- z_ptemp, m_p, logs_p, _ = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0))
- z, m_q, logs_q, spec_mask = self.enc_q(spec, spec_lengths, g=g)
-
- z_p = self.flow(z, spec_mask, g=g)
- z_slice, pitch_slice, ids_slice = commons.rand_slice_segments_with_pitch(z, f0, spec_lengths, self.segment_size)
-
- # o = self.dec(z_slice, g=g)
- o = self.dec(z_slice, g=g, f0=pitch_slice)
-
- return o, ids_slice, spec_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, c, f0, g=None, mel=None, c_lengths=None):
- if c_lengths is None:
- c_lengths = (torch.ones(c.size(0)) * c.size(-1)).to(c.device)
- g = self.emb_g(g).transpose(1,2)
-
- z_p, m_p, logs_p, c_mask = self.enc_p_(c, c_lengths, f0=f0_to_coarse(f0))
- z = self.flow(z_p, c_mask, g=g, reverse=True)
-
- o = self.dec(z * c_mask, g=g, f0=f0)
-
- return o
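A standalone sketch of the reparameterization step that both `Encoder` and `TextEncoder` above use to sample latents (`z = m + eps * exp(logs)`, masked to the valid frames); shapes are illustrative only and assume PyTorch is installed:

```python
# Shapes are illustrative: (batch, channels, frames) latents with a frame mask.
import torch

m    = torch.zeros(1, 192, 100)   # predicted mean
logs = torch.zeros(1, 192, 100)   # predicted log standard deviation
mask = torch.ones(1, 1, 100)      # 1 for valid frames, 0 for padding

# Same sampling rule as Encoder/TextEncoder.forward above.
z = (m + torch.randn_like(m) * torch.exp(logs)) * mask
print(z.shape)  # torch.Size([1, 192, 100])
```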
diff --git a/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py b/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py
deleted file mode 100644
index 6e43a22faefb8b1ce927cf4e019a2c03e19be0db..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/whisper/whisper/transcribe.py
+++ /dev/null
@@ -1,461 +0,0 @@
-import argparse
-import os
-import warnings
-from typing import TYPE_CHECKING, Optional, Tuple, Union
-
-import numpy as np
-import torch
-import tqdm
-
-from .audio import (
- FRAMES_PER_SECOND,
- HOP_LENGTH,
- N_FRAMES,
- N_SAMPLES,
- SAMPLE_RATE,
- log_mel_spectrogram,
- pad_or_trim,
-)
-from .decoding import DecodingOptions, DecodingResult
-from .timing import add_word_timestamps
-from .tokenizer import LANGUAGES, TO_LANGUAGE_CODE, get_tokenizer
-from .utils import (
- exact_div,
- format_timestamp,
- get_writer,
- make_safe,
- optional_float,
- optional_int,
- str2bool,
-)
-
-if TYPE_CHECKING:
- from .model import Whisper
-
-
-def transcribe(
- model: "Whisper",
- audio: Union[str, np.ndarray, torch.Tensor],
- *,
- verbose: Optional[bool] = None,
- temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
- compression_ratio_threshold: Optional[float] = 2.4,
- logprob_threshold: Optional[float] = -1.0,
- no_speech_threshold: Optional[float] = 0.6,
- condition_on_previous_text: bool = True,
- initial_prompt: Optional[str] = None,
- word_timestamps: bool = False,
- prepend_punctuations: str = "\"'“¿([{-",
- append_punctuations: str = "\"'.。,,!!??::”)]}、",
- **decode_options,
-):
- """
- Transcribe an audio file using Whisper
-
- Parameters
- ----------
- model: Whisper
- The Whisper model instance
-
- audio: Union[str, np.ndarray, torch.Tensor]
- The path to the audio file to open, or the audio waveform
-
- verbose: bool
- Whether to display the text being decoded to the console. If True, displays all the details,
- If False, displays minimal details. If None, does not display anything
-
- temperature: Union[float, Tuple[float, ...]]
- Temperature for sampling. It can be a tuple of temperatures, which will be successively used
- upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
-
- compression_ratio_threshold: float
- If the gzip compression ratio is above this value, treat as failed
-
- logprob_threshold: float
- If the average log probability over sampled tokens is below this value, treat as failed
-
- no_speech_threshold: float
- If the no_speech probability is higher than this value AND the average log probability
- over sampled tokens is below `logprob_threshold`, consider the segment as silent
-
- condition_on_previous_text: bool
- if True, the previous output of the model is provided as a prompt for the next window;
- disabling may make the text inconsistent across windows, but the model becomes less prone to
- getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
-
- word_timestamps: bool
- Extract word-level timestamps using the cross-attention pattern and dynamic time warping,
- and include the timestamps for each word in each segment.
-
- prepend_punctuations: str
- If word_timestamps is True, merge these punctuation symbols with the next word
-
- append_punctuations: str
- If word_timestamps is True, merge these punctuation symbols with the previous word
-
- initial_prompt: Optional[str]
- Optional text to provide as a prompt for the first window. This can be used to provide, or
- "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns
- to make it more likely to predict those words correctly.
-
- decode_options: dict
- Keyword arguments to construct `DecodingOptions` instances
-
- Returns
- -------
- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
- the spoken language ("language"), which is detected when `decode_options["language"]` is None.
- """
- dtype = torch.float16 if decode_options.get("fp16", True) else torch.float32
- if model.device == torch.device("cpu"):
- if torch.cuda.is_available():
- warnings.warn("Performing inference on CPU when CUDA is available")
- if dtype == torch.float16:
- warnings.warn("FP16 is not supported on CPU; using FP32 instead")
- dtype = torch.float32
-
- if dtype == torch.float32:
- decode_options["fp16"] = False
-
- # Pad 30-seconds of silence to the input audio, for slicing
- mel = log_mel_spectrogram(audio, padding=N_SAMPLES)
- content_frames = mel.shape[-1] - N_FRAMES
-
- if decode_options.get("language", None) is None:
- if not model.is_multilingual:
- decode_options["language"] = "en"
- else:
- if verbose:
- print(
- "Detecting language using up to the first 30 seconds. Use `--language` to specify the language"
- )
- mel_segment = pad_or_trim(mel, N_FRAMES).to(model.device).to(dtype)
- _, probs = model.detect_language(mel_segment)
- decode_options["language"] = max(probs, key=probs.get)
- if verbose is not None:
- print(
- f"Detected language: {LANGUAGES[decode_options['language']].title()}"
- )
-
- language: str = decode_options["language"]
- task: str = decode_options.get("task", "transcribe")
- tokenizer = get_tokenizer(model.is_multilingual, language=language, task=task)
-
- if word_timestamps and task == "translate":
- warnings.warn("Word-level timestamps on translations may not be reliable.")
-
- def decode_with_fallback(segment: torch.Tensor) -> DecodingResult:
- temperatures = (
- [temperature] if isinstance(temperature, (int, float)) else temperature
- )
- decode_result = None
-
- for t in temperatures:
- kwargs = {**decode_options}
- if t > 0:
- # disable beam_size and patience when t > 0
- kwargs.pop("beam_size", None)
- kwargs.pop("patience", None)
- else:
- # disable best_of when t == 0
- kwargs.pop("best_of", None)
-
- options = DecodingOptions(**kwargs, temperature=t)
- decode_result = model.decode(segment, options)
-
- needs_fallback = False
- if (
- compression_ratio_threshold is not None
- and decode_result.compression_ratio > compression_ratio_threshold
- ):
- needs_fallback = True # too repetitive
- if (
- logprob_threshold is not None
- and decode_result.avg_logprob < logprob_threshold
- ):
- needs_fallback = True # average log probability is too low
- if (
- no_speech_threshold is not None
- and decode_result.no_speech_prob > no_speech_threshold
- ):
- needs_fallback = False # silence
- if not needs_fallback:
- break
-
- return decode_result
-
- seek = 0
- input_stride = exact_div(
- N_FRAMES, model.dims.n_audio_ctx
- ) # mel frames per output token: 2
- time_precision = (
- input_stride * HOP_LENGTH / SAMPLE_RATE
- ) # time per output token: 0.02 (seconds)
- all_tokens = []
- all_segments = []
- prompt_reset_since = 0
-
- if initial_prompt is not None:
- initial_prompt_tokens = tokenizer.encode(" " + initial_prompt.strip())
- all_tokens.extend(initial_prompt_tokens)
- else:
- initial_prompt_tokens = []
-
- def new_segment(
- *, start: float, end: float, tokens: torch.Tensor, result: DecodingResult
- ):
- tokens = tokens.tolist()
- text_tokens = [token for token in tokens if token < tokenizer.eot]
- return {
- "seek": seek,
- "start": start,
- "end": end,
- "text": tokenizer.decode(text_tokens),
- "tokens": tokens,
- "temperature": result.temperature,
- "avg_logprob": result.avg_logprob,
- "compression_ratio": result.compression_ratio,
- "no_speech_prob": result.no_speech_prob,
- }
-
- # show the progress bar when verbose is False (if True, transcribed text will be printed)
- with tqdm.tqdm(
- total=content_frames, unit="frames", disable=verbose is not False
- ) as pbar:
- last_speech_timestamp = 0.0
- while seek < content_frames:
- time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
- mel_segment = mel[:, seek : seek + N_FRAMES]
- segment_size = min(N_FRAMES, content_frames - seek)
- segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE
- mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype)
-
- decode_options["prompt"] = all_tokens[prompt_reset_since:]
- result: DecodingResult = decode_with_fallback(mel_segment)
- tokens = torch.tensor(result.tokens)
-
- if no_speech_threshold is not None:
- # no voice activity check
- should_skip = result.no_speech_prob > no_speech_threshold
- if (
- logprob_threshold is not None
- and result.avg_logprob > logprob_threshold
- ):
- # don't skip if the logprob is high enough, despite the no_speech_prob
- should_skip = False
-
- if should_skip:
- seek += segment_size # fast-forward to the next segment boundary
- continue
-
- previous_seek = seek
- current_segments = []
-
- timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin)
- single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True]
-
- consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0]
- consecutive.add_(1)
- if len(consecutive) > 0:
- # if the output contains two consecutive timestamp tokens
- slices = consecutive.tolist()
- if single_timestamp_ending:
- slices.append(len(tokens))
-
- last_slice = 0
- for current_slice in slices:
- sliced_tokens = tokens[last_slice:current_slice]
- start_timestamp_pos = (
- sliced_tokens[0].item() - tokenizer.timestamp_begin
- )
- end_timestamp_pos = (
- sliced_tokens[-1].item() - tokenizer.timestamp_begin
- )
- current_segments.append(
- new_segment(
- start=time_offset + start_timestamp_pos * time_precision,
- end=time_offset + end_timestamp_pos * time_precision,
- tokens=sliced_tokens,
- result=result,
- )
- )
- last_slice = current_slice
-
- if single_timestamp_ending:
- # single timestamp at the end means no speech after the last timestamp.
- seek += segment_size
- else:
- # otherwise, ignore the unfinished segment and seek to the last timestamp
- last_timestamp_pos = (
- tokens[last_slice - 1].item() - tokenizer.timestamp_begin
- )
- seek += last_timestamp_pos * input_stride
- else:
- duration = segment_duration
- timestamps = tokens[timestamp_tokens.nonzero().flatten()]
- if (
- len(timestamps) > 0
- and timestamps[-1].item() != tokenizer.timestamp_begin
- ):
- # no consecutive timestamps but it has a timestamp; use the last one.
- last_timestamp_pos = (
- timestamps[-1].item() - tokenizer.timestamp_begin
- )
- duration = last_timestamp_pos * time_precision
-
- current_segments.append(
- new_segment(
- start=time_offset,
- end=time_offset + duration,
- tokens=tokens,
- result=result,
- )
- )
- seek += segment_size
-
- if word_timestamps:
- add_word_timestamps(
- segments=current_segments,
- model=model,
- tokenizer=tokenizer,
- mel=mel_segment,
- num_frames=segment_size,
- prepend_punctuations=prepend_punctuations,
- append_punctuations=append_punctuations,
- last_speech_timestamp=last_speech_timestamp,
- )
- word_end_timestamps = [
- w["end"] for s in current_segments for w in s["words"]
- ]
- if len(word_end_timestamps) > 0:
- last_speech_timestamp = word_end_timestamps[-1]
- if not single_timestamp_ending and len(word_end_timestamps) > 0:
- seek_shift = round(
- (word_end_timestamps[-1] - time_offset) * FRAMES_PER_SECOND
- )
- if seek_shift > 0:
- seek = previous_seek + seek_shift
-
- if verbose:
- for segment in current_segments:
- start, end, text = segment["start"], segment["end"], segment["text"]
- line = f"[{format_timestamp(start)} --> {format_timestamp(end)}] {text}"
- print(make_safe(line))
-
- # if a segment is instantaneous or does not contain text, clear it
- for i, segment in enumerate(current_segments):
- if segment["start"] == segment["end"] or segment["text"].strip() == "":
- segment["text"] = ""
- segment["tokens"] = []
- segment["words"] = []
-
- all_segments.extend(
- [
- {"id": i, **segment}
- for i, segment in enumerate(
- current_segments, start=len(all_segments)
- )
- ]
- )
- all_tokens.extend(
- [token for segment in current_segments for token in segment["tokens"]]
- )
-
- if not condition_on_previous_text or result.temperature > 0.5:
- # do not feed the prompt tokens if a high temperature was used
- prompt_reset_since = len(all_tokens)
-
- # update progress bar
- pbar.update(min(content_frames, seek) - previous_seek)
-
- return dict(
- text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]),
- segments=all_segments,
- language=language,
- )
-
-
-def cli():
- from . import available_models
-
- # fmt: off
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
- parser.add_argument("--model", default="small", choices=available_models(), help="name of the Whisper model to use")
- parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
- parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
- parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
- parser.add_argument("--output_format", "-f", type=str, default="all", choices=["txt", "vtt", "srt", "tsv", "json", "all"], help="format of the output file; if not specified, all available formats will be produced")
- parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")
-
- parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
- parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection")
-
- parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
- parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
- parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
- parser.add_argument("--patience", type=float, default=None, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
- parser.add_argument("--length_penalty", type=float, default=None, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")
-
- parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
- parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
- parser.add_argument("--condition_on_previous_text", type=str2bool, default=True, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
- parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")
-
- parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
- parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
- parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
- parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
- parser.add_argument("--word_timestamps", type=str2bool, default=False, help="(experimental) extract word-level timestamps and refine the results based on them")
- parser.add_argument("--prepend_punctuations", type=str, default="\"\'“¿([{-", help="if word_timestamps is True, merge these punctuation symbols with the next word")
- parser.add_argument("--append_punctuations", type=str, default="\"\'.。,,!!??::”)]}、", help="if word_timestamps is True, merge these punctuation symbols with the previous word")
- parser.add_argument("--highlight_words", type=str2bool, default=False, help="(requires --word_timestamps True) underline each word as it is spoken in srt and vtt")
- parser.add_argument("--max_line_width", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of characters in a line before breaking the line")
- parser.add_argument("--max_line_count", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of lines in a segment")
- parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supercedes MKL_NUM_THREADS/OMP_NUM_THREADS")
- # fmt: on
-
- args = parser.parse_args().__dict__
- model_name: str = args.pop("model")
- model_dir: str = args.pop("model_dir")
- output_dir: str = args.pop("output_dir")
- output_format: str = args.pop("output_format")
- device: str = args.pop("device")
- os.makedirs(output_dir, exist_ok=True)
-
- if model_name.endswith(".en") and args["language"] not in {"en", "English"}:
- if args["language"] is not None:
- warnings.warn(
- f"{model_name} is an English-only model but receipted '{args['language']}'; using English instead."
- )
- args["language"] = "en"
-
- temperature = args.pop("temperature")
- if (increment := args.pop("temperature_increment_on_fallback")) is not None:
- temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment))
- else:
- temperature = [temperature]
-
- if (threads := args.pop("threads")) > 0:
- torch.set_num_threads(threads)
-
- from . import load_model
-
- model = load_model(model_name, device=device, download_root=model_dir)
-
- writer = get_writer(output_format, output_dir)
- word_options = ["highlight_words", "max_line_count", "max_line_width"]
- if not args["word_timestamps"]:
- for option in word_options:
- if args[option]:
- parser.error(f"--{option} requires --word_timestamps True")
- if args["max_line_count"] and not args["max_line_width"]:
- warnings.warn("--max_line_count has no effect without --max_line_width")
- writer_args = {arg: args.pop(arg) for arg in word_options}
- for audio_path in args.pop("audio"):
- result = transcribe(model, audio_path, temperature=temperature, **args)
- writer(result, audio_path, writer_args)
-
-
-if __name__ == "__main__":
- cli()
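A hedged example of driving the deleted `transcribe()` function directly, following its docstring above; `"audio.mp3"` is a placeholder path, and the sketch assumes the whisper package is importable:

```python
# "audio.mp3" is a placeholder; whisper.load_model downloads the checkpoint if needed.
import whisper
from whisper.transcribe import transcribe

model = whisper.load_model("small")
result = transcribe(
    model,
    "audio.mp3",
    temperature=(0.0, 0.2, 0.4, 0.6, 0.8, 1.0),  # default fallback ladder
)
print(result["language"], result["text"])
for seg in result["segments"]:
    print(f"[{seg['start']:.2f} --> {seg['end']:.2f}] {seg['text']}")
```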
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c
deleted file mode 100644
index 9a11d6a129ccbc7a7590b058f3dc21fdc7049fa1..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Utility/AsyncGen.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-// This is copied from genobject.c in CPython 3.6.
-// Try to keep it in sync by doing this from time to time:
-// sed -e 's|__pyx_||ig' Cython/Utility/AsyncGen.c | diff -udw - cpython/Objects/genobject.c | less
-
-//////////////////// AsyncGenerator.proto ////////////////////
-//@requires: Coroutine.c::Coroutine
-
-#define __Pyx_AsyncGen_USED
-typedef struct {
- __pyx_CoroutineObject coro;
- PyObject *ag_finalizer;
- int ag_hooks_inited;
- int ag_closed;
-} __pyx_PyAsyncGenObject;
-
-static PyTypeObject *__pyx__PyAsyncGenWrappedValueType = 0;
-static PyTypeObject *__pyx__PyAsyncGenASendType = 0;
-static PyTypeObject *__pyx__PyAsyncGenAThrowType = 0;
-static PyTypeObject *__pyx_AsyncGenType = 0;
-
-#define __Pyx_AsyncGen_CheckExact(obj) (Py_TYPE(obj) == __pyx_AsyncGenType)
-#define __pyx_PyAsyncGenASend_CheckExact(o) \
- (Py_TYPE(o) == __pyx__PyAsyncGenASendType)
-#define __pyx_PyAsyncGenAThrow_CheckExact(o) \
- (Py_TYPE(o) == __pyx__PyAsyncGenAThrowType)
-
-static PyObject *__Pyx_async_gen_anext(PyObject *o);
-static CYTHON_INLINE PyObject *__Pyx_async_gen_asend_iternext(PyObject *o);
-static PyObject *__Pyx_async_gen_asend_send(PyObject *o, PyObject *arg);
-static PyObject *__Pyx_async_gen_asend_close(PyObject *o, PyObject *args);
-static PyObject *__Pyx_async_gen_athrow_close(PyObject *o, PyObject *args);
-
-static PyObject *__Pyx__PyAsyncGenValueWrapperNew(PyObject *val);
-
-
-static __pyx_CoroutineObject *__Pyx_AsyncGen_New(
- __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
- PyObject *name, PyObject *qualname, PyObject *module_name) {
- __pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType);
- if (unlikely(!gen))
- return NULL;
- gen->ag_finalizer = NULL;
- gen->ag_closed = 0;
- gen->ag_hooks_inited = 0;
- return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name);
-}
-
-static int __pyx_AsyncGen_init(void);
-static void __Pyx_PyAsyncGen_Fini(void);
-
-//////////////////// AsyncGenerator.cleanup ////////////////////
-
-__Pyx_PyAsyncGen_Fini();
-
-//////////////////// AsyncGeneratorInitFinalizer ////////////////////
-
-// this is separated out because it needs more adaptation
-
-#if PY_VERSION_HEX < 0x030600B0
-static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) {
-#if 0
- // TODO: implement finalizer support in older Python versions
- PyThreadState *tstate;
- PyObject *finalizer;
- PyObject *firstiter;
-#endif
-
- if (likely(o->ag_hooks_inited)) {
- return 0;
- }
-
- o->ag_hooks_inited = 1;
-
-#if 0
- tstate = __Pyx_PyThreadState_Current;
-
- finalizer = tstate->async_gen_finalizer;
- if (finalizer) {
- Py_INCREF(finalizer);
- o->ag_finalizer = finalizer;
- }
-
- firstiter = tstate->async_gen_firstiter;
- if (firstiter) {
- PyObject *res;
-
- Py_INCREF(firstiter);
- res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o);
- Py_DECREF(firstiter);
- if (res == NULL) {
- return 1;
- }
- Py_DECREF(res);
- }
-#endif
-
- return 0;
-}
-#endif
-
-
-//////////////////// AsyncGenerator ////////////////////
-//@requires: AsyncGeneratorInitFinalizer
-//@requires: Coroutine.c::Coroutine
-//@requires: Coroutine.c::ReturnWithStopIteration
-//@requires: ObjectHandling.c::PyObjectCall2Args
-//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
-
-PyDoc_STRVAR(__Pyx_async_gen_send_doc,
-"send(arg) -> send 'arg' into generator,\n\
-return next yielded value or raise StopIteration.");
-
-PyDoc_STRVAR(__Pyx_async_gen_close_doc,
-"close() -> raise GeneratorExit inside generator.");
-
-PyDoc_STRVAR(__Pyx_async_gen_throw_doc,
-"throw(typ[,val[,tb]]) -> raise exception in generator,\n\
-return next yielded value or raise StopIteration.");
-
-PyDoc_STRVAR(__Pyx_async_gen_await_doc,
-"__await__() -> return a representation that can be passed into the 'await' expression.");
-
-// COPY STARTS HERE:
-
-static PyObject *__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *, PyObject *);
-static PyObject *__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *, PyObject *);
-
-static const char *__Pyx_NON_INIT_CORO_MSG = "can't send non-None value to a just-started coroutine";
-static const char *__Pyx_ASYNC_GEN_IGNORED_EXIT_MSG = "async generator ignored GeneratorExit";
-
-typedef enum {
- __PYX_AWAITABLE_STATE_INIT, /* new awaitable, has not yet been iterated */
- __PYX_AWAITABLE_STATE_ITER, /* being iterated */
- __PYX_AWAITABLE_STATE_CLOSED, /* closed */
-} __pyx_AwaitableState;
-
-typedef struct {
- PyObject_HEAD
- __pyx_PyAsyncGenObject *ags_gen;
-
- /* Can be NULL, when in the __anext__() mode (equivalent of "asend(None)") */
- PyObject *ags_sendval;
-
- __pyx_AwaitableState ags_state;
-} __pyx_PyAsyncGenASend;
-
-
-typedef struct {
- PyObject_HEAD
- __pyx_PyAsyncGenObject *agt_gen;
-
- /* Can be NULL, when in the "aclose()" mode (equivalent of "athrow(GeneratorExit)") */
- PyObject *agt_args;
-
- __pyx_AwaitableState agt_state;
-} __pyx_PyAsyncGenAThrow;
-
-
-typedef struct {
- PyObject_HEAD
- PyObject *agw_val;
-} __pyx__PyAsyncGenWrappedValue;
-
-
-#ifndef _PyAsyncGen_MAXFREELIST
-#define _PyAsyncGen_MAXFREELIST 80
-#endif
-
-// Freelists boost performance 6-10%; they also reduce memory
-// fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend
-// are short-living objects that are instantiated for every
-// __anext__ call.
-
-static __pyx__PyAsyncGenWrappedValue *__Pyx_ag_value_freelist[_PyAsyncGen_MAXFREELIST];
-static int __Pyx_ag_value_freelist_free = 0;
-
-static __pyx_PyAsyncGenASend *__Pyx_ag_asend_freelist[_PyAsyncGen_MAXFREELIST];
-static int __Pyx_ag_asend_freelist_free = 0;
-
-#define __pyx__PyAsyncGenWrappedValue_CheckExact(o) \
- (Py_TYPE(o) == __pyx__PyAsyncGenWrappedValueType)
-
-
-static int
-__Pyx_async_gen_traverse(__pyx_PyAsyncGenObject *gen, visitproc visit, void *arg)
-{
- Py_VISIT(gen->ag_finalizer);
- return __Pyx_Coroutine_traverse((__pyx_CoroutineObject*)gen, visit, arg);
-}
-
-
-static PyObject *
-__Pyx_async_gen_repr(__pyx_CoroutineObject *o)
-{
- // avoid NULL pointer dereference for qualname during garbage collection
- return PyUnicode_FromFormat("",
- o->gi_qualname ? o->gi_qualname : Py_None, o);
-}
-
-
-#if PY_VERSION_HEX >= 0x030600B0
-static int
-__Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o)
-{
- PyThreadState *tstate;
- PyObject *finalizer;
- PyObject *firstiter;
-
- if (o->ag_hooks_inited) {
- return 0;
- }
-
- o->ag_hooks_inited = 1;
-
- tstate = __Pyx_PyThreadState_Current;
-
- finalizer = tstate->async_gen_finalizer;
- if (finalizer) {
- Py_INCREF(finalizer);
- o->ag_finalizer = finalizer;
- }
-
- firstiter = tstate->async_gen_firstiter;
- if (firstiter) {
- PyObject *res;
-#if CYTHON_UNPACK_METHODS
- PyObject *self;
-#endif
-
- Py_INCREF(firstiter);
- // at least asyncio stores methods here => optimise the call
-#if CYTHON_UNPACK_METHODS
- if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) {
- PyObject *function = PyMethod_GET_FUNCTION(firstiter);
- res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o);
- } else
-#endif
- res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o);
-
- Py_DECREF(firstiter);
- if (unlikely(res == NULL)) {
- return 1;
- }
- Py_DECREF(res);
- }
-
- return 0;
-}
-#endif
-
-
-static PyObject *
-__Pyx_async_gen_anext(PyObject *g)
-{
- __pyx_PyAsyncGenObject *o = (__pyx_PyAsyncGenObject*) g;
- if (__Pyx_async_gen_init_hooks(o)) {
- return NULL;
- }
- return __Pyx_async_gen_asend_new(o, NULL);
-}
-
-static PyObject *
-__Pyx_async_gen_anext_method(PyObject *g, CYTHON_UNUSED PyObject *arg) {
- return __Pyx_async_gen_anext(g);
-}
-
-
-static PyObject *
-__Pyx_async_gen_asend(__pyx_PyAsyncGenObject *o, PyObject *arg)
-{
- if (__Pyx_async_gen_init_hooks(o)) {
- return NULL;
- }
- return __Pyx_async_gen_asend_new(o, arg);
-}
-
-
-static PyObject *
-__Pyx_async_gen_aclose(__pyx_PyAsyncGenObject *o, CYTHON_UNUSED PyObject *arg)
-{
- if (__Pyx_async_gen_init_hooks(o)) {
- return NULL;
- }
- return __Pyx_async_gen_athrow_new(o, NULL);
-}
-
-
-static PyObject *
-__Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args)
-{
- if (__Pyx_async_gen_init_hooks(o)) {
- return NULL;
- }
- return __Pyx_async_gen_athrow_new(o, args);
-}
-
-
-static PyObject *
-__Pyx_async_gen_self_method(PyObject *g, CYTHON_UNUSED PyObject *arg) {
- return __Pyx_NewRef(g);
-}
-
-
-static PyGetSetDef __Pyx_async_gen_getsetlist[] = {
- {(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
- (char*) PyDoc_STR("name of the async generator"), 0},
- {(char*) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
- (char*) PyDoc_STR("qualified name of the async generator"), 0},
- //REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL,
- //REMOVED: (char*) PyDoc_STR("object being awaited on, or None")},
- {0, 0, 0, 0, 0} /* Sentinel */
-};
-
-static PyMemberDef __Pyx_async_gen_memberlist[] = {
- //REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
- {(char*) "ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
- //REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
- //ADDED: "ag_await"
- {(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
- (char*) PyDoc_STR("object being awaited on, or None")},
- {0, 0, 0, 0, 0} /* Sentinel */
-};
-
-PyDoc_STRVAR(__Pyx_async_aclose_doc,
-"aclose() -> raise GeneratorExit inside generator.");
-
-PyDoc_STRVAR(__Pyx_async_asend_doc,
-"asend(v) -> send 'v' in generator.");
-
-PyDoc_STRVAR(__Pyx_async_athrow_doc,
-"athrow(typ[,val[,tb]]) -> raise exception in generator.");
-
-PyDoc_STRVAR(__Pyx_async_aiter_doc,
-"__aiter__(v) -> return an asynchronous iterator.");
-
-PyDoc_STRVAR(__Pyx_async_anext_doc,
-"__anext__(v) -> continue asynchronous iteration and return the next element.");
-
-static PyMethodDef __Pyx_async_gen_methods[] = {
- {"asend", (PyCFunction)__Pyx_async_gen_asend, METH_O, __Pyx_async_asend_doc},
- {"athrow",(PyCFunction)__Pyx_async_gen_athrow, METH_VARARGS, __Pyx_async_athrow_doc},
- {"aclose", (PyCFunction)__Pyx_async_gen_aclose, METH_NOARGS, __Pyx_async_aclose_doc},
- {"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc},
- {"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc},
- {0, 0, 0, 0} /* Sentinel */
-};
-
-
-#if CYTHON_USE_ASYNC_SLOTS
-static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_as_async = {
- 0, /* am_await */
- PyObject_SelfIter, /* am_aiter */
- (unaryfunc)__Pyx_async_gen_anext, /* am_anext */
-#if PY_VERSION_HEX >= 0x030A00A3
- 0, /*am_send*/
-#endif
-};
-#endif
-
-static PyTypeObject __pyx_AsyncGenType_type = {
- PyVarObject_HEAD_INIT(0, 0)
- "async_generator", /* tp_name */
- sizeof(__pyx_PyAsyncGenObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)__Pyx_Coroutine_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if CYTHON_USE_ASYNC_SLOTS
- &__Pyx_async_gen_as_async, /* tp_as_async */
-#else
- 0, /*tp_reserved*/
-#endif
- (reprfunc)__Pyx_async_gen_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
- Py_TPFLAGS_HAVE_FINALIZE, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)__Pyx_async_gen_traverse, /* tp_traverse */
- 0, /* tp_clear */
-#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
- // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
- __Pyx_Coroutine_compare, /*tp_richcompare*/
-#else
- 0, /*tp_richcompare*/
-#endif
- offsetof(__pyx_CoroutineObject, gi_weakreflist), /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- __Pyx_async_gen_methods, /* tp_methods */
- __Pyx_async_gen_memberlist, /* tp_members */
- __Pyx_async_gen_getsetlist, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
-#if CYTHON_USE_TP_FINALIZE
- 0, /*tp_del*/
-#else
- __Pyx_Coroutine_del, /*tp_del*/
-#endif
- 0, /* tp_version_tag */
-#if CYTHON_USE_TP_FINALIZE
- __Pyx_Coroutine_del, /* tp_finalize */
-#elif PY_VERSION_HEX >= 0x030400a1
- 0, /* tp_finalize */
-#endif
-#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
- 0, /*tp_vectorcall*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
- 0, /*tp_print*/
-#endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
- 0, /*tp_pypy_flags*/
-#endif
-};
-
-
-static int
-__Pyx_PyAsyncGen_ClearFreeLists(void)
-{
- int ret = __Pyx_ag_value_freelist_free + __Pyx_ag_asend_freelist_free;
-
- while (__Pyx_ag_value_freelist_free) {
- __pyx__PyAsyncGenWrappedValue *o;
- o = __Pyx_ag_value_freelist[--__Pyx_ag_value_freelist_free];
- assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
- PyObject_GC_Del(o);
- }
-
- while (__Pyx_ag_asend_freelist_free) {
- __pyx_PyAsyncGenASend *o;
- o = __Pyx_ag_asend_freelist[--__Pyx_ag_asend_freelist_free];
- assert(Py_TYPE(o) == __pyx__PyAsyncGenASendType);
- PyObject_GC_Del(o);
- }
-
- return ret;
-}
-
-static void
-__Pyx_PyAsyncGen_Fini(void)
-{
- __Pyx_PyAsyncGen_ClearFreeLists();
-}
-
-
-static PyObject *
-__Pyx_async_gen_unwrap_value(__pyx_PyAsyncGenObject *gen, PyObject *result)
-{
- if (result == NULL) {
- PyObject *exc_type = PyErr_Occurred();
- if (!exc_type) {
- PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration);
- gen->ag_closed = 1;
- } else if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) {
- gen->ag_closed = 1;
- }
-
- return NULL;
- }
-
- if (__pyx__PyAsyncGenWrappedValue_CheckExact(result)) {
- /* async yield */
- __Pyx_ReturnWithStopIteration(((__pyx__PyAsyncGenWrappedValue*)result)->agw_val);
- Py_DECREF(result);
- return NULL;
- }
-
- return result;
-}
-
-
-/* ---------- Async Generator ASend Awaitable ------------ */
-
-
-static void
-__Pyx_async_gen_asend_dealloc(__pyx_PyAsyncGenASend *o)
-{
- PyObject_GC_UnTrack((PyObject *)o);
- Py_CLEAR(o->ags_gen);
- Py_CLEAR(o->ags_sendval);
- if (__Pyx_ag_asend_freelist_free < _PyAsyncGen_MAXFREELIST) {
- assert(__pyx_PyAsyncGenASend_CheckExact(o));
- __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free++] = o;
- } else {
- PyObject_GC_Del(o);
- }
-}
-
-static int
-__Pyx_async_gen_asend_traverse(__pyx_PyAsyncGenASend *o, visitproc visit, void *arg)
-{
- Py_VISIT(o->ags_gen);
- Py_VISIT(o->ags_sendval);
- return 0;
-}
-
-
-static PyObject *
-__Pyx_async_gen_asend_send(PyObject *g, PyObject *arg)
-{
- __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g;
- PyObject *result;
-
- if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) {
- PyErr_SetNone(PyExc_StopIteration);
- return NULL;
- }
-
- if (o->ags_state == __PYX_AWAITABLE_STATE_INIT) {
- if (arg == NULL || arg == Py_None) {
- arg = o->ags_sendval ? o->ags_sendval : Py_None;
- }
- o->ags_state = __PYX_AWAITABLE_STATE_ITER;
- }
-
- result = __Pyx_Coroutine_Send((PyObject*)o->ags_gen, arg);
- result = __Pyx_async_gen_unwrap_value(o->ags_gen, result);
-
- if (result == NULL) {
- o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
- }
-
- return result;
-}
-
-
-static CYTHON_INLINE PyObject *
-__Pyx_async_gen_asend_iternext(PyObject *o)
-{
- return __Pyx_async_gen_asend_send(o, Py_None);
-}
-
-
-static PyObject *
-__Pyx_async_gen_asend_throw(__pyx_PyAsyncGenASend *o, PyObject *args)
-{
- PyObject *result;
-
- if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) {
- PyErr_SetNone(PyExc_StopIteration);
- return NULL;
- }
-
- result = __Pyx_Coroutine_Throw((PyObject*)o->ags_gen, args);
- result = __Pyx_async_gen_unwrap_value(o->ags_gen, result);
-
- if (result == NULL) {
- o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
- }
-
- return result;
-}
-
-
-static PyObject *
-__Pyx_async_gen_asend_close(PyObject *g, CYTHON_UNUSED PyObject *args)
-{
- __pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g;
- o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
- Py_RETURN_NONE;
-}
-
-
-static PyMethodDef __Pyx_async_gen_asend_methods[] = {
- {"send", (PyCFunction)__Pyx_async_gen_asend_send, METH_O, __Pyx_async_gen_send_doc},
- {"throw", (PyCFunction)__Pyx_async_gen_asend_throw, METH_VARARGS, __Pyx_async_gen_throw_doc},
- {"close", (PyCFunction)__Pyx_async_gen_asend_close, METH_NOARGS, __Pyx_async_gen_close_doc},
- {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc},
- {0, 0, 0, 0} /* Sentinel */
-};
-
-
-#if CYTHON_USE_ASYNC_SLOTS
-static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = {
- PyObject_SelfIter, /* am_await */
- 0, /* am_aiter */
- 0, /* am_anext */
-#if PY_VERSION_HEX >= 0x030A00A3
- 0, /*am_send*/
-#endif
-};
-#endif
-
-
-static PyTypeObject __pyx__PyAsyncGenASendType_type = {
- PyVarObject_HEAD_INIT(0, 0)
- "async_generator_asend", /* tp_name */
- sizeof(__pyx_PyAsyncGenASend), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)__Pyx_async_gen_asend_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if CYTHON_USE_ASYNC_SLOTS
- &__Pyx_async_gen_asend_as_async, /* tp_as_async */
-#else
- 0, /*tp_reserved*/
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)__Pyx_async_gen_asend_traverse, /* tp_traverse */
- 0, /* tp_clear */
-#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
- // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
- __Pyx_Coroutine_compare, /*tp_richcompare*/
-#else
- 0, /*tp_richcompare*/
-#endif
- 0, /* tp_weaklistoffset */
- PyObject_SelfIter, /* tp_iter */
- (iternextfunc)__Pyx_async_gen_asend_iternext, /* tp_iternext */
- __Pyx_async_gen_asend_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
-#if PY_VERSION_HEX >= 0x030400a1
- 0, /* tp_finalize */
-#endif
-#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
- 0, /*tp_vectorcall*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
- 0, /*tp_print*/
-#endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
- 0, /*tp_pypy_flags*/
-#endif
-};
-
-
-static PyObject *
-__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *gen, PyObject *sendval)
-{
- __pyx_PyAsyncGenASend *o;
- if (__Pyx_ag_asend_freelist_free) {
- __Pyx_ag_asend_freelist_free--;
- o = __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free];
- _Py_NewReference((PyObject *)o);
- } else {
- o = PyObject_GC_New(__pyx_PyAsyncGenASend, __pyx__PyAsyncGenASendType);
- if (o == NULL) {
- return NULL;
- }
- }
-
- Py_INCREF(gen);
- o->ags_gen = gen;
-
- Py_XINCREF(sendval);
- o->ags_sendval = sendval;
-
- o->ags_state = __PYX_AWAITABLE_STATE_INIT;
-
- PyObject_GC_Track((PyObject*)o);
- return (PyObject*)o;
-}
-
-
-/* ---------- Async Generator Value Wrapper ------------ */
-
-
-static void
-__Pyx_async_gen_wrapped_val_dealloc(__pyx__PyAsyncGenWrappedValue *o)
-{
- PyObject_GC_UnTrack((PyObject *)o);
- Py_CLEAR(o->agw_val);
- if (__Pyx_ag_value_freelist_free < _PyAsyncGen_MAXFREELIST) {
- assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
- __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free++] = o;
- } else {
- PyObject_GC_Del(o);
- }
-}
-
-
-static int
-__Pyx_async_gen_wrapped_val_traverse(__pyx__PyAsyncGenWrappedValue *o,
- visitproc visit, void *arg)
-{
- Py_VISIT(o->agw_val);
- return 0;
-}
-
-
-static PyTypeObject __pyx__PyAsyncGenWrappedValueType_type = {
- PyVarObject_HEAD_INIT(0, 0)
- "async_generator_wrapped_value", /* tp_name */
- sizeof(__pyx__PyAsyncGenWrappedValue), /* tp_basicsize */
- 0, /* tp_itemsize */
- /* methods */
- (destructor)__Pyx_async_gen_wrapped_val_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_as_async */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)__Pyx_async_gen_wrapped_val_traverse, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
-#if PY_VERSION_HEX >= 0x030400a1
- 0, /* tp_finalize */
-#endif
-#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
- 0, /*tp_vectorcall*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
- 0, /*tp_print*/
-#endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
- 0, /*tp_pypy_flags*/
-#endif
-};
-
-
-static PyObject *
-__Pyx__PyAsyncGenValueWrapperNew(PyObject *val)
-{
- // NOTE: steals a reference to val !
- __pyx__PyAsyncGenWrappedValue *o;
- assert(val);
-
- if (__Pyx_ag_value_freelist_free) {
- __Pyx_ag_value_freelist_free--;
- o = __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free];
- assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
- _Py_NewReference((PyObject*)o);
- } else {
- o = PyObject_GC_New(__pyx__PyAsyncGenWrappedValue, __pyx__PyAsyncGenWrappedValueType);
- if (unlikely(!o)) {
- Py_DECREF(val);
- return NULL;
- }
- }
- o->agw_val = val;
- // no Py_INCREF(val) - steals reference!
- PyObject_GC_Track((PyObject*)o);
- return (PyObject*)o;
-}
-
-
-/* ---------- Async Generator AThrow awaitable ------------ */
-
-
-static void
-__Pyx_async_gen_athrow_dealloc(__pyx_PyAsyncGenAThrow *o)
-{
- PyObject_GC_UnTrack((PyObject *)o);
- Py_CLEAR(o->agt_gen);
- Py_CLEAR(o->agt_args);
- PyObject_GC_Del(o);
-}
-
-
-static int
-__Pyx_async_gen_athrow_traverse(__pyx_PyAsyncGenAThrow *o, visitproc visit, void *arg)
-{
- Py_VISIT(o->agt_gen);
- Py_VISIT(o->agt_args);
- return 0;
-}
-
-
-static PyObject *
-__Pyx_async_gen_athrow_send(__pyx_PyAsyncGenAThrow *o, PyObject *arg)
-{
- __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*)o->agt_gen;
- PyObject *retval;
-
- if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) {
- PyErr_SetNone(PyExc_StopIteration);
- return NULL;
- }
-
- if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) {
- if (o->agt_gen->ag_closed) {
- PyErr_SetNone(PyExc_StopIteration);
- return NULL;
- }
-
- if (arg != Py_None) {
- PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG);
- return NULL;
- }
-
- o->agt_state = __PYX_AWAITABLE_STATE_ITER;
-
- if (o->agt_args == NULL) {
- /* aclose() mode */
- o->agt_gen->ag_closed = 1;
-
- retval = __Pyx__Coroutine_Throw((PyObject*)gen,
- /* Do not close generator when
- PyExc_GeneratorExit is passed */
- PyExc_GeneratorExit, NULL, NULL, NULL, 0);
-
- if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
- Py_DECREF(retval);
- goto yield_close;
- }
- } else {
- PyObject *typ;
- PyObject *tb = NULL;
- PyObject *val = NULL;
-
- if (!PyArg_UnpackTuple(o->agt_args, "athrow", 1, 3,
- &typ, &val, &tb)) {
- return NULL;
- }
-
- retval = __Pyx__Coroutine_Throw((PyObject*)gen,
- /* Do not close generator when PyExc_GeneratorExit is passed */
- typ, val, tb, o->agt_args, 0);
- retval = __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
- }
- if (retval == NULL) {
- goto check_error;
- }
- return retval;
- }
-
- assert (o->agt_state == __PYX_AWAITABLE_STATE_ITER);
-
- retval = __Pyx_Coroutine_Send((PyObject *)gen, arg);
- if (o->agt_args) {
- return __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
- } else {
- /* aclose() mode */
- if (retval) {
- if (__pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
- Py_DECREF(retval);
- goto yield_close;
- }
- else {
- return retval;
- }
- }
- else {
- goto check_error;
- }
- }
-
-yield_close:
- PyErr_SetString(
- PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG);
- return NULL;
-
-check_error:
- if (PyErr_ExceptionMatches(__Pyx_PyExc_StopAsyncIteration)) {
- o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
- if (o->agt_args == NULL) {
- // when aclose() is called we don't want to propagate
- // StopAsyncIteration; just raise StopIteration, signalling
- // that 'aclose()' is done.
- PyErr_Clear();
- PyErr_SetNone(PyExc_StopIteration);
- }
- }
- else if (PyErr_ExceptionMatches(PyExc_GeneratorExit)) {
- o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
- PyErr_Clear(); /* ignore these errors */
- PyErr_SetNone(PyExc_StopIteration);
- }
- return NULL;
-}
-
-
-static PyObject *
-__Pyx_async_gen_athrow_throw(__pyx_PyAsyncGenAThrow *o, PyObject *args)
-{
- PyObject *retval;
-
- if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) {
- PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG);
- return NULL;
- }
-
- if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) {
- PyErr_SetNone(PyExc_StopIteration);
- return NULL;
- }
-
- retval = __Pyx_Coroutine_Throw((PyObject*)o->agt_gen, args);
- if (o->agt_args) {
- return __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
- } else {
- /* aclose() mode */
- if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
- Py_DECREF(retval);
- PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG);
- return NULL;
- }
- return retval;
- }
-}
-
-
-static PyObject *
-__Pyx_async_gen_athrow_iternext(__pyx_PyAsyncGenAThrow *o)
-{
- return __Pyx_async_gen_athrow_send(o, Py_None);
-}
-
-
-static PyObject *
-__Pyx_async_gen_athrow_close(PyObject *g, CYTHON_UNUSED PyObject *args)
-{
- __pyx_PyAsyncGenAThrow *o = (__pyx_PyAsyncGenAThrow*) g;
- o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
- Py_RETURN_NONE;
-}
-
-
-static PyMethodDef __Pyx_async_gen_athrow_methods[] = {
- {"send", (PyCFunction)__Pyx_async_gen_athrow_send, METH_O, __Pyx_async_gen_send_doc},
- {"throw", (PyCFunction)__Pyx_async_gen_athrow_throw, METH_VARARGS, __Pyx_async_gen_throw_doc},
- {"close", (PyCFunction)__Pyx_async_gen_athrow_close, METH_NOARGS, __Pyx_async_gen_close_doc},
- {"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc},
- {0, 0, 0, 0} /* Sentinel */
-};
-
-
-#if CYTHON_USE_ASYNC_SLOTS
-static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = {
- PyObject_SelfIter, /* am_await */
- 0, /* am_aiter */
- 0, /* am_anext */
-#if PY_VERSION_HEX >= 0x030A00A3
- 0, /*am_send*/
-#endif
-};
-#endif
-
-
-static PyTypeObject __pyx__PyAsyncGenAThrowType_type = {
- PyVarObject_HEAD_INIT(0, 0)
- "async_generator_athrow", /* tp_name */
- sizeof(__pyx_PyAsyncGenAThrow), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)__Pyx_async_gen_athrow_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
-#if CYTHON_USE_ASYNC_SLOTS
- &__Pyx_async_gen_athrow_as_async, /* tp_as_async */
-#else
- 0, /*tp_reserved*/
-#endif
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)__Pyx_async_gen_athrow_traverse, /* tp_traverse */
- 0, /* tp_clear */
-#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
- // in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
- __Pyx_Coroutine_compare, /*tp_richcompare*/
-#else
- 0, /*tp_richcompare*/
-#endif
- 0, /* tp_weaklistoffset */
- PyObject_SelfIter, /* tp_iter */
- (iternextfunc)__Pyx_async_gen_athrow_iternext, /* tp_iternext */
- __Pyx_async_gen_athrow_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
- 0, /* tp_version_tag */
-#if PY_VERSION_HEX >= 0x030400a1
- 0, /* tp_finalize */
-#endif
-#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
- 0, /*tp_vectorcall*/
-#endif
-#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
- 0, /*tp_print*/
-#endif
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
- 0, /*tp_pypy_flags*/
-#endif
-};
-
-
-static PyObject *
-__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *gen, PyObject *args)
-{
- __pyx_PyAsyncGenAThrow *o;
- o = PyObject_GC_New(__pyx_PyAsyncGenAThrow, __pyx__PyAsyncGenAThrowType);
- if (o == NULL) {
- return NULL;
- }
- o->agt_gen = gen;
- o->agt_args = args;
- o->agt_state = __PYX_AWAITABLE_STATE_INIT;
- Py_INCREF(gen);
- Py_XINCREF(args);
- PyObject_GC_Track((PyObject*)o);
- return (PyObject*)o;
-}
-
-
-/* ---------- global type sharing ------------ */
-
-static int __pyx_AsyncGen_init(void) {
- // on Windows, C-API functions can't be used in slots statically
- __pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
- __pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
- __pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
- __pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
-
- __pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type);
- if (unlikely(!__pyx_AsyncGenType))
- return -1;
-
- __pyx__PyAsyncGenAThrowType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenAThrowType_type);
- if (unlikely(!__pyx__PyAsyncGenAThrowType))
- return -1;
-
- __pyx__PyAsyncGenWrappedValueType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenWrappedValueType_type);
- if (unlikely(!__pyx__PyAsyncGenWrappedValueType))
- return -1;
-
- __pyx__PyAsyncGenASendType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenASendType_type);
- if (unlikely(!__pyx__PyAsyncGenASendType))
- return -1;
-
- return 0;
-}
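The removed block above is Cython's C-level reimplementation of CPython's async-generator machinery: the generator type itself plus the `asend`/`athrow`/`aclose` awaitables and their freelists. For orientation, here is a minimal pure-Python sketch of the protocol those objects implement; it is illustrative only and assumes nothing beyond the standard `asyncio` module.

```python
import asyncio

async def counter(n):
    """A plain async generator; the C type above mirrors this protocol."""
    for i in range(n):
        yield i

async def main():
    agen = counter(3)
    print(await agen.asend(None))   # 0 -- the first asend() must send None
    print(await agen.__anext__())   # 1
    await agen.aclose()             # raises GeneratorExit inside the generator
    try:
        await agen.__anext__()
    except StopAsyncIteration:
        print("generator closed")

asyncio.run(main())
```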
diff --git a/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py b/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py
deleted file mode 100644
index c7727dded3f93f5eeafdcd72e257197e3fdc817b..0000000000000000000000000000000000000000
--- a/spaces/asafAdge/Detic/detic/data/custom_dataset_mapper.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
-import logging
-import numpy as np
-from typing import List, Optional, Union
-import torch
-import pycocotools.mask as mask_util
-
-from detectron2.config import configurable
-
-from detectron2.data import detection_utils as utils
-from detectron2.data.detection_utils import transform_keypoint_annotations
-from detectron2.data import transforms as T
-from detectron2.data.dataset_mapper import DatasetMapper
-from detectron2.structures import Boxes, BoxMode, Instances
-from detectron2.structures import Keypoints, PolygonMasks, BitMasks
-from fvcore.transforms.transform import TransformList
-from .custom_build_augmentation import build_custom_augmentation
-from .tar_dataset import DiskTarDataset
-
-__all__ = ["CustomDatasetMapper"]
-
-class CustomDatasetMapper(DatasetMapper):
- @configurable
- def __init__(self, is_train: bool,
- with_ann_type=False,
- dataset_ann=[],
- use_diff_bs_size=False,
- dataset_augs=[],
- is_debug=False,
- use_tar_dataset=False,
- tarfile_path='',
- tar_index_dir='',
- **kwargs):
- """
- add image labels
- """
- self.with_ann_type = with_ann_type
- self.dataset_ann = dataset_ann
- self.use_diff_bs_size = use_diff_bs_size
- if self.use_diff_bs_size and is_train:
- self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs]
- self.is_debug = is_debug
- self.use_tar_dataset = use_tar_dataset
- if self.use_tar_dataset:
- print('Using tar dataset')
- self.tar_dataset = DiskTarDataset(tarfile_path, tar_index_dir)
- super().__init__(is_train, **kwargs)
-
-
- @classmethod
- def from_config(cls, cfg, is_train: bool = True):
- ret = super().from_config(cfg, is_train)
- ret.update({
- 'with_ann_type': cfg.WITH_IMAGE_LABELS,
- 'dataset_ann': cfg.DATALOADER.DATASET_ANN,
-    return PyUnicode_FromFormat("<async_generator object %S at %p>",
-                                o->gi_qualname ? o->gi_qualname : Py_None, o);
- 'use_tar_dataset': cfg.DATALOADER.USE_TAR_DATASET,
- 'tarfile_path': cfg.DATALOADER.TARFILE_PATH,
- 'tar_index_dir': cfg.DATALOADER.TAR_INDEX_DIR,
- })
- if ret['use_diff_bs_size'] and is_train:
- if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
- dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE
- dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE
- ret['dataset_augs'] = [
- build_custom_augmentation(cfg, True, scale, size) \
- for scale, size in zip(dataset_scales, dataset_sizes)]
- else:
- assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge'
- min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES
- max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES
- ret['dataset_augs'] = [
- build_custom_augmentation(
- cfg, True, min_size=mi, max_size=ma) \
- for mi, ma in zip(min_sizes, max_sizes)]
- else:
- ret['dataset_augs'] = []
-
- return ret
-
- def __call__(self, dataset_dict):
- """
- include image labels
- """
- dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
- # USER: Write your own image loading if it's not from a file
- if 'file_name' in dataset_dict:
- ori_image = utils.read_image(
- dataset_dict["file_name"], format=self.image_format)
- else:
- ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]]
- ori_image = utils._apply_exif_orientation(ori_image)
- ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format)
- utils.check_image_size(dataset_dict, ori_image)
-
- # USER: Remove if you don't do semantic/panoptic segmentation.
- if "sem_seg_file_name" in dataset_dict:
- sem_seg_gt = utils.read_image(
- dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
- else:
- sem_seg_gt = None
-
- if self.is_debug:
- dataset_dict['dataset_source'] = 0
-
- not_full_labeled = 'dataset_source' in dataset_dict and \
- self.with_ann_type and \
- self.dataset_ann[dataset_dict['dataset_source']] != 'box'
-
- aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=sem_seg_gt)
- if self.use_diff_bs_size and self.is_train:
- transforms = \
- self.dataset_augs[dataset_dict['dataset_source']](aug_input)
- else:
- transforms = self.augmentations(aug_input)
- image, sem_seg_gt = aug_input.image, aug_input.sem_seg
-
- image_shape = image.shape[:2] # h, w
- dataset_dict["image"] = torch.as_tensor(
- np.ascontiguousarray(image.transpose(2, 0, 1)))
-
- if sem_seg_gt is not None:
- dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
-
- # USER: Remove if you don't use pre-computed proposals.
- # Most users would not need this feature.
- if self.proposal_topk is not None:
- utils.transform_proposals(
- dataset_dict, image_shape, transforms,
- proposal_topk=self.proposal_topk
- )
-
- if not self.is_train:
- # USER: Modify this if you want to keep them for some reason.
- dataset_dict.pop("annotations", None)
- dataset_dict.pop("sem_seg_file_name", None)
- return dataset_dict
-
- if "annotations" in dataset_dict:
- # USER: Modify this if you want to keep them for some reason.
- for anno in dataset_dict["annotations"]:
- if not self.use_instance_mask:
- anno.pop("segmentation", None)
- if not self.use_keypoint:
- anno.pop("keypoints", None)
-
- # USER: Implement additional transformations if you have other types of data
- all_annos = [
- (utils.transform_instance_annotations(
- obj, transforms, image_shape,
- keypoint_hflip_indices=self.keypoint_hflip_indices,
- ), obj.get("iscrowd", 0))
- for obj in dataset_dict.pop("annotations")
- ]
- annos = [ann[0] for ann in all_annos if ann[1] == 0]
- instances = utils.annotations_to_instances(
- annos, image_shape, mask_format=self.instance_mask_format
- )
-
- del all_annos
- if self.recompute_boxes:
- instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
- dataset_dict["instances"] = utils.filter_empty_instances(instances)
- if self.with_ann_type:
- dataset_dict["pos_category_ids"] = dataset_dict.get(
- 'pos_category_ids', [])
- dataset_dict["ann_type"] = \
- self.dataset_ann[dataset_dict['dataset_source']]
- if self.is_debug and (('pos_category_ids' not in dataset_dict) or \
- (dataset_dict['pos_category_ids'] == [])):
- dataset_dict['pos_category_ids'] = [x for x in sorted(set(
- dataset_dict['instances'].gt_classes.tolist()
- ))]
- return dataset_dict
-
-# DETR augmentation
-def build_transform_gen(cfg, is_train):
-    """
-    Build a list of :class:`TransformGen` from config: a shortest-edge resize,
-    plus a random horizontal flip during training.
-    """
- if is_train:
- min_size = cfg.INPUT.MIN_SIZE_TRAIN
- max_size = cfg.INPUT.MAX_SIZE_TRAIN
- sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
- else:
- min_size = cfg.INPUT.MIN_SIZE_TEST
- max_size = cfg.INPUT.MAX_SIZE_TEST
- sample_style = "choice"
- if sample_style == "range":
- assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
-
- logger = logging.getLogger(__name__)
- tfm_gens = []
- if is_train:
- tfm_gens.append(T.RandomFlip())
- tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
- if is_train:
- logger.info("TransformGens used in training: " + str(tfm_gens))
- return tfm_gens
-
-
-class DetrDatasetMapper:
- """
- A callable which takes a dataset dict in Detectron2 Dataset format,
- and map it into a format used by DETR.
- The callable currently does the following:
- 1. Read the image from "file_name"
- 2. Applies geometric transforms to the image and annotation
- 3. Find and applies suitable cropping to the image and annotation
- 4. Prepare image and annotation to Tensors
- """
-
- def __init__(self, cfg, is_train=True):
- if cfg.INPUT.CROP.ENABLED and is_train:
- self.crop_gen = [
- T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
- T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
- ]
- else:
- self.crop_gen = None
-
- self.mask_on = cfg.MODEL.MASK_ON
- self.tfm_gens = build_transform_gen(cfg, is_train)
- logging.getLogger(__name__).info(
- "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
- )
-
- self.img_format = cfg.INPUT.FORMAT
- self.is_train = is_train
-
- def __call__(self, dataset_dict):
- """
- Args:
- dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
- Returns:
- dict: a format that builtin models in detectron2 accept
- """
- dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
- image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
- utils.check_image_size(dataset_dict, image)
-
- if self.crop_gen is None:
- image, transforms = T.apply_transform_gens(self.tfm_gens, image)
- else:
- if np.random.rand() > 0.5:
- image, transforms = T.apply_transform_gens(self.tfm_gens, image)
- else:
- image, transforms = T.apply_transform_gens(
- self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
- )
-
- image_shape = image.shape[:2] # h, w
-
- # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
- # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
- # Therefore it's important to use torch.Tensor.
- dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
-
- if not self.is_train:
- # USER: Modify this if you want to keep them for some reason.
- dataset_dict.pop("annotations", None)
- return dataset_dict
-
- if "annotations" in dataset_dict:
- # USER: Modify this if you want to keep them for some reason.
- for anno in dataset_dict["annotations"]:
- if not self.mask_on:
- anno.pop("segmentation", None)
- anno.pop("keypoints", None)
-
- # USER: Implement additional transformations if you have other types of data
- annos = [
- utils.transform_instance_annotations(obj, transforms, image_shape)
- for obj in dataset_dict.pop("annotations")
- if obj.get("iscrowd", 0) == 0
- ]
- instances = utils.annotations_to_instances(annos, image_shape)
- dataset_dict["instances"] = utils.filter_empty_instances(instances)
- return dataset_dict
\ No newline at end of file
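`CustomDatasetMapper` above follows detectron2's `DatasetMapper` contract: it is a callable that turns one dataset dict into a model-ready dict, so it plugs straight into a train loader. A hedged sketch of that wiring, assuming a `CfgNode` that already carries the extra keys the mapper reads (`WITH_IMAGE_LABELS`, `DATALOADER.DATASET_ANN`, and so on):

```python
# Sketch only: build a training dataloader around the mapper defined above.
from detectron2.data import build_detection_train_loader
from detic.data.custom_dataset_mapper import CustomDatasetMapper

def build_loader(cfg):
    # cfg is assumed to be a Detic-style CfgNode with the custom dataloader keys.
    mapper = CustomDatasetMapper(cfg, is_train=True)
    return build_detection_train_loader(cfg, mapper=mapper)
```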
diff --git a/spaces/asciicorp/Legal-ai/save.py b/spaces/asciicorp/Legal-ai/save.py
deleted file mode 100644
index a57eaa46838c083864c8cae916ae9b9b277a1dbe..0000000000000000000000000000000000000000
--- a/spaces/asciicorp/Legal-ai/save.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import streamlit as st
-from datetime import datetime
-import base64
-
-def save_function(model, temperature, template):
- current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
- filename = f"api_{model}_{current_time}.py"
- with open(filename, "w") as f:
- f.write("from langchain.prompts.prompt import PromptTemplate\n")
- f.write("from langchain.llms import OpenAI\n")
- f.write("from langchain.chains import ChatVectorDBChain\n")
- f.write("\n")
- f.write("import os\n")
- f.write("import pickle\n")
- f.write("from fastapi import FastAPI, Request\n")
- f.write("\n")
- f.write('os.environ["OPENAI_API_KEY"] = "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"\n')
- f.write("\n")
- f.write(f"model = '{model}'\n")
- f.write(f"temperature = {temperature}\n")
- f.write(f"template = '''{template}'''\n")
- f.write("\n")
- f.write("_template = '''Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n")
- f.write("you can assume the question is about the document.\n")
- f.write("\n")
- f.write("Chat History:\n")
- f.write("{chat_history}\n")
- f.write("Follow Up Input: {question}\n")
- f.write("Standalone question:'''\n")
- f.write("\n")
- f.write("CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)\n")
- f.write("\n")
- f.write("QA_PROMPT = PromptTemplate(template=template, input_variables=['question', 'context'])\n")
- f.write("\n")
- f.write('with open("vectorstore.pkl", "rb") as f:\n')
- f.write(" vectorstore = pickle.load(f)\n")
- f.write("\n")
- f.write("app = FastAPI()\n")
- f.write("llm = OpenAI(model=model, temperature=temperature)\n")
- f.write("qa_chain = ChatVectorDBChain.from_llm(\n")
- f.write(" llm,\n")
- f.write(" vectorstore,\n")
- f.write(" qa_prompt=QA_PROMPT,\n")
- f.write(" condense_question_prompt=CONDENSE_QUESTION_PROMPT,\n")
- f.write(" )\n")
- f.write('@app.post("/api")\n')
- f.write("async def get_answer(request: Request):\n")
- f.write(" body = await request.json()\n")
- f.write(' question = body.get("question")\n')
- f.write(' chat_history = body.get("chat_history", [])\n')
- f.write(' result = qa_chain({"question": question, "chat_history": chat_history})\n')
- f.write(' chat_history.append((question, result["answer"]))\n')
- f.write(' return {"answer": result["answer"]}\n')
- st.success(f"Custom API created as {filename}")
- with open(f"{filename}", 'rb') as f:
- bytes = f.read()
- b64 = base64.b64encode(bytes).decode()
-        href = f'<a href="data:file/py;base64,{b64}" download="{filename}">Download custom API</a>'
- st.markdown(href, unsafe_allow_html=True)
\ No newline at end of file
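`save_function` above writes out a standalone FastAPI script whose single `POST /api` route expects a JSON body with a `question` and an optional `chat_history`, and replies with `{"answer": ...}`. A hedged client-side sketch, assuming the generated file has been started with uvicorn on localhost:8000 (host, port, and the sample question are illustrative):

```python
# Sketch only: call an API produced by save_function() once it is running.
import requests

resp = requests.post(
    "http://localhost:8000/api",
    json={"question": "What does clause 4 cover?", "chat_history": []},
)
resp.raise_for_status()
print(resp.json()["answer"])
```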
diff --git a/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md b/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md
deleted file mode 100644
index 447742160f3d89796e10726b0257c19435e90449..0000000000000000000000000000000000000000
--- a/spaces/ashercn97/AsherTesting/docs/Generation-parameters.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Generation parameters
-
-For a description of the generation parameters provided by the transformers library, see this link: https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig
-
-### llama.cpp
-
-llama.cpp only uses the following parameters:
-
-* temperature
-* top_p
-* top_k
-* repetition_penalty
-* tfs
-* mirostat_mode
-* mirostat_tau
-* mirostat_eta
-
-### ExLlama
-
-ExLlama only uses the following parameters:
-
-* temperature
-* top_p
-* top_k
-* repetition_penalty
-* repetition_penalty_range
-* typical_p
-
-### RWKV
-
-RWKV only uses the following parameters when loaded through the old .pth weights:
-
-* temperature
-* top_p
-* top_k
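
For concreteness, a hedged sketch of a settings dict trimmed to just the knobs the llama.cpp backend reads, per the list above (the values are placeholders, not recommendations):

```python
# Only these keys are consumed by the llama.cpp backend; the doc above says
# any other generation parameters are not used by it.
llama_cpp_params = {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 40,
    "repetition_penalty": 1.15,
    "tfs": 1.0,
    "mirostat_mode": 0,      # 0 disables mirostat
    "mirostat_tau": 5.0,
    "mirostat_eta": 0.1,
}
```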
diff --git a/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md b/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md
deleted file mode 100644
index 9805241f6476eaa5bd4711a13b070367ba061c14..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Science-NER-Spacy-Streamlit/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 🔥 Science NER Spacy for STEM Streamlit
-emoji: 🔥
-colorFrom: indigo
-colorTo: red
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/balgot/text-to-stylegan3/download_model.sh b/spaces/balgot/text-to-stylegan3/download_model.sh
deleted file mode 100644
index 02eaac6b3f16988f15937b96fb81aeeca89dace3..0000000000000000000000000000000000000000
--- a/spaces/balgot/text-to-stylegan3/download_model.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-echo "Downloading the translation model..."
-wget --show-progress --verbose -nc -O model.pt https://huggingface.co/balgot/bert-2-stylegan3/resolve/main/translation_model-sd.pt
\ No newline at end of file
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js
deleted file mode 100644
index 9f582cf33d44338c609cb47db9f7b9b576b205c2..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/lines/Wireframe.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * @author WestLangley / http://github.com/WestLangley
- *
- */
-
-THREE.Wireframe = function ( geometry, material ) {
-
- THREE.Mesh.call( this );
-
- this.type = 'Wireframe';
-
- this.geometry = geometry !== undefined ? geometry : new THREE.LineSegmentsGeometry();
- this.material = material !== undefined ? material : new THREE.LineMaterial( { color: Math.random() * 0xffffff } );
-
-};
-
-THREE.Wireframe.prototype = Object.assign( Object.create( THREE.Mesh.prototype ), {
-
- constructor: THREE.Wireframe,
-
- isWireframe: true,
-
-	computeLineDistances: ( function () { // for backwards-compatibility, but could be a method of LineSegmentsGeometry...
-
- var start = new THREE.Vector3();
- var end = new THREE.Vector3();
-
- return function computeLineDistances() {
-
- var geometry = this.geometry;
-
- var instanceStart = geometry.attributes.instanceStart;
- var instanceEnd = geometry.attributes.instanceEnd;
- var lineDistances = new Float32Array( 2 * instanceStart.data.count );
-
- for ( var i = 0, j = 0, l = instanceStart.data.count; i < l; i ++, j += 2 ) {
-
- start.fromBufferAttribute( instanceStart, i );
- end.fromBufferAttribute( instanceEnd, i );
-
- lineDistances[ j ] = ( j === 0 ) ? 0 : lineDistances[ j - 1 ];
- lineDistances[ j + 1 ] = lineDistances[ j ] + start.distanceTo( end );
-
- }
-
- var instanceDistanceBuffer = new THREE.InstancedInterleavedBuffer( lineDistances, 2, 1 ); // d0, d1
-
- geometry.addAttribute( 'instanceDistanceStart', new THREE.InterleavedBufferAttribute( instanceDistanceBuffer, 1, 0 ) ); // d0
- geometry.addAttribute( 'instanceDistanceEnd', new THREE.InterleavedBufferAttribute( instanceDistanceBuffer, 1, 1 ) ); // d1
-
- return this;
-
- };
-
- }() ),
-
- copy: function ( source ) {
-
- // todo
-
- return this;
-
- }
-
-} );
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts
deleted file mode 100644
index 8face24fa9a42526d1033a4b649b86345606aa7b..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/extras/curves/QuadraticBezierCurve.d.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-import { Vector2 } from './../../math/Vector2';
-import { Curve } from './../core/Curve';
-
-export class QuadraticBezierCurve extends Curve<Vector2> {
- constructor(v0: Vector2, v1: Vector2, v2: Vector2);
-
- v0: Vector2;
- v1: Vector2;
- v2: Vector2;
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js
deleted file mode 100644
index 1631661a9bfa38316f66a9eec2482da043d616ea..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/logdepthbuf_vertex.glsl.js
+++ /dev/null
@@ -1,17 +0,0 @@
-export default /* glsl */`
-#ifdef USE_LOGDEPTHBUF
-
- #ifdef USE_LOGDEPTHBUF_EXT
-
- vFragDepth = 1.0 + gl_Position.w;
-
- #else
-
- gl_Position.z = log2( max( EPSILON, gl_Position.w + 1.0 ) ) * logDepthBufFC - 1.0;
-
- gl_Position.z *= gl_Position.w;
-
- #endif
-
-#endif
-`;
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js b/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js
deleted file mode 100644
index 6d2e2946c97bd4ef257344bef0ee271992483be7..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/textures/CubeTexture.js
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- */
-
-import { Texture } from './Texture.js';
-import { CubeReflectionMapping, RGBFormat } from '../constants.js';
-
-function CubeTexture( images, mapping, wrapS, wrapT, magFilter, minFilter, format, type, anisotropy, encoding ) {
-
- images = images !== undefined ? images : [];
- mapping = mapping !== undefined ? mapping : CubeReflectionMapping;
- format = format !== undefined ? format : RGBFormat;
-
- Texture.call( this, images, mapping, wrapS, wrapT, magFilter, minFilter, format, type, anisotropy, encoding );
-
- this.flipY = false;
-
-}
-
-CubeTexture.prototype = Object.create( Texture.prototype );
-CubeTexture.prototype.constructor = CubeTexture;
-
-CubeTexture.prototype.isCubeTexture = true;
-
-Object.defineProperty( CubeTexture.prototype, 'images', {
-
- get: function () {
-
- return this.image;
-
- },
-
- set: function ( value ) {
-
- this.image = value;
-
- }
-
-} );
-
-
-export { CubeTexture };
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py
deleted file mode 100644
index 3917fa2c7408e1f5b55b9930c643a9af920a4d81..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/swinir_arch.py
+++ /dev/null
@@ -1,956 +0,0 @@
-# Modified from https://github.com/JingyunLiang/SwinIR
-# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
-# Originally Written by Ze Liu, Modified by Jingyun Liang.
-
-import math
-import torch
-import torch.nn as nn
-import torch.utils.checkpoint as checkpoint
-
-from basicsr.utils.registry import ARCH_REGISTRY
-from .arch_util import to_2tuple, trunc_normal_
-
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
- """
- if drop_prob == 0. or not training:
- return x
- keep_prob = 1 - drop_prob
- shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
- random_tensor.floor_() # binarize
- output = x.div(keep_prob) * random_tensor
- return output
-
-
-class DropPath(nn.Module):
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-
- From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
- """
-
- def __init__(self, drop_prob=None):
- super(DropPath, self).__init__()
- self.drop_prob = drop_prob
-
- def forward(self, x):
- return drop_path(x, self.drop_prob, self.training)
-
-
-class Mlp(nn.Module):
-
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (b, h, w, c)
- window_size (int): window size
-
- Returns:
- windows: (num_windows*b, window_size, window_size, c)
- """
- b, h, w, c = x.shape
- x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)
- return windows
-
-
-def window_reverse(windows, window_size, h, w):
- """
- Args:
- windows: (num_windows*b, window_size, window_size, c)
- window_size (int): Window size
- h (int): Height of image
- w (int): Width of image
-
- Returns:
- x: (b, h, w, c)
- """
- b = int(windows.shape[0] / (h * w / window_size / window_size))
- x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
- return x
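# Hypothetical shape walk-through for the two helpers above (illustration only,
# not part of the original file): with b=1, h=w=8, c=96 and window_size=4,
#   x = torch.randn(1, 8, 8, 96)
#   windows = window_partition(x, 4)          # -> (4, 4, 4, 96): four 4x4 windows
#   assert torch.equal(window_reverse(windows, 4, 8, 8), x)
# window_reverse is the exact inverse of window_partition.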
-
-
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both of shifted and non-shifted window.
-
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
-
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim**-0.5
-
- # define a parameter table of relative position bias
- self.relative_position_bias_table = nn.Parameter(
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
-
- # get pair-wise relative position index for each token inside the window
- coords_h = torch.arange(self.window_size[0])
- coords_w = torch.arange(self.window_size[1])
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
- relative_coords[:, :, 1] += self.window_size[1] - 1
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
- self.register_buffer('relative_position_index', relative_position_index)
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
-
- self.proj_drop = nn.Dropout(proj_drop)
-
- trunc_normal_(self.relative_position_bias_table, std=.02)
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x, mask=None):
- """
- Args:
- x: input features with shape of (num_windows*b, n, c)
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
- """
- b_, n, c = x.shape
- qkv = self.qkv(x).reshape(b_, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = (q @ k.transpose(-2, -1))
-
- relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
- attn = attn + relative_position_bias.unsqueeze(0)
-
- if mask is not None:
- nw = mask.shape[0]
- attn = attn.view(b_ // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0)
- attn = attn.view(-1, self.num_heads, n, n)
- attn = self.softmax(attn)
- else:
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(b_, n, c)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
-
- def flops(self, n):
- # calculate flops for 1 window with token length of n
- flops = 0
- # qkv = self.qkv(x)
- flops += n * self.dim * 3 * self.dim
- # attn = (q @ k.transpose(-2, -1))
- flops += self.num_heads * n * (self.dim // self.num_heads) * n
- # x = (attn @ v)
- flops += self.num_heads * n * n * (self.dim // self.num_heads)
- # x = self.proj(x)
- flops += n * self.dim * self.dim
- return flops
-
-
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self,
- dim,
- input_resolution,
- num_heads,
- window_size=7,
- shift_size=0,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- act_layer=nn.GELU,
- norm_layer=nn.LayerNorm):
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
- if min(self.input_resolution) <= self.window_size:
- # if window size is larger than input resolution, we don't partition windows
- self.shift_size = 0
- self.window_size = min(self.input_resolution)
- assert 0 <= self.shift_size < self.window_size, 'shift_size must in 0-window_size'
-        assert 0 <= self.shift_size < self.window_size, 'shift_size must be in 0-window_size'
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim,
- window_size=to_2tuple(self.window_size),
- num_heads=num_heads,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- attn_drop=attn_drop,
- proj_drop=drop)
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- if self.shift_size > 0:
- attn_mask = self.calculate_mask(self.input_resolution)
- else:
- attn_mask = None
-
- self.register_buffer('attn_mask', attn_mask)
-
- def calculate_mask(self, x_size):
- # calculate attention mask for SW-MSA
- h, w = x_size
- img_mask = torch.zeros((1, h, w, 1)) # 1 h w 1
- h_slices = (slice(0, -self.window_size), slice(-self.window_size,
- -self.shift_size), slice(-self.shift_size, None))
- w_slices = (slice(0, -self.window_size), slice(-self.window_size,
- -self.shift_size), slice(-self.shift_size, None))
- cnt = 0
- for h in h_slices:
- for w in w_slices:
- img_mask[:, h, w, :] = cnt
- cnt += 1
-
- mask_windows = window_partition(img_mask, self.window_size) # nw, window_size, window_size, 1
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
-
- return attn_mask
-
- def forward(self, x, x_size):
- h, w = x_size
- b, _, c = x.shape
- # assert seq_len == h * w, "input feature has wrong size"
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(b, h, w, c)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nw*b, window_size, window_size, c
- x_windows = x_windows.view(-1, self.window_size * self.window_size, c) # nw*b, window_size*window_size, c
-
- # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
- if self.input_resolution == x_size:
- attn_windows = self.attn(x_windows, mask=self.attn_mask) # nw*b, window_size*window_size, c
- else:
- attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, c)
- shifted_x = window_reverse(attn_windows, self.window_size, h, w) # b h' w' c
-
- # reverse cyclic shift
- if self.shift_size > 0:
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
- else:
- x = shifted_x
- x = x.view(b, h * w, c)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
-
- return x
-
- def extra_repr(self) -> str:
- return (f'dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, '
- f'window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}')
-
- def flops(self):
- flops = 0
- h, w = self.input_resolution
- # norm1
- flops += self.dim * h * w
- # W-MSA/SW-MSA
- nw = h * w / self.window_size / self.window_size
- flops += nw * self.attn.flops(self.window_size * self.window_size)
- # mlp
- flops += 2 * h * w * self.dim * self.dim * self.mlp_ratio
- # norm2
- flops += self.dim * h * w
- return flops
-
-
-class PatchMerging(nn.Module):
- r""" Patch Merging Layer.
-
- Args:
- input_resolution (tuple[int]): Resolution of input feature.
- dim (int): Number of input channels.
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
- super().__init__()
- self.input_resolution = input_resolution
- self.dim = dim
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
- self.norm = norm_layer(4 * dim)
-
- def forward(self, x):
- """
- x: b, h*w, c
- """
- h, w = self.input_resolution
- b, seq_len, c = x.shape
- assert seq_len == h * w, 'input feature has wrong size'
- assert h % 2 == 0 and w % 2 == 0, f'x size ({h}*{w}) are not even.'
-
- x = x.view(b, h, w, c)
-
- x0 = x[:, 0::2, 0::2, :] # b h/2 w/2 c
- x1 = x[:, 1::2, 0::2, :] # b h/2 w/2 c
- x2 = x[:, 0::2, 1::2, :] # b h/2 w/2 c
- x3 = x[:, 1::2, 1::2, :] # b h/2 w/2 c
- x = torch.cat([x0, x1, x2, x3], -1) # b h/2 w/2 4*c
- x = x.view(b, -1, 4 * c) # b h/2*w/2 4*c
-
- x = self.norm(x)
- x = self.reduction(x)
-
- return x
-
- def extra_repr(self) -> str:
- return f'input_resolution={self.input_resolution}, dim={self.dim}'
-
- def flops(self):
- h, w = self.input_resolution
- flops = h * w * self.dim
- flops += (h // 2) * (w // 2) * 4 * self.dim * 2 * self.dim
- return flops
-
-
-class BasicLayer(nn.Module):
- """ A basic Swin Transformer layer for one stage.
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- """
-
- def __init__(self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False):
-
- super().__init__()
- self.dim = dim
- self.input_resolution = input_resolution
- self.depth = depth
- self.use_checkpoint = use_checkpoint
-
- # build blocks
- self.blocks = nn.ModuleList([
- SwinTransformerBlock(
- dim=dim,
- input_resolution=input_resolution,
- num_heads=num_heads,
- window_size=window_size,
- shift_size=0 if (i % 2 == 0) else window_size // 2,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
- norm_layer=norm_layer) for i in range(depth)
- ])
-
- # patch merging layer
- if downsample is not None:
- self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
- else:
- self.downsample = None
-
- def forward(self, x, x_size):
- for blk in self.blocks:
- if self.use_checkpoint:
-                x = checkpoint.checkpoint(blk, x, x_size)
- else:
- x = blk(x, x_size)
- if self.downsample is not None:
- x = self.downsample(x)
- return x
-
- def extra_repr(self) -> str:
- return f'dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}'
-
- def flops(self):
- flops = 0
- for blk in self.blocks:
- flops += blk.flops()
- if self.downsample is not None:
- flops += self.downsample.flops()
- return flops
-
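
A small sketch of how one stage is used: `depth=2` gives an alternating W-MSA / SW-MSA pair, and the token shape is preserved when no downsample module is configured (assumes the block classes defined earlier in this file; the parameter values are illustrative):

```python
import torch

layer = BasicLayer(dim=32, input_resolution=(16, 16), depth=2,
                   num_heads=4, window_size=8)
x = torch.randn(1, 16 * 16, 32)       # (b, h*w, c)
y = layer(x, x_size=(16, 16))         # x_size lets masks be rebuilt for other input sizes
print(y.shape)                        # torch.Size([1, 256, 32])
```
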
-
-class RSTB(nn.Module):
- """Residual Swin Transformer Block (RSTB).
-
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- depth (int): Number of blocks.
- num_heads (int): Number of attention heads.
- window_size (int): Local window size.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
- img_size: Input image size.
- patch_size: Patch size.
- resi_connection: The convolutional block before residual connection.
- """
-
- def __init__(self,
- dim,
- input_resolution,
- depth,
- num_heads,
- window_size,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop=0.,
- attn_drop=0.,
- drop_path=0.,
- norm_layer=nn.LayerNorm,
- downsample=None,
- use_checkpoint=False,
- img_size=224,
- patch_size=4,
- resi_connection='1conv'):
- super(RSTB, self).__init__()
-
- self.dim = dim
- self.input_resolution = input_resolution
-
- self.residual_group = BasicLayer(
- dim=dim,
- input_resolution=input_resolution,
- depth=depth,
- num_heads=num_heads,
- window_size=window_size,
- mlp_ratio=mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop,
- attn_drop=attn_drop,
- drop_path=drop_path,
- norm_layer=norm_layer,
- downsample=downsample,
- use_checkpoint=use_checkpoint)
-
- if resi_connection == '1conv':
- self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv = nn.Sequential(
- nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(dim // 4, dim, 3, 1, 1))
-
- self.patch_embed = PatchEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
-
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim, norm_layer=None)
-
- def forward(self, x, x_size):
- return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
-
- def flops(self):
- flops = 0
- flops += self.residual_group.flops()
- h, w = self.input_resolution
- flops += h * w * self.dim * self.dim * 9
- flops += self.patch_embed.flops()
- flops += self.patch_unembed.flops()
-
- return flops
-
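
The residual group keeps the token layout and adds a conv-refined residual on top of its input; a minimal sketch with illustrative parameter values, assuming the classes in this file:

```python
import torch

rstb = RSTB(dim=32, input_resolution=(16, 16), depth=2, num_heads=4,
            window_size=8, img_size=16, patch_size=1)
x = torch.randn(1, 16 * 16, 32)
y = rstb(x, x_size=(16, 16))          # same shape as x, thanks to the residual connection
print(y.shape)                        # torch.Size([1, 256, 32])
```
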
-
-class PatchEmbed(nn.Module):
- r""" Image to Patch Embedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- if norm_layer is not None:
- self.norm = norm_layer(embed_dim)
- else:
- self.norm = None
-
- def forward(self, x):
- x = x.flatten(2).transpose(1, 2) # b Ph*Pw c
- if self.norm is not None:
- x = self.norm(x)
- return x
-
- def flops(self):
- flops = 0
- h, w = self.img_size
- if self.norm is not None:
- flops += h * w * self.embed_dim
- return flops
-
-
-class PatchUnEmbed(nn.Module):
- r""" Image to Patch Unembedding
-
- Args:
- img_size (int): Image size. Default: 224.
- patch_size (int): Patch token size. Default: 4.
- in_chans (int): Number of input image channels. Default: 3.
- embed_dim (int): Number of linear projection output channels. Default: 96.
- norm_layer (nn.Module, optional): Normalization layer. Default: None
- """
-
- def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
- self.img_size = img_size
- self.patch_size = patch_size
- self.patches_resolution = patches_resolution
- self.num_patches = patches_resolution[0] * patches_resolution[1]
-
- self.in_chans = in_chans
- self.embed_dim = embed_dim
-
- def forward(self, x, x_size):
-        x = x.transpose(1, 2).view(x.shape[0], self.embed_dim, x_size[0], x_size[1])  # b c h w
- return x
-
- def flops(self):
- flops = 0
- return flops
-
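
`PatchEmbed` and `PatchUnEmbed` are inverses of each other (with `patch_size=1` they only switch between image layout and token layout); a hedged round-trip check using the classes above:

```python
import torch

embed = PatchEmbed(img_size=16, patch_size=1, embed_dim=32)
unembed = PatchUnEmbed(img_size=16, patch_size=1, embed_dim=32)

feat = torch.randn(1, 32, 16, 16)            # (b, c, h, w)
tokens = embed(feat)                         # (1, 256, 32)
restored = unembed(tokens, x_size=(16, 16))  # (1, 32, 16, 16)
print(torch.equal(feat, restored))           # True
```
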
-
-class Upsample(nn.Sequential):
- """Upsample module.
-
- Args:
- scale (int): Scale factor. Supported scales: 2^n and 3.
- num_feat (int): Channel number of intermediate features.
- """
-
- def __init__(self, scale, num_feat):
- m = []
- if (scale & (scale - 1)) == 0: # scale = 2^n
- for _ in range(int(math.log(scale, 2))):
- m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(2))
- elif scale == 3:
- m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
- m.append(nn.PixelShuffle(3))
- else:
- raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
- super(Upsample, self).__init__(*m)
-
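
Each power-of-two stage above is a 3x3 conv that quadruples the channels followed by `PixelShuffle(2)`; a quick shape check with illustrative values:

```python
import torch

up = Upsample(scale=4, num_feat=16)   # 4 = 2^2 -> two conv + PixelShuffle(2) stages
feat = torch.randn(1, 16, 24, 24)
print(up(feat).shape)                 # torch.Size([1, 16, 96, 96])
```
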
-
-class UpsampleOneStep(nn.Sequential):
-    """UpsampleOneStep module (unlike Upsample, it always uses a single conv followed by a single pixelshuffle).
-    Used in lightweight SR to save parameters.
-
-    Args:
-        scale (int): Scale factor. Supported scales: 2^n and 3.
-        num_feat (int): Channel number of intermediate features.
-        num_out_ch (int): Channel number of output features.
-        input_resolution (tuple[int], optional): Input resolution, used only by ``flops()``. Default: None
-
- """
-
- def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
- self.num_feat = num_feat
- self.input_resolution = input_resolution
- m = []
- m.append(nn.Conv2d(num_feat, (scale**2) * num_out_ch, 3, 1, 1))
- m.append(nn.PixelShuffle(scale))
- super(UpsampleOneStep, self).__init__(*m)
-
- def flops(self):
- h, w = self.input_resolution
- flops = h * w * self.num_feat * 3 * 9
- return flops
-
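
Here the single conv already maps to `scale**2 * num_out_ch` channels, so one pixel shuffle produces the output image directly; a hedged sketch:

```python
import torch

up = UpsampleOneStep(scale=4, num_feat=60, num_out_ch=3, input_resolution=(24, 24))
feat = torch.randn(1, 60, 24, 24)
print(up(feat).shape)                 # torch.Size([1, 3, 96, 96])
```
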
-
-@ARCH_REGISTRY.register()
-class SwinIR(nn.Module):
- r""" SwinIR
-    A PyTorch impl of: `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
-
- Args:
- img_size (int | tuple(int)): Input image size. Default 64
- patch_size (int | tuple(int)): Patch size. Default: 1
- in_chans (int): Number of input image channels. Default: 3
- embed_dim (int): Patch embedding dimension. Default: 96
- depths (tuple(int)): Depth of each Swin Transformer layer.
- num_heads (tuple(int)): Number of attention heads in different layers.
- window_size (int): Window size. Default: 7
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
- drop_rate (float): Dropout rate. Default: 0
- attn_drop_rate (float): Attention dropout rate. Default: 0
- drop_path_rate (float): Stochastic depth rate. Default: 0.1
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
- patch_norm (bool): If True, add normalization after patch embedding. Default: True
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
-        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compression artifact reduction
-        img_range: Image range. 1. or 255.
-        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
- resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
- """
-
- def __init__(self,
- img_size=64,
- patch_size=1,
- in_chans=3,
- embed_dim=96,
- depths=(6, 6, 6, 6),
- num_heads=(6, 6, 6, 6),
- window_size=7,
- mlp_ratio=4.,
- qkv_bias=True,
- qk_scale=None,
- drop_rate=0.,
- attn_drop_rate=0.,
- drop_path_rate=0.1,
- norm_layer=nn.LayerNorm,
- ape=False,
- patch_norm=True,
- use_checkpoint=False,
- upscale=2,
- img_range=1.,
- upsampler='',
- resi_connection='1conv',
- **kwargs):
- super(SwinIR, self).__init__()
- num_in_ch = in_chans
- num_out_ch = in_chans
- num_feat = 64
- self.img_range = img_range
- if in_chans == 3:
- rgb_mean = (0.4488, 0.4371, 0.4040)
- self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
- else:
- self.mean = torch.zeros(1, 1, 1, 1)
- self.upscale = upscale
- self.upsampler = upsampler
-
- # ------------------------- 1, shallow feature extraction ------------------------- #
- self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
-
- # ------------------------- 2, deep feature extraction ------------------------- #
- self.num_layers = len(depths)
- self.embed_dim = embed_dim
- self.ape = ape
- self.patch_norm = patch_norm
- self.num_features = embed_dim
- self.mlp_ratio = mlp_ratio
-
- # split image into non-overlapping patches
- self.patch_embed = PatchEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
- num_patches = self.patch_embed.num_patches
- patches_resolution = self.patch_embed.patches_resolution
- self.patches_resolution = patches_resolution
-
- # merge non-overlapping patches into image
- self.patch_unembed = PatchUnEmbed(
- img_size=img_size,
- patch_size=patch_size,
- in_chans=embed_dim,
- embed_dim=embed_dim,
- norm_layer=norm_layer if self.patch_norm else None)
-
- # absolute position embedding
- if self.ape:
- self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
- trunc_normal_(self.absolute_pos_embed, std=.02)
-
- self.pos_drop = nn.Dropout(p=drop_rate)
-
- # stochastic depth
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
-
- # build Residual Swin Transformer blocks (RSTB)
- self.layers = nn.ModuleList()
- for i_layer in range(self.num_layers):
- layer = RSTB(
- dim=embed_dim,
- input_resolution=(patches_resolution[0], patches_resolution[1]),
- depth=depths[i_layer],
- num_heads=num_heads[i_layer],
- window_size=window_size,
- mlp_ratio=self.mlp_ratio,
- qkv_bias=qkv_bias,
- qk_scale=qk_scale,
- drop=drop_rate,
- attn_drop=attn_drop_rate,
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
- norm_layer=norm_layer,
- downsample=None,
- use_checkpoint=use_checkpoint,
- img_size=img_size,
- patch_size=patch_size,
- resi_connection=resi_connection)
- self.layers.append(layer)
- self.norm = norm_layer(self.num_features)
-
- # build the last conv layer in deep feature extraction
- if resi_connection == '1conv':
- self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
- elif resi_connection == '3conv':
- # to save parameters and memory
- self.conv_after_body = nn.Sequential(
- nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0), nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
-
- # ------------------------- 3, high quality image reconstruction ------------------------- #
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
- self.upsample = Upsample(upscale, num_feat)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR (to save parameters)
- self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
- (patches_resolution[0], patches_resolution[1]))
- elif self.upsampler == 'nearest+conv':
- # for real-world SR (less artifacts)
- assert self.upscale == 4, 'only support x4 now.'
- self.conv_before_upsample = nn.Sequential(
- nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True))
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
- else:
- # for image denoising and JPEG compression artifact reduction
- self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
-
- self.apply(self._init_weights)
-
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {'absolute_pos_embed'}
-
- @torch.jit.ignore
- def no_weight_decay_keywords(self):
- return {'relative_position_bias_table'}
-
- def forward_features(self, x):
- x_size = (x.shape[2], x.shape[3])
- x = self.patch_embed(x)
- if self.ape:
- x = x + self.absolute_pos_embed
- x = self.pos_drop(x)
-
- for layer in self.layers:
- x = layer(x, x_size)
-
- x = self.norm(x) # b seq_len c
- x = self.patch_unembed(x, x_size)
-
- return x
-
- def forward(self, x):
- self.mean = self.mean.type_as(x)
- x = (x - self.mean) * self.img_range
-
- if self.upsampler == 'pixelshuffle':
- # for classical SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.conv_last(self.upsample(x))
- elif self.upsampler == 'pixelshuffledirect':
- # for lightweight SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.upsample(x)
- elif self.upsampler == 'nearest+conv':
- # for real-world SR
- x = self.conv_first(x)
- x = self.conv_after_body(self.forward_features(x)) + x
- x = self.conv_before_upsample(x)
- x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
- x = self.conv_last(self.lrelu(self.conv_hr(x)))
- else:
- # for image denoising and JPEG compression artifact reduction
- x_first = self.conv_first(x)
- res = self.conv_after_body(self.forward_features(x_first)) + x_first
- x = x + self.conv_last(res)
-
- x = x / self.img_range + self.mean
-
- return x
-
- def flops(self):
- flops = 0
- h, w = self.patches_resolution
- flops += h * w * 3 * self.embed_dim * 9
- flops += self.patch_embed.flops()
- for layer in self.layers:
- flops += layer.flops()
- flops += h * w * 3 * self.embed_dim * self.embed_dim
- flops += self.upsample.flops()
- return flops
-
-
-if __name__ == '__main__':
- upscale = 4
- window_size = 8
- height = (1024 // upscale // window_size + 1) * window_size
- width = (720 // upscale // window_size + 1) * window_size
- model = SwinIR(
-        upscale=upscale,
- img_size=(height, width),
- window_size=window_size,
- img_range=1.,
- depths=[6, 6, 6, 6],
- embed_dim=60,
- num_heads=[6, 6, 6, 6],
- mlp_ratio=2,
- upsampler='pixelshuffledirect')
- print(model)
- print(height, width, model.flops() / 1e9)
-
- x = torch.randn((1, 3, height, width))
- x = model(x)
- print(x.shape)
diff --git a/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md b/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md
deleted file mode 100644
index 5ca27366dad2181103bae4dbd5609be0992036fd..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Battleshiptamildubbedfullmoviefreedownload Dont Miss the Spectacular Visual Effects and Soundtrack of the Film.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Battleshiptamildubbedfullmoviefreedownload Download File ===> https://urloso.com/2uyPQ6
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md b/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md
deleted file mode 100644
index da60c62d275fd7de98acd30fb1f5e8711942f5d8..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Facebook Chat Sniffer Network How to Install and Use This Powerful Application.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-IMMonitor Facebook Spy is a packet sniffer that reads Facebook messages. The application can detect all the Facebook chat conversations that are taking place in your network and it displays them organized by contact name. This app doesn't require a client to be installed on the target computer, instead it can monitor any IP address in the same subnet as the computer that is running the app.
-The first step that you have to do to use this app is tell it what IP address to scan. This is quite simple. You can either manually type the IP address or carry out a scan for any active computers in the network and then select that computer from a drop-down menu. After the IP address is set, the scan can be started. When the application detects an ongoing chat, it displays the messages and it organizes them by contact name. It also keeps a log of how many messages were sent and it shows it. I like that you don't simply see packets and technical jargon all over the screen. You only see contact names and messages, along with some packet info once in a while.
-Facebook Chat Sniffer Network Download File ……… https://urloso.com/2uyRaB
-I need to disagree that this method monitors all network traffic in/out of you home. I simply logs the ip addresses which is not the same thing. There are legitimate sites that can be visited where my teen can still get into trouble. What my main interest is what data is being exchanged with host IP addresses. Is it an innocent chat with one of her friend or is the pedophile across town trying to set up a meeting at the mall. Do you see the important difference. I have been scouring the web looking for a solution but all I can find is network sniffers that record everything in a sort of cryptic raw format and you have to be really savy to interpret the data and what it means.
-The actions you take depend on your available budget. If you have the resources to expand network capacity, the packet sniffer will enable you to target new resources more effectively. If you have no budget, packet sniffing will help traffic shaping through prioritizing application traffic, resizing subnets, rescheduling heavy-traffic events, limiting bandwidth for specific applications, or replacing applications with more efficient alternatives.
-The key feature of a packet sniffer is that it copies data as it travels across a network and makes it available for viewing . The sniffing device simply copies all of the data that it sees passing over a network. When implemented on a switch, settings of the device allow the passing packet to be sent to a second port as well as the intended destination, thus duplicating traffic. Usually, the packets of data that are reaped from the network get copied to a file. Some tools will also show that data in a dashboard. However, packet sniffers can gather a lot of data, which includes encoded admin information . You will need to find an analysis tool that can help you be dereferencing information on the journey of the packets in the extract and other pieces of information, such as the relevance of the port numbers that the packets travel between.
-The PRTG packet sniffer only captures the headers of the packets traveling across your network. This gives the packet analyzer a speed advantage and it also reduces the amount of storage space needed to hold capture files. The dashboard of the packet sniffer categorizes traffic by application type. These include email traffic, web packets, chat app traffic data, and file transfer packet volumes.
-NetworkMiner is a fascinating tool that falls more into the category of a forensic tool rather than a straight-up network sniffer. The field of forensics typically deals with the investigation and collection of evidence and Network Miner does that job well for network traffic. Much like WireShark can follow a TCP stream to recover an entire TCP conversation, Network Miner can follow a stream to reconstruct files that were sent over the network.
-
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md b/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md
deleted file mode 100644
index 5f4e1980c557ffe24cf1860860c0015dc8b3fecf..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Integral Equations By Md Raisinghania Pdf Download.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
- also, the book is available in hindi, and the translated version can be purchased from gotoassist. in addition to the book, students also have the option to purchase the course materials, which include:
-integral equations by md raisinghania pdf download Download Zip >>>>> https://urloso.com/2uyPAn
integral equations and boundary value problems for a single course integral equations and boundary value problems for multiple courses integral equations and boundary value problems for a whole semester
- the book is mainly divided into two major parts. in the first part, raisinghania has included a chapter on the classic fredholm integral equation. the second part is devoted to the singular integral equations. for this, there is a chapter on the singular integral equations, followed by the boundary value problems.
-chapter 1 contains important definitions, along with theorems and results. it deals with the basic tools for the solution of the boundary value problems. this chapter provides a very lucid introduction to linear operators, which is important for the further chapters on the fredholm integral equation. it also establishes the link between linear operators and the integral equations which are defined and dealt with in the remaining chapters.
-chapter 2 deals with the fredholm integral equation of the first kind. this is considered as the fundamental equation to the solution of the boundary value problems. this chapter is illustrated with many examples, and it has a separate chapter on perturbation theory.
-chapter 3 deals with the fredholm integral equation of the second kind. this is the fundamental equation to the solution of the boundary value problems. it also considers the dirichlet integral equation. in this chapter, the equality case is examined in depth. the well-known wiener-hopf factorization theorem is considered in detail. it is also shown how the integral equation is related to the linear ordinary differential equation.
-
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md b/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md
deleted file mode 100644
index 5e7a33ba518f93f2554d46cf4834ce6f685c3b71..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Kamasutra Malayalam Book Pdf 183.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-The Ten Commandments were meant exclusively for Jewish males.[65] Michael Coogan writes that according to the text wives are the property of their husband, marriage meaning transfer of property (from father to husband),[65] and women are less valuable than real estate, being mentioned after real estate.[65] Adultery is violating the property right of a man.[66] Coogan's book was criticized by Phyllis Trible, who argues that he failed to note that patriarchy was not decreed, but only described by God, patriarchy being specific to people after the fall.[67] She states that Paul the Apostle made the same mistake as Coogan.[67]
-kamasutra malayalam book pdf 183 Download File ★ https://urloso.com/2uyRnE
-The Manusmriti , also known as the Laws of Manu , deals with this in greater detail. When translated, verse 4.134 of the book declares adultery to be a heinous offense.[102] The Manusmriti does not include adultery as a "grievous sin", but includes it as a "secondary sin" that leads to a loss of caste.[103] In the book, the intent and mutual consent are a part that determine the recommended punishment. Rape is not considered as adultery for the woman, while the rapist is punished severely. Lesser punishment is recommended for consensual adulterous sex.[100] Death penalty is mentioned by Manu,[104] as well as "penance" for the sin of adultery.[103][105] even in cases of repeated adultery with a man of the same caste.[106] In verses 8.362-363, the author states that sexual relations with the wife of traveling performer is not a sin, and exempts such sexual liaisons.[107][108] The book offers two views on adultery. It recommends a new married couple to remain sexually faithful to each other for life. It also accepts that adulterous relationships happen, children are born from such relationships and then proceeds to reason that the child belongs to the legal husband of the pregnant woman, and not to the biological father.[109]
-The theme of adultery has been used in many literary works, and has served as a theme for notable books such as Anna Karenina , Madame Bovary , Lady Chatterley's Lover , The Scarlet Letter and Adultery . It has also been the theme of many movies.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py
deleted file mode 100644
index c9579bce2730f42e256c6eed99d9014d09304c99..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/structures/instances.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import itertools
-import warnings
-from typing import Any, Dict, List, Tuple, Union
-import torch
-
-
-class Instances:
- """
- This class represents a list of instances in an image.
- It stores the attributes of instances (e.g., boxes, masks, labels, scores) as "fields".
- All fields must have the same ``__len__`` which is the number of instances.
-
- All other (non-field) attributes of this class are considered private:
- they must start with '_' and are not modifiable by a user.
-
- Some basic usage:
-
- 1. Set/get/check a field:
-
- .. code-block:: python
-
- instances.gt_boxes = Boxes(...)
- print(instances.pred_masks) # a tensor of shape (N, H, W)
- print('gt_masks' in instances)
-
- 2. ``len(instances)`` returns the number of instances
- 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields
- and returns a new :class:`Instances`.
-       Typically, ``indices`` is an integer vector of indices,
- or a binary mask of length ``num_instances``
-
- .. code-block:: python
-
- category_3_detections = instances[instances.pred_classes == 3]
- confident_detections = instances[instances.scores > 0.9]
- """
-
- def __init__(self, image_size: Tuple[int, int], **kwargs: Any):
- """
- Args:
- image_size (height, width): the spatial size of the image.
- kwargs: fields to add to this `Instances`.
- """
- self._image_size = image_size
- self._fields: Dict[str, Any] = {}
- for k, v in kwargs.items():
- self.set(k, v)
-
- @property
- def image_size(self) -> Tuple[int, int]:
- """
- Returns:
- tuple: height, width
- """
- return self._image_size
-
- def __setattr__(self, name: str, val: Any) -> None:
- if name.startswith("_"):
- super().__setattr__(name, val)
- else:
- self.set(name, val)
-
- def __getattr__(self, name: str) -> Any:
- if name == "_fields" or name not in self._fields:
- raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
- return self._fields[name]
-
- def set(self, name: str, value: Any) -> None:
- """
- Set the field named `name` to `value`.
- The length of `value` must be the number of instances,
- and must agree with other existing fields in this object.
- """
- with warnings.catch_warnings(record=True):
- data_len = len(value)
- if len(self._fields):
- assert (
- len(self) == data_len
-            ), "Adding a field of length {} to an Instances of length {}".format(data_len, len(self))
- self._fields[name] = value
-
- def has(self, name: str) -> bool:
- """
- Returns:
- bool: whether the field called `name` exists.
- """
- return name in self._fields
-
- def remove(self, name: str) -> None:
- """
- Remove the field called `name`.
- """
- del self._fields[name]
-
- def get(self, name: str) -> Any:
- """
- Returns the field called `name`.
- """
- return self._fields[name]
-
- def get_fields(self) -> Dict[str, Any]:
- """
- Returns:
- dict: a dict which maps names (str) to data of the fields
-
- Modifying the returned dict will modify this instance.
- """
- return self._fields
-
- # Tensor-like methods
- def to(self, *args: Any, **kwargs: Any) -> "Instances":
- """
- Returns:
- Instances: all fields are called with a `to(device)`, if the field has this method.
- """
- ret = Instances(self._image_size)
- for k, v in self._fields.items():
- if hasattr(v, "to"):
- v = v.to(*args, **kwargs)
- ret.set(k, v)
- return ret
-
- def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Instances":
- """
- Args:
- item: an index-like object and will be used to index all the fields.
-
- Returns:
- If `item` is a string, return the data in the corresponding field.
- Otherwise, returns an `Instances` where all fields are indexed by `item`.
- """
- if type(item) == int:
- if item >= len(self) or item < -len(self):
- raise IndexError("Instances index out of range!")
- else:
- item = slice(item, None, len(self))
-
- ret = Instances(self._image_size)
- for k, v in self._fields.items():
- ret.set(k, v[item])
- return ret
-
- def __len__(self) -> int:
- for v in self._fields.values():
- # use __len__ because len() has to be int and is not friendly to tracing
- return v.__len__()
- raise NotImplementedError("Empty Instances does not support __len__!")
-
- def __iter__(self):
- raise NotImplementedError("`Instances` object is not iterable!")
-
- @staticmethod
- def cat(instance_lists: List["Instances"]) -> "Instances":
- """
- Args:
- instance_lists (list[Instances])
-
- Returns:
- Instances
- """
- assert all(isinstance(i, Instances) for i in instance_lists)
- assert len(instance_lists) > 0
- if len(instance_lists) == 1:
- return instance_lists[0]
-
- image_size = instance_lists[0].image_size
- if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing
- for i in instance_lists[1:]:
- assert i.image_size == image_size
- ret = Instances(image_size)
- for k in instance_lists[0]._fields.keys():
- values = [i.get(k) for i in instance_lists]
- v0 = values[0]
- if isinstance(v0, torch.Tensor):
- values = torch.cat(values, dim=0)
- elif isinstance(v0, list):
- values = list(itertools.chain(*values))
- elif hasattr(type(v0), "cat"):
- values = type(v0).cat(values)
- else:
- raise ValueError("Unsupported type {} for concatenation".format(type(v0)))
- ret.set(k, values)
- return ret
-
- def __str__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={}, ".format(len(self))
- s += "image_height={}, ".format(self._image_size[0])
- s += "image_width={}, ".format(self._image_size[1])
- s += "fields=[{}])".format(", ".join((f"{k}: {v}" for k, v in self._fields.items())))
- return s
-
- __repr__ = __str__
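
A short usage sketch for the class above (the field names `scores` / `pred_classes` are just examples): indexing and concatenation apply to every field at once.

```python
import torch

det_a = Instances((480, 640), scores=torch.tensor([0.9, 0.3]), pred_classes=torch.tensor([1, 2]))
det_b = Instances((480, 640), scores=torch.tensor([0.7]), pred_classes=torch.tensor([0]))

confident = det_a[det_a.scores > 0.5]   # boolean mask indexes all fields together
merged = Instances.cat([det_a, det_b])  # per-field concatenation
print(len(confident), len(merged))      # 1 3
```
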
diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py
deleted file mode 100644
index 2bab7b06a5e5397b46db43f375c7cff398646501..0000000000000000000000000000000000000000
--- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5-flask-master/restapi.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-Run a rest API exposing the yolov5s object detection model
-"""
-import argparse
-import io
-from PIL import Image
-
-import torch
-from flask import Flask, request
-
-app = Flask(__name__)
-
-DETECTION_URL = "/v1/object-detection/yolov5s"
-
-
-@app.route(DETECTION_URL, methods=["POST"])
-def predict():
- if not request.method == "POST":
- return
-
- if request.files.get("image"):
- image_file = request.files["image"]
- image_bytes = image_file.read()
-
- img = Image.open(io.BytesIO(image_bytes))
-
- results = model(img, size=640)
- data = results.pandas().xyxy[0].to_json(orient="records")
- return data
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model")
- parser.add_argument("--port", default=5000, type=int, help="port number")
- args = parser.parse_args()
-
- model = torch.hub.load(
- "ultralytics/yolov5", "yolov5s", pretrained=True, force_reload=True
- ).autoshape() # force_reload = recache latest code
- model.eval()
- app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat
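
A possible client for the endpoint above, assuming the server is running locally on the default port; the image filename is a placeholder:

```python
import requests

with open("example.jpg", "rb") as f:
    resp = requests.post(
        "http://localhost:5000/v1/object-detection/yolov5s",
        files={"image": f},
    )
print(resp.json())  # list of detections with xmin/ymin/xmax/ymax, confidence, class, name
```
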
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py
deleted file mode 100644
index 4b8732a43f6974ec60168652bf08e382ddc9c941..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/TridentNet/tridentnet/config.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-from detectron2.config import CfgNode as CN
-
-
-def add_tridentnet_config(cfg):
- """
- Add config for tridentnet.
- """
- _C = cfg
-
- _C.MODEL.TRIDENT = CN()
-
- # Number of branches for TridentNet.
- _C.MODEL.TRIDENT.NUM_BRANCH = 3
- # Specify the dilations for each branch.
- _C.MODEL.TRIDENT.BRANCH_DILATIONS = [1, 2, 3]
- # Specify the stage for applying trident blocks. Default stage is Res4 according to the
- # TridentNet paper.
- _C.MODEL.TRIDENT.TRIDENT_STAGE = "res4"
-    # Specify the test branch index for TridentNet Fast inference:
-    #   - use -1 to aggregate results of all branches during inference.
-    #   - otherwise, only the specified branch is used for fast inference. The recommended setting
-    #     is to use the middle branch.
- _C.MODEL.TRIDENT.TEST_BRANCH_IDX = 1
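
A sketch of how this hook would typically be applied to a default detectron2 config (the import path is an assumption based on the project layout above):

```python
from detectron2.config import get_cfg
from tridentnet.config import add_tridentnet_config

cfg = get_cfg()
add_tridentnet_config(cfg)
print(cfg.MODEL.TRIDENT.NUM_BRANCH)     # 3
cfg.MODEL.TRIDENT.TEST_BRANCH_IDX = -1  # aggregate all branches at test time
```
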
diff --git a/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py b/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py
deleted file mode 100644
index 503c6065bd96c8739fb81c94485ea2f7c86441de..0000000000000000000000000000000000000000
--- a/spaces/carloscar/stable-diffusion-webui-controlnet-docker/run.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import subprocess
-import sys
-
-
-def on_start():
- print("---------------")
- print("Running script './on_start.sh' to download models ...")
- print("---------------")
- result = subprocess.run("./on_start.sh", shell=True, env=os.environ)
- if result.returncode != 0:
- raise RuntimeError(f"Error executing ./on_start.sh [exit code: {result.returncode}]")
-
-
-def start():
- print("---------------")
- print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
- print("---------------")
- import webui # type: ignore # noqa
-
- if "--nowebui" in sys.argv:
- webui.api_only()
- else:
- webui.webui()
-
-
-def set_options():
- import torch # type: ignore # noqa
-
- if not torch.cuda.is_available():
- # If no GPU is available, uninstall xformers and apply "--precision full --no-half --use-cpu all" to sys.argv.
- os.system(f"{sys.executable} -m pip uninstall -y xformers")
- sys.argv.extend(
- [
- "--precision",
- "full",
- "--no-half",
- "--use-cpu",
- "all",
- ]
- )
- else:
- # Applies "--force-enable-xformers --xformers" to sys.argv when there's a GPU present.
- sys.argv.extend(["--force-enable-xformers", "--xformers"])
-
- is_shared_ui = str(os.environ.get("IS_SHARED_UI", "") or "").strip().lower() not in ("", "0", "false", "none", "no")
- if not is_shared_ui:
- # Provide access to extensions only if IS_SHARED_UI isn't set.
- sys.argv.extend(["--enable-insecure-extension-access"])
-
-
-if __name__ == "__main__":
- set_options()
- on_start()
- start()
diff --git a/spaces/catasaurus/sound-distance/app.py b/spaces/catasaurus/sound-distance/app.py
deleted file mode 100644
index 25a126216f750a45da8615665c9d297902735338..0000000000000000000000000000000000000000
--- a/spaces/catasaurus/sound-distance/app.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import gradio as gr
-import soundex
-
-model = soundex.Soundex()
-
-iface = gr.Interface(fn=model.compare, inputs=[gr.Textbox(label='Text 1'), gr.Textbox(label='Text 2')], outputs=gr.Textbox(label='Sound distance'), title="Find how different two words sound")
-iface.launch()
\ No newline at end of file
diff --git a/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py b/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/cc1799/vits-uma-genshin-honkai/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      text: string to convert to a sequence
-      symbols: list of symbols defining the symbol-to-ID mapping
-      cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
-      cleaned_text: already-cleaned string to convert to a sequence
- List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py b/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py
deleted file mode 100644
index 466384608c471685933a9a234a857a9e78663d7d..0000000000000000000000000000000000000000
--- a/spaces/ceyda/kornia-augmentations-tester/kornia_aug.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import streamlit as st
-import kornia
-from torch import nn
-import torch
-from torchvision.transforms import functional as F
-from torchvision.utils import make_grid
-from streamlit_ace import st_ace
-from PIL import Image
-
-IS_LOCAL = False #Change this
-
-@st.cache(suppress_st_warning=True)
-def set_transform(content):
- # st.write("set transform")
- try:
- transform = eval(content, {"kornia": kornia, "nn": nn}, None)
- except Exception as e:
- st.write(f"There was an error: {e}")
- transform = nn.Sequential()
- return transform
-
-st.markdown("# Kornia Augmentations Demo")
-st.sidebar.markdown(
- "[Kornia](https://github.com/kornia/kornia) is a *differentiable* computer vision library for PyTorch."
-)
-ims=[]
-uploaded_files = st.sidebar.file_uploader("Choose a file",accept_multiple_files=True)
-if uploaded_files is not None:
- for uploaded_file in uploaded_files:
- im = Image.open(uploaded_file)
- st.sidebar.image(im, caption="Input Image", width=256)
- im=im.resize((512,512))
- image = F.pil_to_tensor(im).float() / 255
- ims.append(image)
-else:
- im = Image.open("./images/pretty_bird.jpg")
- st.sidebar.image(im, caption="Input Image", width=256)
- image = F.pil_to_tensor(im).float() / 255
- ims.append(image)
-scaler = 256
-
-# batch size is just for show
-batch_size = st.sidebar.slider("batch_size", min_value=4, max_value=16,value=8)
-gpu = st.sidebar.checkbox("Use GPU!", value=True)
-if not gpu:
- st.sidebar.markdown("With Kornia you do ops on the GPU!")
- device = torch.device("cpu")
-else:
- if not IS_LOCAL:
- st.sidebar.markdown("(GPU Not available on hosted demo, try on your local!)")
- # Credits
- st.sidebar.caption("Demo made by [Ceyda Cinarel](https://linktr.ee/ceydai)")
- st.sidebar.markdown("Clone [Code](https://github.com/cceyda/kornia-demo)")
- device = torch.device("cpu")
- else:
- st.sidebar.markdown("Running on GPU~")
- device = torch.device("cuda:0")
-
-predefined_transforms = [
- """
-nn.Sequential(
- kornia.augmentation.RandomAffine(degrees=360,p=0.5),
- kornia.augmentation.ColorJitter(brightness=0.2, contrast=0.3, saturation=0.2, hue=0.3, p=1)
-)
-# p=0.5 is the probability of applying the transformation
-""",
- """
-nn.Sequential(
- kornia.augmentation.RandomErasing(scale=(.4, .8), ratio=(.3, 1/.3), p=0.5),
-)
-""",
- """
-nn.Sequential(
- kornia.augmentation.RandomErasing(scale=(.4, .8), ratio=(.3, 1/.3), p=1, same_on_batch=False),
-)
-#By setting same_on_batch=True you can apply the same transform across the batch
-""",
- f"""
-nn.Sequential(
- kornia.augmentation.RandomResizedCrop(size=({scaler}, {scaler}), scale=(3., 3.), ratio=(2., 2.), p=1.),
- kornia.augmentation.RandomHorizontalFlip(p=0.7),
- kornia.augmentation.RandomGrayscale(p=0.5),
-)
-""",
-]
-
-selected_transform = st.selectbox(
- "Pick an augmentation pipeline example:", predefined_transforms
-)
-
-st.write("Transform to apply:")
-readonly = False
-content = st_ace(
- value=selected_transform,
- height=150,
- language="python",
- keybinding="vscode",
- show_gutter=True,
- show_print_margin=True,
- wrap=False,
- auto_update=False,
- readonly=readonly,
-)
-if content:
- # st.write(content)
- transform = set_transform(content)
-
-# st.write(transform)
-
-# with st.echo():
-# transform = nn.Sequential(
-# K.RandomAffine(360),
-# K.ColorJitter(0.2, 0.3, 0.2, 0.3)
-# )
-
-process = st.button("Next Batch")
-
-# Fake dataloader
-if len(ims)>1:
- image_batch = torch.stack(ims)
-else:
- image_batch = torch.stack(batch_size * ims)
-
-
-image_batch.to(device)
-transformeds = None
-try:
- transformeds = transform(image_batch)
-except Exception as e:
- st.write(f"There was an error: {e}")
-
-
-
-
-cols = st.columns(4)
-
-# st.image(F.to_pil_image(make_grid(transformeds)))
-if transformeds is not None:
- for i, x in enumerate(transformeds):
- i = i % 4
- cols[i].image(F.to_pil_image(x), use_column_width=True)
-
-st.markdown(
- "There are a lot more transformations available: [Documentation](https://kornia.readthedocs.io/en/latest/augmentation.module.html)"
-)
-st.markdown(
- "Kornia can do a lot more than augmentations~ [Check it out](https://kornia.readthedocs.io/en/latest/introduction.html#highlighted-features)"
-)
-# if process:
-# pass
-
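
The same augmentation idea without the Streamlit UI: build an `nn.Sequential` of kornia ops and call it on a float image batch (a minimal standalone sketch):

```python
import torch
from torch import nn
import kornia

transform = nn.Sequential(
    kornia.augmentation.RandomHorizontalFlip(p=0.5),
    kornia.augmentation.ColorJitter(brightness=0.2, contrast=0.3, saturation=0.2, hue=0.3, p=1.0),
)
batch = torch.rand(8, 3, 256, 256)   # fake image batch with values in [0, 1]
out = transform(batch)
print(out.shape)                     # torch.Size([8, 3, 256, 256])
```
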
diff --git a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h b/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h
deleted file mode 100644
index a21f3446e06b5826af7b554c8a7d9c5d80848b62..0000000000000000000000000000000000000000
--- a/spaces/cfwef/gpt/crazy_functions/test_project/cpp/cppipc/queue.h
+++ /dev/null
@@ -1,216 +0,0 @@
-#pragma once
-
-#include <type_traits>
-#include <new>
-#include <utility>        // [[since C++14]]: std::exchange
-#include <algorithm>
-#include <atomic>
-#include <tuple>
-#include <thread>
-#include <chrono>
-#include <string>
-#include <cassert>        // assert
-
-#include "libipc/def.h"
-#include "libipc/shm.h"
-#include "libipc/rw_lock.h"
-
-#include "libipc/utility/log.h"
-#include "libipc/platform/detail.h"
-#include "libipc/circ/elem_def.h"
-
-namespace ipc {
-namespace detail {
-
-class queue_conn {
-protected:
- circ::cc_t connected_ = 0;
- shm::handle elems_h_;
-
-    template <typename Elems>
-    Elems* open(char const * name) {
- if (name == nullptr || name[0] == '\0') {
- ipc::error("fail open waiter: name is empty!\n");
- return nullptr;
- }
- if (!elems_h_.acquire(name, sizeof(Elems))) {
- return nullptr;
- }
-        auto elems = static_cast<Elems*>(elems_h_.get());
- if (elems == nullptr) {
- ipc::error("fail acquire elems: %s\n", name);
- return nullptr;
- }
- elems->init();
- return elems;
- }
-
- void close() {
- elems_h_.release();
- }
-
-public:
- queue_conn() = default;
- queue_conn(const queue_conn&) = delete;
- queue_conn& operator=(const queue_conn&) = delete;
-
- bool connected() const noexcept {
- return connected_ != 0;
- }
-
- circ::cc_t connected_id() const noexcept {
- return connected_;
- }
-
-    template <typename Elems>
-    auto connect(Elems* elems) noexcept
-        /*needs 'optional' here*/
-        -> std::tuple<bool, bool, decltype(std::declval<Elems>().cursor())> {
- if (elems == nullptr) return {};
- // if it's already connected, just return
- if (connected()) return {connected(), false, 0};
- connected_ = elems->connect_receiver();
- return {connected(), true, elems->cursor()};
- }
-
-    template <typename Elems>
-    bool disconnect(Elems* elems) noexcept {
- if (elems == nullptr) return false;
- // if it's already disconnected, just return false
- if (!connected()) return false;
- elems->disconnect_receiver(std::exchange(connected_, 0));
- return true;
- }
-};
-
-template <typename Elems>
-class queue_base : public queue_conn {
- using base_t = queue_conn;
-
-public:
- using elems_t = Elems;
- using policy_t = typename elems_t::policy_t;
-
-protected:
- elems_t * elems_ = nullptr;
-    decltype(std::declval<Elems>().cursor()) cursor_ = 0;
- bool sender_flag_ = false;
-
-public:
- using base_t::base_t;
-
- queue_base() = default;
-
- explicit queue_base(char const * name)
- : queue_base{} {
-        elems_ = open<elems_t>(name);
- }
-
- explicit queue_base(elems_t * elems) noexcept
- : queue_base{} {
- assert(elems != nullptr);
- elems_ = elems;
- }
-
- /* not virtual */ ~queue_base() {
- base_t::close();
- }
-
- elems_t * elems() noexcept { return elems_; }
- elems_t const * elems() const noexcept { return elems_; }
-
- bool ready_sending() noexcept {
- if (elems_ == nullptr) return false;
- return sender_flag_ || (sender_flag_ = elems_->connect_sender());
- }
-
- void shut_sending() noexcept {
- if (elems_ == nullptr) return;
- if (!sender_flag_) return;
- elems_->disconnect_sender();
- }
-
- bool connect() noexcept {
- auto tp = base_t::connect(elems_);
- if (std::get<0>(tp) && std::get<1>(tp)) {
- cursor_ = std::get<2>(tp);
- return true;
- }
- return std::get<0>(tp);
- }
-
- bool disconnect() noexcept {
- return base_t::disconnect(elems_);
- }
-
- std::size_t conn_count() const noexcept {
-        return (elems_ == nullptr) ? static_cast<std::size_t>(invalid_value) : elems_->conn_count();
- }
-
- bool valid() const noexcept {
- return elems_ != nullptr;
- }
-
- bool empty() const noexcept {
- return !valid() || (cursor_ == elems_->cursor());
- }
-
-    template <typename T, typename F, typename... P>
-    bool push(F&& prep, P&&... params) {
-        if (elems_ == nullptr) return false;
-        return elems_->push(this, [&](void* p) {
-            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
- });
- }
-
-    template <typename T, typename F, typename... P>
-    bool force_push(F&& prep, P&&... params) {
-        if (elems_ == nullptr) return false;
-        return elems_->force_push(this, [&](void* p) {
-            if (prep(p)) ::new (p) T(std::forward<P>(params)...);
- });
- }
-
-    template <typename T, typename F>
-    bool pop(T& item, F&& out) {
-        if (elems_ == nullptr) {
-            return false;
-        }
-        return elems_->pop(this, &(this->cursor_), [&item](void* p) {
-            ::new (&item) T(std::move(*static_cast<T*>(p)));
-        }, std::forward<F>(out));
- }
-};
-
-} // namespace detail
-
-template <typename T, typename Policy>
-class queue final : public detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>> {
-    using base_t = detail::queue_base<typename Policy::template elems_t<sizeof(T), alignof(T)>>;
-
-public:
- using value_t = T;
-
- using base_t::base_t;
-
-    template <typename... P>
-    bool push(P&&... params) {
-        return base_t::template push<T>(std::forward<P>(params)...);
- }
-
-    template <typename... P>
-    bool force_push(P&&... params) {
-        return base_t::template force_push<T>(std::forward<P>(params)...);
- }
-
- bool pop(T& item) {
- return base_t::pop(item, [](bool) {});
- }
-
-    template <typename F>
-    bool pop(T& item, F&& out) {
-        return base_t::pop(item, std::forward<F>(out));
- }
-};
-
-} // namespace ipc
diff --git a/spaces/chasemcdo/hf_localai/examples/README.md b/spaces/chasemcdo/hf_localai/examples/README.md
deleted file mode 100644
index 29a4f857cef5591b6c973801cac9493fc91f4262..0000000000000000000000000000000000000000
--- a/spaces/chasemcdo/hf_localai/examples/README.md
+++ /dev/null
@@ -1,145 +0,0 @@
-# Examples
-
-Here is a list of projects that can easily be integrated with the LocalAI backend.
-
-### Projects
-
-### AutoGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use AutoGPT with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/)
-
-### Chatbot-UI
-
-_by [@mkellerman](https://github.com/mkellerman)_
-
-
-
-This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)
-
-There is also a separate example that shows how to manually set up a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/)
-
-### K8sGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use LocalAI inside Kubernetes with [k8sgpt](https://k8sgpt.ai).
-
-
-
-### Flowise
-
-_by [@mudler](https://github.com/mudler)_
-
-This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)
-
-### Discord bot
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Discord bot that lets you talk directly with a model.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/), or for a live demo you can talk with our bot in #random-bot in our discord server.
-
-### Langchain
-
-_by [@dave-gray101](https://github.com/dave-gray101)_
-
-A ready-to-use example showing end-to-end how to integrate LocalAI with langchain.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/)
-
-### Langchain Python
-
-_by [@mudler](https://github.com/mudler)_
-
-A ready-to-use example showing end-to-end how to integrate LocalAI with langchain.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/)
-
-### LocalAI WebUI
-
-_by [@dhruvgera](https://github.com/dhruvgera)_
-
-
-
-A light, community-maintained web interface for LocalAI
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/localai-webui/)
-
-### How to run rwkv models
-
-_by [@mudler](https://github.com/mudler)_
-
-A full example on how to run RWKV models with LocalAI
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/)
-
-### PrivateGPT
-
-_by [@mudler](https://github.com/mudler)_
-
-A full example on how to run PrivateGPT with LocalAI
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/)
-
-### Slack bot
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Slack bot that lets you talk directly with a model.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/)
-
-### Slack bot (Question answering)
-
-_by [@mudler](https://github.com/mudler)_
-
-Run a Slack bot, ideally for teams, that lets you ask questions about a documentation website or a GitHub repository.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-qa-bot/)
-
-### Question answering on documents with llama-index
-
-_by [@mudler](https://github.com/mudler)_
-
-Shows how to integrate with [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/)
-
-### Question answering on documents with langchain and chroma
-
-_by [@mudler](https://github.com/mudler)_
-
-Shows how to integrate with `Langchain` and `Chroma` to enable question answering on a set of documents.
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma/)
-
-### Telegram bot
-
-_by [@mudler](https://github.com/mudler)_
-
-
-
-Use LocalAI to power a Telegram bot assistant, with image generation and audio support!
-
-[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot/)
-
-### Template for Runpod.io
-
-_by [@fHachenberg](https://github.com/fHachenberg)_
-
-Lets you run any LocalAI-compatible model as a backend on the servers of https://runpod.io
-
-[Check it out here](https://runpod.io/gsc?template=uv9mtqnrd0&ref=984wlcra)
-
-## Want to contribute?
-
-Create an issue, and put `Example: ` in the title! We will post your examples here.
diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh b/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh
deleted file mode 100644
index 7d76ed8a2a8a94bc2cd258c42b78bcdb9ba3243b..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/run_pos.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-if ! [ -f ./dev.txt ]; then
- echo "Download dev dataset...."
- curl -L -o ./dev.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-dev.conllu'
-fi
-
-if ! [ -f ./test.txt ]; then
- echo "Download test dataset...."
- curl -L -o ./test.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-test.conllu'
-fi
-
-if ! [ -f ./train.txt ]; then
- echo "Download train dataset...."
- curl -L -o ./train.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-train.conllu'
-fi
-
-export MAX_LENGTH=200
-export BERT_MODEL=bert-base-uncased
-export OUTPUT_DIR=postagger-model
-export BATCH_SIZE=32
-export NUM_EPOCHS=3
-export SAVE_STEPS=750
-export SEED=1
-
-python3 run_ner.py \
---task_type POS \
---data_dir . \
---model_name_or_path $BERT_MODEL \
---output_dir $OUTPUT_DIR \
---max_seq_length $MAX_LENGTH \
---num_train_epochs $NUM_EPOCHS \
---per_gpu_train_batch_size $BATCH_SIZE \
---save_steps $SAVE_STEPS \
---seed $SEED \
---do_train \
---do_eval \
---do_predict
-
diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md b/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md
deleted file mode 100644
index 6b86a4effa95084cd33ab25b918103b9e5b30c4f..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/examples/pytorch/question-answering/README.md
+++ /dev/null
@@ -1,183 +0,0 @@
-
-
-# Question answering
-
-This folder contains several scripts that showcase how to fine-tune a 🤗 Transformers model on a question answering dataset,
-like SQuAD.
-
-## Trainer-based scripts
-
-The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py),
-[`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) and [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) leverage the 🤗 [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) for fine-tuning.
-
-### Fine-tuning BERT on SQuAD1.0
-
-The [`run_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py) script
-allows you to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForQuestionAnswering` version in the library) on a question-answering dataset (such as SQuAD, or any other QA dataset available in the `datasets` library, or your own csv/jsonlines files) as long as they are structured the same way as SQuAD. You might need to tweak the data processing inside the script if your data is structured differently.
-
-**Note:** This script only works with models that have a fast tokenizer (backed by the 🤗 Tokenizers library) as it
-uses special features of those tokenizers. You can check if your favorite model has a fast tokenizer in
-[this table](https://huggingface.co/transformers/index.html#supported-frameworks); if it doesn't, you can still use the old version of the script, which can be found [here](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering).
-
-Note that if your dataset contains samples with no possible answers (like SQuAD version 2), you need to pass along the flag `--version_2_with_negative`.
-
-This example code fine-tunes BERT on the SQuAD1.0 dataset. It runs in 24 min (with BERT-base) or 68 min (with BERT-large)
-on a single Tesla V100 16GB.
-
-```bash
-python run_qa.py \
- --model_name_or_path bert-base-uncased \
- --dataset_name squad \
- --do_train \
- --do_eval \
- --per_device_train_batch_size 12 \
- --learning_rate 3e-5 \
- --num_train_epochs 2 \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir /tmp/debug_squad/
-```
-
-Training with the previously defined hyper-parameters yields the following results:
-
-```bash
-f1 = 88.52
-exact_match = 81.22
-```
-
-### Fine-tuning XLNet with beam search on SQuAD
-
-The [`run_qa_beam_search.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa_beam_search.py) script is only meant to fine-tune XLNet, which is a special encoder-only Transformer model. The example code below fine-tunes XLNet on the SQuAD1.0 and SQuAD2.0 datasets.
-
-#### Command for SQuAD1.0:
-
-```bash
-python run_qa_beam_search.py \
- --model_name_or_path xlnet-large-cased \
- --dataset_name squad \
- --do_train \
- --do_eval \
- --learning_rate 3e-5 \
- --num_train_epochs 2 \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir ./wwm_cased_finetuned_squad/ \
- --per_device_eval_batch_size=4 \
- --per_device_train_batch_size=4 \
- --save_steps 5000
-```
-
-#### Command for SQuAD2.0:
-
-```bash
-export SQUAD_DIR=/path/to/SQUAD
-
-python run_qa_beam_search.py \
- --model_name_or_path xlnet-large-cased \
- --dataset_name squad_v2 \
- --do_train \
- --do_eval \
- --version_2_with_negative \
- --learning_rate 3e-5 \
- --num_train_epochs 4 \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir ./wwm_cased_finetuned_squad/ \
- --per_device_eval_batch_size=2 \
- --per_device_train_batch_size=2 \
- --save_steps 5000
-```
-
-### Fine-tuning T5 on SQuAD2.0
-
-The [`run_seq2seq_qa.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_seq2seq_qa.py) script is meant for encoder-decoder (also called seq2seq) Transformer models, such as T5 or BART. These
-models are generative, rather than discriminative. This means that they learn to generate the correct answer, rather than predicting the start and end position of the tokens of the answer.
-
-This example code fine-tunes T5 on the SQuAD2.0 dataset.
-
-```bash
-python run_seq2seq_qa.py \
- --model_name_or_path t5-small \
- --dataset_name squad_v2 \
- --context_column context \
- --question_column question \
- --answer_column answers \
- --do_train \
- --do_eval \
- --per_device_train_batch_size 12 \
- --learning_rate 3e-5 \
- --num_train_epochs 2 \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir /tmp/debug_seq2seq_squad/
-```
-
-## Accelerate-based scripts
-
-Based on the scripts `run_qa_no_trainer.py` and `run_qa_beam_search_no_trainer.py`.
-
-Like `run_qa.py` and `run_qa_beam_search.py`, these scripts allow you to fine-tune any of the supported models on
-SQuAD or a similar dataset. The main difference is that they expose the bare training loop so you can quickly experiment and add any customization you would like. They offer fewer options than the `Trainer`-based scripts (for instance, you can easily change the options for the optimizer or the dataloaders directly in the script), but they still run in a distributed setup, on TPUs, and support mixed precision by leveraging the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library.
-
-You can use the scripts normally after installing 🤗 Accelerate:
-
-```bash
-pip install git+https://github.com/huggingface/accelerate
-```
-
-then
-
-```bash
-python run_qa_no_trainer.py \
- --model_name_or_path bert-base-uncased \
- --dataset_name squad \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir ~/tmp/debug_squad
-```
-
-You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run
-
-```bash
-accelerate config
-```
-
-and reply to the questions asked. Then
-
-```bash
-accelerate test
-```
-
-which will check that everything is ready for training. Finally, you can launch training with
-
-```bash
-accelerate launch run_qa_no_trainer.py \
- --model_name_or_path bert-base-uncased \
- --dataset_name squad \
- --max_seq_length 384 \
- --doc_stride 128 \
- --output_dir ~/tmp/debug_squad
-```
-
-This command is the same and will work for:
-
-- a CPU-only setup
-- a setup with one GPU
-- a distributed training with several GPUs (single or multi node)
-- a training on TPUs
-
-Note that this library is in alpha release, so your feedback is more than welcome if you encounter any problems using it.
diff --git a/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py b/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py
deleted file mode 100644
index 137111bb8035c8d0dbd26b6b958c4036260b8821..0000000000000000000000000000000000000000
--- a/spaces/chronopt-research/ViTExCo/src/models/CNN/GAN_models.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# DCGAN-like generator and discriminator
-import torch
-from torch import nn
-import torch.nn.functional as F
-from torch.nn import Parameter
-
-
-def l2normalize(v, eps=1e-12):
- return v / (v.norm() + eps)
-
-
-class SpectralNorm(nn.Module):
- def __init__(self, module, name="weight", power_iterations=1):
- super(SpectralNorm, self).__init__()
- self.module = module
- self.name = name
- self.power_iterations = power_iterations
- if not self._made_params():
- self._make_params()
-
- def _update_u_v(self):
- u = getattr(self.module, self.name + "_u")
- v = getattr(self.module, self.name + "_v")
- w = getattr(self.module, self.name + "_bar")
-
- height = w.data.shape[0]
- for _ in range(self.power_iterations):
- v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
- u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
-
- sigma = u.dot(w.view(height, -1).mv(v))
- setattr(self.module, self.name, w / sigma.expand_as(w))
-
- def _made_params(self):
- try:
- u = getattr(self.module, self.name + "_u")
- v = getattr(self.module, self.name + "_v")
- w = getattr(self.module, self.name + "_bar")
- return True
- except AttributeError:
- return False
-
- def _make_params(self):
- w = getattr(self.module, self.name)
-
- height = w.data.shape[0]
- width = w.view(height, -1).data.shape[1]
-
- u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
- v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
- u.data = l2normalize(u.data)
- v.data = l2normalize(v.data)
- w_bar = Parameter(w.data)
-
- del self.module._parameters[self.name]
-
- self.module.register_parameter(self.name + "_u", u)
- self.module.register_parameter(self.name + "_v", v)
- self.module.register_parameter(self.name + "_bar", w_bar)
-
- def forward(self, *args):
- self._update_u_v()
- return self.module.forward(*args)
-
-
-class Generator(nn.Module):
- def __init__(self, z_dim):
- super(Generator, self).__init__()
- self.z_dim = z_dim
-
- self.model = nn.Sequential(
- nn.ConvTranspose2d(z_dim, 512, 4, stride=1),
- nn.InstanceNorm2d(512),
- nn.ReLU(),
- nn.ConvTranspose2d(512, 256, 4, stride=2, padding=(1, 1)),
- nn.InstanceNorm2d(256),
- nn.ReLU(),
- nn.ConvTranspose2d(256, 128, 4, stride=2, padding=(1, 1)),
- nn.InstanceNorm2d(128),
- nn.ReLU(),
- nn.ConvTranspose2d(128, 64, 4, stride=2, padding=(1, 1)),
- nn.InstanceNorm2d(64),
- nn.ReLU(),
- nn.ConvTranspose2d(64, channels, 3, stride=1, padding=(1, 1)),
- nn.Tanh(),
- )
-
- def forward(self, z):
- return self.model(z.view(-1, self.z_dim, 1, 1))
-
-
-channels = 3
-leak = 0.1
-w_g = 4
-
-
-class Discriminator(nn.Module):
- def __init__(self):
- super(Discriminator, self).__init__()
-
- self.conv1 = SpectralNorm(nn.Conv2d(channels, 64, 3, stride=1, padding=(1, 1)))
- self.conv2 = SpectralNorm(nn.Conv2d(64, 64, 4, stride=2, padding=(1, 1)))
- self.conv3 = SpectralNorm(nn.Conv2d(64, 128, 3, stride=1, padding=(1, 1)))
- self.conv4 = SpectralNorm(nn.Conv2d(128, 128, 4, stride=2, padding=(1, 1)))
- self.conv5 = SpectralNorm(nn.Conv2d(128, 256, 3, stride=1, padding=(1, 1)))
- self.conv6 = SpectralNorm(nn.Conv2d(256, 256, 4, stride=2, padding=(1, 1)))
- self.conv7 = SpectralNorm(nn.Conv2d(256, 256, 3, stride=1, padding=(1, 1)))
- self.conv8 = SpectralNorm(nn.Conv2d(256, 512, 4, stride=2, padding=(1, 1)))
- self.fc = SpectralNorm(nn.Linear(w_g * w_g * 512, 1))
-
- def forward(self, x):
- m = x
- m = nn.LeakyReLU(leak)(self.conv1(m))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(64)(self.conv2(m)))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(128)(self.conv3(m)))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(128)(self.conv4(m)))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv5(m)))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv6(m)))
- m = nn.LeakyReLU(leak)(nn.InstanceNorm2d(256)(self.conv7(m)))
- m = nn.LeakyReLU(leak)(self.conv8(m))
-
- return self.fc(m.view(-1, w_g * w_g * 512))
-
-
-class Self_Attention(nn.Module):
- """Self attention Layer"""
-
- def __init__(self, in_dim):
- super(Self_Attention, self).__init__()
- self.chanel_in = in_dim
-
- self.query_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 1, kernel_size=1))
- self.key_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 1, kernel_size=1))
- self.value_conv = SpectralNorm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1))
- self.gamma = nn.Parameter(torch.zeros(1))
-
- self.softmax = nn.Softmax(dim=-1) #
-
- def forward(self, x):
- """
- inputs :
- x : input feature maps( B X C X W X H)
- returns :
- out : self attention value + input feature
- attention: B X N X N (N is Width*Height)
- """
- m_batchsize, C, width, height = x.size()
- proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
- proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
- energy = torch.bmm(proj_query, proj_key) # transpose check
- attention = self.softmax(energy) # BX (N) X (N)
- proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N
-
- out = torch.bmm(proj_value, attention.permute(0, 2, 1))
- out = out.view(m_batchsize, C, width, height)
-
- out = self.gamma * out + x
- return out
-
-
-class Discriminator_x64(nn.Module):
- """
- Discriminative Network
- """
-
- def __init__(self, in_size=6, ndf=64):
- super(Discriminator_x64, self).__init__()
- self.in_size = in_size
- self.ndf = ndf
-
- self.layer1 = nn.Sequential(SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)), nn.LeakyReLU(0.2, inplace=True))
-
- self.layer2 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.attention = Self_Attention(self.ndf)
- self.layer3 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 2),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer4 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 4),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer5 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 8),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer6 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 16),
- nn.LeakyReLU(0.2, inplace=True),
- )
-
- self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, [3, 6], 1, 0))
-
- def forward(self, input):
- feature1 = self.layer1(input)
- feature2 = self.layer2(feature1)
- feature_attention = self.attention(feature2)
- feature3 = self.layer3(feature_attention)
- feature4 = self.layer4(feature3)
- feature5 = self.layer5(feature4)
- feature6 = self.layer6(feature5)
- output = self.last(feature6)
- output = F.avg_pool2d(output, output.size()[2:]).view(output.size()[0], -1)
-
- return output, feature4
-
-
-class Discriminator_x64_224(nn.Module):
- """
- Discriminative Network
- """
-
- def __init__(self, in_size=6, ndf=64):
- super(Discriminator_x64_224, self).__init__()
- self.in_size = in_size
- self.ndf = ndf
-
- self.layer1 = nn.Sequential(SpectralNorm(nn.Conv2d(self.in_size, self.ndf, 4, 2, 1)), nn.LeakyReLU(0.2, inplace=True))
-
- self.layer2 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf, self.ndf, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.attention = Self_Attention(self.ndf)
- self.layer3 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf, self.ndf * 2, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 2),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer4 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 2, self.ndf * 4, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 4),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer5 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 4, self.ndf * 8, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 8),
- nn.LeakyReLU(0.2, inplace=True),
- )
- self.layer6 = nn.Sequential(
- SpectralNorm(nn.Conv2d(self.ndf * 8, self.ndf * 16, 4, 2, 1)),
- nn.InstanceNorm2d(self.ndf * 16),
- nn.LeakyReLU(0.2, inplace=True),
- )
-
- self.last = SpectralNorm(nn.Conv2d(self.ndf * 16, 1, [3, 3], 1, 0))
-
- def forward(self, input):
- feature1 = self.layer1(input)
- feature2 = self.layer2(feature1)
- feature_attention = self.attention(feature2)
- feature3 = self.layer3(feature_attention)
- feature4 = self.layer4(feature3)
- feature5 = self.layer5(feature4)
- feature6 = self.layer6(feature5)
- output = self.last(feature6)
- output = F.avg_pool2d(output, output.size()[2:]).view(output.size()[0], -1)
-
- return output, feature4
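
For readers skimming this removed module, a minimal usage sketch follows; it assumes the classes above are importable as `GAN_models` (a hypothetical import path) and uses illustrative input sizes.

```python
import torch
from GAN_models import SpectralNorm, Generator  # assumed import path for the module above

# SpectralNorm wraps any weight-bearing layer; each forward() call runs one
# power iteration and rescales the weight by its estimated largest singular value.
sn_conv = SpectralNorm(torch.nn.Conv2d(3, 64, 3, padding=1))
x = torch.randn(2, 3, 64, 64)
y = sn_conv(x)            # -> shape (2, 64, 64, 64)

# The DCGAN-like generator maps a latent vector to a 3-channel image.
g = Generator(z_dim=128)
z = torch.randn(2, 128)
img = g(z)                # -> shape (2, 3, 32, 32) for the layer stack defined above
```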
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py
deleted file mode 100644
index e0a7ca50d3f317d7c3219b77ff84f0f8bb310c6d..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/varLib/plot.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""Visualize DesignSpaceDocument and resulting VariationModel."""
-
-from fontTools.varLib.models import VariationModel, supportScalar
-from fontTools.designspaceLib import DesignSpaceDocument
-from matplotlib import pyplot
-from mpl_toolkits.mplot3d import axes3d
-from itertools import cycle
-import math
-import logging
-import sys
-
-log = logging.getLogger(__name__)
-
-
-def stops(support, count=10):
- a, b, c = support
-
- return (
- [a + (b - a) * i / count for i in range(count)]
- + [b + (c - b) * i / count for i in range(count)]
- + [c]
- )
-
-
-def _plotLocationsDots(locations, axes, subplot, **kwargs):
- for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)):
- if len(axes) == 1:
- subplot.plot([loc.get(axes[0], 0)], [1.0], "o", color=color, **kwargs)
- elif len(axes) == 2:
- subplot.plot(
- [loc.get(axes[0], 0)],
- [loc.get(axes[1], 0)],
- [1.0],
- "o",
- color=color,
- **kwargs,
- )
- else:
- raise AssertionError(len(axes))
-
-
-def plotLocations(locations, fig, names=None, **kwargs):
- n = len(locations)
- cols = math.ceil(n**0.5)
- rows = math.ceil(n / cols)
-
- if names is None:
- names = [None] * len(locations)
-
- model = VariationModel(locations)
- names = [names[model.reverseMapping[i]] for i in range(len(names))]
-
- axes = sorted(locations[0].keys())
- if len(axes) == 1:
- _plotLocations2D(model, axes[0], fig, cols, rows, names=names, **kwargs)
- elif len(axes) == 2:
- _plotLocations3D(model, axes, fig, cols, rows, names=names, **kwargs)
- else:
- raise ValueError("Only 1 or 2 axes are supported")
-
-
-def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs):
- subplot = fig.add_subplot(111)
- for i, (support, color, name) in enumerate(
- zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
- ):
- if name is not None:
- subplot.set_title(name)
- subplot.set_xlabel(axis)
- pyplot.xlim(-1.0, +1.0)
-
- Xs = support.get(axis, (-1.0, 0.0, +1.0))
- X, Y = [], []
- for x in stops(Xs):
- y = supportScalar({axis: x}, support)
- X.append(x)
- Y.append(y)
- subplot.plot(X, Y, color=color, **kwargs)
-
- _plotLocationsDots(model.locations, [axis], subplot)
-
-
-def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs):
- ax1, ax2 = axes
-
- axis3D = fig.add_subplot(111, projection="3d")
- for i, (support, color, name) in enumerate(
- zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names))
- ):
- if name is not None:
- axis3D.set_title(name)
- axis3D.set_xlabel(ax1)
- axis3D.set_ylabel(ax2)
- pyplot.xlim(-1.0, +1.0)
- pyplot.ylim(-1.0, +1.0)
-
- Xs = support.get(ax1, (-1.0, 0.0, +1.0))
- Ys = support.get(ax2, (-1.0, 0.0, +1.0))
- for x in stops(Xs):
- X, Y, Z = [], [], []
- for y in Ys:
- z = supportScalar({ax1: x, ax2: y}, support)
- X.append(x)
- Y.append(y)
- Z.append(z)
- axis3D.plot(X, Y, Z, color=color, **kwargs)
- for y in stops(Ys):
- X, Y, Z = [], [], []
- for x in Xs:
- z = supportScalar({ax1: x, ax2: y}, support)
- X.append(x)
- Y.append(y)
- Z.append(z)
- axis3D.plot(X, Y, Z, color=color, **kwargs)
-
- _plotLocationsDots(model.locations, [ax1, ax2], axis3D)
-
-
-def plotDocument(doc, fig, **kwargs):
- doc.normalize()
- locations = [s.location for s in doc.sources]
- names = [s.name for s in doc.sources]
- plotLocations(locations, fig, names, **kwargs)
-
-
-def _plotModelFromMasters2D(model, masterValues, fig, **kwargs):
- assert len(model.axisOrder) == 1
- axis = model.axisOrder[0]
-
- axis_min = min(loc.get(axis, 0) for loc in model.locations)
- axis_max = max(loc.get(axis, 0) for loc in model.locations)
-
- import numpy as np
-
- X = np.arange(axis_min, axis_max, (axis_max - axis_min) / 100)
- Y = []
-
- for x in X:
- loc = {axis: x}
- v = model.interpolateFromMasters(loc, masterValues)
- Y.append(v)
-
- subplot = fig.add_subplot(111)
- subplot.plot(X, Y, "-", **kwargs)
-
-
-def _plotModelFromMasters3D(model, masterValues, fig, **kwargs):
- assert len(model.axisOrder) == 2
- axis1, axis2 = model.axisOrder[0], model.axisOrder[1]
-
- axis1_min = min(loc.get(axis1, 0) for loc in model.locations)
- axis1_max = max(loc.get(axis1, 0) for loc in model.locations)
- axis2_min = min(loc.get(axis2, 0) for loc in model.locations)
- axis2_max = max(loc.get(axis2, 0) for loc in model.locations)
-
- import numpy as np
-
- X = np.arange(axis1_min, axis1_max, (axis1_max - axis1_min) / 100)
- Y = np.arange(axis2_min, axis2_max, (axis2_max - axis2_min) / 100)
- X, Y = np.meshgrid(X, Y)
- Z = []
-
- for row_x, row_y in zip(X, Y):
- z_row = []
- Z.append(z_row)
- for x, y in zip(row_x, row_y):
- loc = {axis1: x, axis2: y}
- v = model.interpolateFromMasters(loc, masterValues)
- z_row.append(v)
- Z = np.array(Z)
-
- axis3D = fig.add_subplot(111, projection="3d")
- axis3D.plot_surface(X, Y, Z, **kwargs)
-
-
-def plotModelFromMasters(model, masterValues, fig, **kwargs):
- """Plot a variation model and set of master values corresponding
- to the locations to the model into a pyplot figure. Variation
- model must have axisOrder of size 1 or 2."""
- if len(model.axisOrder) == 1:
- _plotModelFromMasters2D(model, masterValues, fig, **kwargs)
- elif len(model.axisOrder) == 2:
- _plotModelFromMasters3D(model, masterValues, fig, **kwargs)
- else:
- raise ValueError("Only 1 or 2 axes are supported")
-
-
-def main(args=None):
- from fontTools import configLogger
-
- if args is None:
- args = sys.argv[1:]
-
- # configure the library logger (for >= WARNING)
- configLogger()
- # comment this out to enable debug messages from logger
- # log.setLevel(logging.DEBUG)
-
- if len(args) < 1:
- print("usage: fonttools varLib.plot source.designspace", file=sys.stderr)
- print(" or")
- print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr)
- print(" or")
- print(
- "usage: fonttools varLib.plot location1=value1 location2=value2 ...",
- file=sys.stderr,
- )
- sys.exit(1)
-
- fig = pyplot.figure()
- fig.set_tight_layout(True)
-
- if len(args) == 1 and args[0].endswith(".designspace"):
- doc = DesignSpaceDocument()
- doc.read(args[0])
- plotDocument(doc, fig)
- else:
- axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
- if "=" not in args[0]:
- locs = [dict(zip(axes, (float(v) for v in s.split(",")))) for s in args]
- plotLocations(locs, fig)
- else:
- locations = []
- masterValues = []
- for arg in args:
- loc, v = arg.split("=")
- locations.append(dict(zip(axes, (float(v) for v in loc.split(",")))))
- masterValues.append(float(v))
- model = VariationModel(locations, axes[: len(locations[0])])
- plotModelFromMasters(model, masterValues, fig)
-
- pyplot.show()
-
-
-if __name__ == "__main__":
- import sys
-
- sys.exit(main())
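
As a rough sketch of driving this module from Python rather than via the `fonttools varLib.plot` command line (the designspace path below is a placeholder):

```python
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.varLib.plot import plotDocument, plotLocations
from matplotlib import pyplot

# Plot the support scalars of every source in a designspace (1- or 2-axis documents only).
doc = DesignSpaceDocument()
doc.read("MyFamily.designspace")  # placeholder path
fig = pyplot.figure()
plotDocument(doc, fig)
pyplot.show()

# Or plot normalized locations directly, without a designspace file.
fig = pyplot.figure()
plotLocations([{"wght": -1.0}, {"wght": 0.0}, {"wght": 1.0}], fig)
pyplot.show()
```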
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py
deleted file mode 100644
index eca19566b135e5a7a4f6e7407d56411ec58bfe44..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/dircache.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import time
-from collections.abc import MutableMapping
-from functools import lru_cache
-
-
-class DirCache(MutableMapping):
- """
- Caching of directory listings, in a structure like::
-
- {"path0": [
- {"name": "path0/file0",
- "size": 123,
- "type": "file",
- ...
- },
- {"name": "path0/file1",
- },
- ...
- ],
- "path1": [...]
- }
-
- Parameters to this class control listing expiry or indeed turn
- caching off
- """
-
- def __init__(
- self,
- use_listings_cache=True,
- listings_expiry_time=None,
- max_paths=None,
- **kwargs,
- ):
- """
-
- Parameters
- ----------
- use_listings_cache: bool
- If False, this cache never returns items, but always reports KeyError,
- and setting items has no effect
- listings_expiry_time: int or float (optional)
- Time in seconds that a listing is considered valid. If None,
- listings do not expire.
- max_paths: int (optional)
- The number of most recent listings that are considered valid; 'recent'
- refers to when the entry was set.
- """
- self._cache = {}
- self._times = {}
- if max_paths:
- self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
- self.use_listings_cache = use_listings_cache
- self.listings_expiry_time = listings_expiry_time
- self.max_paths = max_paths
-
- def __getitem__(self, item):
- if self.listings_expiry_time is not None:
- if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
- del self._cache[item]
- if self.max_paths:
- self._q(item)
- return self._cache[item] # maybe raises KeyError
-
- def clear(self):
- self._cache.clear()
-
- def __len__(self):
- return len(self._cache)
-
- def __contains__(self, item):
- try:
- self[item]
- return True
- except KeyError:
- return False
-
- def __setitem__(self, key, value):
- if not self.use_listings_cache:
- return
- if self.max_paths:
- self._q(key)
- self._cache[key] = value
- if self.listings_expiry_time is not None:
- self._times[key] = time.time()
-
- def __delitem__(self, key):
- del self._cache[key]
-
- def __iter__(self):
- entries = list(self._cache)
-
- return (k for k in entries if k in self)
-
- def __reduce__(self):
- return (
- DirCache,
- (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
- )
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py
deleted file mode 100644
index 3de7787df0c3304207b42b51e9fb62da9d33c7d0..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/functorch/_src/make_functional/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# This file has moved to under torch/_functorch. It is not public API.
-# If you are not a PyTorch developer and you are relying on the following
-# imports, please file an issue.
-from torch._functorch.make_functional import _swap_state
diff --git a/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md b/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md
deleted file mode 100644
index f649a2861b01dc9989ede361c9291a7b27d8a0ec..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/The Killer Of Killers Dubbed Italian Movie Free LINK Download Torrent.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-