", "()V", FF_JNI_METHOD, offsetof(struct JNIAMediaCodecFields, init_id), 1 },
- { "android/media/MediaCodec.BufferInfo", "flags", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, flags_id), 1 },
- { "android/media/MediaCodec.BufferInfo", "offset", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, offset_id), 1 },
- { "android/media/MediaCodec.BufferInfo", "presentationTimeUs", "J", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, presentation_time_us_id), 1 },
- { "android/media/MediaCodec.BufferInfo", "size", "I", FF_JNI_FIELD, offsetof(struct JNIAMediaCodecFields, size_id), 1 },
-
- { NULL }
-};
-
-static const AVClass amediacodec_class = {
- .class_name = "amediacodec",
- .item_name = av_default_item_name,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-typedef struct FFAMediaCodecJni {
- FFAMediaCodec api;
-
- struct JNIAMediaCodecFields jfields;
-
- jobject object;
- jobject buffer_info;
-
- jobject input_buffers;
- jobject output_buffers;
-
- int INFO_TRY_AGAIN_LATER;
- int INFO_OUTPUT_BUFFERS_CHANGED;
- int INFO_OUTPUT_FORMAT_CHANGED;
-
- int BUFFER_FLAG_CODEC_CONFIG;
- int BUFFER_FLAG_END_OF_STREAM;
- int BUFFER_FLAG_KEY_FRAME;
-
- int CONFIGURE_FLAG_ENCODE;
-
- int has_get_i_o_buffer;
-} FFAMediaCodecJni;
-
-static const FFAMediaCodec media_codec_jni;
-
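- /* Fetch the JNIEnv for the current thread and bail out of the calling function if it is not available. */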
-#define JNI_GET_ENV_OR_RETURN(env, log_ctx, ret) do { \
- (env) = ff_jni_get_env(log_ctx); \
- if (!(env)) { \
- return ret; \
- } \
-} while (0)
-
-#define JNI_GET_ENV_OR_RETURN_VOID(env, log_ctx) do { \
- (env) = ff_jni_get_env(log_ctx); \
- if (!(env)) { \
- return; \
- } \
-} while (0)
-
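- /* Map avctx->codec_id and avctx->profile to the matching MediaCodecInfo.CodecProfileLevel constant, or return -1 when there is no match. */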
-int ff_AMediaCodecProfile_getProfileFromAVCodecContext(AVCodecContext *avctx)
-{
- // Copied and modified from MediaCodecInfo.java
- static const int AVCProfileBaseline = 0x01;
- static const int AVCProfileMain = 0x02;
- static const int AVCProfileExtended = 0x04;
- static const int AVCProfileHigh = 0x08;
- static const int AVCProfileHigh10 = 0x10;
- static const int AVCProfileHigh422 = 0x20;
- static const int AVCProfileHigh444 = 0x40;
- static const int AVCProfileConstrainedBaseline = 0x10000;
- static const int AVCProfileConstrainedHigh = 0x80000;
-
- static const int HEVCProfileMain = 0x01;
- static const int HEVCProfileMain10 = 0x02;
- static const int HEVCProfileMainStill = 0x04;
- static const int HEVCProfileMain10HDR10 = 0x1000;
- static const int HEVCProfileMain10HDR10Plus = 0x2000;
-
- static const int VP9Profile0 = 0x01;
- static const int VP9Profile1 = 0x02;
- static const int VP9Profile2 = 0x04;
- static const int VP9Profile3 = 0x08;
- static const int VP9Profile2HDR = 0x1000;
- static const int VP9Profile3HDR = 0x2000;
- static const int VP9Profile2HDR10Plus = 0x4000;
- static const int VP9Profile3HDR10Plus = 0x8000;
-
- static const int MPEG4ProfileSimple = 0x01;
- static const int MPEG4ProfileSimpleScalable = 0x02;
- static const int MPEG4ProfileCore = 0x04;
- static const int MPEG4ProfileMain = 0x08;
- static const int MPEG4ProfileNbit = 0x10;
- static const int MPEG4ProfileScalableTexture = 0x20;
- static const int MPEG4ProfileSimpleFBA = 0x80;
- static const int MPEG4ProfileSimpleFace = 0x40;
- static const int MPEG4ProfileBasicAnimated = 0x100;
- static const int MPEG4ProfileHybrid = 0x200;
- static const int MPEG4ProfileAdvancedRealTime = 0x400;
- static const int MPEG4ProfileCoreScalable = 0x800;
- static const int MPEG4ProfileAdvancedCoding = 0x1000;
- static const int MPEG4ProfileAdvancedCore = 0x2000;
- static const int MPEG4ProfileAdvancedScalable = 0x4000;
- static const int MPEG4ProfileAdvancedSimple = 0x8000;
-
- // Currently unused.
- (void)AVCProfileConstrainedHigh;
- (void)HEVCProfileMain10HDR10;
- (void)HEVCProfileMain10HDR10Plus;
- (void)VP9Profile2HDR;
- (void)VP9Profile3HDR;
- (void)VP9Profile2HDR10Plus;
- (void)VP9Profile3HDR10Plus;
-
- if (avctx->codec_id == AV_CODEC_ID_H264) {
- switch (avctx->profile) {
- case FF_PROFILE_H264_BASELINE:
- return AVCProfileBaseline;
- case FF_PROFILE_H264_CONSTRAINED_BASELINE:
- return AVCProfileConstrainedBaseline;
- case FF_PROFILE_H264_MAIN:
- return AVCProfileMain;
- case FF_PROFILE_H264_EXTENDED:
- return AVCProfileExtended;
- case FF_PROFILE_H264_HIGH:
- return AVCProfileHigh;
- case FF_PROFILE_H264_HIGH_10:
- case FF_PROFILE_H264_HIGH_10_INTRA:
- return AVCProfileHigh10;
- case FF_PROFILE_H264_HIGH_422:
- case FF_PROFILE_H264_HIGH_422_INTRA:
- return AVCProfileHigh422;
- case FF_PROFILE_H264_HIGH_444:
- case FF_PROFILE_H264_HIGH_444_INTRA:
- case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
- return AVCProfileHigh444;
- }
- } else if (avctx->codec_id == AV_CODEC_ID_HEVC) {
- switch (avctx->profile) {
- case FF_PROFILE_HEVC_MAIN:
- return HEVCProfileMain;
- case FF_PROFILE_HEVC_MAIN_STILL_PICTURE:
- return HEVCProfileMainStill;
- case FF_PROFILE_HEVC_MAIN_10:
- return HEVCProfileMain10;
- }
- } else if (avctx->codec_id == AV_CODEC_ID_VP9) {
- switch (avctx->profile) {
- case FF_PROFILE_VP9_0:
- return VP9Profile0;
- case FF_PROFILE_VP9_1:
- return VP9Profile1;
- case FF_PROFILE_VP9_2:
- return VP9Profile2;
- case FF_PROFILE_VP9_3:
- return VP9Profile3;
- }
- } else if (avctx->codec_id == AV_CODEC_ID_MPEG4) {
- switch (avctx->profile) {
- case FF_PROFILE_MPEG4_SIMPLE:
- return MPEG4ProfileSimple;
- case FF_PROFILE_MPEG4_SIMPLE_SCALABLE:
- return MPEG4ProfileSimpleScalable;
- case FF_PROFILE_MPEG4_CORE:
- return MPEG4ProfileCore;
- case FF_PROFILE_MPEG4_MAIN:
- return MPEG4ProfileMain;
- case FF_PROFILE_MPEG4_N_BIT:
- return MPEG4ProfileNbit;
- case FF_PROFILE_MPEG4_SCALABLE_TEXTURE:
- return MPEG4ProfileScalableTexture;
- case FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION:
- return MPEG4ProfileSimpleFBA;
- case FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE:
- return MPEG4ProfileBasicAnimated;
- case FF_PROFILE_MPEG4_HYBRID:
- return MPEG4ProfileHybrid;
- case FF_PROFILE_MPEG4_ADVANCED_REAL_TIME:
- return MPEG4ProfileAdvancedRealTime;
- case FF_PROFILE_MPEG4_CORE_SCALABLE:
- return MPEG4ProfileCoreScalable;
- case FF_PROFILE_MPEG4_ADVANCED_CODING:
- return MPEG4ProfileAdvancedCoding;
- case FF_PROFILE_MPEG4_ADVANCED_CORE:
- return MPEG4ProfileAdvancedCore;
- case FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE:
- return MPEG4ProfileAdvancedScalable;
- case FF_PROFILE_MPEG4_ADVANCED_SIMPLE:
- return MPEG4ProfileAdvancedSimple;
- case FF_PROFILE_MPEG4_SIMPLE_STUDIO:
- // Studio profiles are not supported by MediaCodec.
- default:
- break;
- }
- }
-
- return -1;
-}
-
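- /* Walk android.media.MediaCodecList and return a newly allocated name of the first non-software
-  * codec that matches the given mime type, profile and encoder/decoder direction, or NULL. */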
-char *ff_AMediaCodecList_getCodecNameByType(const char *mime, int profile, int encoder, void *log_ctx)
-{
- int ret;
- int i;
- int codec_count;
- int found_codec = 0;
- char *name = NULL;
- char *supported_type = NULL;
-
- JNIEnv *env = NULL;
- struct JNIAMediaCodecListFields jfields = { 0 };
- struct JNIAMediaFormatFields mediaformat_jfields = { 0 };
-
- jobject codec_name = NULL;
-
- jobject info = NULL;
- jobject type = NULL;
- jobjectArray types = NULL;
-
- jobject capabilities = NULL;
- jobject profile_level = NULL;
- jobjectArray profile_levels = NULL;
-
- JNI_GET_ENV_OR_RETURN(env, log_ctx, NULL);
-
- if ((ret = ff_jni_init_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, log_ctx)) < 0) {
- goto done;
- }
-
- if ((ret = ff_jni_init_jfields(env, &mediaformat_jfields, jni_amediaformat_mapping, 0, log_ctx)) < 0) {
- goto done;
- }
-
- codec_count = (*env)->CallStaticIntMethod(env, jfields.mediacodec_list_class, jfields.get_codec_count_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- for (i = 0; i < codec_count; i++) {
- int j;
- int type_count;
- int is_encoder;
-
- info = (*env)->CallStaticObjectMethod(env, jfields.mediacodec_list_class, jfields.get_codec_info_at_id, i);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- types = (*env)->CallObjectMethod(env, info, jfields.get_supported_types_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- is_encoder = (*env)->CallBooleanMethod(env, info, jfields.is_encoder_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- if (is_encoder != encoder) {
- goto done_with_info;
- }
-
- if (jfields.is_software_only_id) {
- int is_software_only = (*env)->CallBooleanMethod(env, info, jfields.is_software_only_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- if (is_software_only) {
- goto done_with_info;
- }
- }
-
- codec_name = (*env)->CallObjectMethod(env, info, jfields.get_name_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- name = ff_jni_jstring_to_utf_chars(env, codec_name, log_ctx);
- if (!name) {
- goto done;
- }
-
- if (codec_name) {
- (*env)->DeleteLocalRef(env, codec_name);
- codec_name = NULL;
- }
-
- /* Skip software decoders */
- if (
- strstr(name, "OMX.google") ||
- strstr(name, "OMX.ffmpeg") ||
- (strstr(name, "OMX.SEC") && strstr(name, ".sw.")) ||
- !strcmp(name, "OMX.qcom.video.decoder.hevcswvdec")) {
- goto done_with_info;
- }
-
- type_count = (*env)->GetArrayLength(env, types);
- for (j = 0; j < type_count; j++) {
- int k;
- int profile_count;
-
- type = (*env)->GetObjectArrayElement(env, types, j);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- supported_type = ff_jni_jstring_to_utf_chars(env, type, log_ctx);
- if (!supported_type) {
- goto done;
- }
-
- if (av_strcasecmp(supported_type, mime)) {
- goto done_with_type;
- }
-
- capabilities = (*env)->CallObjectMethod(env, info, jfields.get_codec_capabilities_id, type);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- profile_levels = (*env)->GetObjectField(env, capabilities, jfields.profile_levels_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- profile_count = (*env)->GetArrayLength(env, profile_levels);
- if (!profile_count) {
- found_codec = 1;
- }
- for (k = 0; k < profile_count; k++) {
- int supported_profile = 0;
-
- if (profile < 0) {
- found_codec = 1;
- break;
- }
-
- profile_level = (*env)->GetObjectArrayElement(env, profile_levels, k);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- supported_profile = (*env)->GetIntField(env, profile_level, jfields.profile_id);
- if (ff_jni_exception_check(env, 1, log_ctx) < 0) {
- goto done;
- }
-
- found_codec = profile == supported_profile;
-
- if (profile_level) {
- (*env)->DeleteLocalRef(env, profile_level);
- profile_level = NULL;
- }
-
- if (found_codec) {
- break;
- }
- }
-
-done_with_type:
- if (profile_levels) {
- (*env)->DeleteLocalRef(env, profile_levels);
- profile_levels = NULL;
- }
-
- if (capabilities) {
- (*env)->DeleteLocalRef(env, capabilities);
- capabilities = NULL;
- }
-
- if (type) {
- (*env)->DeleteLocalRef(env, type);
- type = NULL;
- }
-
- av_freep(&supported_type);
-
- if (found_codec) {
- break;
- }
- }
-
-done_with_info:
- if (info) {
- (*env)->DeleteLocalRef(env, info);
- info = NULL;
- }
-
- if (types) {
- (*env)->DeleteLocalRef(env, types);
- types = NULL;
- }
-
- if (found_codec) {
- break;
- }
-
- av_freep(&name);
- }
-
-done:
- if (codec_name) {
- (*env)->DeleteLocalRef(env, codec_name);
- }
-
- if (info) {
- (*env)->DeleteLocalRef(env, info);
- }
-
- if (type) {
- (*env)->DeleteLocalRef(env, type);
- }
-
- if (types) {
- (*env)->DeleteLocalRef(env, types);
- }
-
- if (capabilities) {
- (*env)->DeleteLocalRef(env, capabilities);
- }
-
- if (profile_level) {
- (*env)->DeleteLocalRef(env, profile_level);
- }
-
- if (profile_levels) {
- (*env)->DeleteLocalRef(env, profile_levels);
- }
-
- av_freep(&supported_type);
-
- ff_jni_reset_jfields(env, &jfields, jni_amediacodeclist_mapping, 0, log_ctx);
- ff_jni_reset_jfields(env, &mediaformat_jfields, jni_amediaformat_mapping, 0, log_ctx);
-
- if (!found_codec) {
- av_freep(&name);
- }
-
- return name;
-}
-
-static FFAMediaFormat *mediaformat_jni_new(void)
-{
- JNIEnv *env = NULL;
- FFAMediaFormatJni *format = NULL;
- jobject object = NULL;
-
- format = av_mallocz(sizeof(*format));
- if (!format) {
- return NULL;
- }
- format->api = media_format_jni;
-
- env = ff_jni_get_env(format);
- if (!env) {
- av_freep(&format);
- return NULL;
- }
-
- if (ff_jni_init_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format) < 0) {
- goto fail;
- }
-
- object = (*env)->NewObject(env, format->jfields.mediaformat_class, format->jfields.init_id);
- if (!object) {
- goto fail;
- }
-
- format->object = (*env)->NewGlobalRef(env, object);
- if (!format->object) {
- goto fail;
- }
-
-fail:
- if (object) {
- (*env)->DeleteLocalRef(env, object);
- }
-
- if (!format->object) {
- ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format);
- av_freep(&format);
- }
-
- return (FFAMediaFormat *)format;
-}
-
-static FFAMediaFormat *mediaformat_jni_newFromObject(void *object)
-{
- JNIEnv *env = NULL;
- FFAMediaFormatJni *format = NULL;
-
- format = av_mallocz(sizeof(*format));
- if (!format) {
- return NULL;
- }
- format->api = media_format_jni;
-
- env = ff_jni_get_env(format);
- if (!env) {
- av_freep(&format);
- return NULL;
- }
-
- if (ff_jni_init_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format) < 0) {
- goto fail;
- }
-
- format->object = (*env)->NewGlobalRef(env, object);
- if (!format->object) {
- goto fail;
- }
-
- return (FFAMediaFormat *)format;
-fail:
- ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format);
-
- av_freep(&format);
-
- return NULL;
-}
-
-static int mediaformat_jni_delete(FFAMediaFormat* ctx)
-{
- int ret = 0;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
-
- if (!format) {
- return 0;
- }
-
- JNI_GET_ENV_OR_RETURN(env, format, AVERROR_EXTERNAL);
-
- (*env)->DeleteGlobalRef(env, format->object);
- format->object = NULL;
-
- ff_jni_reset_jfields(env, &format->jfields, jni_amediaformat_mapping, 1, format);
-
- av_freep(&format);
-
- return ret;
-}
-
-static char* mediaformat_jni_toString(FFAMediaFormat* ctx)
-{
- char *ret = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring description = NULL;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, NULL);
-
- description = (*env)->CallObjectMethod(env, format->object, format->jfields.to_string_id);
- if (ff_jni_exception_check(env, 1, NULL) < 0) {
- goto fail;
- }
-
- ret = ff_jni_jstring_to_utf_chars(env, description, format);
-fail:
- if (description) {
- (*env)->DeleteLocalRef(env, description);
- }
-
- return ret;
-}
-
-static int mediaformat_jni_getInt32(FFAMediaFormat* ctx, const char *name, int32_t *out)
-{
- int ret = 1;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring key = NULL;
- jboolean contains_key;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, 0);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- ret = 0;
- goto fail;
- }
-
- contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key);
- if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- *out = (*env)->CallIntMethod(env, format->object, format->jfields.get_integer_id, key);
- if ((ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- ret = 1;
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- return ret;
-}
-
-static int mediaformat_jni_getInt64(FFAMediaFormat* ctx, const char *name, int64_t *out)
-{
- int ret = 1;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring key = NULL;
- jboolean contains_key;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, 0);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- ret = 0;
- goto fail;
- }
-
- contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key);
- if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- *out = (*env)->CallLongMethod(env, format->object, format->jfields.get_long_id, key);
- if ((ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- ret = 1;
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- return ret;
-}
-
-static int mediaformat_jni_getFloat(FFAMediaFormat* ctx, const char *name, float *out)
-{
- int ret = 1;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring key = NULL;
- jboolean contains_key;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, 0);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- ret = 0;
- goto fail;
- }
-
- contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key);
- if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- *out = (*env)->CallFloatMethod(env, format->object, format->jfields.get_float_id, key);
- if ((ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- ret = 1;
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- return ret;
-}
-
-static int mediaformat_jni_getBuffer(FFAMediaFormat* ctx, const char *name, void** data, size_t *size)
-{
- int ret = 1;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring key = NULL;
- jboolean contains_key;
- jobject result = NULL;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, 0);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- ret = 0;
- goto fail;
- }
-
- contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key);
- if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- result = (*env)->CallObjectMethod(env, format->object, format->jfields.get_bytebuffer_id, key);
- if ((ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- *data = (*env)->GetDirectBufferAddress(env, result);
- *size = (*env)->GetDirectBufferCapacity(env, result);
-
- if (*data && *size) {
- void *src = *data;
- *data = av_malloc(*size);
- if (!*data) {
- ret = 0;
- goto fail;
- }
-
- memcpy(*data, src, *size);
- }
-
- ret = 1;
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- if (result) {
- (*env)->DeleteLocalRef(env, result);
- }
-
- return ret;
-}
-
-static int mediaformat_jni_getString(FFAMediaFormat* ctx, const char *name, const char **out)
-{
- int ret = 1;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
- JNIEnv *env = NULL;
- jstring key = NULL;
- jboolean contains_key;
- jstring result = NULL;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN(env, format, 0);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- ret = 0;
- goto fail;
- }
-
- contains_key = (*env)->CallBooleanMethod(env, format->object, format->jfields.contains_key_id, key);
- if (!contains_key || (ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- result = (*env)->CallObjectMethod(env, format->object, format->jfields.get_string_id, key);
- if ((ret = ff_jni_exception_check(env, 1, format)) < 0) {
- ret = 0;
- goto fail;
- }
-
- *out = ff_jni_jstring_to_utf_chars(env, result, format);
- if (!*out) {
- ret = 0;
- goto fail;
- }
-
- ret = 1;
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- if (result) {
- (*env)->DeleteLocalRef(env, result);
- }
-
- return ret;
-}
-
-static void mediaformat_jni_setInt32(FFAMediaFormat* ctx, const char* name, int32_t value)
-{
- JNIEnv *env = NULL;
- jstring key = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN_VOID(env, format);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- goto fail;
- }
-
- (*env)->CallVoidMethod(env, format->object, format->jfields.set_integer_id, key, value);
- if (ff_jni_exception_check(env, 1, format) < 0) {
- goto fail;
- }
-
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-}
-
-static void mediaformat_jni_setInt64(FFAMediaFormat* ctx, const char* name, int64_t value)
-{
- JNIEnv *env = NULL;
- jstring key = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN_VOID(env, format);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- goto fail;
- }
-
- (*env)->CallVoidMethod(env, format->object, format->jfields.set_long_id, key, value);
- if (ff_jni_exception_check(env, 1, format) < 0) {
- goto fail;
- }
-
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-}
-
-static void mediaformat_jni_setFloat(FFAMediaFormat* ctx, const char* name, float value)
-{
- JNIEnv *env = NULL;
- jstring key = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN_VOID(env, format);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- goto fail;
- }
-
- (*env)->CallVoidMethod(env, format->object, format->jfields.set_float_id, key, value);
- if (ff_jni_exception_check(env, 1, format) < 0) {
- goto fail;
- }
-
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-}
-
-static void mediaformat_jni_setString(FFAMediaFormat* ctx, const char* name, const char* value)
-{
- JNIEnv *env = NULL;
- jstring key = NULL;
- jstring string = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN_VOID(env, format);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- goto fail;
- }
-
- string = ff_jni_utf_chars_to_jstring(env, value, format);
- if (!string) {
- goto fail;
- }
-
- (*env)->CallVoidMethod(env, format->object, format->jfields.set_string_id, key, string);
- if (ff_jni_exception_check(env, 1, format) < 0) {
- goto fail;
- }
-
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- if (string) {
- (*env)->DeleteLocalRef(env, string);
- }
-}
-
-static void mediaformat_jni_setBuffer(FFAMediaFormat* ctx, const char* name, void* data, size_t size)
-{
- JNIEnv *env = NULL;
- jstring key = NULL;
- jobject buffer = NULL;
- void *buffer_data = NULL;
- FFAMediaFormatJni *format = (FFAMediaFormatJni *)ctx;
-
- av_assert0(format != NULL);
-
- JNI_GET_ENV_OR_RETURN_VOID(env, format);
-
- key = ff_jni_utf_chars_to_jstring(env, name, format);
- if (!key) {
- goto fail;
- }
-
- if (!data || !size) {
- goto fail;
- }
-
- buffer_data = av_malloc(size);
- if (!buffer_data) {
- goto fail;
- }
-
- memcpy(buffer_data, data, size);
-
- buffer = (*env)->NewDirectByteBuffer(env, buffer_data, size);
- if (!buffer) {
- goto fail;
- }
-
- (*env)->CallVoidMethod(env, format->object, format->jfields.set_bytebuffer_id, key, buffer);
- if (ff_jni_exception_check(env, 1, format) < 0) {
- goto fail;
- }
-
-fail:
- if (key) {
- (*env)->DeleteLocalRef(env, key);
- }
-
- if (buffer) {
- (*env)->DeleteLocalRef(env, buffer);
- }
-}
-
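- /* Cache the MediaCodec INFO_*, BUFFER_FLAG_* and CONFIGURE_FLAG_* static constants through JNI. */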
-static int codec_init_static_fields(FFAMediaCodecJni *codec)
-{
- int ret = 0;
- JNIEnv *env = NULL;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- codec->INFO_TRY_AGAIN_LATER = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_try_again_later_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
- codec->BUFFER_FLAG_CODEC_CONFIG = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_codec_config_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
- codec->BUFFER_FLAG_END_OF_STREAM = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_end_of_stream_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
- if (codec->jfields.buffer_flag_key_frame_id) {
- codec->BUFFER_FLAG_KEY_FRAME = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.buffer_flag_key_frame_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
- }
-
- codec->CONFIGURE_FLAG_ENCODE = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.configure_flag_encode_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
- codec->INFO_OUTPUT_BUFFERS_CHANGED = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_output_buffers_changed_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
- codec->INFO_OUTPUT_FORMAT_CHANGED = (*env)->GetStaticIntField(env, codec->jfields.mediacodec_class, codec->jfields.info_output_format_changed_id);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- goto fail;
- }
-
-fail:
-
- return ret;
-}
-
-#define CREATE_CODEC_BY_NAME 0
-#define CREATE_DECODER_BY_TYPE 1
-#define CREATE_ENCODER_BY_TYPE 2
-
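- /* Instantiate a MediaCodec through the factory method selected by 'method', cache its static
-  * constants and allocate the reusable BufferInfo used by dequeueOutputBuffer(). */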
-static inline FFAMediaCodec *codec_create(int method, const char *arg)
-{
- int ret = -1;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = NULL;
- jstring jarg = NULL;
- jobject object = NULL;
- jobject buffer_info = NULL;
- jmethodID create_id = NULL;
-
- codec = av_mallocz(sizeof(*codec));
- if (!codec) {
- return NULL;
- }
- codec->api = media_codec_jni;
-
- env = ff_jni_get_env(codec);
- if (!env) {
- av_freep(&codec);
- return NULL;
- }
-
- if (ff_jni_init_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec) < 0) {
- goto fail;
- }
-
- jarg = ff_jni_utf_chars_to_jstring(env, arg, codec);
- if (!jarg) {
- goto fail;
- }
-
- switch (method) {
- case CREATE_CODEC_BY_NAME: create_id = codec->jfields.create_by_codec_name_id; break;
- case CREATE_DECODER_BY_TYPE: create_id = codec->jfields.create_decoder_by_type_id; break;
- case CREATE_ENCODER_BY_TYPE: create_id = codec->jfields.create_encoder_by_type_id; break;
- default:
- av_assert0(0);
- }
-
- object = (*env)->CallStaticObjectMethod(env,
- codec->jfields.mediacodec_class,
- create_id,
- jarg);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- codec->object = (*env)->NewGlobalRef(env, object);
- if (!codec->object) {
- goto fail;
- }
-
- if (codec_init_static_fields(codec) < 0) {
- goto fail;
- }
-
- if (codec->jfields.get_input_buffer_id && codec->jfields.get_output_buffer_id) {
- codec->has_get_i_o_buffer = 1;
- }
-
- buffer_info = (*env)->NewObject(env, codec->jfields.mediainfo_class, codec->jfields.init_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- codec->buffer_info = (*env)->NewGlobalRef(env, buffer_info);
- if (!codec->buffer_info) {
- goto fail;
- }
-
- ret = 0;
-fail:
- if (jarg) {
- (*env)->DeleteLocalRef(env, jarg);
- }
-
- if (object) {
- (*env)->DeleteLocalRef(env, object);
- }
-
- if (buffer_info) {
- (*env)->DeleteLocalRef(env, buffer_info);
- }
-
- if (ret < 0) {
- if (codec->object) {
- (*env)->DeleteGlobalRef(env, codec->object);
- }
-
- if (codec->buffer_info) {
- (*env)->DeleteGlobalRef(env, codec->buffer_info);
- }
-
- ff_jni_reset_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec);
- av_freep(&codec);
- }
-
- return (FFAMediaCodec *)codec;
-}
-
-#define DECLARE_FF_AMEDIACODEC_CREATE_FUNC(name, method) \
-static FFAMediaCodec *mediacodec_jni_##name(const char *arg) \
-{ \
- return codec_create(method, arg); \
-} \
-
-DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createCodecByName, CREATE_CODEC_BY_NAME)
-DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createDecoderByType, CREATE_DECODER_BY_TYPE)
-DECLARE_FF_AMEDIACODEC_CREATE_FUNC(createEncoderByType, CREATE_ENCODER_BY_TYPE)
-
-static int mediacodec_jni_delete(FFAMediaCodec* ctx)
-{
- int ret = 0;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- JNIEnv *env = NULL;
-
- if (!codec) {
- return 0;
- }
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- }
-
- (*env)->DeleteGlobalRef(env, codec->input_buffers);
- codec->input_buffers = NULL;
-
- (*env)->DeleteGlobalRef(env, codec->output_buffers);
- codec->output_buffers = NULL;
-
- (*env)->DeleteGlobalRef(env, codec->object);
- codec->object = NULL;
-
- (*env)->DeleteGlobalRef(env, codec->buffer_info);
- codec->buffer_info = NULL;
-
- ff_jni_reset_jfields(env, &codec->jfields, jni_amediacodec_mapping, 1, codec);
-
- av_freep(&codec);
-
- return ret;
-}
-
-static char *mediacodec_jni_getName(FFAMediaCodec *ctx)
-{
- char *ret = NULL;
- JNIEnv *env = NULL;
- jobject name = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, NULL);
-
- name = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_name_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- ret = ff_jni_jstring_to_utf_chars(env, name, codec);
-
-fail:
- if (name) {
- (*env)->DeleteLocalRef(env, name);
- }
-
- return ret;
-}
-
-static int mediacodec_jni_configure(FFAMediaCodec *ctx,
- const FFAMediaFormat* format_ctx,
- FFANativeWindow* window,
- void *crypto,
- uint32_t flags)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- const FFAMediaFormatJni *format = (FFAMediaFormatJni *)format_ctx;
- jobject surface = window ? window->surface : NULL;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- if (flags & codec->CONFIGURE_FLAG_ENCODE) {
- if (surface && !codec->jfields.set_input_surface_id) {
- av_log(ctx, AV_LOG_ERROR, "System doesn't support setInputSurface\n");
- return AVERROR_EXTERNAL;
- }
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.configure_id, format->object, NULL, NULL, flags);
- if (ff_jni_exception_check(env, 1, codec) < 0)
- return AVERROR_EXTERNAL;
-
- if (!surface)
- return 0;
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.set_input_surface_id, surface);
- if (ff_jni_exception_check(env, 1, codec) < 0)
- return AVERROR_EXTERNAL;
- return 0;
- } else {
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.configure_id, format->object, surface, NULL, flags);
- }
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_start(FFAMediaCodec* ctx)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.start_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_stop(FFAMediaCodec* ctx)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.stop_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_flush(FFAMediaCodec* ctx)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.flush_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_releaseOutputBuffer(FFAMediaCodec* ctx, size_t idx, int render)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_output_buffer_id, (jint)idx, (jboolean)render);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_releaseOutputBufferAtTime(FFAMediaCodec *ctx, size_t idx, int64_t timestampNs)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.release_output_buffer_at_time_id, (jint)idx, (jlong)timestampNs);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static ssize_t mediacodec_jni_dequeueInputBuffer(FFAMediaCodec* ctx, int64_t timeoutUs)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- ret = (*env)->CallIntMethod(env, codec->object, codec->jfields.dequeue_input_buffer_id, timeoutUs);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_queueInputBuffer(FFAMediaCodec* ctx, size_t idx, off_t offset, size_t size, uint64_t time, uint32_t flags)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.queue_input_buffer_id, (jint)idx, (jint)offset, (jint)size, time, flags);
- if ((ret = ff_jni_exception_check(env, 1, codec)) < 0) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
-fail:
- return ret;
-}
-
-static ssize_t mediacodec_jni_dequeueOutputBuffer(FFAMediaCodec* ctx, FFAMediaCodecBufferInfo *info, int64_t timeoutUs)
-{
- int ret = 0;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- ret = (*env)->CallIntMethod(env, codec->object, codec->jfields.dequeue_output_buffer_id, codec->buffer_info, timeoutUs);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- info->flags = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.flags_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- info->offset = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.offset_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- info->presentationTimeUs = (*env)->GetLongField(env, codec->buffer_info, codec->jfields.presentation_time_us_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- info->size = (*env)->GetIntField(env, codec->buffer_info, codec->jfields.size_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- return ret;
-}
-
-static uint8_t* mediacodec_jni_getInputBuffer(FFAMediaCodec* ctx, size_t idx, size_t *out_size)
-{
- uint8_t *ret = NULL;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- jobject buffer = NULL;
- jobject input_buffers = NULL;
-
- JNI_GET_ENV_OR_RETURN(env, codec, NULL);
-
- if (codec->has_get_i_o_buffer) {
- buffer = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_input_buffer_id, (jint)idx);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- } else {
- if (!codec->input_buffers) {
- input_buffers = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_input_buffers_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- codec->input_buffers = (*env)->NewGlobalRef(env, input_buffers);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- }
-
- buffer = (*env)->GetObjectArrayElement(env, codec->input_buffers, idx);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- }
-
- ret = (*env)->GetDirectBufferAddress(env, buffer);
- *out_size = (*env)->GetDirectBufferCapacity(env, buffer);
-fail:
- if (buffer) {
- (*env)->DeleteLocalRef(env, buffer);
- }
-
- if (input_buffers) {
- (*env)->DeleteLocalRef(env, input_buffers);
- }
-
- return ret;
-}
-
-static uint8_t* mediacodec_jni_getOutputBuffer(FFAMediaCodec* ctx, size_t idx, size_t *out_size)
-{
- uint8_t *ret = NULL;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- jobject buffer = NULL;
- jobject output_buffers = NULL;
-
- JNI_GET_ENV_OR_RETURN(env, codec, NULL);
-
- if (codec->has_get_i_o_buffer) {
- buffer = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffer_id, (jint)idx);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- } else {
- if (!codec->output_buffers) {
- output_buffers = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_buffers_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- codec->output_buffers = (*env)->NewGlobalRef(env, output_buffers);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- }
-
- buffer = (*env)->GetObjectArrayElement(env, codec->output_buffers, idx);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
- }
-
- ret = (*env)->GetDirectBufferAddress(env, buffer);
- *out_size = (*env)->GetDirectBufferCapacity(env, buffer);
-fail:
- if (buffer) {
- (*env)->DeleteLocalRef(env, buffer);
- }
-
- if (output_buffers) {
- (*env)->DeleteLocalRef(env, output_buffers);
- }
-
- return ret;
-}
-
-static FFAMediaFormat* mediacodec_jni_getOutputFormat(FFAMediaCodec* ctx)
-{
- FFAMediaFormat *ret = NULL;
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- jobject mediaformat = NULL;
-
- JNI_GET_ENV_OR_RETURN(env, codec, NULL);
-
- mediaformat = (*env)->CallObjectMethod(env, codec->object, codec->jfields.get_output_format_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- goto fail;
- }
-
- ret = mediaformat_jni_newFromObject(mediaformat);
-fail:
- if (mediaformat) {
- (*env)->DeleteLocalRef(env, mediaformat);
- }
-
- return ret;
-}
-
-static int mediacodec_jni_infoTryAgainLater(FFAMediaCodec *ctx, ssize_t idx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return idx == codec->INFO_TRY_AGAIN_LATER;
-}
-
-static int mediacodec_jni_infoOutputBuffersChanged(FFAMediaCodec *ctx, ssize_t idx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return idx == codec->INFO_OUTPUT_BUFFERS_CHANGED;
-}
-
-static int mediacodec_jni_infoOutputFormatChanged(FFAMediaCodec *ctx, ssize_t idx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return idx == codec->INFO_OUTPUT_FORMAT_CHANGED;
-}
-
-static int mediacodec_jni_getBufferFlagCodecConfig(FFAMediaCodec *ctx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return codec->BUFFER_FLAG_CODEC_CONFIG;
-}
-
-static int mediacodec_jni_getBufferFlagEndOfStream(FFAMediaCodec *ctx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return codec->BUFFER_FLAG_END_OF_STREAM;
-}
-
-static int mediacodec_jni_getBufferFlagKeyFrame(FFAMediaCodec *ctx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return codec->BUFFER_FLAG_KEY_FRAME;
-}
-
-static int mediacodec_jni_getConfigureFlagEncode(FFAMediaCodec *ctx)
-{
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
- return codec->CONFIGURE_FLAG_ENCODE;
-}
-
-static int mediacodec_jni_cleanOutputBuffers(FFAMediaCodec *ctx)
-{
- int ret = 0;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- if (!codec->has_get_i_o_buffer) {
- if (codec->output_buffers) {
- JNIEnv *env = NULL;
-
- env = ff_jni_get_env(codec);
- if (!env) {
- ret = AVERROR_EXTERNAL;
- goto fail;
- }
-
- (*env)->DeleteGlobalRef(env, codec->output_buffers);
- codec->output_buffers = NULL;
- }
- }
-
-fail:
- return ret;
-}
-
-static int mediacodec_jni_signalEndOfInputStream(FFAMediaCodec *ctx)
-{
- JNIEnv *env = NULL;
- FFAMediaCodecJni *codec = (FFAMediaCodecJni *)ctx;
-
- JNI_GET_ENV_OR_RETURN(env, codec, AVERROR_EXTERNAL);
-
- (*env)->CallVoidMethod(env, codec->object, codec->jfields.signal_end_of_input_stream_id);
- if (ff_jni_exception_check(env, 1, codec) < 0) {
- return AVERROR_EXTERNAL;
- }
-
- return 0;
-}
-
-static const FFAMediaFormat media_format_jni = {
- .class = &amediaformat_class,
-
- .create = mediaformat_jni_new,
- .delete = mediaformat_jni_delete,
-
- .toString = mediaformat_jni_toString,
-
- .getInt32 = mediaformat_jni_getInt32,
- .getInt64 = mediaformat_jni_getInt64,
- .getFloat = mediaformat_jni_getFloat,
- .getBuffer = mediaformat_jni_getBuffer,
- .getString = mediaformat_jni_getString,
-
- .setInt32 = mediaformat_jni_setInt32,
- .setInt64 = mediaformat_jni_setInt64,
- .setFloat = mediaformat_jni_setFloat,
- .setString = mediaformat_jni_setString,
- .setBuffer = mediaformat_jni_setBuffer,
-};
-
-static const FFAMediaCodec media_codec_jni = {
- .class = &amediacodec_class,
-
- .getName = mediacodec_jni_getName,
-
- .createCodecByName = mediacodec_jni_createCodecByName,
- .createDecoderByType = mediacodec_jni_createDecoderByType,
- .createEncoderByType = mediacodec_jni_createEncoderByType,
- .delete = mediacodec_jni_delete,
-
- .configure = mediacodec_jni_configure,
- .start = mediacodec_jni_start,
- .stop = mediacodec_jni_stop,
- .flush = mediacodec_jni_flush,
-
- .getInputBuffer = mediacodec_jni_getInputBuffer,
- .getOutputBuffer = mediacodec_jni_getOutputBuffer,
-
- .dequeueInputBuffer = mediacodec_jni_dequeueInputBuffer,
- .queueInputBuffer = mediacodec_jni_queueInputBuffer,
-
- .dequeueOutputBuffer = mediacodec_jni_dequeueOutputBuffer,
- .getOutputFormat = mediacodec_jni_getOutputFormat,
-
- .releaseOutputBuffer = mediacodec_jni_releaseOutputBuffer,
- .releaseOutputBufferAtTime = mediacodec_jni_releaseOutputBufferAtTime,
-
- .infoTryAgainLater = mediacodec_jni_infoTryAgainLater,
- .infoOutputBuffersChanged = mediacodec_jni_infoOutputBuffersChanged,
- .infoOutputFormatChanged = mediacodec_jni_infoOutputFormatChanged,
-
- .getBufferFlagCodecConfig = mediacodec_jni_getBufferFlagCodecConfig,
- .getBufferFlagEndOfStream = mediacodec_jni_getBufferFlagEndOfStream,
- .getBufferFlagKeyFrame = mediacodec_jni_getBufferFlagKeyFrame,
-
- .getConfigureFlagEncode = mediacodec_jni_getConfigureFlagEncode,
- .cleanOutputBuffers = mediacodec_jni_cleanOutputBuffers,
- .signalEndOfInputStream = mediacodec_jni_signalEndOfInputStream,
-};
-
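- /* NDK backend: the AMediaFormat and AMediaCodec symbols are resolved at runtime with dlopen()
-  * and dlsym() from libmediandk.so; optional symbols may be NULL on older API levels. */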
-typedef struct FFAMediaFormatNdk {
- FFAMediaFormat api;
-
- void *libmedia;
- AMediaFormat *impl;
-
- AMediaFormat *(*new)(void);
- media_status_t (*delete)(AMediaFormat*);
-
- const char* (*toString)(AMediaFormat*);
-
- bool (*getInt32)(AMediaFormat*, const char *name, int32_t *out);
- bool (*getInt64)(AMediaFormat*, const char *name, int64_t *out);
- bool (*getFloat)(AMediaFormat*, const char *name, float *out);
- bool (*getSize)(AMediaFormat*, const char *name, size_t *out);
- bool (*getBuffer)(AMediaFormat*, const char *name, void** data, size_t *size);
- bool (*getString)(AMediaFormat*, const char *name, const char **out);
- bool (*getRect)(AMediaFormat *, const char *name,
- int32_t *left, int32_t *top, int32_t *right, int32_t *bottom);
-
- void (*setInt32)(AMediaFormat*, const char* name, int32_t value);
- void (*setInt64)(AMediaFormat*, const char* name, int64_t value);
- void (*setFloat)(AMediaFormat*, const char* name, float value);
- void (*setString)(AMediaFormat*, const char* name, const char* value);
- void (*setBuffer)(AMediaFormat*, const char* name, const void* data, size_t size);
- void (*setRect)(AMediaFormat *, const char *name,
- int32_t left, int32_t top, int32_t right, int32_t bottom);
-} FFAMediaFormatNdk;
-
-typedef struct FFAMediaCodecNdk {
- FFAMediaCodec api;
-
- void *libmedia;
- AMediaCodec *impl;
- ANativeWindow *window;
-
- AMediaCodec* (*createCodecByName)(const char *name);
- AMediaCodec* (*createDecoderByType)(const char *mime_type);
- AMediaCodec* (*createEncoderByType)(const char *mime_type);
- media_status_t (*delete)(AMediaCodec*);
-
- media_status_t (*configure)(AMediaCodec *,
- const AMediaFormat *format,
- ANativeWindow *surface,
- AMediaCrypto *crypto,
- uint32_t flags);
- media_status_t (*start)(AMediaCodec*);
- media_status_t (*stop)(AMediaCodec*);
- media_status_t (*flush)(AMediaCodec*);
-
- uint8_t* (*getInputBuffer)(AMediaCodec*, size_t idx, size_t *out_size);
- uint8_t* (*getOutputBuffer)(AMediaCodec*, size_t idx, size_t *out_size);
-
- ssize_t (*dequeueInputBuffer)(AMediaCodec*, int64_t timeoutUs);
- media_status_t (*queueInputBuffer)(AMediaCodec*, size_t idx,
- long offset, size_t size,
- uint64_t time, uint32_t flags);
-
- ssize_t (*dequeueOutputBuffer)(AMediaCodec*, AMediaCodecBufferInfo *info, int64_t timeoutUs);
- AMediaFormat* (*getOutputFormat)(AMediaCodec*);
-
- media_status_t (*releaseOutputBuffer)(AMediaCodec*, size_t idx, bool render);
- media_status_t (*releaseOutputBufferAtTime)(AMediaCodec *mData, size_t idx, int64_t timestampNs);
-
- // Available since API level 28.
- media_status_t (*getName)(AMediaCodec*, char** out_name);
- void (*releaseName)(AMediaCodec*, char* name);
-
- // Available since API level 26.
- media_status_t (*setInputSurface)(AMediaCodec*, ANativeWindow *);
- media_status_t (*signalEndOfInputStream)(AMediaCodec *);
-} FFAMediaCodecNdk;
-
-static const FFAMediaFormat media_format_ndk;
-static const FFAMediaCodec media_codec_ndk;
-
-static const AVClass amediaformat_ndk_class = {
- .class_name = "amediaformat_ndk",
- .item_name = av_default_item_name,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVClass amediacodec_ndk_class = {
- .class_name = "amediacodec_ndk",
- .item_name = av_default_item_name,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-static FFAMediaFormat *mediaformat_ndk_create(AMediaFormat *impl)
-{
- FFAMediaFormatNdk *format = av_mallocz(sizeof(*format));
- if (!format)
- return NULL;
-
- format->api = media_format_ndk;
-
- format->libmedia = dlopen("libmediandk.so", RTLD_NOW);
- if (!format->libmedia)
- goto error;
-
-#define GET_OPTIONAL_SYMBOL(sym) \
- format->sym = dlsym(format->libmedia, "AMediaFormat_" #sym);
-
-#define GET_SYMBOL(sym) \
- GET_OPTIONAL_SYMBOL(sym) \
- if (!format->sym) \
- goto error;
-
- GET_SYMBOL(new)
- GET_SYMBOL(delete)
-
- GET_SYMBOL(toString)
-
- GET_SYMBOL(getInt32)
- GET_SYMBOL(getInt64)
- GET_SYMBOL(getFloat)
- GET_SYMBOL(getSize)
- GET_SYMBOL(getBuffer)
- GET_SYMBOL(getString)
- GET_OPTIONAL_SYMBOL(getRect)
-
- GET_SYMBOL(setInt32)
- GET_SYMBOL(setInt64)
- GET_SYMBOL(setFloat)
- GET_SYMBOL(setString)
- GET_SYMBOL(setBuffer)
- GET_OPTIONAL_SYMBOL(setRect)
-
-#undef GET_SYMBOL
-#undef GET_OPTIONAL_SYMBOL
-
- if (impl) {
- format->impl = impl;
- } else {
- format->impl = format->new();
- if (!format->impl)
- goto error;
- }
-
- return (FFAMediaFormat *)format;
-
-error:
- if (format->libmedia)
- dlclose(format->libmedia);
- av_freep(&format);
- return NULL;
-}
-
-static FFAMediaFormat *mediaformat_ndk_new(void)
-{
- return mediaformat_ndk_create(NULL);
-}
-
-static int mediaformat_ndk_delete(FFAMediaFormat* ctx)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- int ret = 0;
- if (!format)
- return 0;
-
- av_assert0(format->api.class == &amediaformat_ndk_class);
-
- if (format->impl && (format->delete(format->impl) != AMEDIA_OK))
- ret = AVERROR_EXTERNAL;
- if (format->libmedia)
- dlclose(format->libmedia);
- av_free(format);
-
- return ret;
-}
-
-static char* mediaformat_ndk_toString(FFAMediaFormat* ctx)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- const char *str = format->toString(format->impl);
- return av_strdup(str);
-}
-
-static int mediaformat_ndk_getInt32(FFAMediaFormat* ctx, const char *name, int32_t *out)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- return format->getInt32(format->impl, name, out);
-}
-
-static int mediaformat_ndk_getInt64(FFAMediaFormat* ctx, const char *name, int64_t *out)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- return format->getInt64(format->impl, name, out);
-}
-
-static int mediaformat_ndk_getFloat(FFAMediaFormat* ctx, const char *name, float *out)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- return format->getFloat(format->impl, name, out);
-}
-
-static int mediaformat_ndk_getBuffer(FFAMediaFormat* ctx, const char *name, void** data, size_t *size)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- return format->getBuffer(format->impl, name, data, size);
-}
-
-static int mediaformat_ndk_getString(FFAMediaFormat* ctx, const char *name, const char **out)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- const char *tmp = NULL;
- int ret = format->getString(format->impl, name, &tmp);
-
- if (tmp)
- *out = av_strdup(tmp);
- return ret;
-}
-
-static int mediaformat_ndk_getRect(FFAMediaFormat *ctx, const char *name,
- int32_t *left, int32_t *top, int32_t *right, int32_t *bottom)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- if (!format->getRect)
- return AVERROR_EXTERNAL;
- return format->getRect(format->impl, name, left, top, right, bottom);
-}
-
-static void mediaformat_ndk_setInt32(FFAMediaFormat* ctx, const char* name, int32_t value)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- format->setInt32(format->impl, name, value);
-}
-
-static void mediaformat_ndk_setInt64(FFAMediaFormat* ctx, const char* name, int64_t value)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- format->setInt64(format->impl, name, value);
-}
-
-static void mediaformat_ndk_setFloat(FFAMediaFormat* ctx, const char* name, float value)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- format->setFloat(format->impl, name, value);
-}
-
-static void mediaformat_ndk_setString(FFAMediaFormat* ctx, const char* name, const char* value)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- format->setString(format->impl, name, value);
-}
-
-static void mediaformat_ndk_setBuffer(FFAMediaFormat* ctx, const char* name, void* data, size_t size)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- format->setBuffer(format->impl, name, data, size);
-}
-
-static void mediaformat_ndk_setRect(FFAMediaFormat *ctx, const char *name,
- int32_t left, int32_t top, int32_t right, int32_t bottom)
-{
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)ctx;
- if (!format->setRect) {
- av_log(ctx, AV_LOG_WARNING, "Doesn't support setRect\n");
- return;
- }
- format->setRect(format->impl, name, left, top, right, bottom);
-}
-
-static char *mediacodec_ndk_getName(FFAMediaCodec *ctx)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- char *ret = NULL;
- char *name = NULL;
-
- if (!codec->getName || !codec->releaseName) {
- av_log(ctx, AV_LOG_DEBUG, "getName() unavailable\n");
- return ret;
- }
-
- codec->getName(codec->impl, &name);
- if (name) {
- ret = av_strdup(name);
- codec->releaseName(codec->impl, name);
- }
-
- return ret;
-}
-
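- /* Load libmediandk.so, resolve the required AMediaCodec symbols and create the codec with the requested factory method. */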
- static inline FFAMediaCodec *ndk_codec_create(int method, const char *arg)
- {
- FFAMediaCodecNdk *codec = av_mallocz(sizeof(*codec));
- const char *lib_name = "libmediandk.so";
-
- if (!codec)
- return NULL;
-
- codec->api = media_codec_ndk;
- codec->libmedia = dlopen(lib_name, RTLD_NOW);
- if (!codec->libmedia)
- goto error;
-
-#define GET_SYMBOL(sym, required) \
- codec->sym = dlsym(codec->libmedia, "AMediaCodec_" #sym); \
- if (!codec->sym) { \
- av_log(codec, required ? AV_LOG_ERROR : AV_LOG_INFO, \
- #sym "() unavailable from %s\n", lib_name); \
- if (required) \
- goto error; \
- }
-
- GET_SYMBOL(createCodecByName, 1)
- GET_SYMBOL(createDecoderByType, 1)
- GET_SYMBOL(createEncoderByType, 1)
- GET_SYMBOL(delete, 1)
-
- GET_SYMBOL(configure, 1)
- GET_SYMBOL(start, 1)
- GET_SYMBOL(stop, 1)
- GET_SYMBOL(flush, 1)
-
- GET_SYMBOL(getInputBuffer, 1)
- GET_SYMBOL(getOutputBuffer, 1)
-
- GET_SYMBOL(dequeueInputBuffer, 1)
- GET_SYMBOL(queueInputBuffer, 1)
-
- GET_SYMBOL(dequeueOutputBuffer, 1)
- GET_SYMBOL(getOutputFormat, 1)
-
- GET_SYMBOL(releaseOutputBuffer, 1)
- GET_SYMBOL(releaseOutputBufferAtTime, 1)
-
- GET_SYMBOL(getName, 0)
- GET_SYMBOL(releaseName, 0)
-
- GET_SYMBOL(setInputSurface, 0)
- GET_SYMBOL(signalEndOfInputStream, 0)
-
-#undef GET_SYMBOL
-
- switch (method) {
- case CREATE_CODEC_BY_NAME:
- codec->impl = codec->createCodecByName(arg);
- break;
- case CREATE_DECODER_BY_TYPE:
- codec->impl = codec->createDecoderByType(arg);
- break;
- case CREATE_ENCODER_BY_TYPE:
- codec->impl = codec->createEncoderByType(arg);
- break;
- default:
- av_assert0(0);
- }
- if (!codec->impl)
- goto error;
-
- return (FFAMediaCodec *)codec;
-
-error:
- if (codec->libmedia)
- dlclose(codec->libmedia);
- av_freep(&codec);
- return NULL;
-}
-
-#define DECLARE_NDK_AMEDIACODEC_CREATE_FUNC(name, method) \
-static FFAMediaCodec *mediacodec_ndk_##name(const char *arg) \
-{ \
- return ndk_codec_create(method, arg); \
-} \
-
-DECLARE_NDK_AMEDIACODEC_CREATE_FUNC(createCodecByName, CREATE_CODEC_BY_NAME)
-DECLARE_NDK_AMEDIACODEC_CREATE_FUNC(createDecoderByType, CREATE_DECODER_BY_TYPE)
-DECLARE_NDK_AMEDIACODEC_CREATE_FUNC(createEncoderByType, CREATE_ENCODER_BY_TYPE)
-
-static int mediacodec_ndk_delete(FFAMediaCodec* ctx)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- int ret = 0;
-
- if (!codec)
- return 0;
-
- av_assert0(codec->api.class == &amediacodec_ndk_class);
-
- if (codec->impl && (codec->delete(codec->impl) != AMEDIA_OK))
- ret = AVERROR_EXTERNAL;
- if (codec->window)
- ANativeWindow_release(codec->window);
- if (codec->libmedia)
- dlclose(codec->libmedia);
- av_free(codec);
-
- return ret;
-}
-
-static int mediacodec_ndk_configure(FFAMediaCodec* ctx,
- const FFAMediaFormat* format_ctx,
- FFANativeWindow* window,
- void *crypto,
- uint32_t flags)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- FFAMediaFormatNdk *format = (FFAMediaFormatNdk *)format_ctx;
- media_status_t status;
- ANativeWindow *native_window = NULL;
-
- if (window) {
- if (window->surface) {
- JNIEnv *env = NULL;
- JNI_GET_ENV_OR_RETURN(env, ctx, -1);
- native_window = ANativeWindow_fromSurface(env, window->surface);
- // Save for release
- codec->window = native_window;
- } else if (window->native_window) {
- native_window = window->native_window;
- }
- }
-
- if (format_ctx->class != &amediaformat_ndk_class) {
- av_log(ctx, AV_LOG_ERROR, "invalid media format\n");
- return AVERROR(EINVAL);
- }
-
- if (flags & AMEDIACODEC_CONFIGURE_FLAG_ENCODE) {
- if (native_window && !codec->setInputSurface) {
- av_log(ctx, AV_LOG_ERROR, "System doesn't support setInputSurface\n");
- return AVERROR_EXTERNAL;
- }
-
- status = codec->configure(codec->impl, format->impl, NULL, NULL, flags);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "Encoder configure failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
-
- if (!native_window)
- return 0;
-
- status = codec->setInputSurface(codec->impl, native_window);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "Encoder set input surface failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
- } else {
- status = codec->configure(codec->impl, format->impl, native_window, NULL, flags);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "Decoder configure failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
- }
-
- return 0;
-}
-
-#define MEDIACODEC_NDK_WRAPPER(method) \
-static int mediacodec_ndk_ ## method(FFAMediaCodec* ctx) \
-{ \
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx; \
- media_status_t status = codec->method(codec->impl); \
- \
- if (status != AMEDIA_OK) { \
- av_log(codec, AV_LOG_ERROR, #method " failed, %d\n", status); \
- return AVERROR_EXTERNAL; \
- } \
- \
- return 0; \
-} \
-
-MEDIACODEC_NDK_WRAPPER(start)
-MEDIACODEC_NDK_WRAPPER(stop)
-MEDIACODEC_NDK_WRAPPER(flush)
-
-static uint8_t* mediacodec_ndk_getInputBuffer(FFAMediaCodec* ctx, size_t idx, size_t *out_size)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- return codec->getInputBuffer(codec->impl, idx, out_size);
-}
-
-static uint8_t* mediacodec_ndk_getOutputBuffer(FFAMediaCodec* ctx, size_t idx, size_t *out_size)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- return codec->getOutputBuffer(codec->impl, idx, out_size);
-}
-
-static ssize_t mediacodec_ndk_dequeueInputBuffer(FFAMediaCodec* ctx, int64_t timeoutUs)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- return codec->dequeueInputBuffer(codec->impl, timeoutUs);
-}
-
-static int mediacodec_ndk_queueInputBuffer(FFAMediaCodec *ctx, size_t idx,
- off_t offset, size_t size,
- uint64_t time, uint32_t flags)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- return codec->queueInputBuffer(codec->impl, idx, offset, size, time, flags);
-}
-
-static ssize_t mediacodec_ndk_dequeueOutputBuffer(FFAMediaCodec* ctx, FFAMediaCodecBufferInfo *info, int64_t timeoutUs)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- AMediaCodecBufferInfo buf_info = {0};
- ssize_t ret;
-
- ret = codec->dequeueOutputBuffer(codec->impl, &buf_info, timeoutUs);
- info->offset = buf_info.offset;
- info->size = buf_info.size;
- info->presentationTimeUs = buf_info.presentationTimeUs;
- info->flags = buf_info.flags;
-
- return ret;
-}
-
-static FFAMediaFormat* mediacodec_ndk_getOutputFormat(FFAMediaCodec* ctx)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- AMediaFormat *format = codec->getOutputFormat(codec->impl);
-
- if (!format)
- return NULL;
- return mediaformat_ndk_create(format);
-}
-
-static int mediacodec_ndk_releaseOutputBuffer(FFAMediaCodec* ctx, size_t idx, int render)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- media_status_t status;
-
- status = codec->releaseOutputBuffer(codec->impl, idx, render);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "release output buffer failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
-
- return 0;
-}
-
-static int mediacodec_ndk_releaseOutputBufferAtTime(FFAMediaCodec *ctx, size_t idx, int64_t timestampNs)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- media_status_t status;
-
- status = codec->releaseOutputBufferAtTime(codec->impl, idx, timestampNs);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "releaseOutputBufferAtTime failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
-
- return 0;
-}
-
-static int mediacodec_ndk_infoTryAgainLater(FFAMediaCodec *ctx, ssize_t idx)
-{
- return idx == AMEDIACODEC_INFO_TRY_AGAIN_LATER;
-}
-
-static int mediacodec_ndk_infoOutputBuffersChanged(FFAMediaCodec *ctx, ssize_t idx)
-{
- return idx == AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED;
-}
-
-static int mediacodec_ndk_infoOutputFormatChanged(FFAMediaCodec *ctx, ssize_t idx)
-{
- return idx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED;
-}
-
-static int mediacodec_ndk_getBufferFlagCodecConfig(FFAMediaCodec *ctx)
-{
- return AMEDIACODEC_BUFFER_FLAG_CODEC_CONFIG;
-}
-
-static int mediacodec_ndk_getBufferFlagEndOfStream(FFAMediaCodec *ctx)
-{
- return AMEDIACODEC_BUFFER_FLAG_END_OF_STREAM;
-}
-
-static int mediacodec_ndk_getBufferFlagKeyFrame(FFAMediaCodec *ctx)
-{
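-    // No AMEDIACODEC_BUFFER_FLAG_* constant is used for key frames here; return
-    // the Java-side MediaCodec.BUFFER_FLAG_KEY_FRAME value (1) directly.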
- return 1;
-}
-
-static int mediacodec_ndk_getConfigureFlagEncode(FFAMediaCodec *ctx)
-{
- return AMEDIACODEC_CONFIGURE_FLAG_ENCODE;
-}
-
-static int mediacodec_ndk_cleanOutputBuffers(FFAMediaCodec *ctx)
-{
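-    // No-op for the NDK backend: there is no cached output-buffer array that
-    // needs refreshing after an "output buffers changed" event.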
- return 0;
-}
-
-static int mediacodec_ndk_signalEndOfInputStream(FFAMediaCodec *ctx)
-{
- FFAMediaCodecNdk *codec = (FFAMediaCodecNdk *)ctx;
- media_status_t status;
-
- if (!codec->signalEndOfInputStream) {
- av_log(codec, AV_LOG_ERROR, "signalEndOfInputStream unavailable\n");
- return AVERROR_EXTERNAL;
- }
-
- status = codec->signalEndOfInputStream(codec->impl);
- if (status != AMEDIA_OK) {
- av_log(codec, AV_LOG_ERROR, "signalEndOfInputStream failed, %d\n", status);
- return AVERROR_EXTERNAL;
- }
- av_log(codec, AV_LOG_DEBUG, "signalEndOfInputStream success\n");
-
- return 0;
-}
-
-static const FFAMediaFormat media_format_ndk = {
- .class = &amediaformat_ndk_class,
-
- .create = mediaformat_ndk_new,
- .delete = mediaformat_ndk_delete,
-
- .toString = mediaformat_ndk_toString,
-
- .getInt32 = mediaformat_ndk_getInt32,
- .getInt64 = mediaformat_ndk_getInt64,
- .getFloat = mediaformat_ndk_getFloat,
- .getBuffer = mediaformat_ndk_getBuffer,
- .getString = mediaformat_ndk_getString,
- .getRect = mediaformat_ndk_getRect,
-
- .setInt32 = mediaformat_ndk_setInt32,
- .setInt64 = mediaformat_ndk_setInt64,
- .setFloat = mediaformat_ndk_setFloat,
- .setString = mediaformat_ndk_setString,
- .setBuffer = mediaformat_ndk_setBuffer,
- .setRect = mediaformat_ndk_setRect,
-};
-
-static const FFAMediaCodec media_codec_ndk = {
- .class = &amediacodec_ndk_class,
-
- .getName = mediacodec_ndk_getName,
-
- .createCodecByName = mediacodec_ndk_createCodecByName,
- .createDecoderByType = mediacodec_ndk_createDecoderByType,
- .createEncoderByType = mediacodec_ndk_createEncoderByType,
- .delete = mediacodec_ndk_delete,
-
- .configure = mediacodec_ndk_configure,
- .start = mediacodec_ndk_start,
- .stop = mediacodec_ndk_stop,
- .flush = mediacodec_ndk_flush,
-
- .getInputBuffer = mediacodec_ndk_getInputBuffer,
- .getOutputBuffer = mediacodec_ndk_getOutputBuffer,
-
- .dequeueInputBuffer = mediacodec_ndk_dequeueInputBuffer,
- .queueInputBuffer = mediacodec_ndk_queueInputBuffer,
-
- .dequeueOutputBuffer = mediacodec_ndk_dequeueOutputBuffer,
- .getOutputFormat = mediacodec_ndk_getOutputFormat,
-
- .releaseOutputBuffer = mediacodec_ndk_releaseOutputBuffer,
- .releaseOutputBufferAtTime = mediacodec_ndk_releaseOutputBufferAtTime,
-
- .infoTryAgainLater = mediacodec_ndk_infoTryAgainLater,
- .infoOutputBuffersChanged = mediacodec_ndk_infoOutputBuffersChanged,
- .infoOutputFormatChanged = mediacodec_ndk_infoOutputFormatChanged,
-
- .getBufferFlagCodecConfig = mediacodec_ndk_getBufferFlagCodecConfig,
- .getBufferFlagEndOfStream = mediacodec_ndk_getBufferFlagEndOfStream,
- .getBufferFlagKeyFrame = mediacodec_ndk_getBufferFlagKeyFrame,
-
- .getConfigureFlagEncode = mediacodec_ndk_getConfigureFlagEncode,
- .cleanOutputBuffers = mediacodec_ndk_cleanOutputBuffers,
- .signalEndOfInputStream = mediacodec_ndk_signalEndOfInputStream,
-};
-
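-// Public entry points: dispatch to either the NDK-based or the JNI-based
-// implementation depending on the ndk flag.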
-FFAMediaFormat *ff_AMediaFormat_new(int ndk)
-{
- if (ndk)
- return media_format_ndk.create();
- return media_format_jni.create();
-}
-
-FFAMediaCodec* ff_AMediaCodec_createCodecByName(const char *name, int ndk)
-{
- if (ndk)
- return media_codec_ndk.createCodecByName(name);
- return media_codec_jni.createCodecByName(name);
-}
-
-FFAMediaCodec* ff_AMediaCodec_createDecoderByType(const char *mime_type, int ndk)
-{
- if (ndk)
- return media_codec_ndk.createDecoderByType(mime_type);
- return media_codec_jni.createDecoderByType(mime_type);
-}
-
-FFAMediaCodec* ff_AMediaCodec_createEncoderByType(const char *mime_type, int ndk)
-{
- if (ndk)
- return media_codec_ndk.createEncoderByType(mime_type);
- return media_codec_jni.createEncoderByType(mime_type);
-}
-
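-// Returns the device API level: directly via android_get_device_api_level()
-// when built against API level 24 or newer, otherwise by reading
-// android.os.Build.VERSION.SDK_INT through JNI; returns -1 if no JNI
-// environment is available.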
-int ff_Build_SDK_INT(AVCodecContext *avctx)
-{
- int ret = -1;
-
-#if __ANDROID_API__ >= 24
-    // android_get_device_api_level() is a static inline before API level 29,
-    // so dlsym() might not find it.
-    //
-    // We could implement android_get_device_api_level() on top of
-    // __system_property_get(), but __system_property_get() has caused a lot of
-    // trouble and is deprecated, so avoid it for now.
-    //
-    // Hopefully the conditional compilation can be removed eventually by
-    // bumping the required API level.
- //
- ret = android_get_device_api_level();
-#else
- JNIEnv *env = NULL;
- jclass versionClass;
- jfieldID sdkIntFieldID;
- JNI_GET_ENV_OR_RETURN(env, avctx, -1);
-
- versionClass = (*env)->FindClass(env, "android/os/Build$VERSION");
- sdkIntFieldID = (*env)->GetStaticFieldID(env, versionClass, "SDK_INT", "I");
- ret = (*env)->GetStaticIntField(env, versionClass, sdkIntFieldID);
- (*env)->DeleteLocalRef(env, versionClass);
-#endif
- av_log(avctx, AV_LOG_DEBUG, "device api level %d\n", ret);
-
- return ret;
-}
-
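-// Mapping tables between MediaFormat color metadata values and the
-// corresponding AVColor* enums; the conversion helpers below fall back to the
-// *_UNSPECIFIED values when no entry matches.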
-static struct {
- enum FFAMediaFormatColorRange mf_range;
- enum AVColorRange range;
-} color_range_map[] = {
- { COLOR_RANGE_FULL, AVCOL_RANGE_JPEG },
- { COLOR_RANGE_LIMITED, AVCOL_RANGE_MPEG },
-};
-
-static struct {
- enum FFAMediaFormatColorStandard mf_standard;
- enum AVColorSpace space;
-} color_space_map[] = {
- { COLOR_STANDARD_BT709, AVCOL_SPC_BT709 },
- { COLOR_STANDARD_BT601_PAL, AVCOL_SPC_BT470BG },
- { COLOR_STANDARD_BT601_NTSC, AVCOL_SPC_SMPTE170M },
- { COLOR_STANDARD_BT2020, AVCOL_SPC_BT2020_NCL },
-};
-
-static struct {
- enum FFAMediaFormatColorStandard mf_standard;
- enum AVColorPrimaries primaries;
-} color_primaries_map[] = {
- { COLOR_STANDARD_BT709, AVCOL_PRI_BT709 },
- { COLOR_STANDARD_BT601_PAL, AVCOL_PRI_BT470BG },
- { COLOR_STANDARD_BT601_NTSC, AVCOL_PRI_SMPTE170M },
- { COLOR_STANDARD_BT2020, AVCOL_PRI_BT2020 },
-};
-
-static struct {
- enum FFAMediaFormatColorTransfer mf_transfer;
- enum AVColorTransferCharacteristic transfer;
-} color_transfer_map[] = {
- { COLOR_TRANSFER_LINEAR, AVCOL_TRC_LINEAR },
- { COLOR_TRANSFER_SDR_VIDEO, AVCOL_TRC_SMPTE170M },
- { COLOR_TRANSFER_ST2084, AVCOL_TRC_SMPTEST2084 },
- { COLOR_TRANSFER_HLG, AVCOL_TRC_ARIB_STD_B67 },
-};
-
-enum AVColorRange ff_AMediaFormatColorRange_to_AVColorRange(int color_range)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_range_map); i++)
- if (color_range_map[i].mf_range == color_range)
- return color_range_map[i].range;
-
- return AVCOL_RANGE_UNSPECIFIED;
-}
-
-int ff_AMediaFormatColorRange_from_AVColorRange(enum AVColorRange color_range)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_range_map); i++)
- if (color_range_map[i].range == color_range)
- return color_range_map[i].mf_range;
- return COLOR_RANGE_UNSPECIFIED;
-}
-
-enum AVColorSpace ff_AMediaFormatColorStandard_to_AVColorSpace(int color_standard)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_space_map); i++)
- if (color_space_map[i].mf_standard == color_standard)
- return color_space_map[i].space;
-
- return AVCOL_SPC_UNSPECIFIED;
-}
-
-int ff_AMediaFormatColorStandard_from_AVColorSpace(enum AVColorSpace color_space)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_space_map); i++)
- if (color_space_map[i].space == color_space)
- return color_space_map[i].mf_standard;
-
- return COLOR_STANDARD_UNSPECIFIED;
-}
-
-enum AVColorPrimaries ff_AMediaFormatColorStandard_to_AVColorPrimaries(int color_standard)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_primaries_map); i++)
- if (color_primaries_map[i].mf_standard == color_standard)
- return color_primaries_map[i].primaries;
-
- return AVCOL_PRI_UNSPECIFIED;
-}
-
-enum AVColorTransferCharacteristic
-ff_AMediaFormatColorTransfer_to_AVColorTransfer(int color_transfer)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_transfer_map); i++)
- if (color_transfer_map[i].mf_transfer == color_transfer)
- return color_transfer_map[i].transfer;
-
- return AVCOL_TRC_UNSPECIFIED;
-}
-
-int ff_AMediaFormatColorTransfer_from_AVColorTransfer(
- enum AVColorTransferCharacteristic color_transfer)
-{
- for (int i = 0; i < FF_ARRAY_ELEMS(color_transfer_map); i++)
- if (color_transfer_map[i].transfer == color_transfer)
- return color_transfer_map[i].mf_transfer;
-
- return COLOR_TRANSFER_UNSPECIFIED;
-}
diff --git a/spaces/congsaPfin/Manga-OCR/logs/ABC Kids Play Games Watch Videos and Learn the Alphabet with Your 3 Year Old.md b/spaces/congsaPfin/Manga-OCR/logs/ABC Kids Play Games Watch Videos and Learn the Alphabet with Your 3 Year Old.md
deleted file mode 100644
index 1600c0015d38044b07e7b4f829fb95d9252c69f7..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/ABC Kids Play Games Watch Videos and Learn the Alphabet with Your 3 Year Old.md
+++ /dev/null
@@ -1,56 +0,0 @@
-
-ABC Games for 3 Year Olds: Fun, Easy, and Educational
-
-Learning the alphabet is one of the most important skills for preschoolers. It lays the foundation for reading, writing, and communication. However, learning the alphabet can also be boring and frustrating for some kids. That's why playing games is a great way to make it fun and easy. Games can help your 3 year old learn the letters, sounds, and words in a playful and interactive way. In this article, we will share with you some of the benefits of playing ABC games with your 3 year old, some of the best ABC games you can try, and some tips on how to choose the right game for your child.
- 5 Benefits of Playing ABC Games with Your 3 Year Old
-
-Playing ABC games with your 3 year old can have many positive effects on their development. Here are some of the benefits you can expect from playing these games:
-
-Improves letter recognition and phonemic awareness. Playing ABC games can help your child recognize the shapes and names of the letters, as well as their sounds and positions in words. This can help them develop phonemic awareness, which is the ability to hear and manipulate the sounds in words. Phonemic awareness is essential for learning to read and spell.
-Enhances vocabulary and language skills. Playing ABC games can expose your child to new words and meanings, as well as reinforce their existing vocabulary. This can help them expand their language skills and comprehension. Playing ABC games can also encourage your child to use words in sentences and conversations, which can improve their communication skills.
-Boosts memory and concentration. Playing ABC games can challenge your child's memory and attention span, as they have to recall the letters, sounds, and words they have learned. This can help them improve their cognitive skills and mental abilities.
-Fosters creativity and imagination. Playing ABC games can stimulate your child's creativity and imagination, as they have to use their senses, emotions, and thoughts to play. For example, they can make up stories or songs with the letters or words they have learned, or they can draw or act out the letters or words they have learned.
-Strengthens bonding and communication. Playing ABC games with your 3 year old can be a fun and enjoyable activity that can strengthen your bond and communication with your child. You can share your feedback, praise, and encouragement with your child, as well as listen to their ideas and opinions. You can also learn more about your child's personality, interests, and abilities through playing ABC games.
-
- 10 Best ABC Games for 3 Year Olds
-
-There are many ABC games that you can play with your 3 year old, either online or offline. Here are some of the best ones that we recommend:
-
-Alphabet BINGO . This is an online game that teaches your child to identify the uppercase and lowercase letters of the alphabet. Your child has to match the letters on the bingo card with the ones that are spoken by the narrator. The game has four levels of difficulty, from easy to hard.
-Letter Bubble - Alphabetical Order . This is another online game that teaches your child to arrange the letters of the alphabet in order. Your child has to pop the bubbles that contain the letters in the correct sequence. The game has three modes: easy, medium, and hard.
-Alphabet Hunt . This is an offline game that involves finding and collecting objects that start with each letter of the alphabet. You can use any objects that you have at home or outside, such as toys, books, clothes, fruits, etc. You can make a list of the objects for each letter, or you can let your child choose them. You can also make it more fun by setting a timer or hiding the objects.
-Alphabet Slider Puzzle . This is an online game that challenges your child to solve a puzzle that contains a picture and a word that starts with a certain letter. Your child has to slide the tiles to form the picture and the word. The game has four levels of difficulty, from easy to hard.
-Alphabats - Alliteration . This is an online game that teaches your child to recognize and produce words that have the same initial sound. Your child has to help the bats catch the bugs that have words that start with the same letter as the bat. The game has three levels of difficulty, from easy to hard.
-Letter Sort with Sticky Notes . This is an offline game that teaches your child to sort the letters of the alphabet into different categories. You can use sticky notes or paper to write the letters on them. You can then ask your child to sort them by color, shape, size, or sound. For example, you can ask your child to sort the letters by vowels and consonants, or by curved and straight lines.
-Write Letters in Shaving Cream . This is an offline game that teaches your child to write the letters of the alphabet using their fingers. You can use shaving cream or any other sensory material, such as whipped cream, sand, or paint. You can spread the material on a tray or a table, and then ask your child to write the letters on it. You can also write the letters first and ask your child to trace them.
-Sponge the Alphabet . This is an offline game that teaches your child to recognize and name the letters of the alphabet using sponges. You can use sponges or any other soft material, such as cotton balls or pom poms. You can cut out the shapes of the letters from the sponges, or you can buy them ready-made. You can then ask your child to sponge paint the letters on a paper or a wall.
-Make an Alphabet Book . This is an offline game that teaches your child to create their own book with pictures and words for each letter of the alphabet. You can use paper or cardboard to make the pages of the book, and then ask your child to draw or paste pictures that start with each letter on them. You can also help your child write the words under each picture.
-Play Musical Alphabet . This is an offline game that teaches your child to sing and dance along with the alphabet song. You can use music or sing it yourself, and then ask your child to join you. You can also make it more fun by adding some actions or movements for each letter, such as clapping, jumping, or spinning.
-
- How to Choose the Right ABC Game for Your 3 Year Old
-
-Not all ABC games are suitable for every 3 year old. Some games may be too easy or too hard for your child, depending on their level of development and learning style. Here are some tips on how to choose the right ABC game for your 3 year old:
-
-Consider your child's interests and preferences. Choose games that match your child's hobbies, passions, and personality. For example, if your child loves animals, you can choose games that feature animal pictures or sounds. If your child is adventurous, you can choose games that involve exploration or discovery. If your child is artistic, you can choose games that involve drawing or painting.
-Choose games that are age-appropriate and challenging. Choose games that are suitable for your child's level of development and learning ability. For example, if your child is just starting to learn the alphabet, you can choose games that focus on letter recognition and naming. If your child already knows the letters, you can choose games that focus on letter sounds and words. You can also choose games that have different levels of difficulty, so you can adjust them according to your child's progress.
-Look for games that are interactive and engaging. Choose games that require your child to use their senses, motor skills, and cognitive skills to play. For example, you can choose games that involve touching, moving, listening, speaking, or thinking. You can also choose games that have feedback, rewards, or surprises to keep your child motivated and interested.
-Mix up different types of games to keep it fun and varied. Choose games that have different formats, themes, and objectives to avoid boredom and repetition. For example, you can mix up online and offline games, or games that use letters, pictures, sounds, or words. You can also mix up games that are individual and cooperative, or games that are competitive and cooperative.
-
- Conclusion
-
-Playing ABC games with your 3 year old can be a fun and effective way to help them learn the alphabet. ABC games can improve your child's letter recognition, phonemic awareness, vocabulary, language skills, memory, concentration, creativity, imagination, bonding, and communication. There are many ABC games that you can choose from, either online or offline. You can choose the right game for your child by considering their interests, preferences, level of development, learning style, and personality. You can also mix up different types of games to keep it fun and varied. Playing ABC games with your 3 year old can be a rewarding and enjoyable experience for both of you.
- FAQs
-
-Here are some frequently asked questions about playing ABC games with your 3 year old:
-
-What are some tips to make ABC games more fun for 3 year olds? Some tips to make ABC games more fun for 3 year olds are: - Use props, costumes, or accessories to make the games more realistic and exciting - Use songs, rhymes, or chants to make the games more musical and rhythmic - Use humor, jokes, or silly words to make the games more funny and amusing - Use praise, encouragement, or rewards to make the games more positive and supportive - Use variations, adaptations, or extensions to make the games more diverse and challenging
-How often should I play ABC games with my 3 year old? There is no fixed rule on how often you should play ABC games with your 3 year old. It depends on your child's interest, attention span, and mood. However, a general guideline is to play ABC games with your 3 year old for about 10 to 15 minutes a day, or as long as they are having fun and learning.
-How can I track my 3 year old's progress in learning the alphabet? You can track your 3 year old's progress in learning the alphabet by observing their performance in playing ABC games. You can also use informal assessments such as quizzes, tests, or checklists to measure your child's knowledge and skills in learning the alphabet. You can also use formal assessments such as standardized tests or evaluations to compare your child's progress with other children of the same age and grade level.
-What are some other resources to help my 3 year old learn the alphabet? Some other resources to help your 3 year old learn the alphabet are: - Books, magazines, or newspapers that have letters, words, or stories that your child can read or listen to - Videos, podcasts, or apps that have songs, animations, or games that teach the alphabet - Flashcards, posters, or stickers that have letters, pictures, or words that your child can see or touch - Toys, puzzles, or crafts that have letters, shapes, or colors that your child can play with or make
-What are some common mistakes to avoid when playing ABC games with my 3 year old? Some common mistakes to avoid when playing ABC games with your 3 year old are: - Forcing your child to play when they are not interested, tired, or hungry - Expecting your child to learn the alphabet too fast or too slow - Comparing your child's progress with other children's progress - Criticizing your child's mistakes or failures - Making the games too easy or too hard for your child
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Able Cee Live Performance Vol 3 Download MP3 Songs of Gospel Praise and Worship.md b/spaces/congsaPfin/Manga-OCR/logs/Able Cee Live Performance Vol 3 Download MP3 Songs of Gospel Praise and Worship.md
deleted file mode 100644
index ade023252470f845e00a23c8aa06691af9ae70eb..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Able Cee Live Performance Vol 3 Download MP3 Songs of Gospel Praise and Worship.md
+++ /dev/null
@@ -1,89 +0,0 @@
-
-Able Cee Live Performance Vol 3 MP3 Download
-If you are looking for a gospel album that will uplift your spirit and fill your heart with joy, then you should check out Able Cee Live Performance Vol 3 . This is a live recording of Able Cee, a Nigerian gospel singer, producer, and songwriter, who delivers a powerful performance of praise and worship songs. In this article, we will tell you everything you need to know about Able Cee Live Performance Vol 3, including who is Able Cee, what is Live Performance Vol 3, why should you download it, how to download it, and where to download it. We will also answer some frequently asked questions about the album. So, let's get started!
- Who is Able Cee?
-Able Cee, whose real name is Chikaodiri Okpara, is a Nigerian contemporary gospel singer, producer, and songwriter from Ebonyi State. He is also known as Nothing Mega, which is the title of his debut album with his group C-JEC International in 2006. Able Cee has been singing since he was a child, and he has a passion for spreading the gospel through music. He has released several albums and singles, such as Gospel Live Performance Praise Vol 1, Gospel Live Performance Praise Vol 2, Gospel Live Performance Praise Vol 4, Nothing Mega Reloaded, Ihe Dikwuru Bu Ndu (Life), and many more. He is also a music director and a keyboardist. He has performed in many churches and events across Nigeria and beyond.
- What is Live Performance Vol 3?
-Live Performance Vol 3 is the third installment of Able Cee's live performance series, which he started in 2017. It is a live recording of Able Cee performing various gospel songs in a church setting, accompanied by a band and a choir. The album consists of two tracks, Track A and Track B, each lasting about 30 minutes. The songs are a mix of Nigerian/Naija gospel praise and worship songs, some of which are original compositions by Able Cee, and some of which are covers of popular songs by other artists. The album showcases Able Cee's vocal skills, musical talent, and spiritual fervor.
- Why should you download Live Performance Vol 3?
-There are many reasons why you should download Live Performance Vol 3. Here are some of them:
-
-It is a great source of inspiration and motivation for your daily life. The songs are uplifting, encouraging, and edifying. They will help you to praise God, thank Him for His blessings, trust Him in times of trouble, and seek His guidance.
-It is a great way to enjoy gospel music at its best. The songs are well-produced, well-arranged, and well-performed. The sound quality is excellent, the instrumentation is rich, and the vocals are clear. You will feel like you are in the presence of Able Cee and his band.
-It is a great way to support Able Cee and his ministry. By downloading the album, you are showing your appreciation for his work and his message. You are also helping him to reach more people with the gospel through his music.
-
- How to download Live Performance Vol 3?
-Downloading Live Performance Vol 3 is easy and simple. Here are the steps you need to follow:
-
-Go to Go to https://www.ablecee.com , the official website of Able Cee. You will see a banner that says "Download Live Performance Vol 3 Now". Click on it and you will be redirected to a page where you can choose your preferred format and quality. You can download the album as MP3, WAV, or FLAC files, and you can choose between high, medium, or low quality. The higher the quality, the larger the file size.
-Select the format and quality that you want and click on the "Download" button. You will be asked to enter your name and email address. This is to verify that you are a human and not a robot, and also to send you a confirmation email with the download link. Enter your details and click on "Submit".
-Check your email inbox for the confirmation email from Able Cee. It should arrive within a few minutes. If you don't see it, check your spam or junk folder. Open the email and click on the download link. You will be taken to a page where you can download the album as a ZIP file. Save the file to your device and extract it using a ZIP extractor software.
-Enjoy listening to Live Performance Vol 3 on your device. You can also transfer it to other devices or burn it to a CD if you want.
-
- Where to download Live Performance Vol 3?
-Besides the official website of Able Cee, there are other sources where you can download Live Performance Vol 3. However, you should be careful about the quality and authenticity of these sources, as some of them may contain viruses, malware, or fake files. Here are some of the reputable sources where you can download Live Performance Vol 3 safely and legally:
-
- Conclusion
-We hope that this article has given you all the information you need about Able Cee Live Performance Vol 3 MP3 Download. This is a wonderful gospel album that will bless your soul and make you dance for joy. You can download it from the official website of Able Cee or from other sources that we have mentioned above. You can also share it with your friends and family who love gospel music. Thank you for reading this article and may God bless you!
- FAQs
-Q1: What is the genre of Live Performance Vol 3?
-A1: The genre of Live Performance Vol 3 is gospel music, specifically Nigerian/Naija gospel praise and worship music.
- Q2: How long is Live Performance Vol 3?
-A2: Live Performance Vol 3 is about one hour long, divided into two tracks of about 30 minutes each.
- Q3: How much does Live Performance Vol 3 cost?
-A3: Live Performance Vol 3 is free to download from the official website of Able Cee or from other sources that we have mentioned above. However, if you want to support Able Cee and his ministry, you can make a donation through his website or buy his other albums.
- Q4: Is Live Performance Vol 3 available on other platforms?
-A4: Yes, Live Performance Vol 3 is also available on other platforms such as YouTube, Spotify, Apple Music, Deezer, Boomplay, and more.
- Q5: What are some other albums by Able Cee?
-A5: Some other albums by Able Cee are Gospel Live Performance Praise Vol 1, Gospel Live Performance Praise Vol 2, Gospel Live Performance Praise Vol 4, Nothing Mega Reloaded, Ihe Dikwuru Bu Ndu ( Life), and many more. You can find them on his website or on other platforms that we have mentioned above.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Agar.io Free Skins How to Get Them and What They Look Like.md b/spaces/congsaPfin/Manga-OCR/logs/Agar.io Free Skins How to Get Them and What They Look Like.md
deleted file mode 100644
index 131f8fc161e3df32c3e5756563d2fa07fbd3cee1..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Agar.io Free Skins How to Get Them and What They Look Like.md
+++ /dev/null
@@ -1,122 +0,0 @@
-
-Agar.io Free Skins Download: How to Customize Your Cell and Play with Style
-Do you love playing online games that are simple yet addictive? Do you want to customize your character and stand out from the crowd? If you answered yes to both questions, then you should try agar.io, a popular browser game that lets you control a cell and eat other cells to grow bigger. In this article, we will tell you everything you need to know about agar.io, including how to get free skins for your cell and how to install and use them.
- What is Agar.io and Why You Should Play It
-Agar.io is a multiplayer online game that was released in 2015 by a Brazilian developer named Matheus Valadares. The game is inspired by the agar, a substance used in microbiology to culture bacteria. The game is simple: you control a cell on a map filled with other cells, both players and bots. Your goal is to eat smaller cells and avoid being eaten by bigger ones. You can also split your cell into smaller pieces to move faster or to escape from predators. The game has different modes, such as free-for-all, teams, experimental, and battle royale.
- The Basics of Agar.io Gameplay
-To play agar.io, you just need a web browser and an internet connection. You can choose a nickname for your cell, or leave it blank if you want to be anonymous. You can also choose a region and a mode to play in. Once you enter the game, you will see your cell on the map, along with other cells of different colors and sizes. You can move your cell with your mouse cursor, and use the space bar to split it or the W key to eject some mass. You can also zoom in and out with the mouse wheel or the + and - keys.
- The Benefits of Playing Agar.io
-Agar.io is not only fun, but also beneficial for your brain. Playing agar.io can help you improve your reflexes, concentration, strategy, and spatial awareness. It can also help you relieve stress, boredom, and anxiety. Moreover, playing agar.io can help you socialize with other players from around the world. You can chat with them using the chat box, or join clans and groups with them. You can also make friends or enemies with other players based on your interactions.
- What are Agar.io Skins and How to Get Them
-One of the best features of agar.io is that you can customize your cell with different skins. Skins are images or patterns that cover your cell and make it look more unique and attractive. There are hundreds of skins available for agar.io, ranging from flags, logos, memes, celebrities, animals, cartoons, and more. Some skins are official and can be used by anyone, while others are unofficial and require some tricks to use.
- The Types of Agar.io Skins
-There are three main types of agar.io skins: default skins, premium skins, and custom skins.
-
-Default skins are the official skins that are available for everyone. You can use them by typing their name as your nickname in the game. For example, if you type "doge" as your nickname, your cell will have the doge meme skin. You can find a list of all default skins here .
-Premium skins are the official skins that require some coins or coupons to unlock. You can get coins or coupons by playing the game, watching ads, completing surveys, or buying them with real money. You can use them to buy skins from the in-game shop. Premium skins are usually more detailed and exclusive than default skins. You can find a list of all premium skins here .
-Custom skins are the unofficial skins that are created by users or third-party websites. They are not supported by the game developers, but they can be used with some hacks or extensions. Custom skins can be any image or design that you want, as long as it follows the game rules and does not violate any copyrights or trademarks. You can find a list of some custom skins here .
-
- The Methods of Getting Agar.io Skins
-There are three main methods of getting agar.io skins: using the in-game shop, using user scripts, and using custom skins.
- Using the In-Game Shop
-The easiest and safest way to get agar.io skins is to use the in-game shop. The shop offers a variety of premium skins that you can buy with coins or coupons. You can also get some free skins by watching ads or completing surveys. To access the shop, you need to create an account and log in to the game. Then, you can click on the shop icon on the main menu and browse through the available skins. You can preview the skins before buying them, and you can also see how many coins or coupons they cost. Once you buy a skin, you can use it by selecting it from your inventory.
- Using User Scripts
-Another way to get agar.io skins is to use user scripts. User scripts are pieces of code that modify the game's behavior or appearance. They can be used to add features, change settings, or enable custom skins. To use user scripts, you need to install a browser extension that supports them, such as Tampermonkey or Greasemonkey. Then, you need to find a user script that offers agar.io skins, such as Agar Tool or Legend Mod . You can install the user script by clicking on its link and following the instructions. Once you install the user script, you can access its settings and choose the skin that you want to use.
- Using Custom Skins
-The third way to get agar.io skins is to use custom skins. Custom skins are images that you upload to a third-party website and then use in the game. To use custom skins, you need to find a website that offers them, such as Agar.io Skins or Agario Skins . Then, you need to upload your image or choose one from the gallery. You can also edit your image with filters, stickers, text, and more. Once you have your image ready, you need to copy its URL and paste it as your nickname in the game. Your cell will then have the custom skin that you chose.
- How to Install and Use Agar.io Skins
-Now that you know how to get agar.io skins, let's see how to install and use them.
- How to Install User Scripts for Agar.io Skins
-To install user scripts for agar.io skins, you need to follow these steps:
-
-Download and install a browser extension that supports user scripts, such as Tampermonkey or Greasemonkey.
-Find a user script that offers agar.io skins, such as Agar Tool or Legend Mod.
-Click on the link of the user script and follow the instructions to install it.
-Restart your browser and open agar.io.
-Access the settings of the user script and choose the skin that you want to use.
-Enjoy playing with your new skin!
-
- How to Use Custom Skins for Agar.io
-To use custom skins for agar.io, you need to follow these steps:
-
-Find a website that offers custom skins for agar.io, such as Agar.io Skins or Agario Skins.
-Upload your image or choose one from the gallery.
-Edit your image with filters, stickers, text, and more if you want.
-Copy the URL of your image.
-Paste it as your nickname in agar.io.
-Enjoy playing with your custom skin!
-
- Conclusion
-Agar.io is a fun and addictive online game that lets you control a cell and eat other cells to grow bigger. You can also customize your cell with different skins that make it look more unique and attractive. There are many ways to get agar.io skins, such as using the in-game shop, using user scripts, or using custom skins. You can also install and use agar.io skins easily by following the steps we explained in this article. We hope you enjoyed reading this article and learned something new about agar.io skins. Now, go ahead and try them out for yourself and see how they change your gaming experience!
- Summary of the Main Points
-
-Agar.io is a multiplayer online game that lets you control a cell and eat other cells to grow bigger.
-You can customize your cell with different skins that cover your cell and make it look more unique and attractive.
-There are three types of agar.io skins: default skins, premium skins, and custom skins.
-There are three methods of getting agar.io skins: using the in-game shop, using user scripts, and using custom skins.
-You can install and use agar.io skins by following the steps we explained in this article.
-
- FAQs
-
-What is the best skin for agar.io?
-There is no definitive answer to this question, as different skins may appeal to different players. However, some factors that may influence your choice are the design, the color, the size, and the popularity of the skin. You may want to choose a skin that matches your personality, your mood, or your strategy. For example, you may want to use a flag skin to show your nationality, a meme skin to make others laugh, or a camouflage skin to blend in with the background.
-How do I change my skin in agar.io?
-You can change your skin in agar.io by selecting a different one from your inventory or from the user script settings. You can also change your skin by typing a different name or URL as your nickname. However, you need to restart the game or refresh the page to apply the changes.
-Are agar.io skins safe to use?
-Agar.io skins are generally safe to use, as long as they are from official or reputable sources. However, some user scripts or custom skins may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Therefore, you should always be careful when downloading or installing anything from unknown or suspicious websites. You should also use antivirus software and firewall to protect your device.
-Can I create my own skin for agar.io?
-Yes, you can create your own skin for agar.io by using custom skins. Custom skins are images that you upload to a third-party website and then use in the game. You can use any image or design that you want, as long as it follows the game rules and does not violate any copyrights or trademarks. You can also edit your image with filters, stickers, text, and more.
-Can I share my skin with other players?
-Yes, you can share your skin with other players by sending them the URL of your image or by posting it on social media or forums. However, keep in mind that not all players may be able to see your skin, as they may have different settings or extensions that block or replace it. Also, some players may not like your skin or find it offensive, so be respectful and considerate when sharing it.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Experience Epic Battles with Godzilla Mod in Animal Revolt Battle Simulator.md b/spaces/congsaPfin/Manga-OCR/logs/Experience Epic Battles with Godzilla Mod in Animal Revolt Battle Simulator.md
deleted file mode 100644
index 3deeb4541612e1295690157366ce5c384ae3ad74..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Experience Epic Battles with Godzilla Mod in Animal Revolt Battle Simulator.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-Animal Revolt Battle Simulator Godzilla Mod Download: How to Unleash the King of the Monsters
- Do you love animals, simulators, and giant monsters? If so, you might want to check out Animal Revolt Battle Simulator, a game that lets you create funny and chaotic battles between all sorts of ragdoll creatures. And if you want to spice things up even more, you can download the Godzilla mod, which adds the legendary kaiju as a playable unit in the game. In this article, we will tell you everything you need to know about Animal Revolt Battle Simulator and the Godzilla mod, including how to download and install it, how to customize it, and how to use it in the game.
- What is Animal Revolt Battle Simulator?
- Animal Revolt Battle Simulator is a physics-based sandbox game where you can create epic battles between ragdoll creatures. You can build your own maps or pick from a selection of ready-made ones, place up to seven opposing armies made of different types of beasts, and watch them tear each other apart in an explosive spectacle. You can also join the fight yourself in the first-person mode and blow the enemy away with some powerful guns.
- A game with a huge selection of animals, weapons, maps, and custom units
- One of the best features of Animal Revolt Battle Simulator is its variety. You can choose from more than 70 creatures to use in your battles, ranging from dinosaurs, dragons, sharks, spiders, elephants, lions, bears, wolves, gorillas, humans, zombies, robots, aliens, and more. You can also equip them with different weapons, such as swords, axes, spears, guns, rockets, lasers, flamethrowers, grenades, mines, etc. You can also create your own custom monsters by combining different body parts and weapons. The possibilities are endless!
- The game also offers a wide range of maps to play on. You can use realistic environments like forests, deserts, islands, mountains, cities, farms, etc., or fantasy settings like castles, temples, pyramids, spaceships, etc. You can also build your own maps using various props and objects. You can even download custom maps created by other players from the Steam Workshop.
- A game with a campaign mode, a first-person mode, and a Steam Workshop integration
- If you want to test your tactical and strategic skills in Animal Revolt Battle Simulator, you can try the campaign mode, where you have to face different challenges and scenarios with limited resources and units. You can also play the game in the first-person mode, where you can control one of the animals or weapons and join the battle yourself. You can switch between different units and weapons during the fight, and experience the action from different perspectives.
- Another great feature of Animal Revolt Battle Simulator is its Steam Workshop integration, which allows you to download and upload custom content created by other players. You can find hundreds of mods, maps, units, weapons, skins, and more on the Workshop, and easily add them to your game. You can also share your own creations with the community and get feedback and ratings.
- What is the Godzilla mod?
- If you are a fan of Godzilla, the king of the monsters, you will love the Godzilla mod for Animal Revolt Battle Simulator. This mod adds Godzilla as a weaponized animal unit that you can use in your battles. You can unleash his atomic breath, his tail swipe, his roar, and his sheer size and strength against your enemies. You can also customize his appearance and abilities to suit your preferences. And you can fight against or alongside Godzilla in various scenarios inspired by his movies.
- A mod that adds Godzilla as a weaponized animal unit
- The Godzilla mod is one of the most popular and impressive mods for Animal Revolt Battle Simulator. It adds Godzilla as a new animal unit that you can place on your maps and use in your battles. Godzilla is a massive creature that towers over most other units in the game. He has a lot of health and armor, making him very durable and resistant to damage. He also has a lot of power and speed, making him very deadly and agile.
- But what makes Godzilla truly unique is his arsenal of attacks. He can use his atomic breath, a powerful beam of energy that he shoots from his mouth, to destroy anything in his path. He can also use his tail swipe, a sweeping motion that he does with his long tail, to knock down and crush his foes. He can also use his roar, a loud sound that he emits from his throat, to stun and scare his enemies. And he can also use his claws, teeth, and body to bite, scratch, and slam his opponents.
- A mod that lets you customize Godzilla's appearance and abilities
- One of the coolest features of the Godzilla mod is that it lets you customize Godzilla's appearance and abilities to your liking. You can change his texture and skin color, making him look more realistic or more cartoonish. You can also change his size, making him bigger or smaller. You can also change his stats, such as his health, armor, speed, power, etc., making him stronger or weaker. And you can also change his attacks, such as their damage, range, cooldown, etc., making them more or less effective.
- To customize Godzilla's appearance and abilities, you need to use the customizer tool that comes with the mod. You can access it by clicking on the "Customize" button on the bottom right corner of the screen when you select Godzilla as a unit. You will see a menu with different options that you can adjust using sliders or buttons. You can also save your customizations as presets that you can load later.
- A mod that lets you fight against or alongside Godzilla in various scenarios
- Another fun feature of the Godzilla mod is that it lets you fight against or alongside Godzilla in various scenarios based on his movies. You can recreate some of the iconic battles that Godzilla had with other monsters or humans in different settings. For example, you can pit Godzilla against King Kong on Skull Island, or against Mechagodzilla in Tokyo, or against Mothra in Hawaii. You can also team up with Godzilla and help him defeat his enemies or protect him from harm.
- To fight against or alongside Godzilla in various scenarios, you need to use the scenario tool that comes with the mod. You can access it by clicking on the "Scenario" button on the bottom right corner of the screen when you select Godzilla as a unit. You will see a list of scenarios that you can choose from, each with a description and a preview image. You can also create your own scenarios by using the map editor and placing different units and props on your map.
- How to download and install the Godzilla mod?
- If you want to download and install the Godzilla mod for Animal Revolt Battle Simulator, you need to follow these simple steps:
- Step 1: Subscribe to the mod on Steam Workshop
- The easiest way to get the Godzilla mod is to subscribe to it on Steam Workshop, the online platform where you can find and share custom content for various games. To do this, you need to have Animal Revolt Battle Simulator installed on your computer and have a Steam account. Then, you need to go to the Godzilla mod page on Steam Workshop and click on the "Subscribe" button. This will automatically download the mod and add it to your game.
- Step 2: Launch Animal Revolt Battle Simulator and select "Workshop" from the main menu
- Once you have subscribed to the Godzilla mod, you need to launch Animal Revolt Battle Simulator and select "Workshop" from the main menu. This will take you to a screen where you can see all the mods that you have subscribed to on Steam Workshop. You can also browse and search for other mods that you might be interested in.
- Step 3: Find the Godzilla mod and click on "Load"
- On the Workshop screen, you need to find the Godzilla mod and click on the "Load" button. This will load the mod into your game and make it available for use. You will see a confirmation message that says "Mod loaded successfully". You can also unload the mod by clicking on the "Unload" button if you want to disable it.
- Step 4: Enjoy playing with Godzilla in the game
- Now that you have loaded the Godzilla mod, you can enjoy playing with Godzilla in the game. You can find him in the animal selection menu under the category "Weaponized Animals". You can place him on your maps and use him in your battles. You can also customize his appearance and abilities using the customizer tool, and fight against or alongside him in various scenarios using the scenario tool.
- Tips and tricks for using the Godzilla mod
- The Godzilla mod is a lot of fun to use, but it can also be challenging and tricky at times. Here are some tips and tricks that can help you get the most out of it:
- How to change Godzilla's texture and skin
- If you want to change Godzilla's texture and skin, you need to use the customizer tool that comes with the mod. You can access it by clicking on the "Customize" button on the bottom right corner of the screen when you select Godzilla as a unit. Then, you need to click on the "Texture" tab on the top left corner of the menu. You will see a list of different textures that you can choose from, such as classic, realistic, cartoon, etc. You can also adjust the color of each texture using sliders or buttons. You can preview your changes on a 3D model of Godzilla on the right side of the menu.
- How to attach weapons and body parts to Godzilla
- If you want to attach weapons and body parts to Godzilla, you need to use the customizer tool that comes with the mod. You can access it by clicking on the "Customize" button on the bottom right corner of the screen when you select Godzilla as a unit. Then, you need to click on the "Attachments" tab on the top left corner of the menu. You will see a list of different attachments that you can choose from, such as horns, spikes, wings, guns, rockets, lasers, etc. You can also adjust the size, position, and rotation of each attachment using sliders or buttons. You can preview your changes on a 3D model of Godzilla on the right side of the menu.
- How to control Godzilla in the first-person mode
- If you want to control Godzilla in the first-person mode, you need to use the first-person tool that comes with the mod. You can access it by clicking on the "First-Person" button on the bottom right corner of the screen when you select Godzilla as a unit. This will switch your view to Godzilla's perspective and let you control him using your keyboard and mouse. You can move around using the W, A, S, and D keys, and look around using your mouse. You can also use your mouse wheel to zoom in and out. You can use your left mouse button to use your atomic breath, your right mouse button to use your tail swipe, and your space bar to use your roar. You can also switch between different units and weapons during the fight by pressing the Q and E keys.
- Conclusion and FAQs
- Animal Revolt Battle Simulator is a fun and crazy game that lets you create epic battles between ragdoll creatures. And if you want to make it even more fun and crazy, you can download the Godzilla mod, which adds the king of the monsters as a playable unit in the game. You can customize his appearance and abilities, and fight against or alongside him in various scenarios. You can also control him in the first-person mode and unleash his power on your enemies.
- If you are interested in downloading and installing the Godzilla mod for Animal Revolt Battle Simulator, you can follow the steps that we have explained in this article. You can also check out some tips and tricks that we have shared to help you get the most out of it. We hope that you have enjoyed this article and that you have learned something new.
- Here are some FAQs that you might have about Animal Revolt Battle Simulator and the Godzilla mod:
- Q: Is Animal Revolt Battle Simulator free to play?
-A: No, Animal Revolt Battle Simulator is not free to play. It is a paid game that you can buy on Steam for $14.99 USD.
- Q: Is the Godzilla mod free to download?
-A: Yes, the Godzilla mod is free to download. You can subscribe to it on Steam Workshop and add it to your game.
- Q: Can I play Animal Revolt Battle Simulator online with other players?
-A: No, Animal Revolt Battle Simulator does not have an online multiplayer mode. It is a single-player game that you can play offline, with Steam Cloud support for syncing your saves.
- Q: Can I play Animal Revolt Battle Simulator on other platforms besides PC?
-A: No, Animal Revolt Battle Simulator is only available for PC. It is not compatible with other platforms such as consoles or mobile devices.
- Q: Can I request or suggest new features or improvements for Animal Revolt Battle Simulator or the Godzilla mod?
-A: Yes, you can request or suggest new features or improvements for Animal Revolt Battle Simulator or the Godzilla mod by contacting the developers or the modders through their Steam pages or social media accounts. They are always open to feedback and ideas from their fans.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/GTA 5 Copy Download Enjoy the Stunning Visuals Faster Loading and More of the Ultimate Grand Theft Auto V Experience.md b/spaces/congsaPfin/Manga-OCR/logs/GTA 5 Copy Download Enjoy the Stunning Visuals Faster Loading and More of the Ultimate Grand Theft Auto V Experience.md
deleted file mode 100644
index cedd19b9b802bef9bf74b5df742e7c6ebcc9cb65..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/GTA 5 Copy Download Enjoy the Stunning Visuals Faster Loading and More of the Ultimate Grand Theft Auto V Experience.md
+++ /dev/null
@@ -1,112 +0,0 @@
-
-How to Download GTA 5 for Free
-Grand Theft Auto V, or GTA 5, is one of the most popular and successful video games of all time. It is an open-world action-adventure game that lets you explore the city of Los Santos and its surrounding areas, as well as engage in various missions, heists, activities, and online multiplayer modes. GTA 5 has been praised for its stunning graphics, immersive gameplay, rich story, diverse characters, hilarious satire, and endless replay value.
-If you are a fan of GTA 5 or want to experience it for yourself, you might be wondering how to download it for free. After all, GTA 5 is not a cheap game, and buying it from official sources can cost you anywhere from $20 to $60, depending on the platform and edition. However, there are some ways to get GTA 5 for free or at a lower price, if you know where to look.
-download gta 5 copy Download File ☆ https://urlca.com/2uObZO
-In this article, we will show you how to download GTA 5 for free from different platforms and sources, as well as what are the system requirements, installation guide, and reviews of the game. By the end of this article, you will be able to enjoy GTA 5 on your PC without spending a dime.
- GTA 5 System Requirements
-Before you download GTA 5 for free, you need to make sure that your PC can run it smoothly. GTA 5 is a demanding game that requires a powerful system to run at its full potential. Here are the minimum and recommended system requirements for running GTA 5 on PC:
-
-Minimum System Requirements. OS: Windows 8.1 64 Bit, Windows 8 64 Bit, or Windows 7 64 Bit Service Pack 1; Processor: Intel Core 2 Quad CPU Q6600 @ 2.40GHz (4 CPUs) / AMD Phenom 9850 Quad-Core Processor (4 CPUs) @ 2.5GHz; Memory: 4GB; Video Card: NVIDIA 9800 GT 1GB / AMD HD 4870 1GB (DX 10, 10.1, 11); Sound Card: 100% DirectX 10 compatible; HDD Space: ...
-Recommended System Requirements. OS: Windows 10 - April Update (v1803); Processor: Intel Core i7-8700K / AMD Ryzen Threadripper; Memory: ...
-
-If your PC meets or exceeds these requirements, you should be able to run GTA 5 without any major issues. However, if your PC falls short of these requirements, you might experience low frame rates, crashes, glitches, or other problems. In that case, you might want to upgrade your PC or lower the graphics settings in the game.
- GTA 5 Download Options
- Now that you know the system requirements for GTA 5, you might be wondering where to download it for free. There are several platforms and sources that offer GTA 5 for free or at a discounted price, but they also have their own advantages and disadvantages. In this section, we will compare the different options and help you choose the best one for you.
- Steam
-Steam is one of the most popular and trusted platforms for buying and downloading PC games. It has a huge library of games, including GTA 5, and it also offers various features such as cloud saves, achievements, community, and more. Steam also has frequent sales and discounts, so you might be able to get GTA 5 for a lower price than usual.
-To download GTA 5 from Steam, you need to have a Steam account and the Steam client installed on your PC. You can create a Steam account for free on their website, and download the Steam client from there as well. Once you have Steam on your PC, you can search for GTA 5 in the store and purchase it with your preferred payment method. After that, you can download and install GTA 5 on your PC through Steam.
-How to download GTA 5 PC digital copy for free
-GTA 5 free download on Epic Games Store
-GTA 5 PC download size and system requirements
-GTA 5 Premium Edition download with Criminal Enterprise Starter Pack
-GTA 5 download with Shark Cash Cards bundle
-GTA 5 Steam download vs Epic Games Store download
-GTA 5 PC download with mods and cheats
-GTA 5 download error and how to fix it
-GTA 5 PC download with online multiplayer mode
-GTA 5 download with Rockstar Games Social Club account
-GTA 5 PC download with ray tracing and graphics enhancements
-GTA 5 download with all DLCs and updates
-GTA 5 PC download with controller support
-GTA 5 download speed and how to improve it
-GTA 5 PC download with VR and motion sickness settings
-GTA 5 download with cross-play and cross-save features
-GTA 5 PC download with custom music and radio stations
-GTA 5 download with offline and single-player mode
-GTA 5 PC download with keyboard and mouse controls
-GTA 5 download with achievements and trophies
-GTA 5 PC download with split-screen and co-op mode
-GTA 5 download with character creation and customization
-GTA 5 PC download with role-playing and simulation mods
-GTA 5 download with heists and missions guide
-GTA 5 PC download with best cars and vehicles list
-GTA 5 download with Easter eggs and secrets
-GTA 5 PC download with best weapons and gear list
-GTA 5 download with cheats and codes for PC
-GTA 5 PC download with best settings and optimization tips
-GTA 5 download with map and locations guide
-GTA 5 PC download with best characters and storylines list
-GTA 5 download with reviews and ratings from critics and users
-GTA 5 PC download with best soundtracks and songs list
-GTA 5 download with comparison to previous GTA games
-GTA 5 PC download with best wallpapers and screenshots gallery
-GTA 5 download with trivia and fun facts about the game
-GTA 5 PC download with best jokes and memes about the game
-GTA 5 download with fan theories and speculations about the game
-GTA 5 PC download with best fan art and cosplay about the game
-GTA 5 download with news and updates about the game
-GTA 5 PC download with best alternatives and similar games list
-GTA 5 download with rumors and leaks about the game's future
-GTA 5 PC download with best tips and tricks for beginners and experts
-GTA 5 download with challenges and achievements guide for PC
-GTA 5 PC download with best videos and streams about the game
-The benefits of using Steam are that you get a legitimate copy of GTA 5 that is updated and supported by the developers, and that you can access all the features and modes of the game, including online multiplayer. The drawbacks are that you have to pay for the game, unless it is on sale or free for a limited time, and that you have to use Steam to launch and play the game, which might affect your performance or privacy.
- Epic Games Store
-Epic Games Store is another platform that sells and distributes PC games. It is owned by Epic Games, the creators of Fortnite and Unreal Engine, and it competes with Steam by offering exclusive games and free games every week. In fact, GTA 5 was one of the free games that Epic Games Store gave away in May 2020, and millions of users claimed it during that period.
-To download GTA 5 from Epic Games Store, you need to have an Epic Games account and the Epic Games Launcher installed on your PC. You can create an Epic Games account for free on their website, and download the Epic Games Launcher from there as well. Once you have Epic Games Launcher on your PC, you can search for GTA 5 in the store and purchase it with your preferred payment method. After that, you can download and install GTA 5 on your PC through Epic Games Launcher.
-The benefits of using Epic Games Store are similar to Steam, except that you might be able to get GTA 5 for free if it is part of their weekly giveaways. The drawbacks are also similar to Steam, except that some users have reported issues with Epic Games Launcher such as crashes, errors, or slow downloads.
- Rockstar Games Launcher
-Rockstar Games Launcher is the official platform for buying and downloading games from Rockstar Games, the developers of GTA 5. It is a relatively new platform that was launched in September 2019, and it offers some exclusive bonuses and rewards for Rockstar Games fans. For example, players who downloaded the Rockstar Games Launcher before October 8th, 2019 received a free copy of GTA San Andreas on PC.
-To download GTA 5 from Rockstar Games Launcher, you need to have a Rockstar Games Social Club account and the Rockstar Games Launcher installed on your PC. You can create a Rockstar Games Social Club account for free on their website, and download the Rockstar Games Launcher from there as well. Once you have Rockstar Games Launcher on your PC, you can search for GTA 5 in the store and purchase it with your preferred payment method. After that, you can download and install GTA 5 on your PC through Rockstar Games Launcher.
-The benefits of using Rockstar Games Launcher are that you get a direct connection to the developers of GTA 5, and that you can access some exclusive content and offers for Rockstar Games titles. The drawbacks are that you have to pay full price for GTA 5, unless it is on sale or free for a limited time, and that you have to use Rockstar Games Launcher to launch and play the game, which might affect your performance or privacy.
- Torrent Sites
-Torrent sites are websites that allow users to share files through peer-to-peer networks. They are often used to distribute pirated or illegal content, such as movies, music, software, or games. GTA 5 is one of the most pirated games in history, and there are many torrent sites that offer cracked versions of GTA 5 for free.
-To download GTA 5 from torrent sites, you need to have a torrent client and a VPN installed on your PC. A torrent client is software that allows you to download files from torrent sites, such as uTorrent, BitTorrent, or qBittorrent. A VPN is a service that encrypts and hides your online activity, such as NordVPN, ExpressVPN, or CyberGhost. You can download and install these tools from their respective websites.
-Once you have a torrent client and a VPN on your PC, you can search for GTA 5 on torrent sites such as The Pirate Bay, RARBG, or 1337x. You can choose the torrent that has the most seeders, leechers, and positive comments, and download it to your PC. After that, you can open the torrent file with your torrent client and start downloading GTA 5 on your PC. You should also activate your VPN before downloading GTA 5 from torrent sites, to protect your privacy and avoid legal issues.
-The benefits of using torrent sites are that you can get GTA 5 for free, and that you can download it faster than other sources, depending on the number of seeders and leechers. The drawbacks are that you are breaking the law and risking fines or lawsuits, that you are exposing your PC to viruses, malware, or spyware, that you are not supporting the developers of GTA 5, and that you might not be able to access all the features and modes of the game, especially online multiplayer.
- GTA 5 Installation Guide
-After you have downloaded GTA 5 from your preferred source, you need to install it on your PC. The installation process might vary depending on the source, but here are some general steps that you can follow:
-
-Locate the GTA 5 setup file on your PC. It might be in a folder named GTA 5 or Grand Theft Auto V, or in your Downloads folder.
-Double-click on the GTA 5 setup file to launch the installation wizard. You might need to run it as administrator or enter a password if prompted.
-Follow the instructions on the screen to choose the installation location, language, and options for GTA 5. You might need to agree to the terms and conditions or enter a product key if required.
-Wait for the installation to complete. It might take several minutes or hours depending on the size of GTA 5 and the speed of your PC.
-Once the installation is done, you can launch GTA 5 from your desktop shortcut or start menu. You might need to sign in to your Steam, Epic Games Store, Rockstar Games Social Club, or other accounts if necessary.
-
-Congratulations! You have successfully installed GTA 5 on your PC. You can now enjoy playing one of the best games ever made.
- GTA 5 Reviews
-GTA 5 is not only a popular game, but also a critically acclaimed one. It has received universal praise from critics and gamers alike, and it has won numerous awards and accolades. Here are some of the reviews of GTA 5 from reputable sources:
-
-"Grand Theft Auto V is not only a preposterously enjoyable video game, but also an intelligent and sharp-tongued satire of contemporary America." - The Guardian
-"GTA V is an incredible achievement in world design ... It represents a refinement of everything that GTA IV brought to the table five years ago." - IGN
-"GTA V is one of the most exhilarating experiences this console generation ... It's beautiful, funny, heartbreaking and anarchic." - GamesRadar
-"GTA V is an immensely ambitious game that lives up to every ounce of hype it received over the last few years." - GameSpot
-"GTA V is one of those rare games where I found myself caring about everything I did ... It's a remarkable achievement in open-world gaming." - Polygon
-
-As you can see, GTA 5 is a masterpiece of gaming that deserves all the praise it gets. It is a game that you should not miss out on.
- Conclusion
-In this article, we have shown you how to download GTA 5 for free from different platforms and sources. We have also covered the system requirements, installation guide, and reviews of GTA 5. We hope that this article has been helpful and informative for you.
-GTA 5 is a game that you should definitely try at least once in your life. It is a game that will keep you entertained for hours with its amazing gameplay, story, characters, graphics, and modes. It is a game that will make you feel like you are living in a different world. It is a game that you can download for free from various sources, as we have shown you in this article.
- So, what are you waiting for? Download GTA 5 for free today and start your adventure in Los Santos. You won't regret it.
- FAQs
-Here are some of the frequently asked questions about GTA 5 and their answers:
-
-Is GTA 5 free? GTA 5 is not free by default, but you may be able to get it for free or at a discount through sales and limited-time giveaways on platforms such as Steam, Epic Games Store, or the Rockstar Games Launcher, or from torrent sites. However, each option has its own pros and cons, as we have explained in this article.
-Is GTA 5 online free? GTA 5 online is the multiplayer mode of GTA 5, where you can play with other players online. GTA 5 online is free to play if you have a copy of GTA 5 on your PC, but you might need to pay for some additional content or services, such as the GTA Online Premium Edition, Shark Cards, or subscriptions.
-Is GTA 5 safe to download? GTA 5 is safe to download if you get it from official or trusted sources, such as Steam, Epic Games Store, Rockstar Games Launcher, or reputable torrent sites. However, if you download GTA 5 from unknown or shady sources, you might expose your PC to viruses, malware, spyware, or other threats.
-How long does it take to download GTA 5? The download time of GTA 5 depends on several factors, such as the size of the game, the speed of your internet connection, the source of the download, and the performance of your PC. Generally speaking, GTA 5 is a large game that can take anywhere from a few minutes to several hours to download.
-How much space does GTA 5 take on PC? GTA 5 takes about 65 GB of space on PC, but it might vary depending on the version and updates of the game. You should also have some extra space for the installation and save files of GTA 5.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Merge DNA of Different Monsters and Fight for Glory in Merge Ragdoll Fighting.md b/spaces/congsaPfin/Manga-OCR/logs/Merge DNA of Different Monsters and Fight for Glory in Merge Ragdoll Fighting.md
deleted file mode 100644
index 089ce3e63bd83619fa74a2e902073d2303426b1e..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Merge DNA of Different Monsters and Fight for Glory in Merge Ragdoll Fighting.md
+++ /dev/null
@@ -1,141 +0,0 @@
-
-Merge Ragdoll Fighting Download: How to Create and Fight with Mutant Monsters
- Introduction
- If you are looking for a fun and addictive fighting game that lets you unleash your creativity and imagination, then you should try Merge Ragdoll Fighting. This game is a unique blend of action, arcade, and casual genres that will keep you entertained for hours. In this game, you can merge DNA of different monsters and create the ultimate fighter. You can then use your mutant fighter to battle against ragdoll enemies in epic fights for survival. You can also customize your fighter with different weapons, skins, and special skills. The game has amazing graphics, bright animations, and hilarious ways to die. It is suitable for players of all ages who enjoy fighting games with a twist.
- Why should you play Merge Ragdoll Fighting?
- There are many reasons why you should play Merge Ragdoll Fighting, but here are some of the main ones:
-merge ragdoll fighting download Download File 🗸 https://urlca.com/2uO7Nn
-
-Addictive gameplay : The game is easy to play but hard to master. You can merge DNA of different monsters and create endless combinations of fighters. You can also upgrade your fighters and unlock new weapons and skills. The game has a lot of levels and challenges to complete, as well as achievements and rewards to collect.
-Amazing graphics : The game has stunning graphics that will make you feel like you are in a lab full of monsters. The game also has bright animations that will make the fights more exciting and dynamic. The game has a stylized art style that gives it a unique charm.
-Bright animations : The game has hilarious animations that will make you laugh out loud. You can see your fighters perform different moves and combos, as well as use special skills and weapons. You can also see your enemies react in funny ways when they get hit or die. The game has a lot of dumb and funny ways to die that will make you want to replay the levels.
-Great variety of weapons : The game has a lot of weapons that you can use to enhance your fighters and make them more powerful. You can use swords, axes, hammers, guns, rockets, lasers, bombs, and more. You can also unlock new weapons as you progress in the game.
-Different ragdoll stickman skins : The game has a lot of skins that you can use to customize your fighters and make them look more cool or funny. You can use skins of animals, superheroes, zombies, pirates, ninjas, robots, aliens, and more. You can also unlock new skins as you progress in the game.
-Epic fights : The game has epic fights that will test your skills and strategy. You can fight against different types of ragdoll enemies, such as boxers, wrestlers, ninjas, zombies, robots, aliens, and more. You can also fight against bosses that have special abilities and attacks. The fights are fast-paced and intense, and you will need to use your fighters' skills and weapons wisely.
-Dumb & funny ways to die : The game has a lot of dumb and funny ways to die that will make you laugh out loud. You can see your fighters die in hilarious ways, such as getting crushed by a giant hammer, getting blown up by a bomb, getting electrocuted by a laser, getting impaled by a sword, getting eaten by a monster, and more. You can also see your enemies die in funny ways, such as getting knocked out by a punch, getting sliced by an axe, getting shot by a gun, getting burned by a rocket, and more. The game has a lot of ragdoll physics and blood effects that make the deaths more realistic and fun.
-
- How to download and install Merge Ragdoll Fighting on your device
- If you want to play Merge Ragdoll Fighting, you will need to download and install it on your device. The game is available for free on different platforms, such as Android, iOS, and PC. Here are the steps to download and install the game on your device:
- For Android users
-
-Go to the Google Play Store and search for Merge Ragdoll Fighting.
-Select the game from the list of results and tap on Install.
-Wait for the game to download and install on your device.
-Once the game is installed, tap on Open to launch the game and start playing.
-
- For iOS users
-
-Go to the App Store and search for Merge Ragdoll Fighting.
-Select the game from the list of results and tap on Get.
-Enter your Apple ID and password if prompted.
-Wait for the game to download and install on your device.
-Once the game is installed, tap on Open to launch the game and start playing.
-
- For PC users
-
-Go to the official website of Merge Ragdoll Fighting and click on Download for PC.
-Select the version of the game that is compatible with your operating system (Windows or Mac).
-Save the file to your computer and run it to start the installation process.
-Follow the instructions on the screen to complete the installation.
-Once the game is installed, double-click on the game icon to launch the game and start playing.
-
- How to play Merge Ragdoll Fighting
- Merge Ragdoll Fighting is a simple but fun game that anyone can play. However, if you want to master the game and become the best fighter, you will need to learn some tips and tricks. Here are some of the basics of how to play Merge Ragdoll Fighting:
- The basics of the game
- The game has two main modes: Lab Mode and Arena Mode. In Lab Mode, you can merge DNA of different monsters and create your own fighters. In Arena Mode, you can use your fighters to battle against ragdoll enemies in different levels. You can switch between the modes by tapping on the buttons at the bottom of the screen.
- In Lab Mode, you will see a grid of DNA tubes that contain different monsters. You can tap on a tube to select a monster, and then drag it to another tube to merge them. The result will be a new monster that has a combination of traits from both parents. You can also tap on a tube to see more information about a monster, such as its name, type, stats, skills, and weapons. You can also upgrade your monsters by spending coins or gems that you earn from playing Arena Mode.
- In Arena Mode, you will see a list of levels that you can play. Each level has a different theme, such as forest, desert, city, space, etc. You can also see the difficulty level, the number of enemies, and the rewards for completing each level. You can select a level by tapping on it, and then choose up to three fighters that you want to use in that level. You can also change your fighters' weapons and skins by tapping on them before starting a level.
- Once you start a level, you will see your fighters and enemies on opposite sides of the screen. You can control your fighters by tapping on them and dragging them around. You can also use their skills and weapons by tapping on the buttons at the bottom of the screen. Your goal is to defeat all enemies before they defeat you. You can also collect coins, gems, health packs, and power-ups that appear randomly on the screen during a fight. You can use these items to improve your fighters' performance or heal them if they get injured.
- How to merge DNA and create mutant fighters
- Merging DNA is one of the most fun and creative aspects of Merge Ragdoll Fighting. You can create endless combinations of fighters by merging different monsters' DNA. Here are some tips on how to merge DNA and create mutant fighters:
-merge ragdoll fighting game download
-download merge ragdoll fighting for android
-how to play merge ragdoll fighting on pc
-merge ragdoll fighting mod apk download
-merge ragdoll fighting online free download
-best tips and tricks for merge ragdoll fighting
-merge ragdoll fighting hack download
-download merge ragdoll fighting latest version
-merge ragdoll fighting review and rating
-merge ragdoll fighting cheats and codes download
-download merge ragdoll fighting for ios
-merge ragdoll fighting gameplay and features
-how to install merge ragdoll fighting on windows
-merge ragdoll fighting unlimited money download
-download merge ragdoll fighting for mac
-merge ragdoll fighting guide and walkthrough
-how to update merge ragdoll fighting
-merge ragdoll fighting premium download
-download merge ragdoll fighting for linux
-merge ragdoll fighting strategy and tips
-how to uninstall merge ragdoll fighting
-merge ragdoll fighting pro download
-download merge ragdoll fighting for chromebook
-merge ragdoll fighting tutorial and help
-how to backup and restore merge ragdoll fighting data
-merge ragdoll fighting vip download
-download merge ragdoll fighting for fire tablet
-merge ragdoll fighting support and feedback
-how to fix merge ragdoll fighting errors and bugs
-merge ragdoll fighting deluxe download
-download merge ragdoll fighting for kindle
-merge ragdoll fighting forum and community
-how to transfer merge ragdoll fighting data to another device
-merge ragdoll fighting gold download
-download merge ragdoll fighting for roku
-merge ragdoll fighting news and updates
-how to customize merge ragdoll fighting settings and options
-merge ragdoll fighting plus download
-download merge ragdoll fighting for smart tv
-merge ragdoll fighting faq and troubleshooting
-
-Experiment with different combinations : The best way to create unique and powerful fighters is to experiment with different combinations of monsters' DNA. You can try merging monsters of different types, such as fire, water, earth, air, etc., or monsters of similar types, such as zombies, robots, aliens, etc. You can also merge monsters of different sizes, shapes, colors, etc. You can also merge monsters that have special traits, such as wings, horns, tails, etc. The more you experiment, the more you will discover new and amazing fighters.
-Pay attention to the stats and skills : When you merge monsters' DNA, you will create a new monster that has a combination of stats and skills from both parents. The stats include health, attack, defense, and speed. The skills include passive and active abilities that can help you in a fight. You can see the stats and skills of a monster by tapping on its tube in Lab Mode. You should pay attention to these factors when merging DNA, as they will affect your fighters' performance in Arena Mode. You should try to create fighters that have balanced stats and useful skills.
-Upgrade your fighters : You can upgrade your fighters by spending coins or gems that you earn from playing Arena Mode. You can upgrade your fighters' stats, skills, and weapons by tapping on the Upgrade button in Lab Mode. Upgrading your fighters will make them stronger and more effective in a fight. You should upgrade your fighters regularly to keep up with the increasing difficulty of the levels.
-Customize your fighters : You can customize your fighters by changing their weapons and skins. You can change your fighters' weapons by tapping on the Weapon button in Lab Mode. You can choose from a variety of weapons that have different effects and damage types. You can also unlock new weapons as you progress in the game. You can change your fighters' skins by tapping on the Skin button in Lab Mode. You can choose from a variety of skins that have different looks and themes. You can also unlock new skins as you progress in the game. Customizing your fighters will make them look more cool or funny, and also give them some advantages or disadvantages in a fight.
-
- How to fight and win against ragdoll enemies
- Fighting against ragdoll enemies is one of the most exciting and challenging aspects of Merge Ragdoll Fighting. You will face different types of enemies that have different abilities and attacks. You will also face bosses that are more powerful and have special moves. Here are some tips on how to fight and win against ragdoll enemies:
-
-Use your fighters' skills and weapons wisely : Your fighters have skills and weapons that can help you in a fight. You can use their skills by tapping on the buttons at the bottom of the screen. You can use their weapons by dragging them to the enemies. However, you should use these abilities wisely, as they have cooldowns or limited ammo. You should also consider the effects and damage types of your skills and weapons, as they may be more or less effective against certain enemies.
-Avoid enemy attacks : Your enemies have attacks that can hurt or kill your fighters. You can see their attacks by looking at their animations or indicators on the screen. You should avoid their attacks by moving your fighters around or using their skills or weapons to block or dodge them. You should also pay attention to the environment, as it may have hazards or obstacles that can harm your fighters.
-Collect items : During a fight, you may see items that appear randomly on the screen. These items include coins, gems, health packs, and power-ups. You should collect these items by dragging your fighters to them or using their skills or weapons to grab them. These items can help you improve your fighters' performance or heal them if they get injured.
-Use strategy : Fighting against ragdoll enemies requires strategy and planning. You should choose your fighters carefully based on their stats, skills, weapons, and skins. You should also choose your enemies wisely based on their types, abilities, and weaknesses. You should also use your fighters' skills and weapons strategically based on the situation and the enemy's behavior. You should also use items smartly based on your needs and goals.
-
- Tips and tricks to master the game
- Merge Ragdoll Fighting is a game that is easy to play but hard to master. If you want to become the best fighter and complete all levels with ease, you will need some tips and tricks to master the game. Here are some of them:
-
-Play regularly : The best way to master the game is to play it regularly and practice your skills. The more you play, the more you will learn about the game mechanics, the monsters, the enemies, the levels, etc. You will also earn more coins and gems that you can use to upgrade and customize your fighters.
-Watch ads : The game has ads that you can watch voluntarily to get some benefits. You can watch ads to get free coins, gems, or health packs. You can also watch ads to double your rewards after completing a level. You can also watch ads to revive your fighters if they die in a fight. Watching ads can help you progress faster and easier in the game.
-Complete achievements and daily quests : The game has achievements and daily quests that you can complete to get extra coins and gems. You can see the achievements and daily quests by tapping on the buttons at the top of the screen. The achievements are based on your overall progress and performance in the game, such as creating a certain number of fighters, completing a certain number of levels, killing a certain number of enemies, etc. The daily quests are based on specific tasks that you have to do within a day, such as merging a certain type of monster, using a certain weapon, collecting a certain amount of coins, etc. Completing achievements and daily quests can help you earn more coins and gems that you can use to upgrade and customize your fighters.
-Join the community : The game has a community of players that you can join and interact with. You can see the community by tapping on the button at the bottom right of the screen. The community has a chat room where you can chat with other players, share your tips and tricks, ask for help, or just have fun. The community also has a leaderboard where you can see your rank and compare your score with other players. You can also see other players' profiles and their fighters. Joining the community can help you learn more about the game, make new friends, and have more fun.
-
- Conclusion
- Merge Ragdoll Fighting is a fun and addictive fighting game that lets you create and fight with mutant monsters. You can merge DNA of different monsters and create endless combinations of fighters. You can also customize your fighters with different weapons and skins. You can then use your fighters to battle against ragdoll enemies in epic fights for survival. The game has amazing graphics, bright animations, and hilarious ways to die. It is suitable for players of all ages who enjoy fighting games with a twist.
- If you want to play Merge Ragdoll Fighting, you can download and install it on your device for free. The game is available for Android, iOS, and PC platforms. You can also follow some tips and tricks to master the game and become the best fighter.
- Merge Ragdoll Fighting is a game that will keep you entertained for hours. It is a game that will challenge your skills and strategy. It is a game that will unleash your creativity and imagination. It is a game that will make you laugh out loud. It is a game that you should try today.
- FAQs
- Here are some of the frequently asked questions about Merge Ragdoll Fighting:
-
-How many monsters are there in Merge Ragdoll Fighting?
-There are over 100 monsters that you can merge and create in Merge Ragdoll Fighting. Each monster has its own name, type, stats, skills, and weapons. You can see all the monsters that you have created by tapping on the Collection button in Lab Mode.
-How do I unlock new weapons and skins in Merge Ragdoll Fighting?
-You can unlock new weapons and skins in Merge Ragdoll Fighting by completing levels, achievements, and daily quests. You can also buy them with coins or gems that you earn from playing Arena Mode or watching ads.
-What are the power-ups in Merge Ragdoll Fighting?
-The power-ups are items that appear randomly on the screen during a fight. They have different effects that can help you or hinder your enemies. Some of the power-ups are:
-
-Bomb : Explodes and damages nearby enemies.
-Freeze : Freezes all enemies for a few seconds.
-Fire : Sets all enemies on fire for a few seconds.
-Poison : Poisons all enemies for a few seconds.
-Shield : Protects your fighters from enemy attacks for a few seconds.
-Speed : Increases your fighters' speed for a few seconds.
-Strength : Increases your fighters' attack for a few seconds.
-
-How do I revive my fighters if they die in a fight?
-If your fighters die in a fight, you have two options to revive them: watch an ad or spend gems. If you watch an ad, you can revive one fighter for free. If you spend gems, you can revive all fighters at once. However, you can only revive your fighters once per level, so you should use this option wisely.
-How do I contact the developers of Merge Ragdoll Fighting?
-If you have any questions, feedback, or suggestions about Merge Ragdoll Fighting, you can contact the developers by tapping on the Settings button at the top right of the screen. You can then tap on the Contact Us button and fill out the form with your message. You can also follow the developers on their social media accounts, such as Facebook, Twitter, Instagram, and YouTube.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Monster Box Mod Apk All Unlocked The Best Action Game for Android Devices.md b/spaces/congsaPfin/Manga-OCR/logs/Monster Box Mod Apk All Unlocked The Best Action Game for Android Devices.md
deleted file mode 100644
index 4a6fb6eaf00ff637140429a02b4c29c956cb9e2c..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Monster Box Mod Apk All Unlocked The Best Action Game for Android Devices.md
+++ /dev/null
@@ -1,109 +0,0 @@
-
-Monster Box Mod APK: A Fun and Addictive Action Game
-If you are looking for a simple yet challenging action game that will keep you entertained for hours, then you should try Monster Box. Monster Box is a game where you have to collect and upgrade various monsters and use them to fight against waves of enemies. You can also customize your monsters with different skins, weapons, and accessories. Monster Box is a game that will test your reflexes, strategy, and creativity.
-What is Monster Box?
-Monster Box is a game developed by PONOS Corporation, the same company that created the popular Battle Cats series. Monster Box was released in 2019 and has been downloaded over 10 million times on Google Play. The game has a rating of 4.4 out of 5 stars and has received positive reviews from players.
-monster box mod apk all unlocked Download Zip ✸✸✸ https://urlca.com/2uOgiM
-Features of Monster Box
-Monster Box has many features that make it a fun and addictive game. Some of these features are:
-
-Simple and intuitive controls: You can control your monsters with just one finger. Tap to move, swipe to attack, and hold to charge your special skill.
-Cute and colorful graphics: The game has a pixel art style that is charming and nostalgic. The monsters are adorable and have unique designs and animations.
-Various modes and stages: The game has different modes and stages that offer different challenges and rewards. You can play the normal mode, the hard mode, the endless mode, or the special events. You can also unlock new stages by clearing the previous ones.
-Diverse and upgradeable monsters: The game has over 100 monsters that you can collect and upgrade. Each monster has its own stats, skills, and personality. You can also equip them with different items to boost their performance.
-Social features: The game allows you to interact with other players through the chat system, the friend system, or the guild system. You can also compete with other players in the ranking system or cooperate with them in the raid system.
-
-How to play Monster Box
-The gameplay of Monster Box is simple but challenging. Here are the basic steps to play the game:
-
-Select a stage and a team of monsters to start the game.
-Tap on the screen to move your monsters and swipe to attack the enemies.
-Use your special skills when they are fully charged.
-Defeat all the enemies and clear the stage.
-Earn coins, gems, items, and new monsters as rewards.
-Use your rewards to upgrade your monsters and items.
-Repeat the process and enjoy the game.
-
-Why download Monster Box Mod APK?
-Monster Box is a free-to-play game, but it also has some in-app purchases that can enhance your gaming experience. However, if you don't want to spend real money on the game, you can download Monster Box Mod APK instead. Monster Box Mod APK is a modified version of the original game that gives you unlimited access to all the features and resources of the game without any cost.
-Benefits of Monster Box Mod APK
-Some of the benefits of downloading Monster Box Mod APK are:
-
-Unlimited money: You can get unlimited coins and gems in the game, which you can use to buy new monsters, items, skins, and more.
-All unlocked: You can unlock all the modes, stages, monsters, items, skins, and more in the game without any restrictions.
-No ads: You can enjoy the game without any annoying ads that interrupt your gameplay.
-No root required: You don't need to root your device to install or run Monster Box Mod APK.
-
-How to download and install Monster Box Mod APK
-Downloading and installing Monster Box Mod APK is easy and fast. Here are the steps to follow:
-
-Click on the download button below to download the Monster Box Mod APK file on your device.
-Go to your device settings and enable the installation of apps from unknown sources.
-Locate the downloaded file and tap on it to start the installation process.
-Follow the instructions on the screen and wait for the installation to finish.
-Launch the game and enjoy.
-
-Conclusion
-Monster Box is a fun and addictive action game that will keep you hooked for hours. You can collect and upgrade various monsters and use them to fight against waves of enemies. You can also customize your monsters with different skins, weapons, and accessories. Monster Box has simple and intuitive controls, cute and colorful graphics, various modes and stages, diverse and upgradeable monsters, and social features. You can download Monster Box Mod APK to get unlimited money, all unlocked, no ads, and no root required. Monster Box Mod APK is a great way to enjoy the game without any limitations. Download it now and have fun.
-monster box mod apk unlimited money
-monster box mod apk latest version
-monster box mod apk download for android
-monster box mod apk free shopping
-monster box mod apk no ads
-monster box hack mod apk
-monster box cheat mod apk
-monster box premium mod apk
-monster box pro mod apk
-monster box vip mod apk
-monster box full mod apk
-monster box cracked mod apk
-monster box unlocked everything mod apk
-monster box all features mod apk
-monster box all levels mod apk
-monster box action game mod apk
-monster box adventure game mod apk
-monster box arcade game mod apk
-monster box offline game mod apk
-monster box online game mod apk
-monster box 3d game mod apk
-monster box pixel game mod apk
-monster box retro game mod apk
-monster box simulation game mod apk
-monster box strategy game mod apk
-monster box 2023 mod apk
-monster box 0.6.6 mod apk
-monster box 0.5.10 mod apk
-monster box v0.6.6 mod apk
-monster box v0.5.10 mod apk
-download monster box hack mod apk
-download monster box cheat mod apk
-download monster box premium mod apk
-download monster box pro mod apk
-download monster box vip mod apk
-download monster box full mod apk
-download monster box cracked mod apk
-download monster box unlocked everything mod apk
-download monster box all features mod apk
-download monster box all levels mod apk
-how to install monster box mod apk
-how to play monster box mod apk
-how to update monster box mod apk
-how to get monster box mod apk for free
-how to get unlimited money in monster box mod apk
-how to unlock all features in monster box mod apk
-how to unlock all levels in monster box mod apk
-FAQs
-
-Q: Is Monster Box Mod APK safe to use?
-A: Yes, Monster Box Mod APK is safe to use. It does not contain any viruses or malware that can harm your device or data. It is also compatible with most Android devices.
-Q: Can I play Monster Box online with other players?
-A: Yes, you can play Monster Box online with other players. You can chat with them, add them as friends, join or create a guild, compete in the ranking system, or cooperate in the raid system.
-Q: How can I get more coins and gems in Monster Box?
-A: You can get more coins and gems in Monster Box by clearing stages, completing quests, participating in events, watching ads, or buying them with real money. You can also download Monster Box Mod APK to get unlimited coins and gems for free.
-Q: How can I unlock new monsters and items in Monster Box?
-A: You can unlock new monsters and items in Monster Box by clearing stages, completing quests, participating in events, or buying them with coins or gems. You can also download Monster Box Mod APK to unlock all the monsters and items for free.
-Q: How can I update Monster Box Mod APK?
-A: You can update Monster Box Mod APK by downloading the latest version from this website. You don't need to uninstall the previous version, just install the new one over it.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Sniper 3D MOD APK How to Become the Ultimate Sniper Assassin with Unlimited Money and Premium Features.md b/spaces/congsaPfin/Manga-OCR/logs/Sniper 3D MOD APK How to Become the Ultimate Sniper Assassin with Unlimited Money and Premium Features.md
deleted file mode 100644
index 4acb06d8aa66761242d4089744ceab4b11e92fb5..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Sniper 3D MOD APK How to Become the Ultimate Sniper Assassin with Unlimited Money and Premium Features.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Sniper Assassin 3D Mod APK: A Fun and Thrilling Shooting Game
-If you are looking for a free online multiplayer FPS game that lets you fight in a global war on crime and become the ultimate sniper assassin, then you should try Sniper Assassin 3D. This game has great gameplay, awesome visuals, and entertaining missions that will keep you hooked for hours. And if you want to make your gaming experience even more exciting, you should download Sniper Assassin 3D Mod APK. This mod apk will give you access to unlimited money, menu, premium features, and more. In this article, we will tell you everything you need to know about Sniper Assassin 3D and its mod apk.
-sniper assassin 3d mod apk Download File ⚡ https://urlca.com/2uO5ou
- What is Sniper Assassin 3D?
-Sniper Assassin 3D is a fun shooting game developed by Fun Games For Free. It is available for Android and iOS devices, as well as Windows and macOS computers. In this game, you play as a professional sniper who has to take down various targets in different locations around the world. You can choose from over 150+ snipers and rifles, customize them with upgrades and evolutions, and use them in over 850+ thrilling missions. You can also play in multiple battlegrounds, from huge cities to beautiful beaches. You can also compete with other players online in PvP mode or join a clan and cooperate with your teammates.
- What is Sniper Assassin 3D Mod APK?
-Sniper Assassin 3D Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the official version. Mod APK stands for modified application package file. It is a file that contains the installation data of an app that has been altered by someone other than the original developer. By installing a mod apk file, you can bypass some restrictions or limitations imposed by the developer or the platform. For example, you can get unlimited money, unlock all weapons, access premium features, etc.
- How to Download and Install Sniper Assassin 3D Mod APK?
-If you want to download and install Sniper Assassin 3D Mod APK on your device, you need to follow these steps:
-
-First, you need to find a reliable source that provides the mod apk file. You can search for it on Google or use one of these links: or . Make sure you download the latest version of the mod apk that is compatible with your device.
-Second, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the official app store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-Third, you need to locate the downloaded mod apk file on your device. You can use a file manager app or go to Downloads folder. Tap on the file and follow the instructions to install it.
- Fourth, you need to launch the game and enjoy the mod apk features. You may need to grant some permissions or allow some pop-ups to access the mod menu. You can also adjust the settings according to your preferences.
-
-What are the Features of Sniper Assassin 3D Mod APK?
-Sniper Assassin 3D Mod APK has many features that make it more fun and thrilling than the original game. Here are some of the main features of the mod apk:
-
-Unlimited Money: You can get unlimited money in the game, which you can use to buy and upgrade weapons, items, skills, etc. You can also unlock all the snipers and rifles without spending any real money.
-Mod Menu: You can access a mod menu that lets you enable or disable various options, such as god mode, one hit kill, no recoil, no reload, etc. You can also change the game speed, zoom level, aim assist, etc.
-Premium Features: You can enjoy all the premium features of the game, such as no ads, unlimited energy, VIP access, exclusive weapons, etc. You can also get free diamonds and coins every day.
-Anti-Ban: You can play the game without worrying about getting banned by the developers or the platform. The mod apk has an anti-ban system that protects your account and device from detection and suspension.
-
-What are the Tips and Tricks for Playing Sniper Assassin 3D?
-Sniper Assassin 3D is a challenging game that requires skill, strategy, and patience. Here are some tips and tricks that can help you improve your performance and enjoy the game more:
-sniper 3d assassin mod apk unlimited money and gems
-sniper 3d assassin mod apk download for android
-sniper 3d assassin mod apk menu
-sniper 3d assassin mod apk premium
-sniper 3d assassin mod apk latest version
-sniper 3d assassin mod apk offline
-sniper 3d assassin mod apk unlimited coins and diamonds
-sniper 3d assassin mod apk free shopping
-sniper 3d assassin mod apk hack
-sniper 3d assassin mod apk all guns unlocked
-sniper 3d assassin mod apk unlimited energy
-sniper 3d assassin mod apk online
-sniper 3d assassin mod apk no ads
-sniper 3d assassin mod apk unlimited everything
-sniper 3d assassin mod apk android 1
-sniper 3d assassin mod apk revdl
-sniper 3d assassin mod apk rexdl
-sniper 3d assassin mod apk happymod
-sniper 3d assassin mod apk unlimited ammo
-sniper 3d assassin mod apk god mode
-sniper 3d assassin mod apk unlimited gold and diamonds
-sniper 3d assassin mod apk obb
-sniper 3d assassin mod apk anti ban
-sniper 3d assassin mod apk vip unlocked
-sniper 3d assassin mod apk new update
-sniper 3d assassin mod apk unlimited money and gems download
-sniper 3d assassin mod apk full version
-sniper 3d assassin mod apk mega mod
-sniper 3d assassin mod apk unlimited coins and gems download
-sniper 3d assassin mod apk high damage
-sniper 3d assassin mod apk unlimited money and gems android oyun club
-sniper 3d assassin mod apk all weapons unlocked and upgraded
-sniper 3d assassin mod apk unlimited money and gems ios
-sniper 3d assassin mod apk old version
-sniper 3d assassin mod apk world of mods
-sniper 3d assassin mod apk unlimited money and gems offline download
-sniper 3d assassin mod apk data file host
-sniper 3d assassin mod apk no root
-sniper 3d assassin mod apk pvp mode unlocked
-sniper 3d assassin mod apk unlimited money and gems apkpure
-sniper 3d assassin mod apk all missions unlocked
-sniper 3d assassin mod apk best graphics settings
-sniper 3d assassin mod apk cheat codes
-sniper 3d assassin mod apk diamond hack
-sniper 3d assassin mod apk easy download
-sniper 3d assassin mod apk free fire
-sniper 3d assassin mod apk gameplay
-sniper 3d assassin mod apk how to install
-sniper 3d assassin mod apk ios download
-
-Aim Carefully: The most important skill in sniping is aiming. You need to aim carefully and accurately at your target, taking into account factors such as distance, wind, gravity, movement, etc. You can use the scope to zoom in and out and adjust the crosshair. You can also use the aim assist feature in the mod apk to help you with aiming.
-Choose the Right Weapon: The game offers a variety of weapons to choose from, each with its own strengths and weaknesses. You need to choose the right weapon for each mission and situation. Some weapons have higher damage, range, stability, fire rate, etc. than others. You can also upgrade your weapons with evolutions and attachments to enhance their performance.
-Complete Missions: The game has over 850+ missions to complete, each with its own objectives, challenges, and rewards. You need to complete missions to progress in the game and earn money, diamonds, coins, etc. You can also replay missions to improve your score and rank. Some missions have special requirements or conditions that you need to fulfill.
-Play Online: The game has an online mode that lets you compete with other players around the world in PvP mode or join a clan and cooperate with your teammates. You can also chat with other players and share your achievements. Playing online can help you improve your skills, earn more rewards, and have more fun.
-
-What are the Reviews of Sniper Assassin 3D?
-Sniper Assassin 3D is a popular game that has received many positive reviews from players and critics alike. Here are some of the reviews of the game from various sources:
-
-| Source | Rating | Review |
-|---|---|---|
-| [Google Play Store] | 4.4/5 | "This is one of my favorite games ever! It's so realistic and fun! I love how you can customize your weapons and upgrade them. The graphics are amazing and the missions are challenging. I highly recommend this game to anyone who loves shooting games!" |
-| [App Store] | 4.6/5 | "This game is awesome! It's very addictive and entertaining! I like how you can play online with other people and join clans. The mod apk is also great! It gives you unlimited money and premium features! This game is a must-have for sniping fans!" |
-| [PCMag] | 4/5 | "Sniper Assassin 3D is a fun and thrilling shooting game that offers a lot of content and variety. The game has great gameplay, awesome visuals, and entertaining missions that will keep you hooked for hours. The mod apk adds more excitement and convenience to the game." |
-Conclusion
-Sniper Assassin 3D is a fun and thrilling shooting game that lets you become the ultimate sniper assassin. You can enjoy the game with its original features, or you can download Sniper Assassin 3D Mod APK to get unlimited money, menu, premium features, and more. You can also follow some tips and tricks to improve your performance and enjoy the game more. Sniper Assassin 3D has received many positive reviews from players and critics alike, and it is one of the best sniping games available. If you are looking for a free online multiplayer FPS game that offers a lot of content and variety, you should try Sniper Assassin 3D.
- FAQs
-Here are some of the frequently asked questions and answers about Sniper Assassin 3D and its mod apk:
-
-Is Sniper Assassin 3D Mod APK safe to use?
-Yes, Sniper Assassin 3D Mod APK is safe to use, as long as you download it from a reliable source and enable unknown sources on your device. The mod apk has an anti-ban system that protects your account and device from detection and suspension.
-Can I play Sniper Assassin 3D offline?
-Yes, you can play Sniper Assassin 3D offline, as long as you have downloaded the game data beforehand. However, some features of the game, such as online mode, clan, chat, etc., require an internet connection to work.
-How can I get more diamonds and coins in Sniper Assassin 3D?
-You can get more diamonds and coins in Sniper Assassin 3D by completing missions, watching ads, spinning the wheel, opening chests, etc. You can also get free diamonds and coins every day by using the mod apk.
-How can I update Sniper Assassin 3D Mod APK?
-You can update Sniper Assassin 3D Mod APK by downloading the latest version of the mod apk file from the same source that you used before and installing it over the existing one. You may need to uninstall the previous version first if it is not compatible with the new one.
-How can I contact the developers of Sniper Assassin 3D?
-You can contact the developers of Sniper Assassin 3D by sending them an email at support+sniper3d@fungames-forfree.com or by visiting their website at https://www.fungames-forfree.com/games/sniper3d.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/fileio/file_client.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/fileio/file_client.py
deleted file mode 100644
index 1ed2bf5f41a29000f9a080066497d8f3674fae15..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/fileio/file_client.py
+++ /dev/null
@@ -1,1148 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import inspect
-import os
-import os.path as osp
-import re
-import tempfile
-import warnings
-from abc import ABCMeta, abstractmethod
-from contextlib import contextmanager
-from pathlib import Path
-from typing import Iterable, Iterator, Optional, Tuple, Union
-from urllib.request import urlopen
-
-import annotator.mmpkg.mmcv as mmcv
-from annotator.mmpkg.mmcv.utils.misc import has_method
-from annotator.mmpkg.mmcv.utils.path import is_filepath
-
-
-class BaseStorageBackend(metaclass=ABCMeta):
- """Abstract class of storage backends.
-
- All backends need to implement two apis: ``get()`` and ``get_text()``.
- ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
-    as text.
- """
-
- # a flag to indicate whether the backend can create a symlink for a file
- _allow_symlink = False
-
- @property
- def name(self):
- return self.__class__.__name__
-
- @property
- def allow_symlink(self):
- return self._allow_symlink
-
- @abstractmethod
- def get(self, filepath):
- pass
-
- @abstractmethod
- def get_text(self, filepath):
- pass
-
-
-class CephBackend(BaseStorageBackend):
- """Ceph storage backend (for internal use).
-
- Args:
- path_mapping (dict|None): path mapping dict from local path to Petrel
- path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
- will be replaced by ``dst``. Default: None.
-
- .. warning::
- :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
- please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
- """
-
- def __init__(self, path_mapping=None):
- try:
- import ceph
- except ImportError:
- raise ImportError('Please install ceph to enable CephBackend.')
-
- warnings.warn(
- 'CephBackend will be deprecated, please use PetrelBackend instead')
- self._client = ceph.S3Client()
- assert isinstance(path_mapping, dict) or path_mapping is None
- self.path_mapping = path_mapping
-
- def get(self, filepath):
- filepath = str(filepath)
- if self.path_mapping is not None:
- for k, v in self.path_mapping.items():
- filepath = filepath.replace(k, v)
- value = self._client.Get(filepath)
- value_buf = memoryview(value)
- return value_buf
-
- def get_text(self, filepath, encoding=None):
- raise NotImplementedError
-
-
-class PetrelBackend(BaseStorageBackend):
- """Petrel storage backend (for internal use).
-
- PetrelBackend supports reading and writing data to multiple clusters.
- If the file path contains the cluster name, PetrelBackend will read data
- from specified cluster or write data to it. Otherwise, PetrelBackend will
- access the default cluster.
-
- Args:
- path_mapping (dict, optional): Path mapping dict from local path to
- Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
- ``filepath`` will be replaced by ``dst``. Default: None.
- enable_mc (bool, optional): Whether to enable memcached support.
- Default: True.
-
- Examples:
- >>> filepath1 = 's3://path/of/file'
- >>> filepath2 = 'cluster-name:s3://path/of/file'
- >>> client = PetrelBackend()
- >>> client.get(filepath1) # get data from default cluster
- >>> client.get(filepath2) # get data from 'cluster-name' cluster
- """
-
- def __init__(self,
- path_mapping: Optional[dict] = None,
- enable_mc: bool = True):
- try:
- from petrel_client import client
- except ImportError:
- raise ImportError('Please install petrel_client to enable '
- 'PetrelBackend.')
-
- self._client = client.Client(enable_mc=enable_mc)
- assert isinstance(path_mapping, dict) or path_mapping is None
- self.path_mapping = path_mapping
-
- def _map_path(self, filepath: Union[str, Path]) -> str:
- """Map ``filepath`` to a string path whose prefix will be replaced by
- :attr:`self.path_mapping`.
-
- Args:
- filepath (str): Path to be mapped.
- """
- filepath = str(filepath)
- if self.path_mapping is not None:
- for k, v in self.path_mapping.items():
- filepath = filepath.replace(k, v)
- return filepath
-
- def _format_path(self, filepath: str) -> str:
- """Convert a ``filepath`` to standard format of petrel oss.
-
- If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
- environment, the ``filepath`` will be the format of
- 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
- above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.
-
- Args:
- filepath (str): Path to be formatted.
- """
- return re.sub(r'\\+', '/', filepath)
-
- def get(self, filepath: Union[str, Path]) -> memoryview:
- """Read data from a given ``filepath`` with 'rb' mode.
-
- Args:
- filepath (str or Path): Path to read data.
-
- Returns:
- memoryview: A memory view of expected bytes object to avoid
- copying. The memoryview object can be converted to bytes by
- ``value_buf.tobytes()``.
- """
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- value = self._client.Get(filepath)
- value_buf = memoryview(value)
- return value_buf
-
- def get_text(self,
- filepath: Union[str, Path],
- encoding: str = 'utf-8') -> str:
- """Read data from a given ``filepath`` with 'r' mode.
-
- Args:
- filepath (str or Path): Path to read data.
- encoding (str): The encoding format used to open the ``filepath``.
- Default: 'utf-8'.
-
- Returns:
- str: Expected text reading from ``filepath``.
- """
- return str(self.get(filepath), encoding=encoding)
-
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
- """Save data to a given ``filepath``.
-
- Args:
- obj (bytes): Data to be saved.
- filepath (str or Path): Path to write data.
- """
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- self._client.put(filepath, obj)
-
- def put_text(self,
- obj: str,
- filepath: Union[str, Path],
- encoding: str = 'utf-8') -> None:
- """Save data to a given ``filepath``.
-
- Args:
- obj (str): Data to be written.
- filepath (str or Path): Path to write data.
- encoding (str): The encoding format used to encode the ``obj``.
- Default: 'utf-8'.
- """
- self.put(bytes(obj, encoding=encoding), filepath)
-
- def remove(self, filepath: Union[str, Path]) -> None:
- """Remove a file.
-
- Args:
- filepath (str or Path): Path to be removed.
- """
- if not has_method(self._client, 'delete'):
- raise NotImplementedError(
-                ('The current version of the Petrel Python SDK does not '
-                 'support the `delete` method, please use a higher version '
-                 'or the dev branch instead.'))
-
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- self._client.delete(filepath)
-
- def exists(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path exists.
-
- Args:
- filepath (str or Path): Path to be checked whether exists.
-
- Returns:
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
- """
- if not (has_method(self._client, 'contains')
- and has_method(self._client, 'isdir')):
- raise NotImplementedError(
-                ('The current version of the Petrel Python SDK does not '
-                 'support the `contains` and `isdir` methods, please use '
-                 'a higher version or the dev branch instead.'))
-
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- return self._client.contains(filepath) or self._client.isdir(filepath)
-
- def isdir(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a directory.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a
- directory.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a directory,
- ``False`` otherwise.
- """
- if not has_method(self._client, 'isdir'):
- raise NotImplementedError(
-                ('The current version of the Petrel Python SDK does not '
-                 'support the `isdir` method, please use a higher version '
-                 'or the dev branch instead.'))
-
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- return self._client.isdir(filepath)
-
- def isfile(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a file.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a file.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
- otherwise.
- """
- if not has_method(self._client, 'contains'):
- raise NotImplementedError(
-                ('The current version of the Petrel Python SDK does not '
-                 'support the `contains` method, please use a higher '
-                 'version or the dev branch instead.'))
-
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- return self._client.contains(filepath)
-
- def join_path(self, filepath: Union[str, Path],
- *filepaths: Union[str, Path]) -> str:
- """Concatenate all file paths.
-
- Args:
- filepath (str or Path): Path to be concatenated.
-
- Returns:
- str: The result after concatenation.
- """
- filepath = self._format_path(self._map_path(filepath))
- if filepath.endswith('/'):
- filepath = filepath[:-1]
- formatted_paths = [filepath]
- for path in filepaths:
- formatted_paths.append(self._format_path(self._map_path(path)))
- return '/'.join(formatted_paths)
-
- @contextmanager
- def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
- """Download a file from ``filepath`` and return a temporary path.
-
-        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
-        It can be called with a ``with`` statement, and when exiting the
-        ``with`` statement, the temporary path will be released.
-
- Args:
- filepath (str | Path): Download a file from ``filepath``.
-
- Examples:
- >>> client = PetrelBackend()
-            >>> # After exiting from the ``with`` clause,
- >>> # the path will be removed
- >>> with client.get_local_path('s3://path/of/your/file') as path:
- ... # do something here
-
- Yields:
- Iterable[str]: Only yield one temporary path.
- """
- filepath = self._map_path(filepath)
- filepath = self._format_path(filepath)
- assert self.isfile(filepath)
- try:
- f = tempfile.NamedTemporaryFile(delete=False)
- f.write(self.get(filepath))
- f.close()
- yield f.name
- finally:
- os.remove(f.name)
-
- def list_dir_or_file(self,
- dir_path: Union[str, Path],
- list_dir: bool = True,
- list_file: bool = True,
- suffix: Optional[Union[str, Tuple[str]]] = None,
- recursive: bool = False) -> Iterator[str]:
- """Scan a directory to find the interested directories or files in
- arbitrary order.
-
- Note:
- Petrel has no concept of directories but it simulates the directory
- hierarchy in the filesystem through public prefixes. In addition,
- if the returned path ends with '/', it means the path is a public
- prefix which is a logical directory.
-
- Note:
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
-            In addition, the returned directory path will not contain the
-            suffix '/', which is consistent with other backends.
-
- Args:
- dir_path (str | Path): Path of the directory.
- list_dir (bool): List the directories. Default: True.
- list_file (bool): List the path of files. Default: True.
- suffix (str or tuple[str], optional): File suffix
- that we are interested in. Default: None.
- recursive (bool): If set to True, recursively scan the
- directory. Default: False.
-
- Yields:
- Iterable[str]: A relative path to ``dir_path``.
- """
- if not has_method(self._client, 'list'):
- raise NotImplementedError(
-                ('The current version of the Petrel Python SDK does not '
-                 'support the `list` method, please use a higher version '
-                 'or the dev branch instead.'))
-
- dir_path = self._map_path(dir_path)
- dir_path = self._format_path(dir_path)
- if list_dir and suffix is not None:
- raise TypeError(
- '`list_dir` should be False when `suffix` is not None')
-
- if (suffix is not None) and not isinstance(suffix, (str, tuple)):
- raise TypeError('`suffix` must be a string or tuple of strings')
-
- # Petrel's simulated directory hierarchy assumes that directory paths
- # should end with `/`
- if not dir_path.endswith('/'):
- dir_path += '/'
-
- root = dir_path
-
- def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
- recursive):
- for path in self._client.list(dir_path):
- # the `self.isdir` is not used here to determine whether path
- # is a directory, because `self.isdir` relies on
- # `self._client.list`
- if path.endswith('/'): # a directory path
- next_dir_path = self.join_path(dir_path, path)
- if list_dir:
- # get the relative path and exclude the last
- # character '/'
- rel_dir = next_dir_path[len(root):-1]
- yield rel_dir
- if recursive:
- yield from _list_dir_or_file(next_dir_path, list_dir,
- list_file, suffix,
- recursive)
- else: # a file path
- absolute_path = self.join_path(dir_path, path)
- rel_path = absolute_path[len(root):]
- if (suffix is None
- or rel_path.endswith(suffix)) and list_file:
- yield rel_path
-
- return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
- recursive)
-
-
-class MemcachedBackend(BaseStorageBackend):
- """Memcached storage backend.
-
- Attributes:
- server_list_cfg (str): Config file for memcached server list.
- client_cfg (str): Config file for memcached client.
- sys_path (str | None): Additional path to be appended to `sys.path`.
- Default: None.
- """
-
- def __init__(self, server_list_cfg, client_cfg, sys_path=None):
- if sys_path is not None:
- import sys
- sys.path.append(sys_path)
- try:
- import mc
- except ImportError:
- raise ImportError(
- 'Please install memcached to enable MemcachedBackend.')
-
- self.server_list_cfg = server_list_cfg
- self.client_cfg = client_cfg
- self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
- self.client_cfg)
-        # mc.pyvector serves as a pointer to a memory cache
- self._mc_buffer = mc.pyvector()
-
- def get(self, filepath):
- filepath = str(filepath)
- import mc
- self._client.Get(filepath, self._mc_buffer)
- value_buf = mc.ConvertBuffer(self._mc_buffer)
- return value_buf
-
- def get_text(self, filepath, encoding=None):
- raise NotImplementedError
-
-
-class LmdbBackend(BaseStorageBackend):
- """Lmdb storage backend.
-
- Args:
- db_path (str): Lmdb database path.
- readonly (bool, optional): Lmdb environment parameter. If True,
- disallow any write operations. Default: True.
- lock (bool, optional): Lmdb environment parameter. If False, when
- concurrent access occurs, do not lock the database. Default: False.
- readahead (bool, optional): Lmdb environment parameter. If False,
- disable the OS filesystem readahead mechanism, which may improve
- random read performance when a database is larger than RAM.
- Default: False.
-
- Attributes:
- db_path (str): Lmdb database path.
- """
-
- def __init__(self,
- db_path,
- readonly=True,
- lock=False,
- readahead=False,
- **kwargs):
- try:
- import lmdb
- except ImportError:
- raise ImportError('Please install lmdb to enable LmdbBackend.')
-
- self.db_path = str(db_path)
- self._client = lmdb.open(
- self.db_path,
- readonly=readonly,
- lock=lock,
- readahead=readahead,
- **kwargs)
-
- def get(self, filepath):
- """Get values according to the filepath.
-
- Args:
- filepath (str | obj:`Path`): Here, filepath is the lmdb key.
- """
- filepath = str(filepath)
- with self._client.begin(write=False) as txn:
- value_buf = txn.get(filepath.encode('ascii'))
- return value_buf
-
- def get_text(self, filepath, encoding=None):
- raise NotImplementedError
-
-
-class HardDiskBackend(BaseStorageBackend):
- """Raw hard disks storage backend."""
-
- _allow_symlink = True
-
- def get(self, filepath: Union[str, Path]) -> bytes:
- """Read data from a given ``filepath`` with 'rb' mode.
-
- Args:
- filepath (str or Path): Path to read data.
-
- Returns:
- bytes: Expected bytes object.
- """
- with open(filepath, 'rb') as f:
- value_buf = f.read()
- return value_buf
-
- def get_text(self,
- filepath: Union[str, Path],
- encoding: str = 'utf-8') -> str:
- """Read data from a given ``filepath`` with 'r' mode.
-
- Args:
- filepath (str or Path): Path to read data.
- encoding (str): The encoding format used to open the ``filepath``.
- Default: 'utf-8'.
-
- Returns:
- str: Expected text reading from ``filepath``.
- """
- with open(filepath, 'r', encoding=encoding) as f:
- value_buf = f.read()
- return value_buf
-
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
- """Write data to a given ``filepath`` with 'wb' mode.
-
- Note:
- ``put`` will create a directory if the directory of ``filepath``
- does not exist.
-
- Args:
- obj (bytes): Data to be written.
- filepath (str or Path): Path to write data.
- """
- mmcv.mkdir_or_exist(osp.dirname(filepath))
- with open(filepath, 'wb') as f:
- f.write(obj)
-
- def put_text(self,
- obj: str,
- filepath: Union[str, Path],
- encoding: str = 'utf-8') -> None:
- """Write data to a given ``filepath`` with 'w' mode.
-
- Note:
- ``put_text`` will create a directory if the directory of
- ``filepath`` does not exist.
-
- Args:
- obj (str): Data to be written.
- filepath (str or Path): Path to write data.
- encoding (str): The encoding format used to open the ``filepath``.
- Default: 'utf-8'.
- """
- mmcv.mkdir_or_exist(osp.dirname(filepath))
- with open(filepath, 'w', encoding=encoding) as f:
- f.write(obj)
-
- def remove(self, filepath: Union[str, Path]) -> None:
- """Remove a file.
-
- Args:
- filepath (str or Path): Path to be removed.
- """
- os.remove(filepath)
-
- def exists(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path exists.
-
- Args:
- filepath (str or Path): Path to be checked whether exists.
-
- Returns:
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
- """
- return osp.exists(filepath)
-
- def isdir(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a directory.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a
- directory.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a directory,
- ``False`` otherwise.
- """
- return osp.isdir(filepath)
-
- def isfile(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a file.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a file.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
- otherwise.
- """
- return osp.isfile(filepath)
-
- def join_path(self, filepath: Union[str, Path],
- *filepaths: Union[str, Path]) -> str:
- """Concatenate all file paths.
-
- Join one or more filepath components intelligently. The return value
- is the concatenation of filepath and any members of *filepaths.
-
- Args:
- filepath (str or Path): Path to be concatenated.
-
- Returns:
- str: The result of concatenation.
- """
- return osp.join(filepath, *filepaths)
-
- @contextmanager
- def get_local_path(
- self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
- """Only for unified API and do nothing."""
- yield filepath
-
- def list_dir_or_file(self,
- dir_path: Union[str, Path],
- list_dir: bool = True,
- list_file: bool = True,
- suffix: Optional[Union[str, Tuple[str]]] = None,
- recursive: bool = False) -> Iterator[str]:
- """Scan a directory to find the interested directories or files in
- arbitrary order.
-
- Note:
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
-
- Args:
- dir_path (str | Path): Path of the directory.
- list_dir (bool): List the directories. Default: True.
- list_file (bool): List the path of files. Default: True.
- suffix (str or tuple[str], optional): File suffix
- that we are interested in. Default: None.
- recursive (bool): If set to True, recursively scan the
- directory. Default: False.
-
- Yields:
- Iterable[str]: A relative path to ``dir_path``.
- """
- if list_dir and suffix is not None:
- raise TypeError('`suffix` should be None when `list_dir` is True')
-
- if (suffix is not None) and not isinstance(suffix, (str, tuple)):
- raise TypeError('`suffix` must be a string or tuple of strings')
-
- root = dir_path
-
- def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
- recursive):
- for entry in os.scandir(dir_path):
- if not entry.name.startswith('.') and entry.is_file():
- rel_path = osp.relpath(entry.path, root)
- if (suffix is None
- or rel_path.endswith(suffix)) and list_file:
- yield rel_path
- elif osp.isdir(entry.path):
- if list_dir:
- rel_dir = osp.relpath(entry.path, root)
- yield rel_dir
- if recursive:
- yield from _list_dir_or_file(entry.path, list_dir,
- list_file, suffix,
- recursive)
-
- return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
- recursive)
-
-
-class HTTPBackend(BaseStorageBackend):
- """HTTP and HTTPS storage bachend."""
-
- def get(self, filepath):
- value_buf = urlopen(filepath).read()
- return value_buf
-
- def get_text(self, filepath, encoding='utf-8'):
- value_buf = urlopen(filepath).read()
- return value_buf.decode(encoding)
-
- @contextmanager
- def get_local_path(self, filepath: str) -> Iterable[str]:
- """Download a file from ``filepath``.
-
-        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
-        It can be called with a ``with`` statement, and when exiting the
-        ``with`` statement, the temporary path will be released.
-
- Args:
- filepath (str): Download a file from ``filepath``.
-
- Examples:
- >>> client = HTTPBackend()
-            >>> # After exiting from the ``with`` clause,
- >>> # the path will be removed
- >>> with client.get_local_path('http://path/of/your/file') as path:
- ... # do something here
- """
- try:
- f = tempfile.NamedTemporaryFile(delete=False)
- f.write(self.get(filepath))
- f.close()
- yield f.name
- finally:
- os.remove(f.name)
-
-
-class FileClient:
- """A general file client to access files in different backends.
-
- The client loads a file or text in a specified backend from its path
- and returns it as a binary or text file. There are two ways to choose a
-    backend: the name of the backend and the prefix of the path. Although both
-    can be used to choose a storage backend, ``backend`` has a higher priority,
-    that is, if both are set, the storage backend will be chosen by the
-    backend argument. If both are `None`, the disk backend will be chosen.
-    Note that it can also register other backend accessors with a given name,
-    prefixes, and backend class. In addition, we use the singleton pattern to
-    avoid repeated object creation. If the arguments are the same, the same
-    object will be returned.
-
- Args:
- backend (str, optional): The storage backend type. Options are "disk",
- "ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
- prefix (str, optional): The prefix of the registered storage backend.
- Options are "s3", "http", "https". Default: None.
-
- Examples:
- >>> # only set backend
- >>> file_client = FileClient(backend='petrel')
- >>> # only set prefix
- >>> file_client = FileClient(prefix='s3')
- >>> # set both backend and prefix but use backend to choose client
- >>> file_client = FileClient(backend='petrel', prefix='s3')
- >>> # if the arguments are the same, the same object is returned
- >>> file_client1 = FileClient(backend='petrel')
- >>> file_client1 is file_client
- True
-
- Attributes:
- client (:obj:`BaseStorageBackend`): The backend object.
- """
-
- _backends = {
- 'disk': HardDiskBackend,
- 'ceph': CephBackend,
- 'memcached': MemcachedBackend,
- 'lmdb': LmdbBackend,
- 'petrel': PetrelBackend,
- 'http': HTTPBackend,
- }
- # This collection is used to record the overridden backends, and when a
- # backend appears in the collection, the singleton pattern is disabled for
- # that backend, because if the singleton pattern is used, then the object
- # returned will be the backend before overwriting
- _overridden_backends = set()
- _prefix_to_backends = {
- 's3': PetrelBackend,
- 'http': HTTPBackend,
- 'https': HTTPBackend,
- }
- _overridden_prefixes = set()
-
- _instances = {}
-
- def __new__(cls, backend=None, prefix=None, **kwargs):
- if backend is None and prefix is None:
- backend = 'disk'
- if backend is not None and backend not in cls._backends:
- raise ValueError(
- f'Backend {backend} is not supported. Currently supported ones'
- f' are {list(cls._backends.keys())}')
- if prefix is not None and prefix not in cls._prefix_to_backends:
- raise ValueError(
- f'prefix {prefix} is not supported. Currently supported ones '
- f'are {list(cls._prefix_to_backends.keys())}')
-
- # concatenate the arguments to a unique key for determining whether
- # objects with the same arguments were created
- arg_key = f'{backend}:{prefix}'
- for key, value in kwargs.items():
- arg_key += f':{key}:{value}'
-
- # if a backend was overridden, it will create a new object
- if (arg_key in cls._instances
- and backend not in cls._overridden_backends
- and prefix not in cls._overridden_prefixes):
- _instance = cls._instances[arg_key]
- else:
- # create a new object and put it to _instance
- _instance = super().__new__(cls)
- if backend is not None:
- _instance.client = cls._backends[backend](**kwargs)
- else:
- _instance.client = cls._prefix_to_backends[prefix](**kwargs)
-
- cls._instances[arg_key] = _instance
-
- return _instance
-
- @property
- def name(self):
- return self.client.name
-
- @property
- def allow_symlink(self):
- return self.client.allow_symlink
-
- @staticmethod
- def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
- """Parse the prefix of a uri.
-
- Args:
- uri (str | Path): Uri to be parsed that contains the file prefix.
-
- Examples:
- >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
- 's3'
-
- Returns:
- str | None: Return the prefix of uri if the uri contains '://'
- else ``None``.
- """
- assert is_filepath(uri)
- uri = str(uri)
- if '://' not in uri:
- return None
- else:
- prefix, _ = uri.split('://')
-            # In the case of PetrelBackend, the prefix may contain the cluster
- # name like clusterName:s3
- if ':' in prefix:
- _, prefix = prefix.split(':')
- return prefix
-
- @classmethod
- def infer_client(cls,
- file_client_args: Optional[dict] = None,
- uri: Optional[Union[str, Path]] = None) -> 'FileClient':
- """Infer a suitable file client based on the URI and arguments.
-
- Args:
- file_client_args (dict, optional): Arguments to instantiate a
- FileClient. Default: None.
- uri (str | Path, optional): Uri to be parsed that contains the file
- prefix. Default: None.
-
- Examples:
- >>> uri = 's3://path/of/your/file'
- >>> file_client = FileClient.infer_client(uri=uri)
- >>> file_client_args = {'backend': 'petrel'}
- >>> file_client = FileClient.infer_client(file_client_args)
-
- Returns:
- FileClient: Instantiated FileClient object.
- """
- assert file_client_args is not None or uri is not None
- if file_client_args is None:
- file_prefix = cls.parse_uri_prefix(uri) # type: ignore
- return cls(prefix=file_prefix)
- else:
- return cls(**file_client_args)
-
- @classmethod
- def _register_backend(cls, name, backend, force=False, prefixes=None):
- if not isinstance(name, str):
- raise TypeError('the backend name should be a string, '
- f'but got {type(name)}')
- if not inspect.isclass(backend):
- raise TypeError(
- f'backend should be a class but got {type(backend)}')
- if not issubclass(backend, BaseStorageBackend):
- raise TypeError(
- f'backend {backend} is not a subclass of BaseStorageBackend')
- if not force and name in cls._backends:
- raise KeyError(
- f'{name} is already registered as a storage backend, '
- 'add "force=True" if you want to override it')
-
- if name in cls._backends and force:
- cls._overridden_backends.add(name)
- cls._backends[name] = backend
-
- if prefixes is not None:
- if isinstance(prefixes, str):
- prefixes = [prefixes]
- else:
- assert isinstance(prefixes, (list, tuple))
- for prefix in prefixes:
- if prefix not in cls._prefix_to_backends:
- cls._prefix_to_backends[prefix] = backend
- elif (prefix in cls._prefix_to_backends) and force:
- cls._overridden_prefixes.add(prefix)
- cls._prefix_to_backends[prefix] = backend
- else:
- raise KeyError(
- f'{prefix} is already registered as a storage backend,'
- ' add "force=True" if you want to override it')
-
- @classmethod
- def register_backend(cls, name, backend=None, force=False, prefixes=None):
- """Register a backend to FileClient.
-
- This method can be used as a normal class method or a decorator.
-
- .. code-block:: python
-
- class NewBackend(BaseStorageBackend):
-
- def get(self, filepath):
- return filepath
-
- def get_text(self, filepath):
- return filepath
-
- FileClient.register_backend('new', NewBackend)
-
- or
-
- .. code-block:: python
-
- @FileClient.register_backend('new')
- class NewBackend(BaseStorageBackend):
-
- def get(self, filepath):
- return filepath
-
- def get_text(self, filepath):
- return filepath
-
- Args:
- name (str): The name of the registered backend.
- backend (class, optional): The backend class to be registered,
- which must be a subclass of :class:`BaseStorageBackend`.
- When this method is used as a decorator, backend is None.
- Defaults to None.
- force (bool, optional): Whether to override the backend if the name
- has already been registered. Defaults to False.
- prefixes (str or list[str] or tuple[str], optional): The prefixes
- of the registered storage backend. Default: None.
- `New in version 1.3.15.`
- """
- if backend is not None:
- cls._register_backend(
- name, backend, force=force, prefixes=prefixes)
- return
-
- def _register(backend_cls):
- cls._register_backend(
- name, backend_cls, force=force, prefixes=prefixes)
- return backend_cls
-
- return _register
-
- def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]:
- """Read data from a given ``filepath`` with 'rb' mode.
-
- Note:
- There are two types of return values for ``get``, one is ``bytes``
- and the other is ``memoryview``. The advantage of using memoryview
- is that you can avoid copying, and if you want to convert it to
- ``bytes``, you can use ``.tobytes()``.
-
- Args:
- filepath (str or Path): Path to read data.
-
- Returns:
- bytes | memoryview: Expected bytes object or a memory view of the
- bytes object.
- """
- return self.client.get(filepath)
-
- def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str:
- """Read data from a given ``filepath`` with 'r' mode.
-
- Args:
- filepath (str or Path): Path to read data.
- encoding (str): The encoding format used to open the ``filepath``.
- Default: 'utf-8'.
-
- Returns:
- str: Expected text reading from ``filepath``.
- """
- return self.client.get_text(filepath, encoding)
-
- def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
- """Write data to a given ``filepath`` with 'wb' mode.
-
- Note:
- ``put`` should create a directory if the directory of ``filepath``
- does not exist.
-
- Args:
- obj (bytes): Data to be written.
- filepath (str or Path): Path to write data.
- """
- self.client.put(obj, filepath)
-
- def put_text(self, obj: str, filepath: Union[str, Path]) -> None:
- """Write data to a given ``filepath`` with 'w' mode.
-
- Note:
- ``put_text`` should create a directory if the directory of
- ``filepath`` does not exist.
-
- Args:
- obj (str): Data to be written.
- filepath (str or Path): Path to write data.
- encoding (str, optional): The encoding format used to open the
- `filepath`. Default: 'utf-8'.
- """
- self.client.put_text(obj, filepath)
-
- def remove(self, filepath: Union[str, Path]) -> None:
- """Remove a file.
-
- Args:
- filepath (str, Path): Path to be removed.
- """
- self.client.remove(filepath)
-
- def exists(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path exists.
-
- Args:
- filepath (str or Path): Path to be checked whether exists.
-
- Returns:
- bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
- """
- return self.client.exists(filepath)
-
- def isdir(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a directory.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a
- directory.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a directory,
- ``False`` otherwise.
- """
- return self.client.isdir(filepath)
-
- def isfile(self, filepath: Union[str, Path]) -> bool:
- """Check whether a file path is a file.
-
- Args:
- filepath (str or Path): Path to be checked whether it is a file.
-
- Returns:
- bool: Return ``True`` if ``filepath`` points to a file, ``False``
- otherwise.
- """
- return self.client.isfile(filepath)
-
- def join_path(self, filepath: Union[str, Path],
- *filepaths: Union[str, Path]) -> str:
- """Concatenate all file paths.
-
- Join one or more filepath components intelligently. The return value
- is the concatenation of filepath and any members of *filepaths.
-
- Args:
- filepath (str or Path): Path to be concatenated.
-
- Returns:
- str: The result of concatenation.
- """
- return self.client.join_path(filepath, *filepaths)
-
- @contextmanager
- def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
- """Download data from ``filepath`` and write the data to local path.
-
-        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
-        It can be called with a ``with`` statement, and when exiting the
-        ``with`` statement, the temporary path will be released.
-
- Note:
- If the ``filepath`` is a local path, just return itself.
-
- .. warning::
- ``get_local_path`` is an experimental interface that may change in
- the future.
-
- Args:
-            filepath (str or Path): Path to read data from.
-
- Examples:
- >>> file_client = FileClient(prefix='s3')
- >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path:
- ... # do something here
-
- Yields:
- Iterable[str]: Only yield one path.
- """
- with self.client.get_local_path(str(filepath)) as local_path:
- yield local_path
-
- def list_dir_or_file(self,
- dir_path: Union[str, Path],
- list_dir: bool = True,
- list_file: bool = True,
- suffix: Optional[Union[str, Tuple[str]]] = None,
- recursive: bool = False) -> Iterator[str]:
- """Scan a directory to find the interested directories or files in
- arbitrary order.
-
- Note:
- :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
-
- Args:
- dir_path (str | Path): Path of the directory.
- list_dir (bool): List the directories. Default: True.
- list_file (bool): List the path of files. Default: True.
- suffix (str or tuple[str], optional): File suffix
- that we are interested in. Default: None.
- recursive (bool): If set to True, recursively scan the
- directory. Default: False.
-
- Yields:
- Iterable[str]: A relative path to ``dir_path``.
- """
- yield from self.client.list_dir_or_file(dir_path, list_dir, list_file,
- suffix, recursive)
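-
-
-# A minimal usage sketch (illustrative only; it relies solely on the classes and
-# methods defined above): read bytes from local disk and text over HTTP through
-# the same unified FileClient interface.
-if __name__ == '__main__':
-    disk_client = FileClient(backend='disk')
-    data = disk_client.get(__file__)  # raw bytes of this very file
-    print(disk_client.isfile(__file__), len(data))
-
-    # The backend is inferred from the 'https://' prefix, i.e. HTTPBackend.
-    http_client = FileClient.infer_client(uri='https://example.com/')
-    print(http_client.get_text('https://example.com/')[:80])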
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/README.md b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/README.md
deleted file mode 100644
index 463368280d6a5015060eb73d20fe6512f8e04c50..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/README.md
+++ /dev/null
@@ -1,323 +0,0 @@
-# (Generic) EfficientNets for PyTorch
-
-A 'generic' implementation of EfficientNet, MixNet, MobileNetV3, etc. that covers most of the compute/parameter efficient architectures derived from the MobileNet V1/V2 block sequence, including those found via automated neural architecture search.
-
-All models are implemented by GenEfficientNet or MobileNetV3 classes, with string based architecture definitions to configure the block layouts (idea from [here](https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py))
-
-## What's New
-
-### Aug 19, 2020
-* Add updated PyTorch trained EfficientNet-B3 weights trained by myself with `timm` (82.1 top-1)
-* Add PyTorch trained EfficientNet-Lite0 contributed by [@hal-314](https://github.com/hal-314) (75.5 top-1)
-* Update ONNX and Caffe2 export / utility scripts to work with latest PyTorch / ONNX
-* ONNX runtime based validation script added
-* activations (mostly) brought in sync with `timm` equivalents
-
-
-### April 5, 2020
-* Add some newly trained MobileNet-V2 models trained with latest h-params, rand augment. They compare quite favourably to EfficientNet-Lite
- * 3.5M param MobileNet-V2 100 @ 73%
- * 4.5M param MobileNet-V2 110d @ 75%
- * 6.1M param MobileNet-V2 140 @ 76.5%
- * 5.8M param MobileNet-V2 120d @ 77.3%
-
-### March 23, 2020
- * Add EfficientNet-Lite models w/ weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
- * Add PyTorch trained MobileNet-V3 Large weights with 75.77% top-1
- * IMPORTANT CHANGE (if training from scratch) - weight init changed to better match Tensorflow impl, set `fix_group_fanout=False` in `initialize_weight_goog` for old behavior
-
-### Feb 12, 2020
- * Add EfficientNet-L2 and B0-B7 NoisyStudent weights ported from [Tensorflow TPU](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
- * Port new EfficientNet-B8 (RandAugment) weights from TF TPU, these are different than the B8 AdvProp, different input normalization.
- * Add RandAugment PyTorch trained EfficientNet-ES (EdgeTPU-Small) weights with 78.1 top-1. Trained by [Andrew Lavin](https://github.com/andravin)
-
-### Jan 22, 2020
- * Update weights for EfficientNet B0, B2, B3 and MixNet-XL with latest RandAugment trained weights. Trained with (https://github.com/rwightman/pytorch-image-models)
- * Fix torchscript compatibility for PyTorch 1.4, add torchscript support for MixedConv2d using ModuleDict
- * Test models, torchscript, onnx export with PyTorch 1.4 -- no issues
-
-### Nov 22, 2019
- * New top-1 high! Ported official TF EfficientNet AdvProp (https://arxiv.org/abs/1911.09665) weights and B8 model spec. Created a new set of `ap` models since they use a different
- preprocessing (Inception mean/std) from the original EfficientNet base/AA/RA weights.
-
-### Nov 15, 2019
- * Ported official TF MobileNet-V3 float32 large/small/minimalistic weights
- * Modifications to MobileNet-V3 model and components to support some additional config needed for differences between TF MobileNet-V3 and mine
-
-### Oct 30, 2019
- * Many of the models will now work with torch.jit.script, MixNet being the biggest exception
- * Improved interface for enabling torchscript or ONNX export compatible modes (via config)
-  * Add JIT optimized mem-efficient Swish/Mish autograd.fn in addition to memory-efficient autograd.fn
- * Activation factory to select best version of activation by name or override one globally
- * Add pretrained checkpoint load helper that handles input conv and classifier changes
-
-### Oct 27, 2019
- * Add CondConv EfficientNet variants ported from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv
- * Add RandAug weights for TF EfficientNet B5 and B7 from https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
- * Bring over MixNet-XL model and depth scaling algo from my pytorch-image-models code base
- * Switch activations and global pooling to modules
- * Add memory-efficient Swish/Mish impl
- * Add as_sequential() method to all models and allow as an argument in entrypoint fns
- * Move MobileNetV3 into own file since it has a different head
- * Remove ChamNet, MobileNet V2/V1 since they will likely never be used here
-
-## Models
-
-Implemented models include:
- * EfficientNet NoisyStudent (B0-B7, L2) (https://arxiv.org/abs/1911.04252)
- * EfficientNet AdvProp (B0-B8) (https://arxiv.org/abs/1911.09665)
- * EfficientNet (B0-B8) (https://arxiv.org/abs/1905.11946)
- * EfficientNet-EdgeTPU (S, M, L) (https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html)
- * EfficientNet-CondConv (https://arxiv.org/abs/1904.04971)
- * EfficientNet-Lite (https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite)
- * MixNet (https://arxiv.org/abs/1907.09595)
- * MNASNet B1, A1 (Squeeze-Excite), and Small (https://arxiv.org/abs/1807.11626)
- * MobileNet-V3 (https://arxiv.org/abs/1905.02244)
- * FBNet-C (https://arxiv.org/abs/1812.03443)
- * Single-Path NAS (https://arxiv.org/abs/1904.02877)
-
-I originally implemented and trained some of these models with the code [here](https://github.com/rwightman/pytorch-image-models); this repository contains just the GenEfficientNet models, validation, and associated ONNX/Caffe2 export code.
-
-## Pretrained
-
-I've managed to train several of the models to accuracies close to or above the originating papers and official impl. My training code is here: https://github.com/rwightman/pytorch-image-models
-
-
-|Model | Prec@1 (Err) | Prec@5 (Err) | Param#(M) | MAdds(M) | Image Scaling | Resolution | Crop |
-|---|---|---|---|---|---|---|---|
-| efficientnet_b3 | 82.240 (17.760) | 96.116 (3.884) | 12.23 | TBD | bicubic | 320 | 1.0 |
-| efficientnet_b3 | 82.076 (17.924) | 96.020 (3.980) | 12.23 | TBD | bicubic | 300 | 0.904 |
-| mixnet_xl | 81.074 (18.926) | 95.282 (4.718) | 11.90 | TBD | bicubic | 256 | 1.0 |
-| efficientnet_b2 | 80.612 (19.388) | 95.318 (4.682) | 9.1 | TBD | bicubic | 288 | 1.0 |
-| mixnet_xl | 80.476 (19.524) | 94.936 (5.064) | 11.90 | TBD | bicubic | 224 | 0.875 |
-| efficientnet_b2 | 80.288 (19.712) | 95.166 (4.834) | 9.1 | 1003 | bicubic | 260 | 0.890 |
-| mixnet_l | 78.976 (21.024) | 94.184 (5.816) | 7.33 | TBD | bicubic | 224 | 0.875 |
-| efficientnet_b1 | 78.692 (21.308) | 94.086 (5.914) | 7.8 | 694 | bicubic | 240 | 0.882 |
-| efficientnet_es | 78.066 (21.934) | 93.926 (6.074) | 5.44 | TBD | bicubic | 224 | 0.875 |
-| efficientnet_b0 | 77.698 (22.302) | 93.532 (6.468) | 5.3 | 390 | bicubic | 224 | 0.875 |
-| mobilenetv2_120d | 77.294 (22.706) | 93.502 (6.498) | 5.8 | TBD | bicubic | 224 | 0.875 |
-| mixnet_m | 77.256 (22.744) | 93.418 (6.582) | 5.01 | 353 | bicubic | 224 | 0.875 |
-| mobilenetv2_140 | 76.524 (23.476) | 92.990 (7.010) | 6.1 | TBD | bicubic | 224 | 0.875 |
-| mixnet_s | 75.988 (24.012) | 92.794 (7.206) | 4.13 | TBD | bicubic | 224 | 0.875 |
-| mobilenetv3_large_100 | 75.766 (24.234) | 92.542 (7.458) | 5.5 | TBD | bicubic | 224 | 0.875 |
-| mobilenetv3_rw | 75.634 (24.366) | 92.708 (7.292) | 5.5 | 219 | bicubic | 224 | 0.875 |
-| efficientnet_lite0 | 75.472 (24.528) | 92.520 (7.480) | 4.65 | TBD | bicubic | 224 | 0.875 |
-| mnasnet_a1 | 75.448 (24.552) | 92.604 (7.396) | 3.9 | 312 | bicubic | 224 | 0.875 |
-| fbnetc_100 | 75.124 (24.876) | 92.386 (7.614) | 5.6 | 385 | bilinear | 224 | 0.875 |
-| mobilenetv2_110d | 75.052 (24.948) | 92.180 (7.820) | 4.5 | TBD | bicubic | 224 | 0.875 |
-| mnasnet_b1 | 74.658 (25.342) | 92.114 (7.886) | 4.4 | 315 | bicubic | 224 | 0.875 |
-| spnasnet_100 | 74.084 (25.916) | 91.818 (8.182) | 4.4 | TBD | bilinear | 224 | 0.875 |
-| mobilenetv2_100 | 72.978 (27.022) | 91.016 (8.984) | 3.5 | TBD | bicubic | 224 | 0.875 |
-
-
-More pretrained models to come...
-
-
-## Ported Weights
-
-The weights ported from Tensorflow checkpoints for the EfficientNet models match the Tensorflow accuracy pretty closely once a 'SAME' convolution padding equivalent is added and the same crop factors, image scaling, etc. (see table) are used via the cmd line args.
-
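-The padding trick can be sketched in plain PyTorch as below. This is only an
-illustrative sketch of the idea, not the implementation used in this repo (the
-helper name `pad_same` is made up here):
-
-```
-import math
-import torch
-import torch.nn.functional as F
-
-def pad_same(x, kernel_size, stride, dilation=1):
-    # Pad asymmetrically so the output spatial size equals ceil(input / stride),
-    # which is what Tensorflow 'SAME' padding produces for square kernels.
-    ih, iw = x.shape[-2:]
-    pad_h = max((math.ceil(ih / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - ih, 0)
-    pad_w = max((math.ceil(iw / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - iw, 0)
-    # F.pad takes [left, right, top, bottom] padding for the last two dims
-    return F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
-
-x = torch.randn(1, 3, 224, 224)
-y = pad_same(x, kernel_size=3, stride=2)  # pad before a stride-2 3x3 conv
-```
-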
-**IMPORTANT:**
-* Tensorflow ported weights for EfficientNet AdvProp (AP), EfficientNet EdgeTPU, EfficientNet-CondConv, EfficientNet-Lite, and MobileNet-V3 models use Inception style (0.5, 0.5, 0.5) for mean and std (see the preprocessing sketch below).
-* Enabling the Tensorflow preprocessing pipeline with `--tf-preprocessing` at validation time will improve scores by 0.1-0.5%, very close to original TF impl.
-
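-In torchvision terms the difference is just the mean/std handed to `Normalize`.
-A minimal preprocessing sketch (the 256/224 resize/crop values are only an
-example; see the tables for the per-model image size and crop factor):
-
-```
->>> from torchvision import transforms
->>> IMAGENET_MEAN, IMAGENET_STD = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)  # default EfficientNet weights
->>> INCEPTION_MEAN = INCEPTION_STD = (0.5, 0.5, 0.5)  # AP / EdgeTPU / CondConv / Lite / MobileNet-V3 weights
->>> preprocess = transforms.Compose([
-...     transforms.Resize(256),
-...     transforms.CenterCrop(224),
-...     transforms.ToTensor(),
-...     transforms.Normalize(INCEPTION_MEAN, INCEPTION_STD),
-... ])
-```
-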
-To run validation for tf_efficientnet_b5:
-`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --crop-pct 0.934 --interpolation bicubic`
-
-To run validation w/ TF preprocessing for tf_efficientnet_b5:
-`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b5 -b 64 --img-size 456 --tf-preprocessing`
-
-To run validation for a model with Inception preprocessing, ie EfficientNet-B8 AdvProp:
-`python validate.py /path/to/imagenet/validation/ --model tf_efficientnet_b8_ap -b 48 --num-gpu 2 --img-size 672 --crop-pct 0.954 --mean 0.5 --std 0.5`
-
-|Model | Prec@1 (Err) | Prec@5 (Err) | Param # | Image Scaling | Image Size | Crop |
-|---|---|---|---|---|---|---|
-| tf_efficientnet_l2_ns *tfp | 88.352 (11.648) | 98.652 (1.348) | 480 | bicubic | 800 | N/A |
-| tf_efficientnet_l2_ns | TBD | TBD | 480 | bicubic | 800 | 0.961 |
-| tf_efficientnet_l2_ns_475 | 88.234 (11.766) | 98.546 (1.454) | 480 | bicubic | 475 | 0.936 |
-| tf_efficientnet_l2_ns_475 *tfp | 88.172 (11.828) | 98.566 (1.434) | 480 | bicubic | 475 | N/A |
-| tf_efficientnet_b7_ns *tfp | 86.844 (13.156) | 98.084 (1.916) | 66.35 | bicubic | 600 | N/A |
-| tf_efficientnet_b7_ns | 86.840 (13.160) | 98.094 (1.906) | 66.35 | bicubic | 600 | N/A |
-| tf_efficientnet_b6_ns | 86.452 (13.548) | 97.882 (2.118) | 43.04 | bicubic | 528 | N/A |
-| tf_efficientnet_b6_ns *tfp | 86.444 (13.556) | 97.880 (2.120) | 43.04 | bicubic | 528 | N/A |
-| tf_efficientnet_b5_ns *tfp | 86.064 (13.936) | 97.746 (2.254) | 30.39 | bicubic | 456 | N/A |
-| tf_efficientnet_b5_ns | 86.088 (13.912) | 97.752 (2.248) | 30.39 | bicubic | 456 | N/A |
-| tf_efficientnet_b8_ap *tfp | 85.436 (14.564) | 97.272 (2.728) | 87.4 | bicubic | 672 | N/A |
-| tf_efficientnet_b8 *tfp | 85.384 (14.616) | 97.394 (2.606) | 87.4 | bicubic | 672 | N/A |
-| tf_efficientnet_b8 | 85.370 (14.630) | 97.390 (2.610) | 87.4 | bicubic | 672 | 0.954 |
-| tf_efficientnet_b8_ap | 85.368 (14.632) | 97.294 (2.706) | 87.4 | bicubic | 672 | 0.954 |
-| tf_efficientnet_b4_ns *tfp | 85.298 (14.702) | 97.504 (2.496) | 19.34 | bicubic | 380 | N/A |
-| tf_efficientnet_b4_ns | 85.162 (14.838) | 97.470 (2.530) | 19.34 | bicubic | 380 | 0.922 |
-| tf_efficientnet_b7_ap *tfp | 85.154 (14.846) | 97.244 (2.756) | 66.35 | bicubic | 600 | N/A |
-| tf_efficientnet_b7_ap | 85.118 (14.882) | 97.252 (2.748) | 66.35 | bicubic | 600 | 0.949 |
-| tf_efficientnet_b7 *tfp | 84.940 (15.060) | 97.214 (2.786) | 66.35 | bicubic | 600 | N/A |
-| tf_efficientnet_b7 | 84.932 (15.068) | 97.208 (2.792) | 66.35 | bicubic | 600 | 0.949 |
-| tf_efficientnet_b6_ap | 84.786 (15.214) | 97.138 (2.862) | 43.04 | bicubic | 528 | 0.942 |
-| tf_efficientnet_b6_ap *tfp | 84.760 (15.240) | 97.124 (2.876) | 43.04 | bicubic | 528 | N/A |
-| tf_efficientnet_b5_ap *tfp | 84.276 (15.724) | 96.932 (3.068) | 30.39 | bicubic | 456 | N/A |
-| tf_efficientnet_b5_ap | 84.254 (15.746) | 96.976 (3.024) | 30.39 | bicubic | 456 | 0.934 |
-| tf_efficientnet_b6 *tfp | 84.140 (15.860) | 96.852 (3.148) | 43.04 | bicubic | 528 | N/A |
-| tf_efficientnet_b6 | 84.110 (15.890) | 96.886 (3.114) | 43.04 | bicubic | 528 | 0.942 |
-| tf_efficientnet_b3_ns *tfp | 84.054 (15.946) | 96.918 (3.082) | 12.23 | bicubic | 300 | N/A |
-| tf_efficientnet_b3_ns | 84.048 (15.952) | 96.910 (3.090) | 12.23 | bicubic | 300 | 0.904 |
-| tf_efficientnet_b5 *tfp | 83.822 (16.178) | 96.756 (3.244) | 30.39 | bicubic | 456 | N/A |
-| tf_efficientnet_b5 | 83.812 (16.188) | 96.748 (3.252) | 30.39 | bicubic | 456 | 0.934 |
-| tf_efficientnet_b4_ap *tfp | 83.278 (16.722) | 96.376 (3.624) | 19.34 | bicubic | 380 | N/A |
-| tf_efficientnet_b4_ap | 83.248 (16.752) | 96.388 (3.612) | 19.34 | bicubic | 380 | 0.922 |
-| tf_efficientnet_b4 | 83.022 (16.978) | 96.300 (3.700) | 19.34 | bicubic | 380 | 0.922 |
-| tf_efficientnet_b4 *tfp | 82.948 (17.052) | 96.308 (3.692) | 19.34 | bicubic | 380 | N/A |
-| tf_efficientnet_b2_ns *tfp | 82.436 (17.564) | 96.268 (3.732) | 9.11 | bicubic | 260 | N/A |
-| tf_efficientnet_b2_ns | 82.380 (17.620) | 96.248 (3.752) | 9.11 | bicubic | 260 | 0.89 |
-| tf_efficientnet_b3_ap *tfp | 81.882 (18.118) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
-| tf_efficientnet_b3_ap | 81.828 (18.172) | 95.624 (4.376) | 12.23 | bicubic | 300 | 0.904 |
-| tf_efficientnet_b3 | 81.636 (18.364) | 95.718 (4.282) | 12.23 | bicubic | 300 | 0.904 |
-| tf_efficientnet_b3 *tfp | 81.576 (18.424) | 95.662 (4.338) | 12.23 | bicubic | 300 | N/A |
-| tf_efficientnet_lite4 | 81.528 (18.472) | 95.668 (4.332) | 13.00 | bilinear | 380 | 0.92 |
-| tf_efficientnet_b1_ns *tfp | 81.514 (18.486) | 95.776 (4.224) | 7.79 | bicubic | 240 | N/A |
-| tf_efficientnet_lite4 *tfp | 81.502 (18.498) | 95.676 (4.324) | 13.00 | bilinear | 380 | N/A |
-| tf_efficientnet_b1_ns | 81.388 (18.612) | 95.738 (4.262) | 7.79 | bicubic | 240 | 0.88 |
-| tf_efficientnet_el | 80.534 (19.466) | 95.190 (4.810) | 10.59 | bicubic | 300 | 0.904 |
-| tf_efficientnet_el *tfp | 80.476 (19.524) | 95.200 (4.800) | 10.59 | bicubic | 300 | N/A |
-| tf_efficientnet_b2_ap *tfp | 80.420 (19.580) | 95.040 (4.960) | 9.11 | bicubic | 260 | N/A |
-| tf_efficientnet_b2_ap | 80.306 (19.694) | 95.028 (4.972) | 9.11 | bicubic | 260 | 0.890 |
-| tf_efficientnet_b2 *tfp | 80.188 (19.812) | 94.974 (5.026) | 9.11 | bicubic | 260 | N/A |
-| tf_efficientnet_b2 | 80.086 (19.914) | 94.908 (5.092) | 9.11 | bicubic | 260 | 0.890 |
-| tf_efficientnet_lite3 | 79.812 (20.188) | 94.914 (5.086) | 8.20 | bilinear | 300 | 0.904 |
-| tf_efficientnet_lite3 *tfp | 79.734 (20.266) | 94.838 (5.162) | 8.20 | bilinear | 300 | N/A |
-| tf_efficientnet_b1_ap *tfp | 79.532 (20.468) | 94.378 (5.622) | 7.79 | bicubic | 240 | N/A |
-| tf_efficientnet_cc_b1_8e *tfp | 79.464 (20.536)| 94.492 (5.508) | 39.7 | bicubic | 240 | 0.88 |
-| tf_efficientnet_cc_b1_8e | 79.298 (20.702) | 94.364 (5.636) | 39.7 | bicubic | 240 | 0.88 |
-| tf_efficientnet_b1_ap | 79.278 (20.722) | 94.308 (5.692) | 7.79 | bicubic | 240 | 0.88 |
-| tf_efficientnet_b1 *tfp | 79.172 (20.828) | 94.450 (5.550) | 7.79 | bicubic | 240 | N/A |
-| tf_efficientnet_em *tfp | 78.958 (21.042) | 94.458 (5.542) | 6.90 | bicubic | 240 | N/A |
-| tf_efficientnet_b0_ns *tfp | 78.806 (21.194) | 94.496 (5.504) | 5.29 | bicubic | 224 | N/A |
-| tf_mixnet_l *tfp | 78.846 (21.154) | 94.212 (5.788) | 7.33 | bilinear | 224 | N/A |
-| tf_efficientnet_b1 | 78.826 (21.174) | 94.198 (5.802) | 7.79 | bicubic | 240 | 0.88 |
-| tf_mixnet_l | 78.770 (21.230) | 94.004 (5.996) | 7.33 | bicubic | 224 | 0.875 |
-| tf_efficientnet_em | 78.742 (21.258) | 94.332 (5.668) | 6.90 | bicubic | 240 | 0.875 |
-| tf_efficientnet_b0_ns | 78.658 (21.342) | 94.376 (5.624) | 5.29 | bicubic | 224 | 0.875 |
-| tf_efficientnet_cc_b0_8e *tfp | 78.314 (21.686) | 93.790 (6.210) | 24.0 | bicubic | 224 | 0.875 |
-| tf_efficientnet_cc_b0_8e | 77.908 (22.092) | 93.656 (6.344) | 24.0 | bicubic | 224 | 0.875 |
-| tf_efficientnet_cc_b0_4e *tfp | 77.746 (22.254) | 93.552 (6.448) | 13.3 | bicubic | 224 | 0.875 |
-| tf_efficientnet_cc_b0_4e | 77.304 (22.696) | 93.332 (6.668) | 13.3 | bicubic | 224 | 0.875 |
-| tf_efficientnet_es *tfp | 77.616 (22.384) | 93.750 (6.250) | 5.44 | bicubic | 224 | N/A |
-| tf_efficientnet_lite2 *tfp | 77.544 (22.456) | 93.800 (6.200) | 6.09 | bilinear | 260 | N/A |
-| tf_efficientnet_lite2 | 77.460 (22.540) | 93.746 (6.254) | 6.09 | bicubic | 260 | 0.89 |
-| tf_efficientnet_b0_ap *tfp | 77.514 (22.486) | 93.576 (6.424) | 5.29 | bicubic | 224 | N/A |
-| tf_efficientnet_es | 77.264 (22.736) | 93.600 (6.400) | 5.44 | bicubic | 224 | N/A |
-| tf_efficientnet_b0 *tfp | 77.258 (22.742) | 93.478 (6.522) | 5.29 | bicubic | 224 | N/A |
-| tf_efficientnet_b0_ap | 77.084 (22.916) | 93.254 (6.746) | 5.29 | bicubic | 224 | 0.875 |
-| tf_mixnet_m *tfp | 77.072 (22.928) | 93.368 (6.632) | 5.01 | bilinear | 224 | N/A |
-| tf_mixnet_m | 76.950 (23.050) | 93.156 (6.844) | 5.01 | bicubic | 224 | 0.875 |
-| tf_efficientnet_b0 | 76.848 (23.152) | 93.228 (6.772) | 5.29 | bicubic | 224 | 0.875 |
-| tf_efficientnet_lite1 *tfp | 76.764 (23.236) | 93.326 (6.674) | 5.42 | bilinear | 240 | N/A |
-| tf_efficientnet_lite1 | 76.638 (23.362) | 93.232 (6.768) | 5.42 | bicubic | 240 | 0.882 |
-| tf_mixnet_s *tfp | 75.800 (24.200) | 92.788 (7.212) | 4.13 | bilinear | 224 | N/A |
-| tf_mobilenetv3_large_100 *tfp | 75.768 (24.232) | 92.710 (7.290) | 5.48 | bilinear | 224 | N/A |
-| tf_mixnet_s | 75.648 (24.352) | 92.636 (7.364) | 4.13 | bicubic | 224 | 0.875 |
-| tf_mobilenetv3_large_100 | 75.516 (24.484) | 92.600 (7.400) | 5.48 | bilinear | 224 | 0.875 |
-| tf_efficientnet_lite0 *tfp | 75.074 (24.926) | 92.314 (7.686) | 4.65 | bilinear | 224 | N/A |
-| tf_efficientnet_lite0 | 74.842 (25.158) | 92.170 (7.830) | 4.65 | bicubic | 224 | 0.875 |
-| tf_mobilenetv3_large_075 *tfp | 73.730 (26.270) | 91.616 (8.384) | 3.99 | bilinear | 224 |N/A |
-| tf_mobilenetv3_large_075 | 73.442 (26.558) | 91.352 (8.648) | 3.99 | bilinear | 224 | 0.875 |
-| tf_mobilenetv3_large_minimal_100 *tfp | 72.678 (27.322) | 90.860 (9.140) | 3.92 | bilinear | 224 | N/A |
-| tf_mobilenetv3_large_minimal_100 | 72.244 (27.756) | 90.636 (9.364) | 3.92 | bilinear | 224 | 0.875 |
-| tf_mobilenetv3_small_100 *tfp | 67.918 (32.082) | 87.958 (12.042) | 2.54 | bilinear | 224 | N/A |
-| tf_mobilenetv3_small_100 | 67.918 (32.082) | 87.662 (12.338) | 2.54 | bilinear | 224 | 0.875 |
-| tf_mobilenetv3_small_075 *tfp | 66.142 (33.858) | 86.498 (13.502) | 2.04 | bilinear | 224 | N/A |
-| tf_mobilenetv3_small_075 | 65.718 (34.282) | 86.136 (13.864) | 2.04 | bilinear | 224 | 0.875 |
-| tf_mobilenetv3_small_minimal_100 *tfp | 63.378 (36.622) | 84.802 (15.198) | 2.04 | bilinear | 224 | N/A |
-| tf_mobilenetv3_small_minimal_100 | 62.898 (37.102) | 84.230 (15.770) | 2.04 | bilinear | 224 | 0.875 |
-
-
-*tfp models validated with `tf-preprocessing` pipeline
-
-Google tf and tflite weights ported from official Tensorflow repositories
-* https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet
-* https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
-* https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
-
-## Usage
-
-### Environment
-
-All development and testing have been done in Conda Python 3 environments on Linux x86-64 systems, specifically with Python 3.6.x, 3.7.x, and 3.8.x.
-
-Users have reported that a Python 3 Anaconda install on Windows works. I have not verified this myself.
-
-PyTorch versions 1.4, 1.5, 1.6 have been tested with this code.
-
-I've tried to keep the dependencies minimal; the setup follows the PyTorch default install instructions for Conda:
-```
-conda create -n torch-env
-conda activate torch-env
-conda install -c pytorch pytorch torchvision cudatoolkit=10.2
-```
-
-### PyTorch Hub
-
-Models can be accessed via the PyTorch Hub API:
-
-```
->>> torch.hub.list('rwightman/gen-efficientnet-pytorch')
-['efficientnet_b0', ...]
->>> model = torch.hub.load('rwightman/gen-efficientnet-pytorch', 'efficientnet_b0', pretrained=True)
->>> model.eval()
->>> output = model(torch.randn(1,3,224,224))
-```
-
-### Pip
-This package can be installed via pip.
-
-Install (after conda env/install):
-```
-pip install geffnet
-```
-
-Eval use:
-```
->>> import geffnet
->>> m = geffnet.create_model('mobilenetv3_large_100', pretrained=True)
->>> m.eval()
-```
-
-Train use:
-```
->>> import geffnet
->>> # models can also be created by using the entrypoint directly
->>> m = geffnet.efficientnet_b2(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2)
->>> m.train()
-```
-
-Create in a nn.Sequential container, for fast.ai, etc:
-```
->>> import geffnet
->>> m = geffnet.mixnet_l(pretrained=True, drop_rate=0.25, drop_connect_rate=0.2, as_sequential=True)
-```
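-
-Since the result is a plain `nn.Sequential`, it can be sliced like any other container, e.g. to drop the classifier head for feature extraction. A minimal sketch using standard PyTorch container indexing (it assumes the classifier is the last child module):
-```
->>> import torch
->>> backbone = torch.nn.Sequential(*list(m.children())[:-1])
->>> features = backbone(torch.randn(1, 3, 224, 224))
-```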
-
-### Exporting
-
-Scripts are included to
-* export models to ONNX (`onnx_export.py`)
-* optimize the ONNX graph (`onnx_optimize.py` or `onnx_validate.py` w/ `--onnx-output-opt` arg)
-* validate with ONNX runtime (`onnx_validate.py`)
-* convert ONNX model to Caffe2 (`onnx_to_caffe.py`)
-* validate in Caffe2 (`caffe2_validate.py`)
-* benchmark in Caffe2 w/ FLOPs, parameters output (`caffe2_benchmark.py`)
-
-As an example, to export the MobileNet-V3 pretrained model and then run an ImageNet validation:
-```
-python onnx_export.py --model mobilenetv3_large_100 ./mobilenetv3_100.onnx
-python onnx_validate.py /imagenet/validation/ --onnx-input ./mobilenetv3_100.onnx
-```
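-
-The exported graph can also be sanity-checked directly with the ONNX Runtime Python API, independent of `onnx_validate.py`. A minimal sketch (the file name and input shape follow the export example above; `onnxruntime` is a separate install, and newer GPU builds may additionally want an explicit `providers` argument):
-```
->>> import numpy as np
->>> import onnxruntime as ort
->>> sess = ort.InferenceSession('./mobilenetv3_100.onnx')
->>> input_name = sess.get_inputs()[0].name
->>> logits = sess.run(None, {input_name: np.random.randn(1, 3, 224, 224).astype(np.float32)})[0]
-```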
-
-These scripts were tested and working as of PyTorch 1.6 and ONNX 1.7 w/ ONNX Runtime 1.4. Caffe2-compatible
-export now requires additional args mentioned in the export script (not needed in earlier versions).
-
-#### Export Notes
-1. The TF ported weights with the 'SAME' conv padding activated cannot be exported to ONNX unless the `_EXPORTABLE` flag in `config.py` is set to True. Use `config.set_exportable(True)` as in the `onnx_export.py` script (see the sketch after this list).
-2. TF ported models with 'SAME' padding will have the padding fixed at export time to the resolution used for export. Even though dynamic padding is supported in opset >= 11, I haven't been able to get it working.
-3. The ONNX optimize facility doesn't work reliably with PyTorch 1.6 / ONNX 1.7. Fortunately, onnxruntime-based inference now works very well and includes on-the-fly optimization.
-4. ONNX / Caffe2 export/import frequently breaks with different PyTorch and ONNX version releases. Please check their respective issue trackers before filing issues here.
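-
-A minimal sketch of an export with the exportable flag set (mirroring what the `onnx_export.py` script does; the `geffnet.config.set_exportable` module path, the opset, and the output path are assumptions for illustration):
-```
->>> import torch
->>> import geffnet
->>> geffnet.config.set_exportable(True)
->>> m = geffnet.create_model('tf_efficientnet_b0', pretrained=True)
->>> m.eval()
->>> torch.onnx.export(m, torch.randn(1, 3, 224, 224), './tf_efficientnet_b0.onnx', opset_version=10)
-```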
-
-
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/utils/pos_embed.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/utils/pos_embed.py
deleted file mode 100644
index aa11d60db65fa98c140e7d75bdf985ff7ece8f18..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/utils/pos_embed.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# --------------------------------------------------------
-# Position embedding utils
-# --------------------------------------------------------
-
-from typing import Tuple
-
-import numpy as np
-import torch
-
-
-# --------------------------------------------------------
-# 2D sine-cosine position embedding
-# References:
-# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
-# MoCo v3: https://github.com/facebookresearch/moco-v3
-# --------------------------------------------------------
-def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
- """
- grid_size: int of the grid height and width
- return:
- pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
- """
- grid_h = np.arange(grid_size, dtype=np.float32)
- grid_w = np.arange(grid_size, dtype=np.float32)
- grid = np.meshgrid(grid_w, grid_h) # here w goes first
- grid = np.stack(grid, axis=0)
-
- grid = grid.reshape([2, 1, grid_size, grid_size])
- pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
- if cls_token:
- pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
- return pos_embed
-
-
-def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
- assert embed_dim % 2 == 0
-
- # use half of dimensions to encode grid_h
- emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
- emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
-
- emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
- return emb
-
-
-def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
- """
- embed_dim: output dimension for each position
- pos: a list of positions to be encoded: size (M,)
- out: (M, D)
- """
- assert embed_dim % 2 == 0
-    omega = np.arange(embed_dim // 2, dtype=float)  # np.float alias was removed in NumPy 1.24; the builtin float is equivalent
- omega /= embed_dim / 2.0
- omega = 1.0 / 10000 ** omega # (D/2,)
-
- pos = pos.reshape(-1) # (M,)
- out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
-
- emb_sin = np.sin(out) # (M, D/2)
- emb_cos = np.cos(out) # (M, D/2)
-
- emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
- return emb
-
-
-# --------------------------------------------------------
-# Interpolate position embeddings for high-resolution
-# References:
-# DeiT: https://github.com/facebookresearch/deit
-# --------------------------------------------------------
-def interpolate_pos_embed(model, checkpoint_model, pos_embed_key):
- if pos_embed_key in checkpoint_model:
- pos_embed_checkpoint = checkpoint_model[pos_embed_key]
- embedding_size = pos_embed_checkpoint.shape[-1]
- num_patches = model.num_patches
- if pos_embed_key.startswith("decoder"):
- num_extra_tokens = model.decoder_pos_embed.shape[-2] - num_patches
- else:
- num_extra_tokens = model.pos_embed.shape[-2] - num_patches
- # height (== width) for the checkpoint position embedding
- orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
- # height (== width) for the new position embedding
- new_size = int(num_patches ** 0.5)
- # class_token and dist_token are kept unchanged
- if orig_size != new_size:
- print(
- "Position interpolate from %dx%d to %dx%d"
- % (orig_size, orig_size, new_size, new_size)
- )
- extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
- # only the position tokens are interpolated
- pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
- pos_tokens = pos_tokens.reshape(
- -1, orig_size, orig_size, embedding_size
- ).permute(0, 3, 1, 2)
- pos_tokens = torch.nn.functional.interpolate(
- pos_tokens,
- size=(new_size, new_size),
- mode="bicubic",
- align_corners=False,
- )
- pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
- new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
- checkpoint_model[pos_embed_key] = new_pos_embed
-
-
-def interpolate_pos_embed_online(
-    pos_embed, orig_size: Tuple[int, int], new_size: Tuple[int, int], num_extra_tokens: int
-):
- extra_tokens = pos_embed[:, :num_extra_tokens]
- pos_tokens = pos_embed[:, num_extra_tokens:]
- embedding_size = pos_tokens.shape[-1]
- pos_tokens = pos_tokens.reshape(
- -1, orig_size[0], orig_size[1], embedding_size
- ).permute(0, 3, 1, 2)
- pos_tokens = torch.nn.functional.interpolate(
- pos_tokens, size=new_size, mode="bicubic", align_corners=False,
- )
- pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
- new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
- return new_pos_embed
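-
-
-# Minimal self-check sketch (not part of the original utilities): build a 14x14
-# grid embedding with a cls token, then resize it online to 16x16. The embedding
-# dimension and grid sizes below are illustrative only.
-if __name__ == "__main__":
-    pe = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
-    assert pe.shape == (1 + 14 * 14, 768)
-    pe_t = torch.from_numpy(pe).float().unsqueeze(0)  # [1, 1 + 196, 768]
-    resized = interpolate_pos_embed_online(pe_t, (14, 14), (16, 16), num_extra_tokens=1)
-    assert resized.shape == (1, 1 + 16 * 16, 768)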
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/exp/upernet_global_small/test_config_w32.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
deleted file mode 100644
index 3d9e06f029e46c14cb9ddb39319cabe86fef9b44..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = [
- '../../configs/_base_/models/upernet_uniformer.py',
- '../../configs/_base_/datasets/ade20k.py',
- '../../configs/_base_/default_runtime.py',
- '../../configs/_base_/schedules/schedule_160k.py'
-]
-model = dict(
- backbone=dict(
- type='UniFormer',
- embed_dim=[64, 128, 320, 512],
- layers=[3, 4, 8, 3],
- head_dim=64,
- drop_path_rate=0.25,
- windows=True,
- hybrid=False,
- window_size=32
- ),
- decode_head=dict(
- in_channels=[64, 128, 320, 512],
- num_classes=150
- ),
- auxiliary_head=dict(
- in_channels=320,
- num_classes=150
- ))
-
-# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
-optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
- paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
- 'relative_position_bias_table': dict(decay_mult=0.),
- 'norm': dict(decay_mult=0.)}))
-
-lr_config = dict(_delete_=True, policy='poly',
- warmup='linear',
- warmup_iters=1500,
- warmup_ratio=1e-6,
- power=1.0, min_lr=0.0, by_epoch=False)
-
-data=dict(samples_per_gpu=2)
\ No newline at end of file
diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/compute/cached_computation_func.py b/spaces/cymic/Talking_Head_Anime_3/tha3/compute/cached_computation_func.py
deleted file mode 100644
index 4641629c1bc2ea2d8a3409a95bc2ae9dadd58289..0000000000000000000000000000000000000000
--- a/spaces/cymic/Talking_Head_Anime_3/tha3/compute/cached_computation_func.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from typing import Callable, Dict, List
-
-from torch import Tensor
-from torch.nn import Module
-
-TensorCachedComputationFunc = Callable[
- [Dict[str, Module], List[Tensor], Dict[str, List[Tensor]]], Tensor]
-TensorListCachedComputationFunc = Callable[
- [Dict[str, Module], List[Tensor], Dict[str, List[Tensor]]], List[Tensor]]
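-
-
-# Hypothetical example (not part of the original module) of a function matching the
-# TensorCachedComputationFunc signature: it returns a tensor for a made-up cache key,
-# computing and memoizing it from a (made-up) module name on the first call.
-def _example_cached_func(
-        modules: Dict[str, Module],
-        batch: List[Tensor],
-        outputs: Dict[str, List[Tensor]]) -> Tensor:
-    if "example" not in outputs:
-        outputs["example"] = [modules["example_module"](batch[0])]
-    return outputs["example"][0]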
diff --git a/spaces/cymic/Waifu_Diffusion_Webui/javascript/progressbar.js b/spaces/cymic/Waifu_Diffusion_Webui/javascript/progressbar.js
deleted file mode 100644
index f9e9290e2152d57522cdac5b1c7032c67cffdeee..0000000000000000000000000000000000000000
--- a/spaces/cymic/Waifu_Diffusion_Webui/javascript/progressbar.js
+++ /dev/null
@@ -1,68 +0,0 @@
-// code related to showing and updating the progress bar shown while the image is being generated
-global_progressbars = {}
-
-function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_interrupt, id_preview, id_gallery){
- var progressbar = gradioApp().getElementById(id_progressbar)
- var interrupt = gradioApp().getElementById(id_interrupt)
-
- if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
- if(progressbar.innerText){
- let newtitle = 'Stable Diffusion - ' + progressbar.innerText
- if(document.title != newtitle){
- document.title = newtitle;
- }
- }else{
- let newtitle = 'Stable Diffusion'
- if(document.title != newtitle){
- document.title = newtitle;
- }
- }
- }
-
- if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){
- global_progressbars[id_progressbar] = progressbar
-
- var mutationObserver = new MutationObserver(function(m){
- preview = gradioApp().getElementById(id_preview)
- gallery = gradioApp().getElementById(id_gallery)
-
- if(preview != null && gallery != null){
- preview.style.width = gallery.clientWidth + "px"
- preview.style.height = gallery.clientHeight + "px"
-
- var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- if(!progressDiv){
- interrupt.style.display = "none"
- }
- }
-
- window.setTimeout(function(){ requestMoreProgress(id_part, id_progressbar_span, id_interrupt) }, 500)
- });
- mutationObserver.observe( progressbar, { childList:true, subtree:true })
- }
-}
-
-onUiUpdate(function(){
- check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery')
- check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery')
- check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', 'ti_interrupt', 'ti_preview', 'ti_gallery')
-})
-
-function requestMoreProgress(id_part, id_progressbar_span, id_interrupt){
- btn = gradioApp().getElementById(id_part+"_check_progress");
- if(btn==null) return;
-
- btn.click();
- var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
- var interrupt = gradioApp().getElementById(id_interrupt)
- if(progressDiv && interrupt){
- interrupt.style.display = "block"
- }
-}
-
-function requestProgress(id_part){
- btn = gradioApp().getElementById(id_part+"_check_progress_initial");
- if(btn==null) return;
-
- btn.click();
-}
diff --git a/spaces/davda54/chat-nort5/question_detection_norbert3_small/configuration_norbert.py b/spaces/davda54/chat-nort5/question_detection_norbert3_small/configuration_norbert.py
deleted file mode 100644
index 450a0286801acce50a7dd9378efa34391e1ca918..0000000000000000000000000000000000000000
--- a/spaces/davda54/chat-nort5/question_detection_norbert3_small/configuration_norbert.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from transformers.configuration_utils import PretrainedConfig
-
-
-class NorbertConfig(PretrainedConfig):
- """Configuration class to store the configuration of a `NorbertModel`.
- """
- def __init__(
- self,
- vocab_size=50000,
- attention_probs_dropout_prob=0.1,
- hidden_dropout_prob=0.1,
- hidden_size=768,
- intermediate_size=2048,
- max_position_embeddings=512,
- position_bucket_size=32,
- num_attention_heads=12,
- num_hidden_layers=12,
- layer_norm_eps=1.0e-7,
- output_all_encoded_layers=True,
- **kwargs,
- ):
- super().__init__(**kwargs)
-
- self.vocab_size = vocab_size
- self.hidden_size = hidden_size
- self.num_hidden_layers = num_hidden_layers
- self.num_attention_heads = num_attention_heads
- self.intermediate_size = intermediate_size
- self.hidden_dropout_prob = hidden_dropout_prob
- self.attention_probs_dropout_prob = attention_probs_dropout_prob
- self.max_position_embeddings = max_position_embeddings
- self.output_all_encoded_layers = output_all_encoded_layers
- self.position_bucket_size = position_bucket_size
- self.layer_norm_eps = layer_norm_eps
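-
-
-# Minimal usage sketch (not part of the original file): the defaults above describe a
-# base-sized encoder; a small variant would typically override a few of them. The
-# override values below are illustrative, not the actual norbert3-small configuration.
-if __name__ == "__main__":
-    config = NorbertConfig(hidden_size=384, num_hidden_layers=12, num_attention_heads=6)
-    print(config.hidden_size, config.num_attention_heads, config.intermediate_size)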
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/r-3ca97919.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/r-3ca97919.js
deleted file mode 100644
index e460c951763f569906751f34aed4265f5d719d36..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/r-3ca97919.js
+++ /dev/null
@@ -1,2 +0,0 @@
-function f(e){for(var n={},r=0;r=!&|~$:]/,t;function p(e,n){t=null;var r=e.next();if(r=="#")return e.skipToEnd(),"comment";if(r=="0"&&e.eat("x"))return e.eatWhile(/[\da-f]/i),"number";if(r=="."&&e.eat(/\d/))return e.match(/\d*(?:e[+\-]?\d+)?/),"number";if(/\d/.test(r))return e.match(/\d*(?:\.\d+)?(?:e[+\-]\d+)?L?/),"number";if(r=="'"||r=='"')return n.tokenize=E(r),"string";if(r=="`")return e.match(/[^`]+`/),"string.special";if(r=="."&&e.match(/.(?:[.]|\d+)/))return"keyword";if(/[a-zA-Z\.]/.test(r)){e.eatWhile(/[\w\.]/);var i=e.current();return h.propertyIsEnumerable(i)?"atom":N.propertyIsEnumerable(i)?(A.propertyIsEnumerable(i)&&!e.match(/\s*if(\s+|$)/,!1)&&(t="block"),"keyword"):m.propertyIsEnumerable(i)?"builtin":"variable"}else return r=="%"?(e.skipTo("%")&&e.next(),"variableName.special"):r=="<"&&e.eat("-")||r=="<"&&e.match("<-")||r=="-"&&e.match(/>>?/)||r=="="&&n.ctx.argList?"operator":k.test(r)?(r=="$"||e.eatWhile(k),"operator"):/[\(\){}\[\];]/.test(r)?(t=r,r==";"?"punctuation":null):null}function E(e){return function(n,r){if(n.eat("\\")){var i=n.next();return i=="x"?n.match(/^[a-f0-9]{2}/i):(i=="u"||i=="U")&&n.eat("{")&&n.skipTo("}")?n.next():i=="u"?n.match(/^[a-f0-9]{4}/i):i=="U"?n.match(/^[a-f0-9]{8}/i):/[0-7]/.test(i)&&n.match(/^[0-7]{1,2}/),"string.special"}else{for(var l;(l=n.next())!=null;){if(l==e){r.tokenize=p;break}if(l=="\\"){n.backUp(1);break}}return"string"}}}var v=1,u=2,c=4;function o(e,n,r){e.ctx={type:n,indent:e.indent,flags:0,column:r.column(),prev:e.ctx}}function x(e,n){var r=e.ctx;e.ctx={type:r.type,indent:r.indent,flags:r.flags|n,column:r.column,prev:r.prev}}function a(e){e.indent=e.ctx.indent,e.ctx=e.ctx.prev}const I={name:"r",startState:function(e){return{tokenize:p,ctx:{type:"top",indent:-e,flags:u},indent:0,afterIdent:!1}},token:function(e,n){if(e.sol()&&(n.ctx.flags&3||(n.ctx.flags|=u),n.ctx.flags&c&&a(n),n.indent=e.indentation()),e.eatSpace())return null;var r=n.tokenize(e,n);return r!="comment"&&!(n.ctx.flags&u)&&x(n,v),(t==";"||t=="{"||t=="}")&&n.ctx.type=="block"&&a(n),t=="{"?o(n,"}",e):t=="("?(o(n,")",e),n.afterIdent&&(n.ctx.argList=!0)):t=="["?o(n,"]",e):t=="block"?o(n,"block",e):t==n.ctx.type?a(n):n.ctx.type=="block"&&r!="comment"&&x(n,c),n.afterIdent=r=="variable"||r=="keyword",r},indent:function(e,n,r){if(e.tokenize!=p)return 0;var i=n&&n.charAt(0),l=e.ctx,d=i==l.type;return l.flags&c&&(l=l.prev),l.type=="block"?l.indent+(i=="{"?0:r.unit):l.flags&v?l.column+(d?0:1):l.indent+(d?0:r.unit)},languageData:{wordChars:".",commentTokens:{line:"#"},autocomplete:b.concat(g,s)}};export{I as r};
-//# sourceMappingURL=r-3ca97919.js.map
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/__init__.py
deleted file mode 100644
index 6606868a7aaaef5ba3b85507bd0cc6f34806d947..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/markdown_it/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""A Python port of Markdown-It"""
-__all__ = ("MarkdownIt",)
-__version__ = "3.0.0"
-
-from .main import MarkdownIt
diff --git a/spaces/ddiddi/bhasha.dev/README.md b/spaces/ddiddi/bhasha.dev/README.md
deleted file mode 100644
index 90f22e0a17d0c541ef1e140e9a27d384eb750df1..0000000000000000000000000000000000000000
--- a/spaces/ddiddi/bhasha.dev/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Bhasha.dev
-emoji: 🚀
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.4.1
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/declare-lab/tango/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py b/spaces/declare-lab/tango/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py
deleted file mode 100644
index c3dee5aa9e9a159849ba260ccb706496af5ae84b..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py
+++ /dev/null
@@ -1,496 +0,0 @@
-import inspect
-from typing import List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torchvision import transforms
-from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
-
-from diffusers import (
- AutoencoderKL,
- DDIMScheduler,
- DiffusionPipeline,
- DPMSolverMultistepScheduler,
- LMSDiscreteScheduler,
- PNDMScheduler,
- UNet2DConditionModel,
-)
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import (
- PIL_INTERPOLATION,
- deprecate,
- randn_tensor,
-)
-
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```
- from io import BytesIO
-
- import requests
- import torch
- from diffusers import DiffusionPipeline
- from PIL import Image
- from transformers import CLIPFeatureExtractor, CLIPModel
-
- feature_extractor = CLIPFeatureExtractor.from_pretrained(
- "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
- )
- clip_model = CLIPModel.from_pretrained(
- "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
- )
-
-
- guided_pipeline = DiffusionPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4",
- # custom_pipeline="clip_guided_stable_diffusion",
- custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py",
- clip_model=clip_model,
- feature_extractor=feature_extractor,
- torch_dtype=torch.float16,
- )
- guided_pipeline.enable_attention_slicing()
- guided_pipeline = guided_pipeline.to("cuda")
-
- prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
-
- url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
- response = requests.get(url)
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
-
- image = guided_pipeline(
- prompt=prompt,
- num_inference_steps=30,
- image=init_image,
- strength=0.75,
- guidance_scale=7.5,
- clip_guidance_scale=100,
- num_cutouts=4,
- use_cutouts=False,
- ).images[0]
- display(image)
- ```
-"""
-
-
-def preprocess(image, w, h):
- if isinstance(image, torch.Tensor):
- return image
- elif isinstance(image, PIL.Image.Image):
- image = [image]
-
- if isinstance(image[0], PIL.Image.Image):
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- image = np.array(image).astype(np.float32) / 255.0
- image = image.transpose(0, 3, 1, 2)
- image = 2.0 * image - 1.0
- image = torch.from_numpy(image)
- elif isinstance(image[0], torch.Tensor):
- image = torch.cat(image, dim=0)
- return image
-
-
-class MakeCutouts(nn.Module):
- def __init__(self, cut_size, cut_power=1.0):
- super().__init__()
-
- self.cut_size = cut_size
- self.cut_power = cut_power
-
- def forward(self, pixel_values, num_cutouts):
- sideY, sideX = pixel_values.shape[2:4]
- max_size = min(sideX, sideY)
- min_size = min(sideX, sideY, self.cut_size)
- cutouts = []
- for _ in range(num_cutouts):
- size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
- offsetx = torch.randint(0, sideX - size + 1, ())
- offsety = torch.randint(0, sideY - size + 1, ())
- cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
- cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
- return torch.cat(cutouts)
-
-
-def spherical_dist_loss(x, y):
- x = F.normalize(x, dim=-1)
- y = F.normalize(y, dim=-1)
- return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
-
-
-def set_requires_grad(model, value):
- for param in model.parameters():
- param.requires_grad = value
-
-
-class CLIPGuidedStableDiffusion(DiffusionPipeline):
- """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
- - https://github.com/Jack000/glid-3-xl
- - https://github.dev/crowsonkb/k-diffusion
- """
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- clip_model: CLIPModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
- feature_extractor: CLIPFeatureExtractor,
- ):
- super().__init__()
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- clip_model=clip_model,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- feature_extractor=feature_extractor,
- )
-
- self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
- self.cut_out_size = (
- feature_extractor.size
- if isinstance(feature_extractor.size, int)
- else feature_extractor.size["shortest_edge"]
- )
- self.make_cutouts = MakeCutouts(self.cut_out_size)
-
- set_requires_grad(self.text_encoder, False)
- set_requires_grad(self.clip_model, False)
-
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
- if slice_size == "auto":
- # half the attention head size is usually a good trade-off between
- # speed and memory
- slice_size = self.unet.config.attention_head_dim // 2
- self.unet.set_attention_slice(slice_size)
-
- def disable_attention_slicing(self):
- self.enable_attention_slicing(None)
-
- def freeze_vae(self):
- set_requires_grad(self.vae, False)
-
- def unfreeze_vae(self):
- set_requires_grad(self.vae, True)
-
- def freeze_unet(self):
- set_requires_grad(self.unet, False)
-
- def unfreeze_unet(self):
- set_requires_grad(self.unet, True)
-
- def get_timesteps(self, num_inference_steps, strength, device):
- # get the original timestep using init_timestep
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
- t_start = max(num_inference_steps - init_timestep, 0)
- timesteps = self.scheduler.timesteps[t_start:]
-
- return timesteps, num_inference_steps - t_start
-
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
- raise ValueError(
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
- )
-
- image = image.to(device=device, dtype=dtype)
-
- batch_size = batch_size * num_images_per_prompt
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if isinstance(generator, list):
- init_latents = [
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
- ]
- init_latents = torch.cat(init_latents, dim=0)
- else:
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
-
- init_latents = self.vae.config.scaling_factor * init_latents
-
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
- # expand init_latents for batch_size
- deprecation_message = (
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
- " your script to pass as many initial images as text prompts to suppress this warning."
- )
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
- additional_image_per_prompt = batch_size // init_latents.shape[0]
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
- raise ValueError(
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
- )
- else:
- init_latents = torch.cat([init_latents], dim=0)
-
- shape = init_latents.shape
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
- # get latents
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
- latents = init_latents
-
- return latents
-
- @torch.enable_grad()
- def cond_fn(
- self,
- latents,
- timestep,
- index,
- text_embeddings,
- noise_pred_original,
- text_embeddings_clip,
- clip_guidance_scale,
- num_cutouts,
- use_cutouts=True,
- ):
- latents = latents.detach().requires_grad_()
-
- latent_model_input = self.scheduler.scale_model_input(latents, timestep)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
-
- if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
- beta_prod_t = 1 - alpha_prod_t
- # compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
-
- fac = torch.sqrt(beta_prod_t)
- sample = pred_original_sample * (fac) + latents * (1 - fac)
- elif isinstance(self.scheduler, LMSDiscreteScheduler):
- sigma = self.scheduler.sigmas[index]
- sample = latents - sigma * noise_pred
- else:
- raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
-
- sample = 1 / self.vae.config.scaling_factor * sample
- image = self.vae.decode(sample).sample
- image = (image / 2 + 0.5).clamp(0, 1)
-
- if use_cutouts:
- image = self.make_cutouts(image, num_cutouts)
- else:
- image = transforms.Resize(self.cut_out_size)(image)
- image = self.normalize(image).to(latents.dtype)
-
- image_embeddings_clip = self.clip_model.get_image_features(image)
- image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
-
- if use_cutouts:
- dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
- dists = dists.view([num_cutouts, sample.shape[0], -1])
- loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
- else:
- loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
-
- grads = -torch.autograd.grad(loss, latents)[0]
-
- if isinstance(self.scheduler, LMSDiscreteScheduler):
- latents = latents.detach() + grads * (sigma**2)
- noise_pred = noise_pred_original
- else:
- noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
- return noise_pred, latents
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- height: Optional[int] = 512,
- width: Optional[int] = 512,
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- clip_guidance_scale: Optional[float] = 100,
- clip_prompt: Optional[Union[str, List[str]]] = None,
- num_cutouts: Optional[int] = 4,
- use_cutouts: Optional[bool] = True,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- ):
- if isinstance(prompt, str):
- batch_size = 1
- elif isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- # get prompt text embeddings
- text_input = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
- # duplicate text embeddings for each generation per prompt
- text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
- # set timesteps
- accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
- extra_set_kwargs = {}
- if accepts_offset:
- extra_set_kwargs["offset"] = 1
-
- self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
- # Some schedulers like PNDM have timesteps as arrays
- # It's more optimized to move all timesteps to correct device beforehand
- self.scheduler.timesteps.to(self.device)
-
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
-
- # Preprocess image
- image = preprocess(image, width, height)
- latents = self.prepare_latents(
- image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator
- )
-
- if clip_guidance_scale > 0:
- if clip_prompt is not None:
- clip_text_input = self.tokenizer(
- clip_prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- ).input_ids.to(self.device)
- else:
- clip_text_input = text_input.input_ids.to(self.device)
- text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
- text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
- # duplicate text embeddings clip for each generation per prompt
- text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
-
-        # here `guidance_scale` is defined analogous to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- max_length = text_input.input_ids.shape[-1]
- uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
- # duplicate unconditional embeddings for each generation per prompt
- uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # get the initial random noise unless the user supplied it
-
- # Unlike in other pipelines, latents need to be generated in the target device
- # for 1-to-1 results reproducibility with the CompVis implementation.
- # However this currently doesn't work in `mps`.
- latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)
- latents_dtype = text_embeddings.dtype
- if latents is None:
- if self.device.type == "mps":
- # randn does not work reproducibly on mps
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
- self.device
- )
- else:
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
- else:
- if latents.shape != latents_shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
- latents = latents.to(self.device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
-
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
-
- with self.progress_bar(total=num_inference_steps):
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
- # perform classifier free guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # perform clip guidance
- if clip_guidance_scale > 0:
- text_embeddings_for_guidance = (
- text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
- )
- noise_pred, latents = self.cond_fn(
- latents,
- t,
- i,
- text_embeddings_for_guidance,
- noise_pred,
- text_embeddings_clip,
- clip_guidance_scale,
- num_cutouts,
- use_cutouts,
- )
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # scale and decode the image latents with vae
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents).sample
-
- image = (image / 2 + 0.5).clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, None)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
deleted file mode 100644
index ce41572e683c4a59f4a6c2335875986ca18d9358..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_unclip.py
+++ /dev/null
@@ -1,933 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-import torch
-from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
-from transformers.models.clip.modeling_clip import CLIPTextModelOutput
-
-from ...loaders import TextualInversionLoaderMixin
-from ...models import AutoencoderKL, PriorTransformer, UNet2DConditionModel
-from ...models.embeddings import get_timestep_embedding
-from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```py
- >>> import torch
- >>> from diffusers import StableUnCLIPPipeline
-
- >>> pipe = StableUnCLIPPipeline.from_pretrained(
- ... "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
- ... ) # TODO update model path
- >>> pipe = pipe.to("cuda")
-
- >>> prompt = "a photo of an astronaut riding a horse on mars"
- >>> images = pipe(prompt).images
- >>> images[0].save("astronaut_horse.png")
- ```
-"""
-
-
-class StableUnCLIPPipeline(DiffusionPipeline, TextualInversionLoaderMixin):
- """
- Pipeline for text-to-image generation using stable unCLIP.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- prior_tokenizer ([`CLIPTokenizer`]):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- prior_text_encoder ([`CLIPTextModelWithProjection`]):
- Frozen text-encoder.
- prior ([`PriorTransformer`]):
-            The canonical unCLIP prior to approximate the image embedding from the text embedding.
- prior_scheduler ([`KarrasDiffusionSchedulers`]):
- Scheduler used in the prior denoising process.
- image_normalizer ([`StableUnCLIPImageNormalizer`]):
- Used to normalize the predicted image embeddings before the noise is applied and un-normalize the image
- embeddings after the noise has been applied.
- image_noising_scheduler ([`KarrasDiffusionSchedulers`]):
- Noise schedule for adding noise to the predicted image embeddings. The amount of noise to add is determined
- by `noise_level` in `StableUnCLIPPipeline.__call__`.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder.
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`KarrasDiffusionSchedulers`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents.
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- """
-
- # prior components
- prior_tokenizer: CLIPTokenizer
- prior_text_encoder: CLIPTextModelWithProjection
- prior: PriorTransformer
- prior_scheduler: KarrasDiffusionSchedulers
-
- # image noising components
- image_normalizer: StableUnCLIPImageNormalizer
- image_noising_scheduler: KarrasDiffusionSchedulers
-
- # regular denoising components
- tokenizer: CLIPTokenizer
- text_encoder: CLIPTextModel
- unet: UNet2DConditionModel
- scheduler: KarrasDiffusionSchedulers
-
- vae: AutoencoderKL
-
- def __init__(
- self,
- # prior components
- prior_tokenizer: CLIPTokenizer,
- prior_text_encoder: CLIPTextModelWithProjection,
- prior: PriorTransformer,
- prior_scheduler: KarrasDiffusionSchedulers,
- # image noising components
- image_normalizer: StableUnCLIPImageNormalizer,
- image_noising_scheduler: KarrasDiffusionSchedulers,
- # regular denoising components
- tokenizer: CLIPTokenizer,
- text_encoder: CLIPTextModelWithProjection,
- unet: UNet2DConditionModel,
- scheduler: KarrasDiffusionSchedulers,
- # vae
- vae: AutoencoderKL,
- ):
- super().__init__()
-
- self.register_modules(
- prior_tokenizer=prior_tokenizer,
- prior_text_encoder=prior_text_encoder,
- prior=prior,
- prior_scheduler=prior_scheduler,
- image_normalizer=image_normalizer,
- image_noising_scheduler=image_noising_scheduler,
- tokenizer=tokenizer,
- text_encoder=text_encoder,
- unet=unet,
- scheduler=scheduler,
- vae=vae,
- )
-
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
- def enable_vae_slicing(self):
- r"""
- Enable sliced VAE decoding.
-
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
- steps. This is useful to save some memory and allow larger batch sizes.
- """
- self.vae.enable_slicing()
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
- def disable_vae_slicing(self):
- r"""
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
- computing decoding in one step.
- """
- self.vae.disable_slicing()
-
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, the pipeline's
-        models have their state dicts saved to CPU and then are moved to a `torch.device('meta')` and loaded to the GPU only
- when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
-        # TODO: self.prior.post_process_latents and self.image_noiser.{scale,unscale} are not covered by the offload hooks, so they fail if added to the list
- models = [
- self.prior_text_encoder,
- self.text_encoder,
- self.unet,
- self.vae,
- ]
- for cpu_offloaded_model in models:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- def enable_model_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
- """
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
- from accelerate import cpu_offload_with_hook
- else:
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- if self.device.type != "cpu":
- self.to("cpu", silence_dtype_warnings=True)
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
-
- hook = None
- for cpu_offloaded_model in [self.text_encoder, self.prior_text_encoder, self.unet, self.vae]:
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
- # We'll offload the last model manually.
- self.final_offload_hook = hook
-
- @property
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline._encode_prompt with _encode_prompt->_encode_prior_prompt, tokenizer->prior_tokenizer, text_encoder->prior_text_encoder
- def _encode_prior_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
- text_attention_mask: Optional[torch.Tensor] = None,
- ):
- if text_model_output is None:
- batch_size = len(prompt) if isinstance(prompt, list) else 1
- # get prompt text embeddings
- text_inputs = self.prior_tokenizer(
- prompt,
- padding="max_length",
- max_length=self.prior_tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- text_mask = text_inputs.attention_mask.bool().to(device)
-
- untruncated_ids = self.prior_tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.prior_tokenizer.batch_decode(
- untruncated_ids[:, self.prior_tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.prior_tokenizer.model_max_length} tokens: {removed_text}"
- )
- text_input_ids = text_input_ids[:, : self.prior_tokenizer.model_max_length]
-
- prior_text_encoder_output = self.prior_text_encoder(text_input_ids.to(device))
-
- prompt_embeds = prior_text_encoder_output.text_embeds
- prior_text_encoder_hidden_states = prior_text_encoder_output.last_hidden_state
-
- else:
- batch_size = text_model_output[0].shape[0]
- prompt_embeds, prior_text_encoder_hidden_states = text_model_output[0], text_model_output[1]
- text_mask = text_attention_mask
-
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- prior_text_encoder_hidden_states = prior_text_encoder_hidden_states.repeat_interleave(
- num_images_per_prompt, dim=0
- )
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- if do_classifier_free_guidance:
- uncond_tokens = [""] * batch_size
-
- uncond_input = self.prior_tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=self.prior_tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- uncond_text_mask = uncond_input.attention_mask.bool().to(device)
- negative_prompt_embeds_prior_text_encoder_output = self.prior_text_encoder(
- uncond_input.input_ids.to(device)
- )
-
- negative_prompt_embeds = negative_prompt_embeds_prior_text_encoder_output.text_embeds
- uncond_prior_text_encoder_hidden_states = (
- negative_prompt_embeds_prior_text_encoder_output.last_hidden_state
- )
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
-
- seq_len = uncond_prior_text_encoder_hidden_states.shape[1]
- uncond_prior_text_encoder_hidden_states = uncond_prior_text_encoder_hidden_states.repeat(
- 1, num_images_per_prompt, 1
- )
- uncond_prior_text_encoder_hidden_states = uncond_prior_text_encoder_hidden_states.view(
- batch_size * num_images_per_prompt, seq_len, -1
- )
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- # done duplicates
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
- prior_text_encoder_hidden_states = torch.cat(
- [uncond_prior_text_encoder_hidden_states, prior_text_encoder_hidden_states]
- )
-
- text_mask = torch.cat([uncond_text_mask, text_mask])
-
- return prompt_embeds, prior_text_encoder_hidden_states, text_mask
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt=None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- ):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- """
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- if prompt_embeds is None:
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- prompt_embeds = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- prompt_embeds = prompt_embeds[0]
-
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- bs_embed, seq_len, _ = prompt_embeds.shape
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance and negative_prompt_embeds is None:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
-            # textual inversion: process multi-vector tokens if necessary
- if isinstance(self, TextualInversionLoaderMixin):
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
-
- max_length = prompt_embeds.shape[1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- negative_prompt_embeds = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- negative_prompt_embeds = negative_prompt_embeds[0]
-
- if do_classifier_free_guidance:
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = negative_prompt_embeds.shape[1]
-
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
- return prompt_embeds
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
- def decode_latents(self, latents):
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs with prepare_extra_step_kwargs->prepare_prior_extra_step_kwargs, scheduler->prior_scheduler
- def prepare_prior_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the prior_scheduler step, since not all prior_schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other prior_schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.prior_scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the prior_scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.prior_scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
- def prepare_extra_step_kwargs(self, generator, eta):
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
-
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
- return extra_step_kwargs
-
- def check_inputs(
- self,
- prompt,
- height,
- width,
- callback_steps,
- noise_level,
- negative_prompt=None,
- prompt_embeds=None,
- negative_prompt_embeds=None,
- ):
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- if prompt is not None and prompt_embeds is not None:
- raise ValueError(
- "Provide either `prompt` or `prompt_embeds`. Please make sure to define only one of the two."
- )
-
- if prompt is None and prompt_embeds is None:
- raise ValueError(
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
- )
-
- if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if negative_prompt is not None and negative_prompt_embeds is not None:
- raise ValueError(
- "Provide either `negative_prompt` or `negative_prompt_embeds`. Cannot leave both `negative_prompt` and `negative_prompt_embeds` undefined."
- )
-
- if prompt is not None and negative_prompt is not None:
- if type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
-
- if prompt_embeds is not None and negative_prompt_embeds is not None:
- if prompt_embeds.shape != negative_prompt_embeds.shape:
- raise ValueError(
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
- f" {negative_prompt_embeds.shape}."
- )
-
- if noise_level < 0 or noise_level >= self.image_noising_scheduler.config.num_train_timesteps:
- raise ValueError(
- f"`noise_level` must be between 0 and {self.image_noising_scheduler.config.num_train_timesteps - 1}, inclusive."
- )
-
- # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
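- # Draw fresh Gaussian latents when none are supplied (or validate the shape of user-provided
- # ones), then scale them by the scheduler's initial noise sigma.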
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- latents = latents * scheduler.init_noise_sigma
- return latents
-
- def noise_image_embeddings(
- self,
- image_embeds: torch.Tensor,
- noise_level: int,
- noise: Optional[torch.FloatTensor] = None,
- generator: Optional[torch.Generator] = None,
- ):
- """
- Add noise to the image embeddings. The amount of noise is controlled by a `noise_level` input. A higher
- `noise_level` increases the variance in the final un-noised images.
-
- The noise is applied in two ways:
- 1. A noise schedule is applied directly to the embeddings.
- 2. A vector of sinusoidal time embeddings is appended to the output.
-
- In both cases, the amount of noise is controlled by the same `noise_level`.
-
- The embeddings are normalized before the noise is applied and un-normalized after the noise is applied.
- """
- if noise is None:
- noise = randn_tensor(
- image_embeds.shape, generator=generator, device=image_embeds.device, dtype=image_embeds.dtype
- )
-
- noise_level = torch.tensor([noise_level] * image_embeds.shape[0], device=image_embeds.device)
-
- self.image_normalizer.to(image_embeds.device)
- image_embeds = self.image_normalizer.scale(image_embeds)
-
- image_embeds = self.image_noising_scheduler.add_noise(image_embeds, timesteps=noise_level, noise=noise)
-
- image_embeds = self.image_normalizer.unscale(image_embeds)
-
- noise_level = get_timestep_embedding(
- timesteps=noise_level, embedding_dim=image_embeds.shape[-1], flip_sin_to_cos=True, downscale_freq_shift=0
- )
-
- # `get_timestep_embedding` does not contain any weights and always returns f32 tensors,
- # but we might actually be running in fp16, so we need to cast here.
- # There might be better ways to encapsulate this.
- noise_level = noise_level.to(image_embeds.dtype)
-
- image_embeds = torch.cat((image_embeds, noise_level), 1)
-
- return image_embeds
-
- @torch.no_grad()
- @replace_example_docstring(EXAMPLE_DOC_STRING)
- def __call__(
- self,
- # regular denoising process args
- prompt: Optional[Union[str, List[str]]] = None,
- height: Optional[int] = None,
- width: Optional[int] = None,
- num_inference_steps: int = 20,
- guidance_scale: float = 10.0,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- prompt_embeds: Optional[torch.FloatTensor] = None,
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
- noise_level: int = 0,
- # prior args
- prior_num_inference_steps: int = 25,
- prior_guidance_scale: float = 4.0,
- prior_latents: Optional[torch.FloatTensor] = None,
- ):
- """
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
- instead.
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 20):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 10.0):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
- 1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
- less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
- to make generation deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
- tensor will be generated by sampling using the supplied random `generator`.
- prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
- provided, text embeddings will be generated from `prompt` input argument.
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
- argument.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
- cross_attention_kwargs (`dict`, *optional*):
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
- `self.processor` in
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
- noise_level (`int`, *optional*, defaults to `0`):
- The amount of noise to add to the image embeddings. A higher `noise_level` increases the variance in
- the final un-noised images. See `StableUnCLIPPipeline.noise_image_embeddings` for details.
- prior_num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps in the prior denoising process. More denoising steps usually lead to a
- higher quality image at the expense of slower inference.
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
- Guidance scale for the prior denoising process as defined in [Classifier-Free Diffusion
- Guidance](https://arxiv.org/abs/2207.12598). `prior_guidance_scale` is defined as `w` of equation 2. of
- [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting
- `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely linked to
- the text `prompt`, usually at the expense of lower image quality.
- prior_latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- embedding generation in the prior denoising process. Can be used to tweak the same generation with
- different prompts. If not provided, a latents tensor will be generated by sampling using the supplied
- random `generator`.
-
- Examples:
-
- Returns:
- [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.ImagePipelineOutput`] if `return_dict` is
- True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
- """
- # 0. Default height and width to unet
- height = height or self.unet.config.sample_size * self.vae_scale_factor
- width = width or self.unet.config.sample_size * self.vae_scale_factor
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(
- prompt=prompt,
- height=height,
- width=width,
- callback_steps=callback_steps,
- noise_level=noise_level,
- negative_prompt=negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 2. Define call parameters
- if prompt is not None and isinstance(prompt, str):
- batch_size = 1
- elif prompt is not None and isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- batch_size = prompt_embeds.shape[0]
-
- batch_size = batch_size * num_images_per_prompt
-
- device = self._execution_device
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- prior_do_classifier_free_guidance = prior_guidance_scale > 1.0
-
- # 3. Encode input prompt
- prior_prompt_embeds, prior_text_encoder_hidden_states, prior_text_mask = self._encode_prior_prompt(
- prompt=prompt,
- device=device,
- num_images_per_prompt=num_images_per_prompt,
- do_classifier_free_guidance=prior_do_classifier_free_guidance,
- )
-
- # 4. Prepare prior timesteps
- self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
- prior_timesteps_tensor = self.prior_scheduler.timesteps
-
- # 5. Prepare prior latent variables
- embedding_dim = self.prior.config.embedding_dim
- prior_latents = self.prepare_latents(
- (batch_size, embedding_dim),
- prior_prompt_embeds.dtype,
- device,
- generator,
- prior_latents,
- self.prior_scheduler,
- )
-
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- prior_extra_step_kwargs = self.prepare_prior_extra_step_kwargs(generator, eta)
-
- # 7. Prior denoising loop
- for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([prior_latents] * 2) if prior_do_classifier_free_guidance else prior_latents
- latent_model_input = self.prior_scheduler.scale_model_input(latent_model_input, t)
-
- predicted_image_embedding = self.prior(
- latent_model_input,
- timestep=t,
- proj_embedding=prior_prompt_embeds,
- encoder_hidden_states=prior_text_encoder_hidden_states,
- attention_mask=prior_text_mask,
- ).predicted_image_embedding
-
- if prior_do_classifier_free_guidance:
- predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
- predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
- predicted_image_embedding_text - predicted_image_embedding_uncond
- )
-
- prior_latents = self.prior_scheduler.step(
- predicted_image_embedding,
- timestep=t,
- sample=prior_latents,
- **prior_extra_step_kwargs,
- ).prev_sample
-
- if callback is not None and i % callback_steps == 0:
- callback(i, t, prior_latents)
-
- prior_latents = self.prior.post_process_latents(prior_latents)
-
- image_embeds = prior_latents
-
- # done prior
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- # 8. Encode input prompt
- prompt_embeds = self._encode_prompt(
- prompt=prompt,
- device=device,
- num_images_per_prompt=num_images_per_prompt,
- do_classifier_free_guidance=do_classifier_free_guidance,
- negative_prompt=negative_prompt,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_prompt_embeds,
- )
-
- # 9. Prepare image embeddings
- image_embeds = self.noise_image_embeddings(
- image_embeds=image_embeds,
- noise_level=noise_level,
- generator=generator,
- )
-
- if do_classifier_free_guidance:
- negative_prompt_embeds = torch.zeros_like(image_embeds)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- image_embeds = torch.cat([negative_prompt_embeds, image_embeds])
-
- # 10. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=device)
- timesteps = self.scheduler.timesteps
-
- # 11. Prepare latent variables
- num_channels_latents = self.unet.in_channels
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
- latents = self.prepare_latents(
- shape=shape,
- dtype=prompt_embeds.dtype,
- device=device,
- generator=generator,
- latents=latents,
- scheduler=self.scheduler,
- )
-
- # 12. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
- # 13. Denoising loop
- for i, t in enumerate(self.progress_bar(timesteps)):
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(
- latent_model_input,
- t,
- encoder_hidden_states=prompt_embeds,
- class_labels=image_embeds,
- cross_attention_kwargs=cross_attention_kwargs,
- ).sample
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- # 14. Post-processing
- image = self.decode_latents(latents)
-
- # Offload last model to CPU
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
- self.final_offload_hook.offload()
-
- # 15. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
diff --git a/spaces/deepset/retrieval-augmentation-svb/utils/__init__.py b/spaces/deepset/retrieval-augmentation-svb/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/extract_kp_videos.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/extract_kp_videos.py
deleted file mode 100644
index 21616a3b4b5077ffdce99621395237b4edcff58c..0000000000000000000000000000000000000000
--- a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/extract_kp_videos.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import os
-import cv2
-import time
-import glob
-import argparse
-import face_alignment
-import numpy as np
-from PIL import Image
-from tqdm import tqdm
-from itertools import cycle
-
-from torch.multiprocessing import Pool, Process, set_start_method
-
-class KeypointExtractor():
- def __init__(self, device):
- self.detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
- device=device)
-
- def extract_keypoint(self, images, name=None, info=True):
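- # For a list of frames, run detection per frame and reuse the previous frame's landmarks
- # when the detector returns the -1 sentinel; for a single image, retry on CUDA OOM and
- # return a (68, 2) array of -1 when no face is found.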
- if isinstance(images, list):
- keypoints = []
- if info:
- i_range = tqdm(images,desc='landmark Det:')
- else:
- i_range = images
-
- for image in i_range:
- current_kp = self.extract_keypoint(image)
- if np.mean(current_kp) == -1 and keypoints:
- keypoints.append(keypoints[-1])
- else:
- keypoints.append(current_kp[None])
-
- keypoints = np.concatenate(keypoints, 0)
- np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1))
- return keypoints
- else:
- while True:
- try:
- keypoints = self.detector.get_landmarks_from_image(np.array(images))[0]
- break
- except RuntimeError as e:
- if str(e).startswith('CUDA'):
- print("Warning: out of memory, sleep for 1s")
- time.sleep(1)
- else:
- print(e)
- break
- except TypeError:
- print('No face detected in this image')
- shape = [68, 2]
- keypoints = -1. * np.ones(shape)
- break
- if name is not None:
- np.savetxt(os.path.splitext(name)[0]+'.txt', keypoints.reshape(-1))
- return keypoints
-
-def read_video(filename):
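- # Decode every frame with OpenCV, convert BGR to RGB, and return a list of PIL Images.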
- frames = []
- cap = cv2.VideoCapture(filename)
- while cap.isOpened():
- ret, frame = cap.read()
- if ret:
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- frame = Image.fromarray(frame)
- frames.append(frame)
- else:
- break
- cap.release()
- return frames
-
-def run(data):
- filename, opt, device = data
- os.environ['CUDA_VISIBLE_DEVICES'] = device
- # KeypointExtractor requires a device; 'cuda' uses the GPU selected via CUDA_VISIBLE_DEVICES above
- kp_extractor = KeypointExtractor(device='cuda')
- images = read_video(filename)
- name = filename.split('/')[-2:]
- os.makedirs(os.path.join(opt.output_dir, name[-2]), exist_ok=True)
- kp_extractor.extract_keypoint(
- images,
- name=os.path.join(opt.output_dir, name[-2], name[-1])
- )
-
-if __name__ == '__main__':
- set_start_method('spawn')
- parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
- parser.add_argument('--input_dir', type=str, help='the folder of the input files')
- parser.add_argument('--output_dir', type=str, help='the folder of the output files')
- parser.add_argument('--device_ids', type=str, default='0,1')
- parser.add_argument('--workers', type=int, default=4)
-
- opt = parser.parse_args()
- filenames = list()
- VIDEO_EXTENSIONS_LOWERCASE = {'mp4'}
- VIDEO_EXTENSIONS = VIDEO_EXTENSIONS_LOWERCASE.union({f.upper() for f in VIDEO_EXTENSIONS_LOWERCASE})
- extensions = VIDEO_EXTENSIONS
-
- for ext in extensions:
- print(f'{opt.input_dir}/*.{ext}')
- # accumulate matches for every extension instead of overwriting the list on each pass
- filenames += sorted(glob.glob(f'{opt.input_dir}/*.{ext}'))
- print('Total number of videos:', len(filenames))
- pool = Pool(opt.workers)
- args_list = cycle([opt])
- device_ids = opt.device_ids.split(",")
- device_ids = cycle(device_ids)
- for data in tqdm(pool.imap_unordered(run, zip(filenames, args_list, device_ids))):
- pass
diff --git a/spaces/deepwisdom/MetaGPT/tests/metagpt/utils/test_pycst.py b/spaces/deepwisdom/MetaGPT/tests/metagpt/utils/test_pycst.py
deleted file mode 100644
index 07352eac26238080861007e946d8832d7e3d85cb..0000000000000000000000000000000000000000
--- a/spaces/deepwisdom/MetaGPT/tests/metagpt/utils/test_pycst.py
+++ /dev/null
@@ -1,136 +0,0 @@
-from metagpt.utils import pycst
-
-code = '''
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-from typing import overload
-
-@overload
-def add_numbers(a: int, b: int):
- ...
-
-@overload
-def add_numbers(a: float, b: float):
- ...
-
-def add_numbers(a: int, b: int):
- return a + b
-
-
-class Person:
- def __init__(self, name: str, age: int):
- self.name = name
- self.age = age
-
- def greet(self):
- return f"Hello, my name is {self.name} and I am {self.age} years old."
-'''
-
-documented_code = '''
-"""
-This is an example module containing a function and a class definition.
-"""
-
-
-def add_numbers(a: int, b: int):
- """This function is used to add two numbers and return the result.
-
- Parameters:
- a: The first integer.
- b: The second integer.
-
- Returns:
- int: The sum of the two numbers.
- """
- return a + b
-
-class Person:
- """This class represents a person's information, including name and age.
-
- Attributes:
- name: The person's name.
- age: The person's age.
- """
-
- def __init__(self, name: str, age: int):
- """Creates a new instance of the Person class.
-
- Parameters:
- name: The person's name.
- age: The person's age.
- """
- ...
-
- def greet(self):
- """
- Returns a greeting message including the name and age.
-
- Returns:
- str: The greeting message.
- """
- ...
-'''
-
-
-merged_code = '''
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-This is an example module containing a function and a class definition.
-"""
-
-from typing import overload
-
-@overload
-def add_numbers(a: int, b: int):
- ...
-
-@overload
-def add_numbers(a: float, b: float):
- ...
-
-def add_numbers(a: int, b: int):
- """This function is used to add two numbers and return the result.
-
- Parameters:
- a: The first integer.
- b: The second integer.
-
- Returns:
- int: The sum of the two numbers.
- """
- return a + b
-
-
-class Person:
- """This class represents a person's information, including name and age.
-
- Attributes:
- name: The person's name.
- age: The person's age.
- """
- def __init__(self, name: str, age: int):
- """Creates a new instance of the Person class.
-
- Parameters:
- name: The person's name.
- age: The person's age.
- """
- self.name = name
- self.age = age
-
- def greet(self):
- """
- Returns a greeting message including the name and age.
-
- Returns:
- str: The greeting message.
- """
- return f"Hello, my name is {self.name} and I am {self.age} years old."
-'''
-
-
-def test_merge_docstring():
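- # Merging the docstrings from `documented_code` into `code` should reproduce `merged_code` verbatim.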
- data = pycst.merge_docstring(code, documented_code)
- print(data)
- assert data == merged_code
diff --git a/spaces/diacanFperku/AutoGPT/ESET NOD32 Antivirus 12.2.29.0 Crack License Key (Lifetime) ((FULL)).md b/spaces/diacanFperku/AutoGPT/ESET NOD32 Antivirus 12.2.29.0 Crack License Key (Lifetime) ((FULL)).md
deleted file mode 100644
index 0f10f075be277bd42216ad10c54bed8ec3e2de14..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/ESET NOD32 Antivirus 12.2.29.0 Crack License Key (Lifetime) ((FULL)).md
+++ /dev/null
@@ -1,6 +0,0 @@
-ESET NOD32 Antivirus 12.2.29.0 Crack License Key (Lifetime) Download Zip ○ https://gohhs.com/2uFUFT
-
-Jump to ESET NOD32 Antivirus 14 License Key Lifetime — ESET NOD32 Antivirus 14 License Key Lifetime. EB5G-X2HW-AEAC-6VHE-5RVG. 4d29de3e1b
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Ek Tha Tiger 3 1080p Full Movie Download Fixed.md b/spaces/diacanFperku/AutoGPT/Ek Tha Tiger 3 1080p Full Movie Download Fixed.md
deleted file mode 100644
index 061b42cb2508c00d466596eabe5dee77e0709115..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Ek Tha Tiger 3 1080p Full Movie Download Fixed.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Ek Tha Tiger 3 1080p Full Movie Download DOWNLOAD ⭐ https://gohhs.com/2uFT4K
-
-123MKV Movies Download Bollywood, Hollywood, South Movies, Hindi Dubbed: You have lots of options on the ... It is one of the top sites for Tamil Movies Download. ... 420p; 720p; 1080p; HDRip; Bluray; DVDScr; DVDrip. 1fdad05405
-
-
-
diff --git a/spaces/digitalxingtong/Azuma-Bert-VITS2/models.py b/spaces/digitalxingtong/Azuma-Bert-VITS2/models.py
deleted file mode 100644
index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Azuma-Bert-VITS2/models.py
+++ /dev/null
@@ -1,707 +0,0 @@
-import copy
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-import attentions
-import monotonic_align
-
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-
-from commons import init_weights, get_padding
-from text import symbols, num_tones, num_languages
-class DurationDiscriminator(nn.Module): #vits2
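- # Scores whether a duration sequence looks realistic: encoder features are fused with a
- # projected duration channel and a sigmoid head returns one probability per frame. The
- # forward pass is run on both the reference (dur_r) and predicted (dur_hat) durations.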
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.dur_proj = nn.Conv1d(1, filter_channels, 1)
-
- self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
- self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
- self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- self.output_layer = nn.Sequential(
- nn.Linear(filter_channels, 1),
- nn.Sigmoid()
- )
-
- def forward_probability(self, x, x_mask, dur, g=None):
- dur = self.dur_proj(dur)
- x = torch.cat([x, dur], dim=1)
- x = self.pre_out_conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_1(x)
- x = self.drop(x)
- x = self.pre_out_conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.pre_out_norm_2(x)
- x = self.drop(x)
- x = x * x_mask
- x = x.transpose(1, 2)
- output_prob = self.output_layer(x)
- return output_prob
-
- def forward(self, x, x_mask, dur_r, dur_hat, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
-
- output_probs = []
- for dur in [dur_r, dur_hat]:
- output_prob = self.forward_probability(x, x_mask, dur, g)
- output_probs.append(output_prob)
-
- return output_probs
-
-class TransformerCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- n_flows=4,
- gin_channels=0,
- share_parameter=False
- ):
-
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
-
- self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None
-
- for i in range(n_flows):
- self.flows.append(
- modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-class StochasticDurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
- super().__init__()
- filter_channels = in_channels # this reassignment should be removed in a future version.
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.log_flow = modules.Log()
- self.flows = nn.ModuleList()
- self.flows.append(modules.ElementwiseAffine(2))
- for i in range(n_flows):
- self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.flows.append(modules.Flip())
-
- self.post_pre = nn.Conv1d(1, filter_channels, 1)
- self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- self.post_flows = nn.ModuleList()
- self.post_flows.append(modules.ElementwiseAffine(2))
- for i in range(4):
- self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
- self.post_flows.append(modules.Flip())
-
- self.pre = nn.Conv1d(in_channels, filter_channels, 1)
- self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
- self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
-
- def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
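- # Training (reverse=False): return the flow negative log-likelihood of the observed durations w.
- # Inference (reverse=True): sample log-durations by running the flows backwards from noise.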
- x = torch.detach(x)
- x = self.pre(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.convs(x, x_mask)
- x = self.proj(x) * x_mask
-
- if not reverse:
- flows = self.flows
- assert w is not None
-
- logdet_tot_q = 0
- h_w = self.post_pre(w)
- h_w = self.post_convs(h_w, x_mask)
- h_w = self.post_proj(h_w) * x_mask
- e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
- z_q = e_q
- for flow in self.post_flows:
- z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
- logdet_tot_q += logdet_q
- z_u, z1 = torch.split(z_q, [1, 1], 1)
- u = torch.sigmoid(z_u) * x_mask
- z0 = (w - u) * x_mask
- logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
- logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
-
- logdet_tot = 0
- z0, logdet = self.log_flow(z0, x_mask)
- logdet_tot += logdet
- z = torch.cat([z0, z1], 1)
- for flow in flows:
- z, logdet = flow(z, x_mask, g=x, reverse=reverse)
- logdet_tot = logdet_tot + logdet
- nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
- return nll + logq # [b]
- else:
- flows = list(reversed(self.flows))
- flows = flows[:-2] + [flows[-1]] # remove a useless vflow
- z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
- for flow in flows:
- z = flow(z, x_mask, g=x, reverse=reverse)
- z0, z1 = torch.split(z, [1, 1], 1)
- logw = z0
- return logw
-
-
-class DurationPredictor(nn.Module):
- def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
- super().__init__()
-
- self.in_channels = in_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
-
- self.drop = nn.Dropout(p_dropout)
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_1 = modules.LayerNorm(filter_channels)
- self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
- self.norm_2 = modules.LayerNorm(filter_channels)
- self.proj = nn.Conv1d(filter_channels, 1, 1)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, in_channels, 1)
-
- def forward(self, x, x_mask, g=None):
- x = torch.detach(x)
- if g is not None:
- g = torch.detach(g)
- x = x + self.cond(g)
- x = self.conv_1(x * x_mask)
- x = torch.relu(x)
- x = self.norm_1(x)
- x = self.drop(x)
- x = self.conv_2(x * x_mask)
- x = torch.relu(x)
- x = self.norm_2(x)
- x = self.drop(x)
- x = self.proj(x * x_mask)
- return x * x_mask
-
-
-class TextEncoder(nn.Module):
- def __init__(self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=0):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.gin_channels = gin_channels
- self.emb = nn.Embedding(len(symbols), hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
- self.tone_emb = nn.Embedding(num_tones, hidden_channels)
- nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5)
- self.language_emb = nn.Embedding(num_languages, hidden_channels)
- nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5)
- self.bert_proj = nn.Conv1d(1024, hidden_channels, 1)
-
- self.encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, tone, language, bert, g=None):
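- # Sum the symbol, tone and language embeddings with the projected BERT features, scale by
- # sqrt(hidden_channels), then run the transformer encoder and project to mean/log-variance.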
- x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
-
- x = self.encoder(x * x_mask, x_mask, g=g)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return x, m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers,
- gin_channels=gin_channels, mean_only=True))
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
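- # HiFi-GAN style decoder: a stack of transposed-convolution upsamplers, each followed by a
- # bank of multi-receptive-field residual blocks whose outputs are averaged.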
- def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
- resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(weight_norm(
- ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
- k, u, padding=(k - u) // 2)))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
- ])
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList([
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ])
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-class ReferenceEncoder(nn.Module):
- '''
- inputs --- [N, Ty/r, n_mels*r] mels
- outputs --- [N, ref_enc_gru_size]
- '''
-
- def __init__(self, spec_channels, gin_channels=0):
-
- super().__init__()
- self.spec_channels = spec_channels
- ref_enc_filters = [32, 32, 64, 64, 128, 128]
- K = len(ref_enc_filters)
- filters = [1] + ref_enc_filters
- convs = [weight_norm(nn.Conv2d(in_channels=filters[i],
- out_channels=filters[i + 1],
- kernel_size=(3, 3),
- stride=(2, 2),
- padding=(1, 1))) for i in range(K)]
- self.convs = nn.ModuleList(convs)
- # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) for i in range(K)])
-
- out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K)
- self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels,
- hidden_size=256 // 2,
- batch_first=True)
- self.proj = nn.Linear(128, gin_channels)
-
- def forward(self, inputs, mask=None):
- N = inputs.size(0)
- out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs]
- for conv in self.convs:
- out = conv(out)
- # out = wn(out)
- out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K]
-
- out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K]
- T = out.size(1)
- N = out.size(0)
- out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K]
-
- self.gru.flatten_parameters()
- memory, out = self.gru(out) # out --- [1, N, 128]
-
- return self.proj(out.squeeze(0))
-
- def calculate_channels(self, L, kernel_size, stride, pad, n_convs):
- for i in range(n_convs):
- L = (L - kernel_size + 2 * pad) // stride + 1
- return L
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=256,
- gin_channels=256,
- use_sdp=True,
- n_flow_layer = 4,
- n_layers_trans_flow = 3,
- flow_share_parameter = False,
- use_transformer_flow = True,
- **kwargs):
-
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
- self.n_layers_trans_flow = n_layers_trans_flow
- self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True)
- self.use_sdp = use_sdp
- self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False)
- self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01)
- self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6)
- self.current_mas_noise_scale = self.mas_noise_scale_initial
- if self.use_spk_conditioned_encoder and gin_channels > 0:
- self.enc_gin_channels = gin_channels
- self.enc_p = TextEncoder(n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- gin_channels=self.enc_gin_channels)
- self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
- upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
- self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16,
- gin_channels=gin_channels)
- if use_transformer_flow:
- self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter)
- else:
- self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels)
- self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
- self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
-
- if n_speakers >= 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
- else:
- self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)
-
- def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert):
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- with torch.no_grad():
- # negative cross-entropy
- s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
- neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
- neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2),
- s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
- neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
- neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
- if self.use_noise_scaled_mas:
- epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale
- neg_cent = neg_cent + epsilon
-
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
-
- w = attn.sum(2)
-
- l_length_sdp = self.sdp(x, x_mask, w, g=g)
- l_length_sdp = l_length_sdp / torch.sum(x_mask)
-
- logw_ = torch.log(w + 1e-6) * x_mask
- logw = self.dp(x, x_mask, g=g)
- l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging
-
- l_length = l_length_dp + l_length_sdp
-
- # expand prior
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
-
- z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
- o = self.dec(z_slice, g=g)
- return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_)
-
- def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None):
- #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)
- # g = self.gst(y)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1)
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g)
- logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)
- w = torch.exp(logw) * x_mask * length_scale
- w_ceil = torch.ceil(w)
- y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
- attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
- attn = commons.generate_path(w_ceil, attn_mask)
-
- m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
- logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1,
- 2) # [b, t', t], [b, t, d] -> [b, d, t']
-
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:, :, :max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
diff --git a/spaces/digitalxingtong/Jiaran-Bert-VITS2/text/chinese.py b/spaces/digitalxingtong/Jiaran-Bert-VITS2/text/chinese.py
deleted file mode 100644
index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiaran-Bert-VITS2/text/chinese.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import re
-
-import cn2an
-from pypinyin import lazy_pinyin, Style
-
-from text import symbols
-from text.symbols import punctuation
-from text.tone_sandhi import ToneSandhi
-
-current_file_path = os.path.dirname(__file__)
-pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
-
-import jieba.posseg as psg
-
-
-rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- '$': '.',
- '“': "'",
- '”': "'",
- '‘': "'",
- '’': "'",
- '(': "'",
- ')': "'",
- '(': "'",
- ')': "'",
- '《': "'",
- '》': "'",
- '【': "'",
- '】': "'",
- '[': "'",
- ']': "'",
- '—': "-",
- '~': "-",
- '~': "-",
- '「': "'",
- '」': "'",
-
-}
-
-tone_modifier = ToneSandhi()
-
-def replace_punctuation(text):
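- # Map Chinese/full-width punctuation to ASCII via rep_map, then drop every character that is
- # neither a CJK ideograph nor one of the kept punctuation marks.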
- text = text.replace("嗯", "恩").replace("呣","母")
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
-
- return replaced_text
-
-def g2p(text):
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
- phones, tones, word2ph = _g2p(sentences)
- assert sum(word2ph) == len(phones)
- assert len(word2ph) == len(text) # this assert can fail for some inputs; wrap it in try/except if needed
- phones = ['_'] + phones + ["_"]
- tones = [0] + tones + [0]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-def _get_initials_finals(word):
- initials = []
- finals = []
- orig_initials = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.INITIALS)
- orig_finals = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for c, v in zip(orig_initials, orig_finals):
- initials.append(c)
- finals.append(v)
- return initials, finals
-
-
-def _g2p(segments):
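- # Per segment: strip Latin letters, segment with jieba POS tagging, apply tone sandhi, split
- # each word into pinyin initials/finals, then map every syllable to its phoneme symbols,
- # tones, and per-character phone counts (word2ph).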
- phones_list = []
- tones_list = []
- word2ph = []
- for seg in segments:
- pinyins = []
- # Remove all English words from the sentence
- seg = re.sub('[a-zA-Z]+', '', seg)
- seg_cut = psg.lcut(seg)
- initials = []
- finals = []
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
- for word, pos in seg_cut:
- if pos == 'eng':
- continue
- sub_initials, sub_finals = _get_initials_finals(word)
- sub_finals = tone_modifier.modified_tone(word, pos,
- sub_finals)
- initials.append(sub_initials)
- finals.append(sub_finals)
-
- # assert len(sub_initials) == len(sub_finals) == len(word)
- initials = sum(initials, [])
- finals = sum(finals, [])
- #
- for c, v in zip(initials, finals):
- raw_pinyin = c+v
- # NOTE: post process for pypinyin outputs
- # we discriminate i, ii and iii
- if c == v:
- assert c in punctuation
- phone = [c]
- tone = '0'
- word2ph.append(1)
- else:
- v_without_tone = v[:-1]
- tone = v[-1]
-
- pinyin = c+v_without_tone
- assert tone in '12345'
-
- if c:
- # syllable with an initial (多音节)
- v_rep_map = {
- "uei": 'ui',
- 'iou': 'iu',
- 'uen': 'un',
- }
- if v_without_tone in v_rep_map.keys():
- pinyin = c+v_rep_map[v_without_tone]
- else:
- # syllable without an initial (单音节)
- pinyin_rep_map = {
- 'ing': 'ying',
- 'i': 'yi',
- 'in': 'yin',
- 'u': 'wu',
- }
- if pinyin in pinyin_rep_map.keys():
- pinyin = pinyin_rep_map[pinyin]
- else:
- single_rep_map = {
- 'v': 'yu',
- 'e': 'e',
- 'i': 'y',
- 'u': 'w',
- }
- if pinyin[0] in single_rep_map.keys():
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
-
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
- phone = pinyin_to_symbol_map[pinyin].split(' ')
- word2ph.append(len(phone))
-
- phones_list += phone
- tones_list += [int(tone)] * len(phone)
- return phones_list, tones_list, word2ph
-
-
-
-def text_normalize(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- text = replace_punctuation(text)
- return text
-
-def get_bert_feature(text, word2ph):
- from text import chinese_bert
- return chinese_bert.get_bert_feature(text, word2ph)
-
-if __name__ == '__main__':
- from text.chinese_bert import get_bert_feature
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
-
-
-# # Example usage
-# text = "这是一个示例文本:,你好!这是一个测试...."
-# print(g2p_paddle(text))  # Output: 这是一个示例文本你好这是一个测试
diff --git a/spaces/ds520/bingo/postcss.config.js b/spaces/ds520/bingo/postcss.config.js
deleted file mode 100644
index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000
--- a/spaces/ds520/bingo/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py
deleted file mode 100644
index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000
--- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import re
-import opencc
-
-
-dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou',
- 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing',
- 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang',
- 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan',
- 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen',
- 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'}
-
-converters = {}
-
-for dialect in dialects.values():
- try:
- converters[dialect] = opencc.OpenCC(dialect)
-    except Exception:  # skip dialects whose OpenCC config is not available
- pass
-
-
-def ngu_dialect_to_ipa(text, dialect):
- dialect = dialects[dialect]
- text = converters[dialect].convert(text).replace('-','').replace('$',' ')
- text = re.sub(r'[、;:]', ',', text)
- text = re.sub(r'\s*,\s*', ', ', text)
- text = re.sub(r'\s*。\s*', '. ', text)
-    text = re.sub(r'\s*\?\s*', '? ', text)  # escape '?' so it is matched literally
- text = re.sub(r'\s*!\s*', '! ', text)
- text = re.sub(r'\s*$', '', text)
- return text
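For context, here is a minimal usage sketch of the converter above. It is only a sketch: it assumes the custom OpenCC dialect configurations this repo relies on (e.g. 'suzhou') are actually installed, and that the module is importable as text.ngu_dialect.

```python
# Hypothetical usage; "SZ" selects the 'suzhou' OpenCC config registered above.
from text.ngu_dialect import ngu_dialect_to_ipa

ipa = ngu_dialect_to_ipa("侬好,吃饭了伐?", "SZ")
print(ipa)  # IPA string with punctuation normalized to ", . ? !"
```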
diff --git a/spaces/eaglelandsonce/UploadaDocAskaQuestion/utils.py b/spaces/eaglelandsonce/UploadaDocAskaQuestion/utils.py
deleted file mode 100644
index e919dd00dfafa5a6b5449907bb54e8421bd54577..0000000000000000000000000000000000000000
--- a/spaces/eaglelandsonce/UploadaDocAskaQuestion/utils.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import os
-from langchain.embeddings import OpenAIEmbeddings
-from langchain.vectorstores import Chroma
-
-
-# Load PDF, DOCX, TXT, or CSV files into LangChain Documents, dispatching on the file extension with elif statements
-def load_document(file):
- import os
- name, extension = os.path.splitext(file)
-
- if extension == '.pdf':
- from langchain.document_loaders import PyPDFLoader
- print(f'Loading {file}')
- loader = PyPDFLoader(file)
- elif extension == '.docx':
- from langchain.document_loaders import Docx2txtLoader
- print(f'Loading {file}')
- loader = Docx2txtLoader(file)
- elif extension == '.txt':
- from langchain.document_loaders import TextLoader
- loader = TextLoader(file)
- elif extension == '.csv':
- from langchain.document_loaders import CSVLoader
- loader = CSVLoader(file)
- else:
- print('Document format is not supported!')
- return None
-
- data = loader.load()
- return data
-
-
-# Chunk the data into overlapping pieces for embedding
-def chunk_data(data, chunk_size=256, chunk_overlap=20):
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
- chunks = text_splitter.split_documents(data)
- return chunks
-
-
-# Create embeddings with OpenAIEmbeddings() and store them in a Chroma vector store
-def create_embeddings(chunks):
- embeddings = OpenAIEmbeddings()
- vector_store = Chroma.from_documents(chunks, embeddings)
- return vector_store
-
-# Ask a question against the vector store; this combines RetrievalQA with ChatOpenAI, though it is not the only way to do this
-def ask_and_get_answer(vector_store, q, k=3):
- from langchain.chains import RetrievalQA
- from langchain.chat_models import ChatOpenAI
-    # use the gpt-3.5-turbo model with temperature=1 for relatively varied answers
- llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=1)
-    # build a similarity retriever that returns the top-k most relevant chunks for each query
- retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': k})
- chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
- answer = chain.run(q)
- return answer
-
-
-# return the embedding cost (using tiktoken)
-def calculate_embedding_cost(texts):
- import tiktoken
- enc = tiktoken.encoding_for_model('text-embedding-ada-002')
- total_tokens = sum([len(enc.encode(page.page_content)) for page in texts])
- return total_tokens, total_tokens / 1000 * 0.0004
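Taken together, the helpers above form a small load → chunk → embed → ask pipeline. Below is a minimal sketch of how they might be chained; the file name, the question string, setting OPENAI_API_KEY, and importing the module as `utils` are assumptions for illustration, not part of the original Space.

```python
import os
from utils import (load_document, chunk_data, calculate_embedding_cost,
                   create_embeddings, ask_and_get_answer)

os.environ["OPENAI_API_KEY"] = "sk-..."            # assumed: required by the OpenAI-backed helpers

data = load_document("example.pdf")                # load a supported file into LangChain Documents
chunks = chunk_data(data, chunk_size=256)          # split into overlapping chunks
tokens, cost = calculate_embedding_cost(chunks)    # estimate the embedding cost up front
print(f"{tokens} tokens, ~${cost:.4f}")

vector_store = create_embeddings(chunks)           # embed and index the chunks in Chroma
print(ask_and_get_answer(vector_store, "What is this document about?", k=3))
```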
diff --git a/spaces/elonmuskceo/persistent-data/README.md b/spaces/elonmuskceo/persistent-data/README.md
deleted file mode 100644
index 5daa71cbf1491b74952c1b9304911007f359f8d9..0000000000000000000000000000000000000000
--- a/spaces/elonmuskceo/persistent-data/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Persistent Data
-emoji: 🐨
-colorFrom: pink
-colorTo: gray
-sdk: gradio
-sdk_version: 2.4.2
-app_file: app.py
-pinned: false
----
-
-# Configuration
diff --git a/spaces/epexVfeibi/Imagedeblurr/24 Season 5 Complete Dvdrip X264 Mkv By Riddlera.md b/spaces/epexVfeibi/Imagedeblurr/24 Season 5 Complete Dvdrip X264 Mkv By Riddlera.md
deleted file mode 100644
index 5ca21955d463207f0b394b95b13916855253b6b6..0000000000000000000000000000000000000000
--- a/spaces/epexVfeibi/Imagedeblurr/24 Season 5 Complete Dvdrip X264 Mkv By Riddlera.md
+++ /dev/null
@@ -1,6 +0,0 @@
-24 Season 5 Complete Dvdrip X264 Mkv By Riddlera DOWNLOAD >>> https://jinyurl.com/2uEph8
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/erbanku/gpt-academic/config.py b/spaces/erbanku/gpt-academic/config.py
deleted file mode 100644
index 4e79c2ad91504bb7b3de2fef86d7096679290b18..0000000000000000000000000000000000000000
--- a/spaces/erbanku/gpt-academic/config.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# [step 1]>> e.g.: API_KEY = "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" (this key is invalid)
-API_KEY = "sk-此处填API密钥" # several API keys may be given at once, separated by commas, e.g. API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey1,fkxxxx-api2dkey2"
-
-# [step 2]>> set to True to use a proxy; if deploying directly on an overseas server, leave this unchanged
-USE_PROXY = False
-if USE_PROXY:
-    # format is [protocol]:// [address] :[port]; remember to set USE_PROXY to True first; if deploying directly on an overseas server, leave this unchanged
-    # e.g. "socks5h://localhost:11284"
-    # [protocol] is usually socks5h or http; e.g. v2**y and ss* default to socks5h locally, while cl**h defaults to http
-    # [address] localhost or 127.0.0.1 is almost always correct (localhost means the proxy software runs on this machine)
-    # [port] look in your proxy software's settings; the UI differs between tools, but the port number is usually shown prominently
-
-    # proxy address: open your proxy/VPN software to check the protocol (socks5/http), address (localhost) and port (11284)
-    proxies = {
-        #          [protocol]:// [address]  :[port]
-        "http": "socks5h://localhost:11284",
-        "https": "socks5h://localhost:11284",
-    }
-else:
-    proxies = None
-
-# [step 3]>> how many threads may access OpenAI concurrently in multi-threaded function plugins. Free trial users are limited to 3 requests per minute, Pay-as-you-go users to 3500 per minute
-# in short: free users should use 3; users with a credit card bound to their OpenAI account can use 16 or higher. To raise the limit see: https://platform.openai.com/docs/guides/rate-limits/overview
-DEFAULT_WORKER_NUM = 3
-
-
-# [step 4]>> the following settings can improve the experience, but in most cases they do not need to be changed
-# height of the chat window
-CHATBOT_HEIGHT = 1115
-
-# code highlighting
-CODE_HIGHLIGHT = True
-
-# window layout
-LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT" (side-by-side) # "TOP-DOWN" (stacked)
-DARK_MODE = True # enable dark mode
-
-# how long to wait after sending a request to OpenAI before treating it as timed out
-TIMEOUT_SECONDS = 30
-
-# port for the web page; -1 means a random port
-WEB_PORT = -1
-
-# retry limit if OpenAI does not respond (slow network, proxy failure, invalid key)
-MAX_RETRY = 2
-
-# OpenAI model selection (gpt-4 is currently only available to approved accounts)
-LLM_MODEL = "gpt-3.5-turbo" # optionally "chatglm"
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
-
-# execution device (CPU/GPU) for local LLMs such as ChatGLM
-LOCAL_MODEL_DEVICE = "cpu" # optionally "cuda"
-
-# number of parallel gradio threads (no need to change)
-CONCURRENT_COUNT = 100
-
-# username/password authentication (no need to change) (this feature is unstable; it depends on the gradio version and the network, and is not recommended for local use)
-# [("username", "password"), ("username2", "password2"), ...]
-AUTHENTICATION = []
-
-# URL redirection, used to substitute a different API_URL (normally, do not change this!!)
-# (high-risk setting! By changing it you fully expose your API key and conversation privacy to the middleman you configure!)
-# format: {"https://api.openai.com/v1/chat/completions": "put the redirected api.openai.com URL here"}
-# e.g. API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://ai.open.com/api/conversation"}
-API_URL_REDIRECT = {}
-
-# set this if the app must run under a sub-path (normally, do not change this!!) (main.py must also be modified for it to take effect!)
-CUSTOM_PATH = "/"
-
-# if you want to use NewBing, put your long NewBing cookie string here
-NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
-NEWBING_COOKIES = """
-your bing cookies here
-"""
\ No newline at end of file
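The proxies value above follows the usual scheme-to-proxy-URL mapping, so it can be passed straight to an HTTP client. A quick illustrative sketch, assuming the file is importable as `config`; the endpoint URL here is only an example:

```python
# Illustrative only: proxies is None when USE_PROXY is False, which requests accepts as "no proxy".
import requests
from config import proxies

resp = requests.get("https://api.openai.com/v1/models", proxies=proxies, timeout=30)
print(resp.status_code)
```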
diff --git a/spaces/exbert-project/exbert/client/src/ts/data/AttentionCapsule.ts b/spaces/exbert-project/exbert/client/src/ts/data/AttentionCapsule.ts
deleted file mode 100644
index e945f36da58583984e752ed2b00ce3f58c693e5a..0000000000000000000000000000000000000000
--- a/spaces/exbert-project/exbert/client/src/ts/data/AttentionCapsule.ts
+++ /dev/null
@@ -1,124 +0,0 @@
-import * as _ from 'lodash'
-import * as x_ from '../etc/_Tools'
-import * as tp from '../etc/types'
-import * as tf from '@tensorflow/tfjs'
-
-/**
- * Notes:
- *
- * - Also encapsulate the CLS/SEP info vs. no CLS/SEP info
- * - When layer format changes from list, drop the index into conf.layer
- */
-
-const bpeTokens = ["[CLS]", "[SEP]", "", " ", "<|endoftext|>"]
-const findBadIndexes = (x: tp.FullSingleTokenInfo[]) => x_.findAllIndexes(x.map(t => t.text), (a) => _.includes(bpeTokens, a))
-
-export function makeFromMetaResponse(r:tp.AttentionResponse, isZeroed: boolean){
- const key = 'aa' // Change this if backend response changes to be simpler
- const currPair = r[key]
- const left = currPair.left
- const right = currPair.right
- const leftZero = x_.findAllIndexes(left.map(t => t.text), (a) => _.includes(bpeTokens, a))
- const rightZero = x_.findAllIndexes(right.map(t => t.text), (a) => _.includes(bpeTokens, a))
- return new AttentionWrapper(currPair.att, [leftZero, rightZero], isZeroed)
-}
-
-export class AttentionWrapper {
- protected _att:number[][][]
- protected _attTensor:tf.Tensor3D
- protected _zeroedAttTensor:tf.Tensor3D
-
- badToks:[number[], number[]] // Indexes for the CLS and SEP tokens
- isZeroed: boolean
- nLayers = 12;
- nHeads = 12;
-
- constructor(att:number[][][], badToks:[number[], number[]]=[[],[]], isZeroed=true){
- this.init(att, badToks, isZeroed)
- }
-
-    init(att:number[][][], badToks:[number[], number[]]=[[],[]], isZeroed: boolean) {
- this.isZeroed = isZeroed
- this._att = att;
- this._zeroedAttTensor = zeroRowCol(tf.tensor3d(att), badToks[0], badToks[1])
-        this._attTensor = tf.tensor3d(att) // created after the zeroed copy: the buffer modifications in zeroRowCol would otherwise leak into this tensor too.
- this.badToks = badToks;
- }
-
-    updateFromNormal(r:tp.AttentionResponse, isZeroed: boolean){
- const key = 'aa' // Change this if backend response changes to be simpler
- const currPair = r[key]
- const left = currPair.left
- const right = currPair.right
-
- const leftZero = findBadIndexes(left)
- const rightZero = findBadIndexes(right)
- this.init(currPair.att, [leftZero, rightZero], isZeroed)
- }
-
- get attTensor() {
- const tens = this.isZeroed ? this._zeroedAttTensor : this._attTensor
- return tens
- }
-
- get att() {
- return this.attTensor.arraySync()
- }
-
- zeroed(): boolean
- zeroed(val:boolean): this
- zeroed(val?) {
- if (val == null) return this.isZeroed
- this.isZeroed = val
- return this
- }
-
- toggleZeroing() {
- this.zeroed(!this.zeroed())
- }
-
- protected _byHeads(heads:number[]):tf.Tensor2D {
- if (heads.length == 0) {
- return tf.zerosLike(this._byHead(0))
- }
-
- return (this.attTensor.gather(heads, 0).sum(0))
- }
-
- protected _byHead(head:number):tf.Tensor2D {
- return (this.attTensor.gather([head], 0).squeeze([0]))
- }
-
- byHeads(heads:number[]):number[][] {
- return this._byHeads(heads).arraySync()
- }
-
- byHead(head:number):number[][] {
- return this._byHead(head).arraySync()
- }
-}
-
-function zeroRowCol(tens:tf.Tensor3D, rows:number[], cols:number[]):tf.Tensor3D {
- let outTens = tens.clone()
- let atb = outTens.bufferSync()
- _.range(atb.shape[0]).forEach((head) => {
- _.range(atb.shape[1]).forEach((i) => {
- // Set rows to 0
- if (_.includes(rows, i)) {
- _.range(atb.shape[2]).forEach((j) => {
- atb.set(0, head, i, j)
- })
- }
-
- // Set cols to 0
- _.range(atb.shape[2]).forEach((j) => {
- if (_.includes(cols, j))
- _.range(atb.shape[1]).forEach((i) => {
- atb.set(0, head, i, j)
- })
- })
- })
- })
-
- return outTens
-}
\ No newline at end of file
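The zeroRowCol helper above is the core of hiding attention to special tokens such as [CLS]/[SEP]: it blanks the corresponding query rows and key columns in every head. Here is the same idea sketched in NumPy for readability; the shapes and token indexes are made up for the example.

```python
import numpy as np

def zero_row_col(att: np.ndarray, rows, cols) -> np.ndarray:
    """att has shape (heads, tokens, tokens); zero the given query rows and key columns."""
    out = att.copy()
    out[:, rows, :] = 0.0   # attention *from* the masked tokens
    out[:, :, cols] = 0.0   # attention *to* the masked tokens
    return out

att = np.random.rand(12, 5, 5)                         # 12 heads over 5 tokens
masked = zero_row_col(att, rows=[0, 4], cols=[0, 4])   # e.g. [CLS] at index 0, [SEP] at index 4
```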
diff --git a/spaces/falterWliame/Face_Mask_Detection/Fileviewpro Lisans Anahtar.md b/spaces/falterWliame/Face_Mask_Detection/Fileviewpro Lisans Anahtar.md
deleted file mode 100644
index 4b408380fb0ed43f7e0c94da5511b905f8072e8d..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Fileviewpro Lisans Anahtar.md
+++ /dev/null
@@ -1,49 +0,0 @@
-
-FileViewPro License Key: The Easy Way to Open File Extensions
-FileViewPro is a program that helps you open any file extension on Windows. It supports more than 120 file formats, including PDF, RAR, Word, Excel, JPG, MP4, MP3 and more. It also lets you edit, convert and share files. With FileViewPro, no file on your computer stays unopened!
-In this article we will explain how to download FileViewPro for free, how to install it, and how to get a free license key. You will also learn how to crack FileViewPro. Let's get started!
-fileviewpro license key DOWNLOAD ✔✔✔ https://urlca.com/2uDc1N
-How Do I Download and Install FileViewPro for Free?
-You can download FileViewPro from the official website: https://www.fileviewpro.com/en/download
-Run the downloaded "Setup_2016.exe" file and choose the installation language. Click the "Next" button and follow the steps of the setup wizard. FileViewPro may suggest installing the WinThruster program to speed up your computer. If you want to skip this step, click the "Skip" button.
-When the installation is finished, you can click the "Register" button in the top right corner of the user interface to use all of FileViewPro's features.
-How Do I Get a FileViewPro 2016 License Key?
-After installing FileViewPro, you may want to activate it for free. Don't worry, here we offer you a free FileViewPro 2016 License Key.
-FileViewPro 2016 License Key:
-
-8765F4D3S45F678765F4
-7B6V5C4XC5V6B7N87B6
-B6V5C45V6B7N8 76V5B7
-8N7B6V5C4XC5V6B7NB6
-7NB6V5C4C5V6B7N8N7B6
-
-Copy one of these license keys and paste it into FileViewPro's registration window. Then click the "Activate" button and you will see that the program has been activated.
-
-How Do I Crack FileViewPro?
-If the free FileViewPro 2016 License Key does not work, there are many websites where you can download a FileViewPro 2016 crack. But keep in mind that these sites are not trustworthy and may contain viruses or malware. Therefore, download and use them at your own risk.
-One of these sites
-
-
-What Are the Advantages and Disadvantages of FileViewPro?
-FileViewPro is a useful program that helps you open many file formats. But like every program, FileViewPro has advantages and disadvantages. Here are some of them:
-Advantages
-
-With FileViewPro you do not need to download more than one program. You can open all your files with a single program.
-Besides viewing files, FileViewPro also lets you edit, convert and share them. So you have full control over your files.
-FileViewPro has a user-friendly interface. You can open your files easily by dragging and dropping them or by selecting the file you want to open.
-FileViewPro offers support in 34 different languages. So you can open and manage your files in your own language.
-
-Disadvantages
-
-FileViewPro is not free. To use all of its features you have to pay 39.95 dollars per year.
-FileViewPro sometimes comes bundled with other potentially unwanted programs. These programs can slow down your computer or put its security at risk.
-Although FileViewPro supports more than 120 file formats, it may not be able to open some rare or new file formats. In that case you may have to look for another program.
-FileViewPro may occasionally run into errors or crashes. This can cause your files to be damaged or lost.
-
-What Are the Alternatives to FileViewPro?
-If you did not like FileViewPro or are looking for another program, there are a few programs we can suggest as FileViewPro alternatives. These are:
-
-File Viewer Plus : File Viewer Plus 4 is a program that can open more than 400 file formats. You can view common file types such as office documents, PDFs and image files with a single program. You can also edit, convert and save files.
-The Unarchiver : The Unarchiver is the world's most loved RAR opener for Mac. Sleeker than Mac's native tool, this program supports all known archive types. It can also open encrypted or corrupted archives.
-Free File Viewer : Free File Viewer is a free program that can open more than 200 file formats. You can view popular file types such as PDF, DOCX, XLSX, PNG, JPG, MP3, MP4. You can also print files or send them by e-mail. d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/fcossio/measure-fiber-diameter/line_fit.py b/spaces/fcossio/measure-fiber-diameter/line_fit.py
deleted file mode 100644
index 606e4869fcdda192eaf7020e4041f8de8a8381c2..0000000000000000000000000000000000000000
--- a/spaces/fcossio/measure-fiber-diameter/line_fit.py
+++ /dev/null
@@ -1,168 +0,0 @@
-
-import numpy as np
-from scipy.optimize import curve_fit
-from typing import Tuple, List
-import cv2
-
-point = Tuple[float,float]
-line = Tuple[point, point]
-measurements = List[line]
-
-class LineFit:
- def __init__(self, n: int, step_size:float) -> None:
- """Model that fits a line in a binary image and measures diameter of fibers
-
- :param n: number of measurements to do along the fitted line.
- :param step_size: step size of diameter measurement (in pixels). Can be fraction.
- """
- self.n = n
- self.step_size = step_size
-
- def get_coordinates(self, im, value_for_mask):
- #I = rgb2gray(I_orig) #we can delete this if we get binary images
- mask = im > value_for_mask
- fiber_coor = np.argwhere(mask)
- x = fiber_coor[:, 1]
- y = fiber_coor[:, 0]
- return x, y
-
- def func_line(self, x, a, b):
- return a * x + b
-
- def func_line_inv(self, y, a, b):
- return (y - b)/a
-
- def get_fited_line_x_y(self, im):
- value_for_mask = (int(np.max(im))+int(np.min(im)))/2 # Pixels to mask in get_coordinate
- x, y = self.get_coordinates(im, value_for_mask)
- popt, pcov = curve_fit(self.func_line, x, y)
- return x, y, popt, pcov
-
- def get_fited_line_y_x(self, im):
- value_for_mask = (int(np.max(im))+int(np.min(im)))/2 # Pixels to mask in get_coordinate
- x, y = self.get_coordinates(im, value_for_mask)
- popt, pcov = curve_fit(self.func_line, y, x)
- return x, y, popt, pcov
-
- def get_better_fit(self, x, y, popt, popt_inv, pcov, pcov_inv):
- diagonal = np.diagonal(pcov)
- diagonal_inv = np.diagonal(pcov_inv)
- if np.less(diagonal, diagonal_inv).all() == True:
- popt_fit = popt
- x_line = np.arange(0, max(x), 1)
- y_line = []
- for i in x_line:
- a = self.func_line(x_line[i], *popt)
- y_line.append(a)
- y_fit = y_line
- x_fit = x_line
- p1 = [x_fit[0],y_fit[0]]
- p2 = [x_fit[-1],y_fit[-1]]
- elif np.less(diagonal, diagonal_inv).all() == False:
- popt_fit = [1/popt_inv[0], (-popt_inv[1])/popt_inv[0]]
- y_line = np.arange(0, max(y), 1)
- x_line = []
- for i in y_line:
- a = self.func_line(y_line[i], *popt_inv)
- x_line.append(a)
- y_fit = y_line
- x_fit = x_line
- p1 = [x_fit[0],y_fit[0]]
- p2 = [x_fit[-1],y_fit[-1]]
-        else:
-            raise ValueError("Could not choose between the x->y and y->x fits from the covariance diagonals")
- return popt_fit, x_fit, y_fit, p1, p2
-
- def get_point(self, t, p1, p2):
- dx = p2[0]-p1[0]
- dy = p2[1]-p1[1]
- p = [(dx * t + p1[0]), (dy * t + p1[1])]
- return p, dx, dy
-
- def get_normal_vector(self, t, dx, dy, p3):
- n_pos = [-dy, dx]
- mag_pos = np.linalg.norm(n_pos)
- nu_pos = n_pos/mag_pos
- u_pos = [(nu_pos[0] * t + p3[0]), (nu_pos[1] * t + p3[1])]
- return u_pos
-
- def is_inside(self, im, pos):
- if not (0 <= pos[0] < im.shape[0]):
- return False
- if not (0 <= pos[1] < im.shape[1]):
- return False
- return True
-
- def get_pixels_half (self, pos_or_neg, im, dx, dy, p3):
-        color_threshold = (int(np.max(im))+int(np.min(im)))/2
-        pixels = None  # stays None if no background pixel is found in this direction
-        for ts in (range(len(im[0]))):
- u = self.get_normal_vector((pos_or_neg*(ts+(self.step_size))), dx, dy, p3)
- test_point = round(u[1]),round(u[0])
- if not self.is_inside(im, test_point):
- return None, None
- test = im[test_point[0], test_point[1]] > color_threshold
- if test == False:
- pixels = ts - 1
- break
- # plt.plot(u[0], u[1], 'c.', markersize=12)
- return pixels, (u[0], u[1])
-
-
- def get_calculated_diameter(self, im, p1, p2):
- color_threshold = (int(np.max(im))+int(np.min(im)))/2
- diameters = []
- lines = []
- #mask_meas_lines = np.zeros_like(im)
- for n in range(1, self.n+1):
- t = 1/(self.n+1)
- p3, dx, dy = self.get_point((t * n), p1, p2)
- test_point = round(p3[1]),round(p3[0])
- if not self.is_inside(im, test_point):
- continue
- true_point = im[test_point[0], test_point[1]] > color_threshold
- if true_point == False:
- continue
- if true_point == True:
- radius_p, cp1 = self.get_pixels_half(1, im, dx, dy, p3)
- radius_n, cp2 = self.get_pixels_half(-1, im, dx, dy, p3)
- if (radius_p != None) and (radius_n != None):
- max_val = max(radius_p, radius_n)
- min_val = min(radius_p, radius_n)
- equal = abs((max_val - min_val)/(max_val + 1e-5))
- if equal < 0.1:
- lines.append((cp1,cp2))
- diameters.append(radius_p+radius_n)
- # mask_meas_lines = self.mask_measured_lines(im, lines)
- # plt.plot(p3[0], p3[1], 'r.', markersize=12)
- calculated_diameter = np.array(diameters).mean()
- return calculated_diameter, lines
-
- def line_to_arrays(self, line):
- return [line[0][0], line[1][0]], [line[0][1], line[1][1]]
-
- def mask_measured_lines(self, im, lines):
- mask = np.zeros_like(im)
- for p1, p2 in lines:
-            if p1 is not None and p2 is not None:
- cv2.line(mask, np.array(p1).astype(np.int32), np.array(p2).astype(np.int32), 1, 1)
- return mask
-
- def predict(self, im: np.ndarray):
- x, y, popt, pcov = self.get_fited_line_x_y(im)
- _, _, popt_inv, pcov_inv = self.get_fited_line_y_x(im)
- popt_fit, x_fit, y_fit, p1, p2 = self.get_better_fit(x, y, popt, popt_inv, pcov, pcov_inv)
- calculated_diameter, lines = self.get_calculated_diameter(im, p1, p2)
- mask_meas_lines = self.mask_measured_lines(im, lines)
- #for line in lines:
- # plt.plot(*self.line_to_arrays(line), 'c-')
- return calculated_diameter, lines
-
-if __name__ == "__main__":
- import os
-
- model = LineFit(10, 0.5)
- dataset_path = "/Users/carmenlopez/dev/diameterY/scratch/dataset_files"
- example_path = os.path.join(dataset_path, "test_0005.npz")
- example = np.load(example_path)
- diameter_pred, mask_meas_lines = model.predict(example["x"])
- print(diameter_pred, example["d"])
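Since the __main__ block above depends on a local dataset path, a self-contained sketch with a synthetic fiber image may be easier to run. The drawn thickness of 9 px is an arbitrary test value, and the measured diameter is only expected to come out close to it.

```python
import numpy as np
import cv2
from line_fit import LineFit

# Synthetic "fiber": a bright stroke of known thickness on a dark background.
im = np.zeros((128, 128), dtype=np.uint8)
cv2.line(im, (10, 20), (110, 100), color=255, thickness=9)

model = LineFit(n=10, step_size=0.5)
diameter, lines = model.predict(im)
print(f"measured diameter ~= {diameter:.2f} px (drawn thickness: 9 px)")
```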
diff --git a/spaces/feng2022/styleganhuman_copy/torch_utils/misc.py b/spaces/feng2022/styleganhuman_copy/torch_utils/misc.py
deleted file mode 100644
index cd512ab8b61ece35d81ec35f43948a843efbbce1..0000000000000000000000000000000000000000
--- a/spaces/feng2022/styleganhuman_copy/torch_utils/misc.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import re
-import contextlib
-import numpy as np
-import torch
-import warnings
-import dnnlib
-
-#----------------------------------------------------------------------------
-# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
-# same constant is used multiple times.
-
-_constant_cache = dict()
-
-def constant(value, shape=None, dtype=None, device=None, memory_format=None):
- value = np.asarray(value)
- if shape is not None:
- shape = tuple(shape)
- if dtype is None:
- dtype = torch.get_default_dtype()
- if device is None:
- device = torch.device('cpu')
- if memory_format is None:
- memory_format = torch.contiguous_format
-
- key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
- tensor = _constant_cache.get(key, None)
- if tensor is None:
- tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
- if shape is not None:
- tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
- tensor = tensor.contiguous(memory_format=memory_format)
- _constant_cache[key] = tensor
- return tensor
-
-#----------------------------------------------------------------------------
-# Replace NaN/Inf with specified numerical values.
-
-try:
- nan_to_num = torch.nan_to_num # 1.8.0a0
-except AttributeError:
- def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
- assert isinstance(input, torch.Tensor)
- if posinf is None:
- posinf = torch.finfo(input.dtype).max
- if neginf is None:
- neginf = torch.finfo(input.dtype).min
- assert nan == 0
- return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
-
-#----------------------------------------------------------------------------
-# Symbolic assert.
-
-try:
- symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
-except AttributeError:
- symbolic_assert = torch.Assert # 1.7.0
-
-#----------------------------------------------------------------------------
-# Context manager to suppress known warnings in torch.jit.trace().
-
-class suppress_tracer_warnings(warnings.catch_warnings):
- def __enter__(self):
- super().__enter__()
- warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
- return self
-
-#----------------------------------------------------------------------------
-# Assert that the shape of a tensor matches the given list of integers.
-# None indicates that the size of a dimension is allowed to vary.
-# Performs symbolic assertion when used in torch.jit.trace().
-
-def assert_shape(tensor, ref_shape):
- if tensor.ndim != len(ref_shape):
- raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
- for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
- if ref_size is None:
- pass
- elif isinstance(ref_size, torch.Tensor):
- with suppress_tracer_warnings(): # as_tensor results are registered as constants
- symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
- elif isinstance(size, torch.Tensor):
- with suppress_tracer_warnings(): # as_tensor results are registered as constants
- symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
- elif size != ref_size:
- raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
-
-#----------------------------------------------------------------------------
-# Function decorator that calls torch.autograd.profiler.record_function().
-
-def profiled_function(fn):
- def decorator(*args, **kwargs):
- with torch.autograd.profiler.record_function(fn.__name__):
- return fn(*args, **kwargs)
- decorator.__name__ = fn.__name__
- return decorator
-
-#----------------------------------------------------------------------------
-# Sampler for torch.utils.data.DataLoader that loops over the dataset
-# indefinitely, shuffling items as it goes.
-
-class InfiniteSampler(torch.utils.data.Sampler):
- def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
- assert len(dataset) > 0
- assert num_replicas > 0
- assert 0 <= rank < num_replicas
- assert 0 <= window_size <= 1
- super().__init__(dataset)
- self.dataset = dataset
- self.rank = rank
- self.num_replicas = num_replicas
- self.shuffle = shuffle
- self.seed = seed
- self.window_size = window_size
-
- def __iter__(self):
- order = np.arange(len(self.dataset))
- rnd = None
- window = 0
- if self.shuffle:
- rnd = np.random.RandomState(self.seed)
- rnd.shuffle(order)
- window = int(np.rint(order.size * self.window_size))
-
- idx = 0
- while True:
- i = idx % order.size
- if idx % self.num_replicas == self.rank:
- yield order[i]
- if window >= 2:
- j = (i - rnd.randint(window)) % order.size
- order[i], order[j] = order[j], order[i]
- idx += 1
-
-#----------------------------------------------------------------------------
-# Utilities for operating with torch.nn.Module parameters and buffers.
-
-def params_and_buffers(module):
- assert isinstance(module, torch.nn.Module)
- return list(module.parameters()) + list(module.buffers())
-
-def named_params_and_buffers(module):
- assert isinstance(module, torch.nn.Module)
- return list(module.named_parameters()) + list(module.named_buffers())
-
-def copy_params_and_buffers(src_module, dst_module, require_all=False):
- assert isinstance(src_module, torch.nn.Module)
- assert isinstance(dst_module, torch.nn.Module)
- src_tensors = {name: tensor for name, tensor in named_params_and_buffers(src_module)}
- for name, tensor in named_params_and_buffers(dst_module):
- assert (name in src_tensors) or (not require_all)
- if name in src_tensors:
- tensor.copy_(src_tensors[name].detach()).requires_grad_(tensor.requires_grad)
-
-#----------------------------------------------------------------------------
-# Context manager for easily enabling/disabling DistributedDataParallel
-# synchronization.
-
-@contextlib.contextmanager
-def ddp_sync(module, sync):
- assert isinstance(module, torch.nn.Module)
- if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
- yield
- else:
- with module.no_sync():
- yield
-
-#----------------------------------------------------------------------------
-# Check DistributedDataParallel consistency across processes.
-
-def check_ddp_consistency(module, ignore_regex=None):
- assert isinstance(module, torch.nn.Module)
- for name, tensor in named_params_and_buffers(module):
- fullname = type(module).__name__ + '.' + name
- if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
- continue
- tensor = tensor.detach()
- other = tensor.clone()
- torch.distributed.broadcast(tensor=other, src=0)
- assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
-
-#----------------------------------------------------------------------------
-# Print summary table of module hierarchy.
-
-def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
- assert isinstance(module, torch.nn.Module)
- assert not isinstance(module, torch.jit.ScriptModule)
- assert isinstance(inputs, (tuple, list))
-
- # Register hooks.
- entries = []
- nesting = [0]
- def pre_hook(_mod, _inputs):
- nesting[0] += 1
- def post_hook(mod, _inputs, outputs):
- nesting[0] -= 1
- if nesting[0] <= max_nesting:
- outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
- outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
- entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
- hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
- hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
-
- # Run module.
- outputs = module(*inputs)
- for hook in hooks:
- hook.remove()
-
- # Identify unique outputs, parameters, and buffers.
- tensors_seen = set()
- for e in entries:
- e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
- e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
- e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
- tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
-
- # Filter out redundant entries.
- if skip_redundant:
- entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
-
- # Construct table.
- rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
- rows += [['---'] * len(rows[0])]
- param_total = 0
- buffer_total = 0
- submodule_names = {mod: name for name, mod in module.named_modules()}
- for e in entries:
- name = '' if e.mod is module else submodule_names[e.mod]
- param_size = sum(t.numel() for t in e.unique_params)
- buffer_size = sum(t.numel() for t in e.unique_buffers)
- output_shapes = [str(list(e.outputs[0].shape)) for t in e.outputs]
- output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
- rows += [[
- name + (':0' if len(e.outputs) >= 2 else ''),
- str(param_size) if param_size else '-',
- str(buffer_size) if buffer_size else '-',
- (output_shapes + ['-'])[0],
- (output_dtypes + ['-'])[0],
- ]]
- for idx in range(1, len(e.outputs)):
- rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
- param_total += param_size
- buffer_total += buffer_size
- rows += [['---'] * len(rows[0])]
- rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
-
- # Print table.
- widths = [max(len(cell) for cell in column) for column in zip(*rows)]
- print()
- for row in rows:
- print(' '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
- print()
- return outputs
-
-#----------------------------------------------------------------------------
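A small sketch of how a few of these helpers are typically used; the toy network and shapes are illustrative, and it assumes the repository's dnnlib package is importable, since misc.py imports it at module level.

```python
import torch
from torch_utils import misc

# Cached constant tensor: repeated calls with the same arguments reuse one tensor.
ones = misc.constant(1.0, shape=[4, 4])

# Shape checking; None marks a dimension that may vary (here, the batch size).
x = torch.randn(8, 3, 64, 64)
misc.assert_shape(x, [None, 3, 64, 64])

# Per-module summary table for a toy network.
net = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3, padding=1), torch.nn.ReLU())
misc.print_module_summary(net, [x])
```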
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram 6.17 0 APK - The Most Popular Social Media App for Android.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram 6.17 0 APK - The Most Popular Social Media App for Android.md
deleted file mode 100644
index cab2bcf226d4125bccf7b3dd8631542dedc29d52..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Instagram 6.17 0 APK - The Most Popular Social Media App for Android.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-Instagram 6.17 0 APK Download: How to Get the Latest Version of the Popular Social Media App
- Instagram is one of the most popular social media apps in the world, with over one billion monthly active users. It allows you to share photos and videos with your friends, family, and followers, as well as discover new content from people you may not know. Whether you want to showcase your creativity, promote your business, or just have fun, Instagram is the app for you.
- What is Instagram and why do you need it?
- Instagram features and benefits
- Instagram has many features that make it a great app for social media lovers. Some of them are:
-instagram 6.17 0 apk download Download >>> https://gohhs.com/2uPuXw
-
-You can post photos and videos to your profile, or share them as stories that disappear after 24 hours.
-You can edit your photos and videos with filters, stickers, text, and other tools.
-You can chat with your friends and send them photos and videos privately through direct messages.
-You can follow your favorite celebrities, influencers, brands, and other accounts that interest you.
-You can explore new content from different categories, such as beauty, fashion, sports, music, and more.
-You can watch live videos from your favorite accounts or broadcast your own live video to your followers.
-You can create reels, which are short videos that you can add music and effects to.
-You can shop from your favorite brands and creators directly on the app.
-
- Instagram requirements and compatibility
- To use Instagram, you need a smartphone or tablet that runs on Android or iOS operating systems. You also need an internet connection, either Wi-Fi or mobile data. The minimum Android version required for Instagram is 4.4 (KitKat), while the minimum iOS version required is 11.0.
- What is an APK file and why do you need it?
- APK file definition and advantages
- An APK file is an Android application package file that contains all the files and code needed to install and run an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various websites that offer them for free or for a fee.
- Some of the advantages of using APK files are:
-
-You can access apps that are not available in your region or country.
-You can get apps that are not compatible with your device or operating system.
-You can get apps that are not updated or supported by their developers anymore.
-You can get apps that have extra features or modifications that are not available in the official versions.
-
- APK file risks and precautions
- However, using APK files also comes with some risks and precautions that you should be aware of. Some of them are:
-
-You may download malicious or infected files that can harm your device or compromise your privacy.
-You may violate the terms and conditions of the app developers or Google Play Store by installing unauthorized or modified apps.
-You may encounter bugs, errors, or compatibility issues that can affect the performance or functionality of the app.
-You You may need to update the app manually or frequently to get the latest features and fixes.
-
- Therefore, you should always download APK files from reputable and trusted sources, and scan them with an antivirus or malware detector before installing them. You should also backup your data and device before installing any APK file, in case something goes wrong.
- How to download and install Instagram 6.17 0 APK file?
- If you want to download and install the Instagram 6.17 0 APK file on your Android device, you need to follow these steps:
- Step 1: Enable unknown sources on your device
- By default, Android devices do not allow the installation of apps from sources other than the Google Play Store. To enable the installation of APK files, you need to change the security settings on your device. Here is how:
-instagram 6.17 0 apk download for android
-instagram 6.17 0 apk download free
-instagram 6.17 0 apk download latest version
-instagram 6.17 0 apk download old version
-instagram 6.17 0 apk download mod
-instagram 6.17 0 apk download uptodown
-instagram 6.17 0 apk download apkpure
-instagram 6.17 0 apk download apkcombo[^1^]
-instagram 6.17 0 apk download mirror
-instagram 6.17 0 apk download no ads
-instagram 6.17 0 apk download hack
-instagram 6.17 0 apk download premium
-instagram 6.17 0 apk download unlocked
-instagram 6.17 0 apk download pro
-instagram 6.17 0 apk download plus
-instagram 6.17 0 apk download dark mode
-instagram 6.17 0 apk download update
-instagram 6.17 0 apk download offline
-instagram 6.17 0 apk download online
-instagram 6.17 0 apk download original
-instagram 6.17 0 apk download cracked
-instagram 6.17 0 apk download patched
-instagram 6.17 0 apk download beta
-instagram 6.17 0 apk download stable
-instagram 6.17 0 apk download full
-instagram 6.17 0 apk download lite
-instagram 6.17 0 apk download mega
-instagram 6.17 0 apk download mediafire
-instagram 6.17 0 apk download google drive
-instagram 6.17 0 apk download dropbox
-instagram 6.17 0 apk download zippyshare
-instagram 6.17 0 apk download direct link
-instagram 6.17 0 apk download fast speed
-instagram 6.17 0 apk download high quality
-instagram 6.17 0 apk download low size
-instagram 6.17 0 apk download virus free
-instagram 6.17 0 apk download malware free
-instagram 6.17 0 apk download safe and secure
-instagram 6.17 0 apk download easy to install
-instagram
-
-Go to Settings > Security > Unknown sources.
-Toggle the switch to turn it on.
-A warning message will pop up. Tap OK to confirm.
-
- Note: The exact steps may vary depending on your device model and Android version. You can also disable this option after installing the APK file.
- Step 2: Download the Instagram 6.17 0 APK file from a trusted source
- Next, you need to download the Instagram 6.17 0 APK file from a reliable and safe website. There are many websites that offer APK files for free or for a fee, but not all of them are trustworthy. Some of them may contain viruses, malware, or fake apps that can harm your device or steal your data.
- To avoid these risks, you should do some research before downloading any APK file. You should check the reviews, ratings, comments, and feedback from other users who have downloaded the same file. You should also verify the file size, version, and developer name of the app. You can use tools like VirusTotal or APKMirror to scan and verify the APK file before downloading it.
- One of the websites that we recommend for downloading Instagram 6.17 0 APK file is [APKPure]. It is a reputable and popular website that offers thousands of APK files for various apps and games. It also provides detailed information, screenshots, and changelogs for each app. You can download the Instagram 6.17 0 APK file from [this link].
- Step 3: Install the Instagram 6.17 0 APK file on your device
- After downloading the Instagram 6.17 0 APK file, you need to install it on your device. Here is how:
-
-Locate the downloaded file on your device using a file manager app or your browser's download history.
-Tap on the file to open it.
-A confirmation message will appear. Tap Install to proceed.
-Wait for the installation process to complete.
-
- Step 4: Launch and enjoy the Instagram app
- Congratulations! You have successfully installed the Instagram 6.17 0 APK file on your device. You can now launch the app from your app drawer or home screen and enjoy its features and benefits.
- How to update the Instagram app to the latest version?
- Update from the Google Play Store
- If you have installed the Instagram app from the Google Play Store, you can easily update it to the latest version by following these steps:
-
-Open the Google Play Store app on your device.
-Tap on the menu icon (three horizontal lines) on the top left corner.
-Tap on My apps & games.
-Find the Instagram app in the list of installed apps.
-If there is an update available, tap on Update to download and install it.
-
- Update from the APK file
- If you have installed the Instagram app from an APK file, you can also update it to the latest version by following these steps:
-
-Download the latest version of the Instagram APK file from a trusted source (see Step 2 above).
-Follow the same steps as in Step 3 above to install it on your device.
-The new version will overwrite the old one without deleting your data or settings.
-
- Conclusion
- In this article, we have explained what is Instagram and why do you need it, what is an APK file and why do you need it, how to download and install Instagram 6.17 0 APK file on your Android device, and how to update the app to the latest version. We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy Instagramming!
- FAQs
- Here are some of the frequently asked questions about Instagram 6.17 0 APK download:
-
-Is Instagram 6.17 0 APK file safe to download and install?
-Yes, as long as you download it from a reputable and trusted source, such as [APKPure]. However, you should always scan the file with an antivirus or malware detector before installing it, and backup your data and device in case something goes wrong.
-What are the benefits of downloading Instagram 6.17 0 APK file?
-Some of the benefits are that you can access apps that are not available in your region or country, get apps that are not compatible with your device or operating system, get apps that are not updated or supported by their developers anymore, and get apps that have extra features or modifications that are not available in the official versions.
-What are the risks of downloading Instagram 6.17 0 APK file?
-Some of the risks are that you may download malicious or infected files that can harm your device or compromise your privacy, violate the terms and conditions of the app developers or Google Play Store by installing unauthorized or modified apps, encounter bugs, errors, or compatibility issues that can affect the performance or functionality of the app, and need to update the app manually or frequently to get the latest features and fixes.
-How can I uninstall Instagram 6.17 0 APK file from my device?
-You can uninstall Instagram 6.17 0 APK file from your device by following these steps:
-
-Go to Settings > Apps > Instagram.
-Tap on Uninstall.
-A confirmation message will appear. Tap OK to proceed.
-
- How can I contact the Instagram support team if I have any issues with the app?
-You can contact the Instagram support team by visiting their [Help Center] or by sending them an email at [support@instagram.com]. You can also report any bugs, errors, or feedback through the app by going to Settings > Help > Report a Problem.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Snapchat APK for Android - The Best Way to Share the Moment.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Snapchat APK for Android - The Best Way to Share the Moment.md
deleted file mode 100644
index 289117089a1bd8c4fbdf6ef764c6ea7f108ae349..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Snapchat APK for Android - The Best Way to Share the Moment.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-How to Download APK Snapchat for Android Devices
-Do you love using Snapchat to share your snaps and stories with your friends and family? Do you want to enjoy more features and options than the official app offers? If yes, then you might want to download APK Snapchat for your Android device.
-download apk snapchat Download Zip 🔗 https://gohhs.com/2uPum4
-APK Snapchat is a modified version of the original Snapchat app that allows you to access more filters, stickers, emojis, lenses, and other fun stuff. You can also save snaps and stories without notifying the sender, view deleted messages, spoof your location, and more.
-In this article, we will show you how to download APK Snapchat for your Android device from a trusted source called Uptodown. We will also guide you through the installation process and give you some tips on how to use APK Snapchat safely and effectively.
-What is Snapchat?
-Snapchat is a popular social media app that lets you send and receive photos and videos (called snaps) with your contacts. The interesting thing about Snapchat is that all the content disappears after it's viewed. You can also create stories that last for 24 hours and showcase your daily activities.
-Snapchat also has a lot of features that make it fun and engaging. You can use filters, stickers, emojis, lenses, bitmojis, and other effects to spice up your snaps. You can also chat with your friends, make video calls, play games, watch shows, discover news, and more.
-Why Download APK Snapchat?
-APK Snapchat is a modified version of the original Snapchat app that gives you more control and freedom over your snaps. With APK Snapchat, you can:
-
-Access more filters, stickers, emojis, lenses, bitmojis, and other effects that are not available in the official app.
- Save snaps and stories without notifying the sender or the app. You can also view deleted messages and replay snaps as many times as you want.
-Spoof your location and use a fake GPS to access geo-restricted filters and content.
-Disable ads, screenshots, typing notifications, and other annoyances.
-Customize your app's appearance, layout, font, color, and more.
-
-However, downloading APK Snapchat also comes with some risks. For one thing, APK Snapchat is not authorized by Snapchat Inc., the official developer of the app. This means that APK Snapchat may not be safe, secure, or compatible with your device. It may also contain malware, viruses, or spyware that can harm your device or compromise your privacy.
-Another risk of downloading APK Snapchat is that you may get banned from using the official app. Snapchat has a strict policy against using third-party apps or modifications that violate its terms of service. If Snapchat detects that you are using APK Snapchat, it may lock your account temporarily or permanently.
-Therefore, if you decide to download APK Snapchat, you should do so at your own risk and discretion. You should also make sure that you download APK Snapchat from a trusted source that offers the latest and safest version of the app.
-download snapchat apk for android
-download snapchat apk latest version
-download snapchat apk file
-download snapchat apk mod
-download snapchat apk old version
-download snapchat apk for pc
-download snapchat apk for ios
-download snapchat apk 2023
-download snapchat apk without google play
-download snapchat apk mirror
-download snapchat apk from uptodown
-download snapchat apk for windows 10
-download snapchat apk for laptop
-download snapchat apk for mac
-download snapchat apk for fire tablet
-download snapchat apk pure
-download snapchat apk beta
-download snapchat apk for chromebook
-download snapchat apk for kindle fire
-download snapchat apk for android tv
-download snapchat apk with filters
-download snapchat apk no root
-download snapchat apk cracked
-download snapchat apk hack
-download snapchat apk premium
-download snapchat apk pro
-download snapchat apk plus
-download snapchat apk free
-download snapchat apk offline
-download snapchat apk online
-download snapchat apk update
-download snapchat apk 12.38.0.45
-download snapchat apk dark mode
-download snapchat apk 2022
-download snapchat apk 2021
-download snapchat apk 2020
-download snapchat apk 2019
-download snapchat apk 2018
-download snapchat apk 2017
-how to download snapchat apk on android phone
-how to download snapchat apk on iphone
-how to download snapchat apk on computer
-how to download snapchat apk on bluestacks
-how to install snapchat apk on android device
-how to install snapchat apk on ios device
-how to install snapchat apk on pc windows 7/8/10
-How to Download APK Snapchat from Uptodown
-One of the best sources to download APK Snapchat is Uptodown. Uptodown is a reputable website that offers millions of free and legal apps for Android devices. Uptodown also scans all the apps for viruses and malware before uploading them to its platform. You can also read user reviews, ratings, and comments about the apps on Uptodown.
-To download APK Snapchat from Uptodown, follow these simple steps:
-
-Go to Uptodown's website on your browser.
-Type "Snapchat" in the search bar and hit enter.
-Select the app named "Snapchat" with the yellow icon and the description "Share your day using pictures".
-Scroll down to the bottom of the page and click on "Download".
-Choose the version of APK Snapchat that you want to download. You can see the file size, date, and changelog of each version.
-Click on "Download" again and wait for the file to be downloaded to your device.
-
-You can also use this comparison table to see the differences between APK Snapchat and official Snapchat:
- | Feature | APK Snapchat | Official Snapchat | |---------|--------------|-------------------| | Filters | More | Less | | Stickers | More | Less | | Emojis | More | Less | | Lenses | More | Less | | Bitmojis | More | Less | | Save snaps | Yes | No | | View deleted messages | Yes | No | | Replay snaps | Unlimited | Limited | | Spoof location | Yes | No | | Disable ads | Yes | No | | Disable screenshots | Yes | No | | Disable typing notifications | Yes | No | | Customize appearance | Yes | No | | Safety | Low | High | | Compatibility | Low | High | How to Install APK Snapchat on Your Android Device
- After you download APK Snapchat from Uptodown, you need to install it on your Android device. To do that, follow these steps:
-
-Locate the downloaded file on your device's file manager or downloads folder.
-Tap on the file and select "Install".
-If you see a warning message that says "For your security, your phone is not allowed to install unknown apps from this source", you need to enable unknown sources on your device. To do that, go to Settings > Security > Unknown sources and toggle it on.
-Go back to the file and tap on "Install" again.
-Wait for the installation process to finish.
-You can now launch APK Snapchat from your app drawer or home screen.
-
- How to Use APK Snapchat on Your Android Device
- Using APK Snapchat on your Android device is similar to using the official app. You can sign in with your existing account or create a new one. You can also sync your contacts and add friends from other social media platforms.
- To use APK Snapchat on your Android device, you can follow these tips:
-
-To access more filters, stickers, emojis, lenses, bitmojis, and other effects, swipe left or right on the camera screen. You can also tap on the smiley face icon at the bottom of the screen to see more options. To save snaps and stories without notifying the sender or the app, tap on the download icon at the bottom of the screen. You can also long-press on the snap or story to save it.
-To view deleted messages and replay snaps, go to the chat screen and tap on the message or snap that you want to see. You can also swipe up on the screen to see more options.
-To spoof your location and use a fake GPS, go to Settings > Location > Fake GPS and toggle it on. You can also select the location that you want to use from the map.
-To disable ads, screenshots, typing notifications, and other annoyances, go to Settings > Privacy > Advanced and toggle off the options that you don't want.
-To customize your app's appearance, layout, font, color, and more, go to Settings > Appearance and choose the options that you like.
-
- Conclusion
- APK Snapchat is a great way to enjoy more features and options than the official app. You can access more filters, stickers, emojis, lenses, bitmojis, and other effects. You can also save snaps and stories without notifying the sender, view deleted messages, spoof your location, and more.
- However, APK Snapchat also comes with some risks. It is not authorized by Snapchat Inc., the official developer of the app. It may not be safe, secure, or compatible with your device. It may also contain malware, viruses, or spyware that can harm your device or compromise your privacy. Moreover, you may get banned from using the official app if Snapchat detects that you are using APK Snapchat.
- Therefore, if you decide to download APK Snapchat, you should do so at your own risk and discretion. You should also make sure that you download APK Snapchat from a trusted source like Uptodown that offers the latest and safest version of the app.
- If you are ready to download APK Snapchat for your Android device, click on the link below and follow the instructions. Have fun snapping!
- Download APK Snapchat from Uptodown
- FAQs
- Here are some frequently asked questions about APK Snapchat and their answers:
-
-What is APK Snapchat?
-APK Snapchat is a modified version of the original Snapchat app that allows you to access more features and options than the official app.
-What are the benefits of downloading APK Snapchat?
-With APK Snapchat, you can access more filters, stickers, emojis, lenses, bitmojis, and other effects. You can also save snaps and stories without notifying the sender, view deleted messages, spoof your location, and more.
-What are the risks of downloading APK Snapchat?
-APK Snapchat is not authorized by Snapchat Inc., the official developer of the app. It may not be safe, secure, or compatible with your device. It may also contain malware, viruses, or spyware that can harm your device or compromise your privacy. Moreover, you may get banned from using the official app if Snapchat detects that you are using APK Snapchat.
-Where can I download APK Snapchat?
-You can download APK Snapchat from a trusted source like Uptodown that offers the latest and safest version of the app.
-How can I install APK Snapchat on my Android device?
-You need to enable unknown sources on your device and then tap on the downloaded file to install it.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/Image-to-Story/app.py b/spaces/fffiloni/Image-to-Story/app.py
deleted file mode 100644
index 8b8e92ed5ce4ba9fb84b1a5549fe0efa4222a2dd..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Image-to-Story/app.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import gradio as gr
-from share_btn import community_icon_html, loading_icon_html, share_js
-import re
-import os
-hf_token = os.environ.get('HF_TOKEN')
-from gradio_client import Client
-client = Client("https://fffiloni-test-llama-api-debug.hf.space/", hf_token=hf_token)
-
-clipi_client = Client("https://fffiloni-clip-interrogator-2.hf.space/")
-
-def get_text_after_colon(input_text):
- # Find the first occurrence of ":"
- colon_index = input_text.find(":")
-
- # Check if ":" exists in the input_text
- if colon_index != -1:
- # Extract the text after the colon
- result_text = input_text[colon_index + 1:].strip()
- return result_text
- else:
- # Return the original text if ":" is not found
- return input_text
-
-def infer(image_input, audience):
- gr.Info('Calling CLIP Interrogator ...')
- clipi_result = clipi_client.predict(
- image_input, # str (filepath or URL to image) in 'parameter_3' Image component
- "best", # str in 'Select mode' Radio component
- 4, # int | float (numeric value between 2 and 24) in 'best mode max flavors' Slider component
- api_name="/clipi2"
- )
- print(clipi_result)
-
-
- llama_q = f"""
- I'll give you a simple image caption, please provide a fictional story for a {audience} audience that would fit well with the image. Please be creative, do not worry and only generate a cool fictional story.
- Here's the image description:
- '{clipi_result[0]}'
-
- """
- gr.Info('Calling Llama2 ...')
- result = client.predict(
- llama_q, # str in 'Message' Textbox component
- "I2S",
- api_name="/predict"
- )
-
- print(f"Llama2 result: {result}")
-
- result = get_text_after_colon(result)
-
- # Split the text into paragraphs based on actual line breaks
- paragraphs = result.split('\n')
-
- # Join the paragraphs back with an extra empty line between each paragraph
- formatted_text = '\n\n'.join(paragraphs)
-
-
- return formatted_text, gr.Group.update(visible=True)
-
-css="""
-#col-container {max-width: 910px; margin-left: auto; margin-right: auto;}
-a {text-decoration-line: underline; font-weight: 600;}
-a {text-decoration-line: underline; font-weight: 600;}
-.animate-spin {
- animation: spin 1s linear infinite;
-}
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
-}
-#share-btn-container {
- display: flex;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- max-width: 15rem;
-}
-div#share-btn-container > div {
- flex-direction: row;
- background: black;
- align-items: center;
-}
-#share-btn-container:hover {
- background-color: #060606;
-}
-#share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor:pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.5rem !important;
- padding-bottom: 0.5rem !important;
- right:0;
-}
-#share-btn * {
- all: unset;
-}
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-#share-btn-container .wrap {
- display: none !important;
-}
-#share-btn-container.hidden {
- display: none!important;
-}
-
-div#story textarea {
- font-size: 1.5em;
- line-height: 1.4em;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
- with gr.Column(elem_id="col-container"):
- gr.Markdown(
- """
- Image to Story
- Upload an image, get a story made by Llama2 !
- """
- )
- with gr.Row():
- with gr.Column():
- image_in = gr.Image(label="Image input", type="filepath", elem_id="image-in", height=420)
- audience = gr.Radio(label="Target Audience", choices=["Children", "Adult"], value="Children")
- submit_btn = gr.Button('Tell me a story')
- with gr.Column():
- #caption = gr.Textbox(label="Generated Caption")
- story = gr.Textbox(label="generated Story", elem_id="story", height=420)
-
- with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
- community_icon = gr.HTML(community_icon_html)
- loading_icon = gr.HTML(loading_icon_html)
- share_button = gr.Button("Share with community", elem_id="share-btn")
-
- gr.Examples(examples=[["./examples/crabby.png", "Children"],["./examples/hopper.jpeg", "Adult"]],
- fn=infer,
- inputs=[image_in, audience],
- outputs=[story, share_group],
- cache_examples=True
- )
-
- submit_btn.click(fn=infer, inputs=[image_in, audience], outputs=[story, share_group])
- share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=12).launch()
diff --git a/spaces/figsfidds/moody_nana_classifier/app.py b/spaces/figsfidds/moody_nana_classifier/app.py
deleted file mode 100644
index db767db9630ad89d3a886b9c895fcc8096e6005e..0000000000000000000000000000000000000000
--- a/spaces/figsfidds/moody_nana_classifier/app.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from fastai.vision.all import *
-import gradio as gr
-
-learn = load_learner('export.pkl')
-
-labels = learn.dls.vocab
-def predict(img_fnm):
- img = PILImage.create(img_fnm)
- pred, pred_idx, probs = learn.predict(img)
- return {label: float(prob) for label, prob in zip(labels, probs)}
-
-title = "Moody-nana Classifier"
-description = "Classifies an image as either happy, angry, or banana."
-examples = [
- "examples/angry-banana.jpg",
- "examples/angry.jpg",
- "examples/happy.jpg",
- "examples/banana.jpg"
-]
-interpretation = "default"
-enable_queue = True
-
-intf = gr.Interface(
- fn=predict,
- inputs=gr.components.Image(shape=(512, 512)),
- outputs=gr.components.Label(),
- title=title,
- description=description,
- examples=examples,
- interpretation=interpretation
-)
-
-intf.launch()
diff --git a/spaces/fiz2/cloudy/greeting.md b/spaces/fiz2/cloudy/greeting.md
deleted file mode 100644
index 53c7c4bc1e4e1ad87e25f400b685536ea4b8a816..0000000000000000000000000000000000000000
--- a/spaces/fiz2/cloudy/greeting.md
+++ /dev/null
@@ -1,2 +0,0 @@
-thank you antigonus <3
-pass: _cosmos_
\ No newline at end of file
diff --git a/spaces/flynster/FeinbergQuizNotes/question_generation/utils.py b/spaces/flynster/FeinbergQuizNotes/question_generation/utils.py
deleted file mode 100644
index 37fb834f59cc9a031fee5e36cc2e0249f932ba55..0000000000000000000000000000000000000000
--- a/spaces/flynster/FeinbergQuizNotes/question_generation/utils.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from typing import Callable, Dict, Iterable, List
-from torch import nn
-
-# these functions are taken from transformers repo
-def grad_status(model: nn.Module) -> Iterable:
- return (par.requires_grad for par in model.parameters())
-
-def freeze_params(model: nn.Module):
- for par in model.parameters():
- par.requires_grad = False
-
-def freeze_embeds(model: nn.Module):
- """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
- try:
- freeze_params(model.model.shared)
- for d in [model.model.encoder, model.model.decoder]:
- freeze_params(d.embed_positions)
- freeze_params(d.embed_tokens)
- except AttributeError:
- freeze_params(model.shared)
- for d in [model.encoder, model.decoder]:
- freeze_params(d.embed_tokens)
-
-def assert_not_all_frozen(model):
- model_grads: List[bool] = list(grad_status(model))
- npars = len(model_grads)
- assert any(model_grads), f"none of {npars} weights require grad"
-
-def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
- """From fairseq"""
- if target.dim() == lprobs.dim() - 1:
- target = target.unsqueeze(-1)
- nll_loss = -lprobs.gather(dim=-1, index=target)
- smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
- if ignore_index is not None:
- pad_mask = target.eq(ignore_index)
- nll_loss.masked_fill_(pad_mask, 0.0)
- smooth_loss.masked_fill_(pad_mask, 0.0)
- bs = pad_mask.long().sum()
- else:
- nll_loss = nll_loss.squeeze(-1)
- smooth_loss = smooth_loss.squeeze(-1)
- bs = lprobs.shape[0]
-
- nll_loss = nll_loss.sum() # mean()? Scared to break other math.
- smooth_loss = smooth_loss.sum()
- eps_i = epsilon / lprobs.size(-1)
- loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
- return loss / bs, nll_loss / bs
\ No newline at end of file
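
As a rough usage sketch for the `label_smoothed_nll_loss` helper above — the import path, padding id, and tensor shapes are illustrative assumptions, not part of the original Space:

```python
# Hypothetical usage of label_smoothed_nll_loss; shapes, pad id, and import path are assumptions.
import torch
import torch.nn.functional as F

from question_generation.utils import label_smoothed_nll_loss  # assumed module path

batch, seq_len, vocab = 2, 5, 11
pad_idx = 0                                 # assumed padding token id
logits = torch.randn(batch, seq_len, vocab)
target = torch.randint(1, vocab, (batch, seq_len))
target[0, -1] = pad_idx                     # one padded position, skipped via ignore_index

lprobs = F.log_softmax(logits, dim=-1)      # the function expects log-probabilities
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=pad_idx)
print(loss.item(), nll.item())
```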
diff --git a/spaces/frapochetti/blurry-faces/kornia_benchmark.py b/spaces/frapochetti/blurry-faces/kornia_benchmark.py
deleted file mode 100644
index 9317c8517e60b3b72f6409566db6be705f334aa6..0000000000000000000000000000000000000000
--- a/spaces/frapochetti/blurry-faces/kornia_benchmark.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import cv2
-import gradio as gr
-from PIL import Image
-import numpy as np
-import torch
-import kornia as K
-from kornia.contrib import FaceDetector, FaceDetectorResult
-import time
-
-device = torch.device('cpu')
-face_detection = FaceDetector().to(device)
-
-def scale_image(img: np.ndarray, size: int) -> np.ndarray:
- h, w = img.shape[:2]
- scale = 1. * size / w
- return cv2.resize(img, (int(w * scale), int(h * scale)))
-
-
-def apply_blur_face(img: torch.Tensor, img_vis: np.ndarray, det: FaceDetectorResult):
- # crop the face
- x1, y1 = det.xmin.int(), det.ymin.int()
- x2, y2 = det.xmax.int(), det.ymax.int()
- roi = img[..., y1:y2, x1:x2]
- #print(roi.shape)
- if roi.shape[-1]==0 or roi.shape[-2]==0:
- return
-
- # apply blurring and put back to the visualisation image
- roi = K.filters.gaussian_blur2d(roi, (21, 21), (100., 100.))
- roi = K.color.rgb_to_bgr(roi)
- img_vis[y1:y2, x1:x2] = K.tensor_to_image(roi)
-
-
-def run(image):
- image.thumbnail((1280, 1280))
- img_raw = np.array(image)
-
- # preprocess
- img = K.image_to_tensor(img_raw, keepdim=False).to(device)
- img = K.color.bgr_to_rgb(img.float())
-
- with torch.no_grad():
- dets = face_detection(img)
- dets = [FaceDetectorResult(o) for o in dets]
-
- img_vis = img_raw.copy()
-
- for b in dets:
- if b.score < 0.5:
- continue
-
- apply_blur_face(img, img_vis, b)
-
- return Image.fromarray(img_vis)
-
-if __name__ == "__main__":
-
- start = time.time()
- for _ in range(100):
- image = Image.open("./images/crowd.jpeg")
- _ = run(image)
-
- print('It took', (time.time()-start)/100, 'seconds.')
\ No newline at end of file
diff --git a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/You.py b/spaces/g4f/freegpt-webui/g4f/Provider/Providers/You.py
deleted file mode 100644
index 02a2774ce62bae33612a73272d584dc2acaf3eb0..0000000000000000000000000000000000000000
--- a/spaces/g4f/freegpt-webui/g4f/Provider/Providers/You.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://you.com'
-model = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'messages': messages}, separators=(',', ':'))
-
- cmd = ['python3', f'{path}/helpers/you.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- yield line.decode('utf-8') #[:-1]
\ No newline at end of file
diff --git a/spaces/gauravahuja/nlpconnect-vit-gpt2-image-captioning/README.md b/spaces/gauravahuja/nlpconnect-vit-gpt2-image-captioning/README.md
deleted file mode 100644
index 9fa34c47d9d0ce4ce93f35db1f9e1b593726619b..0000000000000000000000000000000000000000
--- a/spaces/gauravahuja/nlpconnect-vit-gpt2-image-captioning/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Nlpconnect Vit Gpt2 Image Captioning
-emoji: 📉
-colorFrom: purple
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/gaurxvreddy/Xtinguish/utils.py b/spaces/gaurxvreddy/Xtinguish/utils.py
deleted file mode 100644
index fd2c30eca632f02e0701bfa8689bcb2d5333b5cf..0000000000000000000000000000000000000000
--- a/spaces/gaurxvreddy/Xtinguish/utils.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import cv2
-import numpy as np
-
-def pixel_counter(img, lower_limit, upper_limit):
- img = np.array(img)
-
- rimg = cv2.resize(img, (1000, 600))
- blur = cv2.GaussianBlur(rimg, (15, 15), 0)
- hsv = cv2.cvtColor(blur, cv2.COLOR_RGB2HSV)
-
- lower = np.array(lower_limit, dtype='uint8') # Fire Color Ranges
- upper = np.array(upper_limit, dtype='uint8') # Fire Color Ranges
-
- # Counting "fire-like" pixels in the image
- mask = cv2.inRange(hsv, lower, upper)
- count = cv2.countNonZero(mask)
-
- return count
-
-def find_intensity(image, time):
- fire_pixels = pixel_counter(image, [0, 100, 110], [70, 250, 255])
-
- if (time == 'Night'):
- value = fire_pixels/150000
- if value>=1:
- return "High Intensity", 1
- elif value<0.01:
- return "Low Intensity", value
- else:
- return "Medium Intensity", value
-
- else:
- value = fire_pixels/80000
- if value>=1:
- return "High Intensity", 1
- elif value<0.25:
- return "Low Intensity", value
- else:
- return "Medium Intensity", value
\ No newline at end of file
diff --git a/spaces/geniusguy777/Face_Recognition/README.md b/spaces/geniusguy777/Face_Recognition/README.md
deleted file mode 100644
index 6dee63dbffaeb0c6a9b8a78ba4e1311144f9c00e..0000000000000000000000000000000000000000
--- a/spaces/geniusguy777/Face_Recognition/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: FaceRecognition
-emoji: 🚀
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.1.1
-app_file: app.py
-pinned: false
-license: gpl-3.0
-duplicated_from: Zengyf-CVer/FaceRecognition
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv.py
deleted file mode 100644
index cf54491997a48ac3e7fadc4183ab7bf3e831024c..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from torch import nn
-
-from .registry import CONV_LAYERS
-
-CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
-CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
-CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
-CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
-
-
-def build_conv_layer(cfg, *args, **kwargs):
- """Build convolution layer.
-
- Args:
- cfg (None or dict): The conv layer config, which should contain:
- - type (str): Layer type.
-            - layer args: Args needed to instantiate a conv layer.
- args (argument list): Arguments passed to the `__init__`
- method of the corresponding conv layer.
- kwargs (keyword arguments): Keyword arguments passed to the `__init__`
- method of the corresponding conv layer.
-
- Returns:
- nn.Module: Created conv layer.
- """
- if cfg is None:
- cfg_ = dict(type='Conv2d')
- else:
- if not isinstance(cfg, dict):
- raise TypeError('cfg must be a dict')
- if 'type' not in cfg:
- raise KeyError('the cfg dict must contain the key "type"')
- cfg_ = cfg.copy()
-
- layer_type = cfg_.pop('type')
- if layer_type not in CONV_LAYERS:
-        raise KeyError(f'Unrecognized conv layer type {layer_type}')
- else:
- conv_layer = CONV_LAYERS.get(layer_type)
-
- layer = conv_layer(*args, **kwargs, **cfg_)
-
- return layer
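
A small usage sketch for the `build_conv_layer` helper above; it assumes mmcv is installed (the upstream project ships the same helper as `mmcv.cnn.build_conv_layer`), since the vendored copy in this Space is being deleted:

```python
# Hedged usage sketch for build_conv_layer; the mmcv import is an assumption.
import torch
from mmcv.cnn import build_conv_layer

# cfg=None falls back to a plain nn.Conv2d
conv = build_conv_layer(None, in_channels=3, out_channels=16, kernel_size=3, padding=1)

# an explicit cfg dict picks the registered layer type
conv1d = build_conv_layer(dict(type='Conv1d'), 8, 8, kernel_size=5, padding=2)

x = torch.randn(1, 3, 32, 32)
print(conv(x).shape)  # torch.Size([1, 16, 32, 32])
```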
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/__init__.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/__init__.py
deleted file mode 100644
index 999e090a458ee148ceca0649f1e3806a40e909bd..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/ops/__init__.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .assign_score_withk import assign_score_withk
-from .ball_query import ball_query
-from .bbox import bbox_overlaps
-from .border_align import BorderAlign, border_align
-from .box_iou_rotated import box_iou_rotated
-from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
-from .cc_attention import CrissCrossAttention
-from .contour_expand import contour_expand
-from .corner_pool import CornerPool
-from .correlation import Correlation
-from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
-from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
- ModulatedDeformRoIPoolPack, deform_roi_pool)
-from .deprecated_wrappers import Conv2d_deprecated as Conv2d
-from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d
-from .deprecated_wrappers import Linear_deprecated as Linear
-from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d
-from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
- sigmoid_focal_loss, softmax_focal_loss)
-from .furthest_point_sample import (furthest_point_sample,
- furthest_point_sample_with_dist)
-from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu
-from .gather_points import gather_points
-from .group_points import GroupAll, QueryAndGroup, grouping_operation
-from .info import (get_compiler_version, get_compiling_cuda_version,
- get_onnxruntime_op_path)
-from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev
-from .knn import knn
-from .masked_conv import MaskedConv2d, masked_conv2d
-from .modulated_deform_conv import (ModulatedDeformConv2d,
- ModulatedDeformConv2dPack,
- modulated_deform_conv2d)
-from .multi_scale_deform_attn import MultiScaleDeformableAttention
-from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms
-from .pixel_group import pixel_group
-from .point_sample import (SimpleRoIAlign, point_sample,
- rel_roi_point_to_rel_img_point)
-from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu,
- points_in_boxes_part)
-from .points_sampler import PointsSampler
-from .psa_mask import PSAMask
-from .roi_align import RoIAlign, roi_align
-from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
-from .roi_pool import RoIPool, roi_pool
-from .roiaware_pool3d import RoIAwarePool3d
-from .roipoint_pool3d import RoIPointPool3d
-from .saconv import SAConv2d
-from .scatter_points import DynamicScatter, dynamic_scatter
-from .sync_bn import SyncBatchNorm
-from .three_interpolate import three_interpolate
-from .three_nn import three_nn
-from .tin_shift import TINShift, tin_shift
-from .upfirdn2d import upfirdn2d
-from .voxelize import Voxelization, voxelization
-
-__all__ = [
- 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
- 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
- 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
- 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
- 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
- 'get_compiler_version', 'get_compiling_cuda_version',
- 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
- 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
- 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
- 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
- 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
- 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
- 'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
- 'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
- 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
- 'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
- 'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
- 'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
- 'border_align', 'gather_points', 'furthest_point_sample',
- 'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
- 'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
- 'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
- 'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
-]
diff --git a/spaces/gojiteji/SDTextTransmitter/README.md b/spaces/gojiteji/SDTextTransmitter/README.md
deleted file mode 100644
index 92583628b040094f122a256450c1556ced34a389..0000000000000000000000000000000000000000
--- a/spaces/gojiteji/SDTextTransmitter/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 📡SDTextTransmitter📡
-emoji: 📡
-colorFrom: purple
-colorTo: purple
-sdk: gradio
-sdk_version: 3.28.0
-app_file: app.py
-pinned: false
-tags:
-- jax-diffusers-event
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Astute Graphics Plugins Bundle 1.2.2 Crack How to Install and Activate the Best Tools for Vector Sketching.md b/spaces/gotiQspiryo/whisper-ui/examples/Astute Graphics Plugins Bundle 1.2.2 Crack How to Install and Activate the Best Tools for Vector Sketching.md
deleted file mode 100644
index b5bf29a598467f9c9927e84f884e9bd63b7de29f..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Astute Graphics Plugins Bundle 1.2.2 Crack How to Install and Activate the Best Tools for Vector Sketching.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Astute Graphics Plugins Bundle 1.2.2 Crack Download File ❤❤❤ https://urlgoal.com/2uyMY1
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Mastercam X8 Full Crack 19 How to Download and Install the Best CADCAM Software.md b/spaces/gotiQspiryo/whisper-ui/examples/Mastercam X8 Full Crack 19 How to Download and Install the Best CADCAM Software.md
deleted file mode 100644
index 1375660716848352116b2b2ea0d7430d982efdc3..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Mastercam X8 Full Crack 19 How to Download and Install the Best CADCAM Software.md
+++ /dev/null
@@ -1,6 +0,0 @@
-mastercam x8 full crack 19 Download Zip ★★★ https://urlgoal.com/2uyM3o
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/gptjx/02/Dockerfile b/spaces/gptjx/02/Dockerfile
deleted file mode 100644
index 8cbd335b09b1d1975bfd83a053b5fcaf398147ea..0000000000000000000000000000000000000000
--- a/spaces/gptjx/02/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM python:3.9 as builder
-RUN apt-get update && apt-get install -y build-essential
-COPY requirements.txt .
-RUN pip install --user -r requirements.txt
-
-FROM python:3.9
-MAINTAINER iskoldt
-COPY --from=builder /root/.local /root/.local
-ENV PATH=/root/.local/bin:$PATH
-COPY . /app
-WORKDIR /app
-ENV my_api_key empty
-ENV dockerrun yes
-CMD python3 -u ChuanhuChatbot.py 2>&1 | tee /var/log/application.log
diff --git a/spaces/gradio/HuBERT/examples/conv_seq2seq/README.md b/spaces/gradio/HuBERT/examples/conv_seq2seq/README.md
deleted file mode 100644
index 95fe7e7909a77ee0e50fe31d4b8be38daa8f3be7..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/examples/conv_seq2seq/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Convolutional Sequence to Sequence Learning (Gehring et al., 2017)
-
-## Pre-trained models
-
-Description | Dataset | Model | Test set(s)
----|---|---|---
-Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2) | newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2) newstest2012/2013: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.ntst1213.tar.bz2)
-Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2) | newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-de.newstest2014.tar.bz2)
-Convolutional ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT17 English-German](http://statmt.org/wmt17/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2) | newstest2014: [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.v2.en-de.newstest2014.tar.bz2)
-
-## Example usage
-
-See the [translation README](../translation/README.md) for instructions on reproducing results for WMT'14 En-De and
-WMT'14 En-Fr using the `fconv_wmt_en_de` and `fconv_wmt_en_fr` model architectures.
-
-## Citation
-
-```bibtex
-@inproceedings{gehring2017convs2s,
- title = {Convolutional Sequence to Sequence Learning},
- author = {Gehring, Jonas, and Auli, Michael and Grangier, David and Yarats, Denis and Dauphin, Yann N},
- booktitle = {Proc. of ICML},
- year = 2017,
-}
-```
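
A hedged sketch of loading one of the pre-trained convolutional models listed above through `torch.hub`; the hub identifier `conv.wmt14.en-fr` and the tokenizer/BPE arguments are assumptions based on fairseq's usual hub interface, so check `torch.hub.list('pytorch/fairseq')` for the exact names:

```python
# Hypothetical sketch: translate with a pre-trained fconv model via torch.hub.
# The model id and the tokenizer/bpe kwargs are assumptions; verify against
# torch.hub.list('pytorch/fairseq') before relying on them.
import torch

en2fr = torch.hub.load('pytorch/fairseq', 'conv.wmt14.en-fr',
                       tokenizer='moses', bpe='subword_nmt')
en2fr.eval()
print(en2fr.translate('Hello world!'))
```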
diff --git a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/pca.py b/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/pca.py
deleted file mode 100644
index 948cf5319fd86ba1bccff65270b2881048faf9b1..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/pca.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import os
-import os.path as osp
-import numpy as np
-
-import faiss
-
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description="compute a pca matrix given an array of numpy features"
- )
- # fmt: off
- parser.add_argument('data', help='numpy file containing features')
- parser.add_argument('--output', help='where to save the pca matrix', required=True)
- parser.add_argument('--dim', type=int, help='dim for pca reduction', required=True)
- parser.add_argument('--eigen-power', type=float, default=0, help='eigen power, -0.5 for whitening')
-
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args()
-
- print("Reading features")
- x = np.load(args.data, mmap_mode="r")
-
- print("Computing PCA")
- pca = faiss.PCAMatrix(x.shape[-1], args.dim, args.eigen_power)
- pca.train(x)
- b = faiss.vector_to_array(pca.b)
- A = faiss.vector_to_array(pca.A).reshape(pca.d_out, pca.d_in)
-
- os.makedirs(args.output, exist_ok=True)
-
- prefix = str(args.dim)
- if args.eigen_power != 0:
- prefix += f"_{args.eigen_power}"
-
- np.save(osp.join(args.output, f"{prefix}_pca_A"), A.T)
- np.save(osp.join(args.output, f"{prefix}_pca_b"), b)
-
-
-if __name__ == "__main__":
- main()
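
A short sketch of applying the PCA that the script above saves; the output directory, `--dim 512`, zero eigen-power, and the feature file name are illustrative assumptions:

```python
# Hypothetical follow-up to pca.py: project features with the saved PCA.
# Paths assume the script was run with --dim 512 --eigen-power 0 --output pca_out.
import numpy as np

A = np.load("pca_out/512_pca_A.npy")        # (d_in, 512): transposed faiss PCA matrix
b = np.load("pca_out/512_pca_b.npy")        # (512,): bias term

x = np.load("features.npy", mmap_mode="r")  # assumed (num_frames, d_in) feature array
x_pca = x @ A + b                           # projected features, (num_frames, 512)
print(x_pca.shape)
```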
diff --git a/spaces/gsaivinay/open_llm_leaderboard/src/assets/css_html_js.py b/spaces/gsaivinay/open_llm_leaderboard/src/assets/css_html_js.py
deleted file mode 100644
index 67df3a34746ed92e7d1e8ba406a7d60fa5db4bef..0000000000000000000000000000000000000000
--- a/spaces/gsaivinay/open_llm_leaderboard/src/assets/css_html_js.py
+++ /dev/null
@@ -1,111 +0,0 @@
-custom_css = """
-
-.markdown-text {
- font-size: 16px !important;
-}
-
-#models-to-add-text {
- font-size: 18px !important;
-}
-
-#citation-button span {
- font-size: 16px !important;
-}
-
-#citation-button textarea {
- font-size: 16px !important;
-}
-
-#citation-button > label > button {
- margin: 6px;
- transform: scale(1.3);
-}
-
-#leaderboard-table {
- margin-top: 15px
-}
-
-#leaderboard-table-lite {
- margin-top: 15px
-}
-
-#search-bar-table-box > div:first-child {
- background: none;
- border: none;
-}
-
-#search-bar {
- padding: 0px;
-}
-
-/* Hides the final AutoEvalColumn */
-#llm-benchmark-tab-table table td:last-child,
-#llm-benchmark-tab-table table th:last-child {
- display: none;
-}
-
-/* Limit the width of the first AutoEvalColumn so that names don't expand too much */
-table td:first-child,
-table th:first-child {
- max-width: 400px;
- overflow: auto;
- white-space: nowrap;
-}
-
-.tab-buttons button {
- font-size: 20px;
-}
-
-#scale-logo {
- border-style: none !important;
- box-shadow: none;
- display: block;
- margin-left: auto;
- margin-right: auto;
- max-width: 600px;
-}
-
-#scale-logo .download {
- display: none;
-}
-#filter_type{
- border: 0;
- padding-left: 0;
- padding-top: 0;
-}
-#filter_type label {
- display: flex;
-}
-#filter_type label > span{
- margin-top: var(--spacing-lg);
- margin-right: 0.5em;
-}
-#filter_type label > .wrap{
- width: 103px;
-}
-#filter_type label > .wrap .wrap-inner{
- padding: 2px;
-}
-#filter_type label > .wrap .wrap-inner input{
- width: 1px
-}
-#filter-columns-type{
- border:0;
- padding:0.5;
-}
-#filter-columns-size{
- border:0;
- padding:0.5;
-}
-#box-filter > .form{
- border: 0
-}
-"""
-
-get_window_url_params = """
- function(url_params) {
- const params = new URLSearchParams(window.location.search);
- url_params = Object.fromEntries(params);
- return url_params;
- }
- """
diff --git a/spaces/h2oai/wave-tour/examples/post.py b/spaces/h2oai/wave-tour/examples/post.py
deleted file mode 100644
index bad691ee5e3837729d14a0e52249aa0eb67944bf..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/post.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Post card
-# Create a post card displaying persona, image, caption and optional buttons.
-# ---
-from h2o_wave import site, ui
-
-page = site['/demo']
-
-image = 'https://images.pexels.com/photos/220453/pexels-photo-220453.jpeg?auto=compress&h=750&w=1260'
-page['example'] = ui.post_card(
- box='1 1 3 5',
- persona=ui.persona(title='John Doe', subtitle='Data Scientist', image=image, caption='caption'),
- commands=[
- ui.command(name='new', label='New', icon='Add', items=[
- ui.command(name='email', label='Email Message', icon='Mail'),
- ui.command(name='calendar', label='Calendar Event', icon='Calendar'),
- ]),
- ui.command(name='upload', label='Upload', icon='Upload'),
- ui.command(name='share', label='Share', icon='Share'),
- ui.command(name='download', label='Download', icon='Download'),
- ],
- items=[
- ui.inline(justify='end', items=[
- ui.mini_buttons([
- ui.mini_button(name='like', label='4', icon='Heart'),
- ui.mini_button(name='comment', label='2', icon='Comment'),
- ui.mini_button(name='share', label='1', icon='Share'),
- ]),
- ]),
- ui.buttons(items=[
- ui.button(name='like', label='Like'),
- ui.button(name='comment', label='Comment'),
- ui.button(name='share', label='Share'),
- ]),
- ],
- caption='''
-Lorem ipsum dolor sit amet, consectetur adipisicing elit. Quia aliquam maxime quos facere
-necessitatibus tempore eum odio, qui illum. Repellat modi dolor facilis odio ex possimus
-''',
- aux_value='2h ago',
- image='https://images.pexels.com/photos/3225517/pexels-photo-3225517.jpeg?auto=compress&cs=tinysrgb&dpr=2&h=750&w=1260' # noqa
-)
-page.save()
diff --git a/spaces/hackathon-somos-nlp-2023/PodcastNER-GPTJ/README.md b/spaces/hackathon-somos-nlp-2023/PodcastNER-GPTJ/README.md
deleted file mode 100644
index 984878abe58784da61cc2ca6d4d7febdd55d266b..0000000000000000000000000000000000000000
--- a/spaces/hackathon-somos-nlp-2023/PodcastNER-GPTJ/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: PodcastNER GPTJ
-emoji: 🌍
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/hands012/gpt-academic/request_llm/bridge_newbingfree.py b/spaces/hands012/gpt-academic/request_llm/bridge_newbingfree.py
deleted file mode 100644
index 38d2eb9bf610ef95aa5e3f571b1dc7a30a6eada1..0000000000000000000000000000000000000000
--- a/spaces/hands012/gpt-academic/request_llm/bridge_newbingfree.py
+++ /dev/null
@@ -1,243 +0,0 @@
-"""
-========================================================================
-Part 1: from EdgeGPT.py
-https://github.com/acheong08/EdgeGPT
-========================================================================
-"""
-from .edge_gpt_free import Chatbot as NewbingChatbot
-load_message = "等待NewBing响应。"
-
-"""
-========================================================================
-Part 2: subprocess worker (the invocation body)
-========================================================================
-"""
-import time
-import json
-import re
-import logging
-import asyncio
-import importlib
-import threading
-from toolbox import update_ui, get_conf, trimmed_format_exc
-from multiprocessing import Process, Pipe
-
-def preprocess_newbing_out(s):
-    pattern = r'\^(\d+)\^' # match ^number^
-    sub = lambda m: '('+m.group(1)+')' # use the captured number, wrapped in parentheses, as the replacement
-    result = re.sub(pattern, sub, s) # perform the substitution
- if '[1]' in result:
- result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
- return result
-
-def preprocess_newbing_out_simple(result):
- if '[1]' in result:
- result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
- return result
-
-class NewBingHandle(Process):
- def __init__(self):
- super().__init__(daemon=True)
- self.parent, self.child = Pipe()
- self.newbing_model = None
- self.info = ""
- self.success = True
- self.local_history = []
- self.check_dependency()
- self.start()
- self.threadLock = threading.Lock()
-
- def check_dependency(self):
- try:
- self.success = False
- import certifi, httpx, rich
- self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
- self.success = True
- except:
- self.info = "缺少的依赖,如果要使用Newbing,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_newbing.txt`安装Newbing的依赖。"
- self.success = False
-
- def ready(self):
- return self.newbing_model is not None
-
- async def async_run(self):
-        # Read the configuration
- NEWBING_STYLE, = get_conf('NEWBING_STYLE')
- from request_llm.bridge_all import model_info
- endpoint = model_info['newbing']['endpoint']
- while True:
-            # Wait for a request
- kwargs = self.child.recv()
- question=kwargs['query']
- history=kwargs['history']
- system_prompt=kwargs['system_prompt']
-
-            # Reset the conversation if needed
- if len(self.local_history) > 0 and len(history)==0:
- await self.newbing_model.reset()
- self.local_history = []
-
-            # Start building the question
- prompt = ""
- if system_prompt not in self.local_history:
- self.local_history.append(system_prompt)
- prompt += system_prompt + '\n'
-
-            # Append the conversation history
- for ab in history:
- a, b = ab
- if a not in self.local_history:
- self.local_history.append(a)
- prompt += a + '\n'
- # if b not in self.local_history:
- # self.local_history.append(b)
- # prompt += b + '\n'
-
-            # The question itself
- prompt += question
- self.local_history.append(question)
- print('question:', prompt)
-            # Submit
- async for final, response in self.newbing_model.ask_stream(
- prompt=question,
- conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
- wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
- ):
- if not final:
- print(response)
- self.child.send(str(response))
- else:
- print('-------- receive final ---------')
- self.child.send('[Finish]')
- # self.local_history.append(response)
-
-
- def run(self):
- """
-        This function runs in the child process
- """
-        # On the first run, load the parameters
- self.success = False
- self.local_history = []
- if (self.newbing_model is None) or (not self.success):
-            # Proxy settings
- proxies, = get_conf('proxies')
- if proxies is None:
- self.proxies_https = None
- else:
- self.proxies_https = proxies['https']
-
- try:
- self.newbing_model = NewbingChatbot(proxy=self.proxies_https)
- except:
- self.success = False
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
- self.child.send(f'[Local Message] 不能加载Newbing组件。{tb_str}')
- self.child.send('[Fail]')
- self.child.send('[Finish]')
- raise RuntimeError(f"不能加载Newbing组件。")
-
- self.success = True
- try:
-            # Enter the task-waiting loop
- asyncio.run(self.async_run())
- except Exception:
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
- self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
- self.child.send('[Fail]')
- self.child.send('[Finish]')
-
- def stream_chat(self, **kwargs):
- """
-        This function runs in the main process
- """
- self.threadLock.acquire()
-        self.parent.send(kwargs)    # send the request to the child process
- while True:
-            res = self.parent.recv() # wait for the next chunk of the NewBing reply
-            if res == '[Finish]':
-                break        # finished
-            elif res == '[Fail]':
-                self.success = False
-                break
-            else:
-                yield res # a chunk of the NewBing reply
- self.threadLock.release()
-
-
-"""
-========================================================================
-Part 3: unified calling interface for the main process
-========================================================================
-"""
-global newbingfree_handle
-newbingfree_handle = None
-
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
- """
-        Multi-threaded method
-        See request_llm/bridge_all.py for a description of this function
- """
- global newbingfree_handle
- if (newbingfree_handle is None) or (not newbingfree_handle.success):
- newbingfree_handle = NewBingHandle()
- if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
- if not newbingfree_handle.success:
- error = newbingfree_handle.info
- newbingfree_handle = None
- raise RuntimeError(error)
-
-    # There is no sys_prompt interface, so fold the prompt into the history
- history_feedin = []
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
-    watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
- response = ""
- if len(observe_window) >= 1: observe_window[0] = "[Local Message]: 等待NewBing响应中 ..."
- for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
- if len(observe_window) >= 2:
- if (time.time()-observe_window[1]) > watch_dog_patience:
- raise RuntimeError("程序终止。")
- return preprocess_newbing_out_simple(response)
-
-def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
- """
-    Single-threaded method
-    See request_llm/bridge_all.py for a description of this function
- """
- chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ..."))
-
- global newbingfree_handle
- if (newbingfree_handle is None) or (not newbingfree_handle.success):
- newbingfree_handle = NewBingHandle()
- chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
- yield from update_ui(chatbot=chatbot, history=[])
- if not newbingfree_handle.success:
- newbingfree_handle = None
- return
-
- if additional_fn is not None:
- import core_functional
-        importlib.reload(core_functional)    # hot-reload the prompt definitions
- core_functional = core_functional.get_core_functions()
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-
- history_feedin = []
- for i in range(len(history)//2):
- history_feedin.append([history[2*i], history[2*i+1]] )
-
- chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...")
- response = "[Local Message]: 等待NewBing响应中 ..."
- yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
- for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
- chatbot[-1] = (inputs, preprocess_newbing_out(response))
- yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
- if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常,请刷新界面重试 ..."
- history.extend([inputs, response])
- logging.info(f'[raw_input] {inputs}')
- logging.info(f'[response] {response}')
- yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
-
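
A tiny standalone check of the citation rewriting performed by `preprocess_newbing_out` above; the sample reply text is made up and only the regex behaviour is demonstrated:

```python
# Standalone illustration of the ^n^ -> (n) rewrite used by preprocess_newbing_out.
# The sample reply is invented; only the substitution pattern is being demonstrated.
import re

sample = "NewBing answers here^1^ with a second source^2^.\n[1]: https://example.com\n[2]: https://example.org"
rewritten = re.sub(r'\^(\d+)\^', lambda m: '(' + m.group(1) + ')', sample)
print(rewritten)   # "NewBing answers here(1) with a second source(2). ..." plus the [n] reference lines
```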
diff --git a/spaces/hanstyle/tts/face_detection/detection/core.py b/spaces/hanstyle/tts/face_detection/detection/core.py
deleted file mode 100644
index 0f8275e8e53143f66298f75f0517c234a68778cd..0000000000000000000000000000000000000000
--- a/spaces/hanstyle/tts/face_detection/detection/core.py
+++ /dev/null
@@ -1,130 +0,0 @@
-import logging
-import glob
-from tqdm import tqdm
-import numpy as np
-import torch
-import cv2
-
-
-class FaceDetector(object):
- """An abstract class representing a face detector.
-
- Any other face detection implementation must subclass it. All subclasses
- must implement ``detect_from_image``, that return a list of detected
- bounding boxes. Optionally, for speed considerations detect from path is
- recommended.
- """
-
- def __init__(self, device, verbose):
- self.device = device
- self.verbose = verbose
-
- if verbose:
- if 'cpu' in device:
- logger = logging.getLogger(__name__)
- logger.warning("Detection running on CPU, this may be potentially slow.")
-
- if 'cpu' not in device and 'cuda' not in device:
- if verbose:
- logger.error("Expected values for device are: {cpu, cuda} but got: %s", device)
- raise ValueError
-
- def detect_from_image(self, tensor_or_path):
- """Detects faces in a given image.
-
- This function detects the faces present in a provided BGR(usually)
- image. The input can be either the image itself or the path to it.
-
- Arguments:
- tensor_or_path {numpy.ndarray, torch.tensor or string} -- the path
- to an image or the image itself.
-
- Example::
-
- >>> path_to_image = 'data/image_01.jpg'
- ... detected_faces = detect_from_image(path_to_image)
- [A list of bounding boxes (x1, y1, x2, y2)]
- >>> image = cv2.imread(path_to_image)
- ... detected_faces = detect_from_image(image)
- [A list of bounding boxes (x1, y1, x2, y2)]
-
- """
- raise NotImplementedError
-
- def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True):
- """Detects faces from all the images present in a given directory.
-
- Arguments:
- path {string} -- a string containing a path that points to the folder containing the images
-
- Keyword Arguments:
-            extensions {list} -- list of strings containing the extensions to be
-            considered, in the following format: ``.extension_name`` (default:
-            {['.jpg', '.png']}) recursive {bool} -- whether to scan the
-            folder recursively (default: {False}) show_progress_bar {bool} --
-            display a progress bar (default: {True})
-
- Example:
- >>> directory = 'data'
- ... detected_faces = detect_from_directory(directory)
- {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}
-
- """
- if self.verbose:
- logger = logging.getLogger(__name__)
-
- if len(extensions) == 0:
- if self.verbose:
- logger.error("Expected at list one extension, but none was received.")
- raise ValueError
-
- if self.verbose:
- logger.info("Constructing the list of images.")
- additional_pattern = '/**/*' if recursive else '/*'
- files = []
- for extension in extensions:
- files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive))
-
- if self.verbose:
- logger.info("Finished searching for images. %s images found", len(files))
- logger.info("Preparing to run the detection.")
-
- predictions = {}
- for image_path in tqdm(files, disable=not show_progress_bar):
- if self.verbose:
- logger.info("Running the face detector on image: %s", image_path)
- predictions[image_path] = self.detect_from_image(image_path)
-
- if self.verbose:
- logger.info("The detector was successfully run on all %s images", len(files))
-
- return predictions
-
- @property
- def reference_scale(self):
- raise NotImplementedError
-
- @property
- def reference_x_shift(self):
- raise NotImplementedError
-
- @property
- def reference_y_shift(self):
- raise NotImplementedError
-
- @staticmethod
- def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
- """Convert path (represented as a string) or torch.tensor to a numpy.ndarray
-
- Arguments:
- tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
- """
- if isinstance(tensor_or_path, str):
- return cv2.imread(tensor_or_path) if not rgb else cv2.imread(tensor_or_path)[..., ::-1]
- elif torch.is_tensor(tensor_or_path):
- # Call cpu in case its coming from cuda
- return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
- elif isinstance(tensor_or_path, np.ndarray):
- return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
- else:
- raise TypeError
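
A minimal sketch of the subclassing contract that the abstract `FaceDetector` above expects; the dummy detector and the import path are illustrative assumptions:

```python
# Hypothetical FaceDetector subclass; returns one fake box covering the whole frame.
# The import path assumes the package layout shown in this diff.
import numpy as np
from face_detection.detection.core import FaceDetector


class DummyDetector(FaceDetector):
    def __init__(self, device='cpu', verbose=False):
        super().__init__(device, verbose)

    def detect_from_image(self, tensor_or_path):
        image = self.tensor_or_path_to_ndarray(tensor_or_path)
        h, w = image.shape[:2]
        return [(0, 0, w, h)]   # (x1, y1, x2, y2) for the single "detected" face


frame = np.zeros((480, 640, 3), dtype=np.uint8)
print(DummyDetector().detect_from_image(frame))   # [(0, 0, 640, 480)]
```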
diff --git a/spaces/harmdevries/bigcode_planning/app.py b/spaces/harmdevries/bigcode_planning/app.py
deleted file mode 100644
index 852d130d502d1e2733b1f03f23ad9ee76029cb45..0000000000000000000000000000000000000000
--- a/spaces/harmdevries/bigcode_planning/app.py
+++ /dev/null
@@ -1,178 +0,0 @@
-from github import Github
-from github import Auth
-import os
-import streamlit as st
-import datetime
-import plotly.figure_factory as ff
-import plotly.graph_objects as go
-import pandas as pd
-import math
-import copy
-
-st.set_page_config(layout="wide")
-name2repo = [("Dataset", "bigcode-project/bigcode-dataset"),
- ("Training", "bigcode-project/Megatron-LM"),
- # ("Evaluation", "bigcode-project/bigcode-evaluation-harness"),
- # ("Inference", "bigcode-project/bigcode-inference-benchmark"),
- # ("Legal", "bigcode-project/admin"),
- # ("Demo", "bigcode-project/bigcode-demo")
- ]
-
-name2num_milestones = dict()
-github_key = os.environ['github']
-auth = Auth.Token(github_key)
-
-g = Github(auth=auth)
-df = list()
-all_status = list()
-bad_milestones = list()
-
-
-for name, repo_name in name2repo:
- repo = g.get_repo(repo_name)
- num_milestones = 0
- for milestone in repo.get_milestones():
- try:
- num_milestones += 1
- desc = dict()
- for line in milestone.description.split('\n'):
- tmp = line.split(":")
- if len(tmp) > 1:
- key = tmp[0].lower()
- value = tmp[1].strip()
- if key == 'status':
- value = value.lower()
- desc[key] = value
- task_name = f"""{milestone.title} """
- if desc['status'] not in all_status:
- all_status.append(desc['status'])
- df.append(dict(Task=task_name,
- Start=desc['start date'],
- Finish=milestone.due_on.strftime('%Y-%m-%d'),
- Resource=desc['status'],
- Description=desc['leader']))
- except:
- num_milestones -= 1
- task_name = f"""{milestone.title} """
- bad_milestones.append(task_name)
- name2num_milestones[name] = num_milestones
-
-copy_df = copy.deepcopy(df)
-colors = {'not started': 'rgb(217, 217, 217)',
- 'in progress': 'rgb(147, 196, 125)',
- 'high priority - on track': 'rgb(234, 153, 153)',
- 'high priority - help needed': 'rgb(255, 0, 0)',
- 'completed': 'rgb(56, 118, 29)'}
-
-if len(all_status) == 0:
- task_name = "None"
-
-for key in colors.keys():
- if key not in all_status:
- copy_df.append(dict(Task=task_name,
- Start='2023-04-09',
- Finish='2023-04-09',
- Resource=key))
-
-fig = ff.create_gantt(copy_df, colors=colors,
- index_col='Resource',
- show_colorbar=True,
- show_hover_fill=True,
- group_tasks=True,
- title="BigCode planning")
-
-fig.update_xaxes(ticks= "outside",
- ticklabelmode= "period",
- tickformat="%b",
- tickcolor= "black",
- ticklen=10,
- range=[datetime.datetime(2023, 8, 25),
- datetime.datetime(2023, 11, 16)],
- minor=dict(
- ticklen=4,
- dtick=7*24*60*60*1000,
- tick0="2023-09-01",
- griddash='dot',
- gridcolor='white')
- )
-
-fig.update_layout(margin=go.layout.Margin(l=250))
-fig.layout.xaxis.rangeselector = None # remove range selector on top
-
-# Add today line
-fig.add_vline(x=datetime.datetime.now().strftime('%Y-%m-%d'), line_width=3, line_dash="dash", line_color="black")
-fig.add_annotation({
- "x": datetime.datetime.now().strftime('%Y-%m-%d'),
- "y": fig.layout.yaxis['range'][1],
- "yshift": 10,
- "text": "Today",
- "showarrow": False,
- })
-
-# Add The Stack 1.2
-fig.add_vline(x='2023-03-05', line_width=3, line_dash="dash", line_color="red")
-fig.add_annotation({
- "x": '2023-03-05',
- "y": fig.layout.yaxis['range'][1],
- "yshift": 10,
- "text": "The Stack 1.2",
- "showarrow": False,
- })
-
-# Add PII
-fig.add_vline(x='2023-10-01', line_width=3, line_dash="dash", line_color="red")
-fig.add_annotation({
- "x": '2023-10-01',
- "y": fig.layout.yaxis['range'][1],
- "yshift": 10,
- "text": "Model training",
- "showarrow": False,
- })
-
-# Add release line
-fig.add_vline(x='2023-10-31', line_width=3, line_dash="dash", line_color="red")
-fig.add_annotation({
- "x": '2023-10-31',
- "y": fig.layout.yaxis['range'][1],
- "yshift": 10,
- "text": "Model release",
- "showarrow": False,
- })
-
-# Add point of contacts
-fig.add_annotation({
- "x": "2023-08-25",
- "y": fig.layout.yaxis['range'][1],
- "yshift": 10,
- "xanchor": "left",
- "text": "Contact",
- "showarrow": False})
-for i, entry in enumerate(df[::-1]):
- fig.add_annotation(x='2023-08-25', y=i,
- text=entry['Description'],
- showarrow=False,
- xanchor="left",
- xref="x")
-
-# Add working group annotations
-fig.add_hline(y=-0.5, line_width=1, line_color="grey")
-height = -0.5
-for name, _ in name2repo[::-1]:
- if name2num_milestones[name] > 0:
- fig.add_annotation(x='2023-11-13', y=height + name2num_milestones[name]/2,
- text=name,
- showarrow=False,
- align="center",
- textangle=-90)
- height += name2num_milestones[name]
- fig.add_hline(y=height, line_width=1, line_color="grey")
-
-st.plotly_chart(fig, use_container_width=True)
-
-if len(bad_milestones):
- with st.expander("Bad Milestones"):
- for bms in bad_milestones:
- st.markdown(bms + "\n\n", unsafe_allow_html=True)
-
-if st.button("Refresh"):
- st.experimental_rerun()
\ No newline at end of file
diff --git a/spaces/hrnph/rvc-models/vc_infer_pipeline.py b/spaces/hrnph/rvc-models/vc_infer_pipeline.py
deleted file mode 100644
index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000
--- a/spaces/hrnph/rvc-models/vc_infer_pipeline.py
+++ /dev/null
@@ -1,306 +0,0 @@
-import numpy as np, parselmouth, torch, pdb
-from time import time as ttime
-import torch.nn.functional as F
-from config import x_pad, x_query, x_center, x_max
-import scipy.signal as signal
-import pyworld, os, traceback, faiss
-from scipy import signal
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-
-class VC(object):
- def __init__(self, tgt_sr, device, is_half):
-        self.sr = 16000  # HuBERT input sample rate
-        self.window = 160  # samples per frame
-        self.t_pad = self.sr * x_pad  # padding before and after each segment
-        self.t_pad_tgt = tgt_sr * x_pad
-        self.t_pad2 = self.t_pad * 2
-        self.t_query = self.sr * x_query  # search window around each candidate cut point
-        self.t_center = self.sr * x_center  # spacing of cut-point queries
-        self.t_max = self.sr * x_max  # duration threshold below which no cut-point search is done
- self.device = device
- self.is_half = is_half
-
- def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None):
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- f0, t = pyworld.harvest(
- x.astype(np.double),
- fs=self.sr,
- f0_ceil=f0_max,
- f0_floor=f0_min,
- frame_period=10,
- )
- f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
- f0 = signal.medfilt(f0, 3)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # number of f0 points per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
- f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy 1.24+; use the builtin int
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9, # layer 9
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0])
-
- if (
- isinstance(index, type(None)) == False
- and isinstance(big_npy, type(None)) == False
- and index_rate != 0
- ):
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
- _, I = index.search(npy, 1)
- npy = big_npy[I.squeeze()]
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch != None and pitchf != None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
- if pitch != None and pitchf != None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
- .data.cpu()
- .float()
- .numpy()
- .astype(np.int16)
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- times,
- f0_up_key,
- f0_method,
- file_index,
- file_big_npy,
- index_rate,
- if_f0,
- f0_file=None,
- ):
- if (
- file_big_npy != ""
- and file_index != ""
- and os.path.exists(file_big_npy) == True
- and os.path.exists(file_index) == True
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- big_npy = np.load(file_big_npy)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- print("Feature retrieval library doesn't exist or ratio is 0")
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name") == True:
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0)
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
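The chunking logic in `pipeline` above is worth spelling out: long inputs are cut near multiples of `t_center`, and the exact cut sample is the point near each candidate where a sliding-window sum of the signal is smallest, so splits land in quiet regions rather than mid-phoneme. A minimal standalone sketch of that search (the `window`, `t_center`, and `t_query` values here are illustrative, not the class defaults):

```python
import numpy as np

def find_quiet_split_points(audio, window=160, t_center=48000, t_query=8000):
    """Indices near multiples of t_center where the local windowed sum is smallest."""
    audio_pad = np.pad(audio, (window // 2, window // 2), mode="reflect")
    audio_sum = np.zeros_like(audio)
    for i in range(window):
        # accumulate a length-`window` sliding sum over the padded signal
        audio_sum += audio_pad[i : i + audio.shape[0]]
    split_points = []
    for t in range(t_center, audio.shape[0], t_center):
        local = np.abs(audio_sum[t - t_query : t + t_query])
        split_points.append(t - t_query + int(local.argmin()))
    return split_points

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    audio = rng.standard_normal(200_000).astype(np.float32)
    print(find_quiet_split_points(audio))
```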
diff --git a/spaces/huaiji3y/bingo-Public/src/components/chat-suggestions.tsx b/spaces/huaiji3y/bingo-Public/src/components/chat-suggestions.tsx
deleted file mode 100644
index 48aec7c84e4407c482acdfcc7857fb0f660d12d3..0000000000000000000000000000000000000000
--- a/spaces/huaiji3y/bingo-Public/src/components/chat-suggestions.tsx
+++ /dev/null
@@ -1,45 +0,0 @@
-import React, { useMemo } from 'react'
-import Image from 'next/image'
-import HelpIcon from '@/assets/images/help.svg'
-import { SuggestedResponse } from '@/lib/bots/bing/types'
-import { useBing } from '@/lib/hooks/use-bing'
-import { atom, useAtom } from 'jotai'
-
-type Suggestions = SuggestedResponse[]
-const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text }))
-const suggestionsAtom = atom<Suggestions>([])
-
-type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick<ReturnType<typeof useBing>, 'setInput'> & { suggestions?: Suggestions }
-
-export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) {
- const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom)
- const toggleSuggestions = (() => {
- if (currentSuggestions === helpSuggestions) {
- setSuggestions(suggestions)
- } else {
- setSuggestions(helpSuggestions)
- }
- })
-
- useMemo(() => {
- setSuggestions(suggestions)
- window.scrollBy(0, 2000)
- }, [suggestions.length, setSuggestions])
-
-  return currentSuggestions?.length ? (
-    <div>
-      <button type="button" onClick={toggleSuggestions}>
-        <Image alt="help" src={HelpIcon} width={24} height={24} />
-      </button>
-      {
-        currentSuggestions.map(suggestion => (
-          <button key={suggestion.text} type="button" onClick={() => setInput(suggestion.text)}>
-            {suggestion.text}
-          </button>
-        ))
-      }
-    </div>
-  ) : null
-}
diff --git a/spaces/hugggof/vampnet/vampnet/modules/transformer.py b/spaces/hugggof/vampnet/vampnet/modules/transformer.py
deleted file mode 100644
index 0858644d363d50c9395b2fbf5177f7ad5659114b..0000000000000000000000000000000000000000
--- a/spaces/hugggof/vampnet/vampnet/modules/transformer.py
+++ /dev/null
@@ -1,953 +0,0 @@
-import math
-import logging
-from typing import Optional, Tuple, Union
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from einops import rearrange
-import loralib as lora
-import audiotools as at
-
-from .activations import get_activation
-from .layers import CodebookEmbedding
-from .layers import FiLM
-from .layers import SequentialWithFiLM
-from .layers import WNConv1d
-from ..util import scalar_to_batch_tensor, codebook_flatten, codebook_unflatten
-from ..mask import _gamma
-
-LORA_R = 8
-
-# def log(t, eps=1e-20):
-# return torch.log(t + eps)
-
-
-def gumbel_noise_like(t):
- noise = torch.zeros_like(t).uniform_(1e-20, 1)
- return -torch.log(-torch.log(noise))
-
-
-def gumbel_sample(t, temperature=1.0, dim=-1):
- return ((t / max(temperature, 1e-10)) + gumbel_noise_like(t)).argmax(dim=dim)
-
-
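An aside on `gumbel_sample` above: the Gumbel-max trick it implements draws from the same categorical distribution as `torch.multinomial` over the softmax of the logits. A small empirical check, duplicating the two helpers so the snippet stands alone:

```python
import torch

def gumbel_noise_like(t):
    noise = torch.zeros_like(t).uniform_(1e-20, 1)
    return -torch.log(-torch.log(noise))

def gumbel_sample(t, temperature=1.0, dim=-1):
    return ((t / max(temperature, 1e-10)) + gumbel_noise_like(t)).argmax(dim=dim)

if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.tensor([2.0, 0.5, -1.0])
    n = 100_000
    gumbel_counts = torch.bincount(gumbel_sample(logits.expand(n, -1)), minlength=3)
    multinomial_counts = torch.bincount(
        torch.multinomial(logits.softmax(-1), n, replacement=True), minlength=3
    )
    print(gumbel_counts / n)       # both empirical distributions should be
    print(multinomial_counts / n)  # close to softmax(logits)
```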
-class RMSNorm(nn.Module):
- def __init__(self, hidden_size: int, eps=1e-6):
- super().__init__()
- self.weight = nn.Parameter(torch.ones(hidden_size))
- self.var_eps = eps
-
- def forward(self, x):
- """Returns root mean square normalized version of input `x`
- # T5 uses a layer_norm which only scales and doesn't shift, which is also known
- # as Root Mean Square Layer Normalization https://arxiv.org/abs/1910.07467
- # thus variance is calculated w/o mean and there is no bias
- Parameters
- ----------
- x : Tensor[B x T x D]
- Returns
- -------
- Tensor[B x T x D]
- """
- var = x.pow(2).mean(-1, keepdim=True)
- x = x * torch.rsqrt(var + self.var_eps)
-
- return self.weight * x
-
-
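As the `RMSNorm` docstring notes, each feature vector is only divided by its root mean square (no mean subtraction, no bias) and then scaled by the learned weight. A tiny sanity check of that computation, written independently of the class:

```python
import torch

x = torch.randn(2, 5, 8)
weight = torch.ones(8)  # the learned scale; initialized to ones in RMSNorm
eps = 1e-6

# divide by the RMS of each feature vector, then scale
manual = weight * (x / x.pow(2).mean(-1, keepdim=True).add(eps).sqrt())
# the rsqrt form used in RMSNorm.forward
via_rsqrt = weight * (x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps))

print(torch.allclose(manual, via_rsqrt, atol=1e-6))  # True
```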
-class FeedForward(nn.Module):
- def __init__(
- self, d_model: int = 512, dropout: float = 0.1, activation: str = "geglu"
- ):
- super().__init__()
- factor = 2 if activation == "geglu" else 1
- self.w_1 = lora.Linear(d_model, d_model * 4, bias=False, r=LORA_R)
- self.w_2 = lora.Linear(d_model * 4 // factor, d_model, bias=False, r=LORA_R)
- self.drop = nn.Dropout(dropout)
- self.act = get_activation(activation)()
-
- def forward(self, x):
- """Computes position-wise feed-forward layer
- Parameters
- ----------
- x : Tensor[B x T x D]
- Returns
- -------
- Tensor[B x T x D]
- """
- x = self.w_1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.w_2(x)
- return x
-
-
-class MultiHeadRelativeAttention(nn.Module):
- def __init__(
- self,
- n_head: int = 8,
- d_model: int = 512,
- dropout: float = 0.1,
- bidirectional: bool = True,
- has_relative_attention_bias: bool = True,
- attention_num_buckets: int = 32,
- attention_max_distance: int = 128,
- ):
- super().__init__()
- d_head = d_model // n_head
- self.n_head = n_head
- self.d_head = d_head
- self.bidirectional = bidirectional
- self.has_relative_attention_bias = has_relative_attention_bias
- self.attention_num_buckets = attention_num_buckets
- self.attention_max_distance = attention_max_distance
-
- # Create linear query, key, value projections
- self.w_qs = lora.Linear(d_model, d_model, bias=False, r=LORA_R)
- self.w_ks = nn.Linear(d_model, d_model, bias=False)
- self.w_vs = lora.Linear(d_model, d_model, bias=False, r=LORA_R)
-
- # Create linear final output projection
- self.fc = lora.Linear(d_model, d_model, bias=False, r=LORA_R)
-
- # Dropout for attention output weights
- self.dropout = nn.Dropout(dropout)
-
- # Create relative positional embeddings (if turned on)
- if has_relative_attention_bias:
- self.relative_attention_bias = nn.Embedding(attention_num_buckets, n_head)
-
- def _relative_position_bucket(self, relative_position):
- """Converts unbounded relative position into bounded set of buckets
- with half "exact" buckets (1 position = 1 bucket) and half "log-spaced"
- buckets
- Parameters
- ----------
- relative_position : Tensor[T_q x T_kv]
- Relative positions between queries and key_value items
- Returns
- -------
- Tensor[T_q x T_kv]
- Input relative positions converted into buckets
- """
- relative_buckets = 0
- num_buckets = self.attention_num_buckets
- max_distance = self.attention_max_distance
-
- # Convert relative position for (-inf, inf) to [0, inf]
- # Negative relative positions correspond to past
- # Positive relative positions correspond to future
- if self.bidirectional:
- # use half buckets for each side (past / future)
- num_buckets //= 2
-
- # Shift the positions by `num_buckets` to wrap around
- # negative positions
- relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
- relative_position = torch.abs(relative_position)
- else:
- # If not bidirectional, ignore positive positions and wrap
- # negative positions to positive
- relative_position = -torch.min(
- relative_position, torch.zeros_like(relative_position)
- )
-
- # Half of the buckets are for exact increments in positions
- max_exact = num_buckets // 2
- is_small = relative_position < max_exact
-
- # The other half of the buckets are for logarithmically bigger bins in
- # positions up to `max_distance`
- relative_postion_if_large = max_exact + (
- torch.log(relative_position.float() / max_exact)
- / math.log(max_distance / max_exact)
- * (num_buckets - max_exact)
- ).to(torch.long)
-
- # Clip the max relative position to `num_buckets - 1`
- relative_postion_if_large = torch.min(
- relative_postion_if_large,
- torch.full_like(relative_postion_if_large, num_buckets - 1),
- )
-
- # Choose relative buckets based on small or large positions
- relative_buckets += torch.where(
- is_small, relative_position, relative_postion_if_large
- )
-
- return relative_buckets
-
- def compute_bias(self, query_length, key_length):
- """Computes a position bias scalar for each index in query_length x key_length
- Parameters
- ----------
- query_length : int
- key_length : int
- Returns
- -------
- Tensor[heads x 1 x T_q x T_kv]
- Position bias to be applied on attention logits
- """
-
- query_position = torch.arange(query_length, dtype=torch.long)[:, None]
- key_position = torch.arange(key_length, dtype=torch.long)[None, :]
- relative_position = key_position - query_position
-
- # Convert relative position to buckets
- relative_position_bucket = self._relative_position_bucket(relative_position)
- relative_position_bucket = relative_position_bucket.to(
- self.relative_attention_bias.weight.device
- )
-
- # Index attention bias values
- values = self.relative_attention_bias(relative_position_bucket)
- values = rearrange(values, "q k h -> h 1 q k")
-
- return values
-
- def forward(self, q, k, v, mask=None, position_bias=None):
- """Computes attention over (keys, values) for every timestep in query
- Parameters
- ----------
- q : Tensor[B x T_q x d_model]
- Query vectors
- k : Tensor[B x T_kv x d_model]
- Key vectors to compute attention over
- v : Tensor[B x T_kv x d_model]
- Value vectors corresponding to the keys
- mask : Tensor[B x T_q x T_kv], optional
- position_bias: Tensor[head x 1 x T_q x T_kv]
- Returns
- -------
- Tensor[B x T_q x d_model]
- Outputs after attending (key, value) using queries
- """
- # Compute query, key, value projections
- q = rearrange(self.w_qs(q), "b l (head k) -> head b l k", head=self.n_head)
- k = rearrange(self.w_ks(k), "b t (head k) -> head b t k", head=self.n_head)
- v = rearrange(self.w_vs(v), "b t (head k) -> head b t k", head=self.n_head)
-
- # Compute attention matrix
- attn = torch.einsum("hblk,hbtk->hblt", [q, k]) / np.sqrt(q.shape[-1])
-
- # Add relative position bias to attention scores
- if position_bias is None:
- if self.has_relative_attention_bias:
- position_bias = self.compute_bias(q.size(-2), k.size(-2))
- else:
- position_bias = torch.zeros_like(attn)
- attn += position_bias
-
- # Apply mask to attention scores to prevent looking up invalid locations
- if mask is not None:
- attn = attn.masked_fill(mask[None] == 0, -1e9)
-
- # Normalize attention scores and add dropout
- attn = torch.softmax(attn, dim=3)
- attn = self.dropout(attn)
-
- # Compute attended outputs (product of attention matrix and values)
- output = torch.einsum("hblt,hbtv->hblv", [attn, v])
- output = rearrange(output, "head b l v -> b l (head v)")
- output = self.fc(output)
-
- return output, position_bias
-
-
-class TransformerLayer(nn.Module):
- def __init__(
- self,
- d_model: int = 512,
- d_cond: int = 64,
- n_heads: int = 8,
- bidirectional: bool = True,
- is_decoder: bool = False,
- has_relative_attention_bias: bool = False,
- flash_attn: bool = False,
- dropout: float = 0.1,
- ):
- super().__init__()
- # Store args
- self.is_decoder = is_decoder
-
- # Create self-attention layer
- self.norm_1 = RMSNorm(d_model)
- self.film_1 = FiLM(d_cond, d_model)
- self.flash_attn = flash_attn
-
- if flash_attn:
- from flash_attn.flash_attention import FlashMHA
- self.self_attn = FlashMHA(
- embed_dim=d_model,
- num_heads=n_heads,
- attention_dropout=dropout,
- causal=False,
- )
- else:
- self.self_attn = MultiHeadRelativeAttention(
- n_heads, d_model, dropout, bidirectional, has_relative_attention_bias
- )
-
- # (Optional) Create cross-attention layer
- if is_decoder:
- self.norm_2 = RMSNorm(d_model)
- self.film_2 = FiLM(d_cond, d_model)
- self.cross_attn = MultiHeadRelativeAttention(
- n_heads,
- d_model,
- dropout,
- bidirectional=True,
- has_relative_attention_bias=False,
- )
-
- # Create last feed-forward layer
- self.norm_3 = RMSNorm(d_model)
- self.film_3 = FiLM(d_cond, d_model)
- self.feed_forward = FeedForward(d_model=d_model, dropout=dropout)
-
- # Create dropout
- self.dropout = nn.Dropout(dropout)
-
- def forward(
- self,
- x,
- x_mask,
- cond,
- src=None,
- src_mask=None,
- position_bias=None,
- encoder_decoder_position_bias=None,
- ):
- """Computes one transformer layer consisting of self attention, (op) cross attention
- and feedforward layer
- Parameters
- ----------
- x : Tensor[B x T_q x D]
- x_mask : Tensor[B x T_q]
- src : Tensor[B x T_kv x D], optional
- src_mask : Tensor[B x T_kv x D], optional
- position_bias : Tensor[heads x B x T_q x T_q], optional
- Relative position bias for self attention layer
- encoder_decoder_position_bias : Tensor[heads x B x T_q x T_kv], optional
- Relative position bias for cross attention layer
- Returns
- -------
- Tensor[B x T_q x D]
- """
- y = self.norm_1(x)
- y = self.film_1(y.permute(0, 2, 1), cond).permute(0, 2, 1)
- if self.flash_attn:
- with torch.autocast(y.device.type, dtype=torch.bfloat16):
- y = self.self_attn(y)[0]
- else:
- y, position_bias = self.self_attn(y, y, y, x_mask, position_bias)
- x = x + self.dropout(y)
-
- if self.is_decoder:
- y = self.norm_2(x)
- y = self.film_2(y.permute(0, 2, 1), cond).permute(0, 2, 1)
- y, encoder_decoder_position_bias = self.cross_attn(
- y, src, src, src_mask, encoder_decoder_position_bias
- )
- x = x + self.dropout(y)
-
- y = self.norm_3(x)
- y = self.film_3(
- y.permute(
- 0,
- 2,
- 1,
- ),
- cond,
- ).permute(0, 2, 1)
- y = self.feed_forward(y)
- x = x + self.dropout(y)
-
- return x, position_bias, encoder_decoder_position_bias
-
-
-class TransformerStack(nn.Module):
- def __init__(
- self,
- d_model: int = 512,
- d_cond: int = 64,
- n_heads: int = 8,
- n_layers: int = 8,
- last_layer: bool = True,
- bidirectional: bool = True,
- flash_attn: bool = False,
- is_decoder: bool = False,
- dropout: float = 0.1,
- ):
- super().__init__()
- # Store args
- self.bidirectional = bidirectional
- self.is_decoder = is_decoder
-
- # Create transformer layers
- # In T5, relative attention bias is shared by all layers in the stack
- self.layers = nn.ModuleList(
- [
- TransformerLayer(
- d_model,
- d_cond,
- n_heads,
- bidirectional,
- is_decoder,
- has_relative_attention_bias=True if (i == 0) else False,
- flash_attn=flash_attn,
- dropout=dropout,
- )
- for i in range(n_layers)
- ]
- )
-
- # Perform last normalization
- self.norm = RMSNorm(d_model) if last_layer else None
-
- def subsequent_mask(self, size):
- return torch.ones(1, size, size).tril().bool()
-
- def forward(self, x, x_mask, cond=None, src=None, src_mask=None,
- return_activations: bool = False
- ):
- """Computes a full transformer stack
- Parameters
- ----------
- x : Tensor[B x T_q x D]
- x_mask : Tensor[B x T_q]
- src : Tensor[B x T_kv x D], optional
- src_mask : Tensor[B x T_kv], optional
- Returns
- -------
- Tensor[B x T_q x D]
- """
-
- # Convert `src_mask` to (B x T_q x T_kv) shape for cross attention masking
- if self.is_decoder:
- src_mask = x_mask.unsqueeze(-1) * src_mask.unsqueeze(-2)
-
- # Convert `x_mask` to (B x T_q x T_q) shape for self attention masking
- x_mask = x_mask.unsqueeze(-2)
- if not self.bidirectional:
- x_mask = x_mask * self.subsequent_mask(x.size(1)).to(x_mask.device)
-
- # Initialize position biases
- position_bias = None
- encoder_decoder_position_bias = None
-
- # Compute transformer layers
- if return_activations:
- activations = []
- for layer in self.layers:
- x, position_bias, encoder_decoder_position_bias = layer(
- x=x,
- x_mask=x_mask,
- cond=cond,
- src=src,
- src_mask=src_mask,
- position_bias=position_bias,
- encoder_decoder_position_bias=encoder_decoder_position_bias,
- )
- if return_activations:
- activations.append(x.detach())
-
-
- out = self.norm(x) if self.norm is not None else x
- if return_activations:
- return out, torch.stack(activations)
- else:
- return out
-
-
-class VampNet(at.ml.BaseModel):
- def __init__(
- self,
- n_heads: int = 20,
- n_layers: int = 16,
- r_cond_dim: int = 0,
- n_codebooks: int = 9,
- n_conditioning_codebooks: int = 0,
- latent_dim: int = 8,
- embedding_dim: int = 1280,
- vocab_size: int = 1024,
- flash_attn: bool = True,
- noise_mode: str = "mask",
- dropout: float = 0.1
- ):
- super().__init__()
- assert r_cond_dim == 0, f"r_cond_dim must be 0 (not supported), but got {r_cond_dim}"
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.r_cond_dim = r_cond_dim
- self.n_codebooks = n_codebooks
- self.n_conditioning_codebooks = n_conditioning_codebooks
- self.embedding_dim = embedding_dim
- self.vocab_size = vocab_size
- self.latent_dim = latent_dim
- self.flash_attn = flash_attn
- self.noise_mode = noise_mode
-
- assert self.noise_mode == "mask", "deprecated"
-
- self.embedding = CodebookEmbedding(
- latent_dim=latent_dim,
- n_codebooks=n_codebooks,
- vocab_size=vocab_size,
- emb_dim=embedding_dim,
- special_tokens=["MASK"],
- )
- self.mask_token = self.embedding.special_idxs["MASK"]
-
- self.transformer = TransformerStack(
- d_model=embedding_dim,
- d_cond=r_cond_dim,
- n_heads=n_heads,
- n_layers=n_layers,
- last_layer=True,
- bidirectional=True,
- flash_attn=flash_attn,
- is_decoder=False,
- dropout=dropout,
- )
-
- # Add final conv layer
- self.n_predict_codebooks = n_codebooks - n_conditioning_codebooks
- self.classifier = SequentialWithFiLM(
- WNConv1d(
- embedding_dim,
- vocab_size * self.n_predict_codebooks,
- kernel_size=1,
- padding="same",
- # groups=self.n_predict_codebooks,
- ),
- )
-
- def forward(self, x, return_activations: bool = False):
- x = self.embedding(x)
- x_mask = torch.ones_like(x, dtype=torch.bool)[:, :1, :].squeeze(1)
-
- x = rearrange(x, "b d n -> b n d")
- out = self.transformer(x=x, x_mask=x_mask, return_activations=return_activations)
- if return_activations:
- out, activations = out
-
- out = rearrange(out, "b n d -> b d n")
-
- out = self.classifier(out, None) # no cond here!
-
- out = rearrange(out, "b (p c) t -> b p (t c)", c=self.n_predict_codebooks)
-
- if return_activations:
- return out, activations
- else:
- return out
-
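One detail of `forward` above that is easy to miss: the classifier emits `vocab_size * n_predict_codebooks` channels, and the final `rearrange` folds the codebook axis into the time axis so downstream code sees a single flattened token sequence. A toy-shaped illustration (sizes are arbitrary):

```python
import torch
from einops import rearrange

vocab_size, n_predict_codebooks, T = 4, 3, 5
out = torch.randn(2, vocab_size * n_predict_codebooks, T)           # (b, p*c, t)
out = rearrange(out, "b (p c) t -> b p (t c)", c=n_predict_codebooks)
print(out.shape)  # torch.Size([2, 4, 15]): vocab logits over the flattened (time x codebook) axis
```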
- def r_embed(self, r, max_positions=10000):
- if self.r_cond_dim > 0:
- dtype = r.dtype
-
- r = _gamma(r) * max_positions
- half_dim = self.r_cond_dim // 2
-
- emb = math.log(max_positions) / (half_dim - 1)
- emb = torch.arange(half_dim, device=r.device).float().mul(-emb).exp()
-
- emb = r[:, None] * emb[None, :]
- emb = torch.cat([emb.sin(), emb.cos()], dim=1)
-
- if self.r_cond_dim % 2 == 1: # zero pad
- emb = nn.functional.pad(emb, (0, 1), mode="constant")
-
- return emb.to(dtype)
- else:
- return r
-
- @torch.no_grad()
- def to_signal(self, z, codec):
- """
- convert a sequence of latents to a signal.
- """
- assert z.ndim == 3
-
- signal = at.AudioSignal(
- codec.decode(
- codec.quantizer.from_latents(self.embedding.from_codes(z, codec))[0]
- )["audio"],
- codec.sample_rate,
- )
-
- # find where the mask token is and replace it with silence in the audio
- for tstep in range(z.shape[-1]):
- if torch.any(z[:, :, tstep] == self.mask_token):
- sample_idx_0 = tstep * codec.hop_length
- sample_idx_1 = sample_idx_0 + codec.hop_length
- signal.samples[:, :, sample_idx_0:sample_idx_1] = 0.0
-
- return signal
-
-
- @torch.no_grad()
- def generate(
- self,
- codec,
- time_steps: int = 300,
- sampling_steps: int = 36,
- start_tokens: Optional[torch.Tensor] = None,
- sampling_temperature: float = 1.0,
- mask: Optional[torch.Tensor] = None,
- mask_temperature: float = 10.5,
- typical_filtering=False,
- typical_mass=0.2,
- typical_min_tokens=1,
- top_p=None,
- return_signal=True,
- seed: int = None,
- sample_cutoff: float = 1.0,
- ):
- if seed is not None:
- at.util.seed(seed)
- logging.debug(f"beginning generation with {sampling_steps} steps")
-
-
-
- #####################
- # resolve initial z #
- #####################
- z = start_tokens
-
- if z is None:
- z = torch.full((1, self.n_codebooks, time_steps), self.mask_token).to(
- self.device
- )
-
- logging.debug(f"created z with shape {z.shape}")
-
-
- #################
- # resolve mask #
- #################
-
- if mask is None:
- mask = torch.ones_like(z).to(self.device).int()
- mask[:, : self.n_conditioning_codebooks, :] = 0.0
- if mask.ndim == 2:
- mask = mask[:, None, :].repeat(1, z.shape[1], 1)
- # init_mask = mask.clone()
-
- logging.debug(f"created mask with shape {mask.shape}")
-
-
- ###########
- # set up #
- ##########
- # apply the mask to z
- z_masked = z.masked_fill(mask.bool(), self.mask_token)
- # logging.debug(f"z_masked: {z_masked}")
-
- # how many mask tokens to begin with?
- num_mask_tokens_at_start = (z_masked == self.mask_token).sum()
- logging.debug(f"num mask tokens at start: {num_mask_tokens_at_start}")
-
- # how many codebooks are we inferring vs conditioning on?
- n_infer_codebooks = self.n_codebooks - self.n_conditioning_codebooks
- logging.debug(f"n infer codebooks: {n_infer_codebooks}")
-
- #################
- # begin sampling #
- #################
-
- for i in range(sampling_steps):
- logging.debug(f"step {i} of {sampling_steps}")
-
- # our current schedule step
- r = scalar_to_batch_tensor(
- (i + 1) / sampling_steps,
- z.shape[0]
- ).to(z.device)
- logging.debug(f"r: {r}")
-
- # get latents
- latents = self.embedding.from_codes(z_masked, codec)
- logging.debug(f"computed latents with shape: {latents.shape}")
-
-
- # infer from latents
- # NOTE: this collapses the codebook dimension into the sequence dimension
- logits = self.forward(latents) # b, prob, seq
- logits = logits.permute(0, 2, 1) # b, seq, prob
- b = logits.shape[0]
-
- logging.debug(f"permuted logits with shape: {logits.shape}")
-
- sampled_z, selected_probs = sample_from_logits(
- logits, sample=(
- (i / sampling_steps) <= sample_cutoff
- ),
- temperature=sampling_temperature,
- typical_filtering=typical_filtering, typical_mass=typical_mass,
- typical_min_tokens=typical_min_tokens,
- top_k=None, top_p=top_p, return_probs=True,
- )
-
- logging.debug(f"sampled z with shape: {sampled_z.shape}")
-
- # flatten z_masked and mask, so we can deal with the sampling logic
- # we'll unflatten them at the end of the loop for the next forward pass
- # remove conditioning codebooks, we'll add them back at the end
- z_masked = codebook_flatten(z_masked[:, self.n_conditioning_codebooks:, :])
-
- mask = (z_masked == self.mask_token).int()
-
- # update the mask, remove conditioning codebooks from the mask
- logging.debug(f"updated mask with shape: {mask.shape}")
- # add z back into sampled z where the mask was false
- sampled_z = torch.where(
- mask.bool(), sampled_z, z_masked
- )
- logging.debug(f"added z back into sampled z with shape: {sampled_z.shape}")
-
- # ignore any tokens that weren't masked
- selected_probs = torch.where(
- mask.bool(), selected_probs, torch.inf
- )
-
- # get the num tokens to mask, according to the schedule
- num_to_mask = torch.floor(_gamma(r) * num_mask_tokens_at_start).unsqueeze(1).long()
- logging.debug(f"num to mask: {num_to_mask}")
-
- if i != (sampling_steps - 1):
- num_to_mask = torch.maximum(
- torch.tensor(1),
- torch.minimum(
- mask.sum(dim=-1, keepdim=True) - 1,
- num_to_mask
- )
- )
-
-
- # get our new mask
- mask = mask_by_random_topk(
- num_to_mask, selected_probs, mask_temperature * (1-r)
- )
-
- # update the mask
- z_masked = torch.where(
- mask.bool(), self.mask_token, sampled_z
- )
- logging.debug(f"updated z_masked with shape: {z_masked.shape}")
-
- z_masked = codebook_unflatten(z_masked, n_infer_codebooks)
- mask = codebook_unflatten(mask, n_infer_codebooks)
- logging.debug(f"unflattened z_masked with shape: {z_masked.shape}")
-
- # add conditioning codebooks back to z_masked
- z_masked = torch.cat(
- (z[:, :self.n_conditioning_codebooks, :], z_masked), dim=1
- )
- logging.debug(f"added conditioning codebooks back to z_masked with shape: {z_masked.shape}")
-
-
- # add conditioning codebooks back to sampled_z
- sampled_z = codebook_unflatten(sampled_z, n_infer_codebooks)
- sampled_z = torch.cat(
- (z[:, :self.n_conditioning_codebooks, :], sampled_z), dim=1
- )
-
- logging.debug(f"finished sampling")
-
- if return_signal:
- return self.to_signal(sampled_z, codec)
- else:
- return sampled_z
-
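The sampling loop in `generate` re-masks fewer and fewer tokens as `r` runs from 0 to 1, following `_gamma` (imported from `vampnet.mask` and not shown in this file). Assuming a cosine-style schedule, a common choice for this kind of iterative masked decoding, the per-step budget decays like this:

```python
import math

def cosine_gamma(r: float) -> float:
    # assumed schedule for illustration; the real one is vampnet.mask._gamma
    return math.cos(r * math.pi / 2)

num_mask_tokens_at_start = 900
sampling_steps = 12
for i in range(sampling_steps):
    r = (i + 1) / sampling_steps
    num_to_mask = math.floor(cosine_gamma(r) * num_mask_tokens_at_start)
    print(f"step {i:2d}  r={r:.2f}  tokens re-masked next round: {num_to_mask}")
```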
-def sample_from_logits(
- logits,
- sample: bool = True,
- temperature: float = 1.0,
- top_k: int = None,
- top_p: float = None,
- typical_filtering: bool = False,
- typical_mass: float = 0.2,
- typical_min_tokens: int = 1,
- return_probs: bool = False
- ):
- """Convenience function to sample from a categorial distribution with input as
- unnormalized logits.
-
- Parameters
- ----------
- logits : Tensor[..., vocab_size]
- config: SamplingConfig
- The set of hyperparameters to be used for sampling
- sample : bool, optional
- Whether to perform multinomial sampling, by default True
- temperature : float, optional
- Scaling parameter for multinomial sampling, by default 1.0
- top_k : int, optional
- Restricts sampling to only `top_k` values acc. to probability,
- by default None
- top_p : float, optional
- Restricts sampling to only those values with cumulative
- probability <= `top_p`, by default None
-
- Returns
- -------
- Tensor[...]
- Sampled tokens
- """
- shp = logits.shape[:-1]
-
- if typical_filtering:
- typical_filter(logits,
- typical_mass=typical_mass,
- typical_min_tokens=typical_min_tokens
- )
-
- # Apply top_k sampling
- if top_k is not None:
- v, _ = logits.topk(top_k)
- logits[logits < v[..., [-1]]] = -float("inf")
-
- # Apply top_p (nucleus) sampling
- if top_p is not None and top_p < 1.0:
- v, sorted_indices = logits.sort(descending=True)
- cumulative_probs = v.softmax(dim=-1).cumsum(dim=-1)
-
- sorted_indices_to_remove = cumulative_probs > top_p
- # Right shift indices_to_remove to keep 1st token over threshold
- sorted_indices_to_remove = F.pad(sorted_indices_to_remove, (1, 0), value=False)[
- ..., :-1
- ]
-
- # Compute indices_to_remove in unsorted array
- indices_to_remove = sorted_indices_to_remove.scatter(
- -1, sorted_indices, sorted_indices_to_remove
- )
-
- logits[indices_to_remove] = -float("inf")
-
- # Perform multinomial sampling after normalizing logits
- probs = (
- F.softmax(logits / temperature, dim=-1)
- if temperature > 0
- else logits.softmax(dim=-1)
- )
- token = (
- probs.view(-1, probs.size(-1)).multinomial(1).squeeze(1).view(*shp)
- if sample
- else logits.argmax(-1)
- )
-
- if return_probs:
- token_probs = probs.take_along_dim(token.unsqueeze(-1), dim=-1).squeeze(-1)
- return token, token_probs
- else:
- return token
-
-
-
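The top-p branch of `sample_from_logits` keeps the smallest set of tokens whose cumulative probability exceeds `top_p`; the right-shift is what keeps the first token that crosses the threshold. The same filtering step on a single hand-made distribution, as a standalone sketch:

```python
import torch
import torch.nn.functional as F

def nucleus_filter(logits: torch.Tensor, top_p: float) -> torch.Tensor:
    """Mask out tokens outside the nucleus whose cumulative probability exceeds top_p."""
    sorted_logits, sorted_indices = logits.sort(descending=True)
    cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
    sorted_to_remove = cumulative_probs > top_p
    # right-shift so the first token that crosses the threshold is kept
    sorted_to_remove = F.pad(sorted_to_remove, (1, 0), value=False)[..., :-1]
    to_remove = sorted_to_remove.scatter(-1, sorted_indices, sorted_to_remove)
    return logits.masked_fill(to_remove, -float("inf"))

logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
print(nucleus_filter(logits, top_p=0.9))  # the two least likely tokens become -inf
```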
-def mask_by_random_topk(
- num_to_mask: int,
- probs: torch.Tensor,
- temperature: float = 1.0,
- ):
- """
- Args:
- num_to_mask (int): number of tokens to mask
- probs (torch.Tensor): probabilities for each sampled event, shape (batch, seq)
- temperature (float, optional): temperature. Defaults to 1.0.
- """
- logging.debug(f"masking by random topk")
- logging.debug(f"num to mask: {num_to_mask}")
- logging.debug(f"probs shape: {probs.shape}")
- logging.debug(f"temperature: {temperature}")
- logging.debug("")
-
- noise = gumbel_noise_like(probs)
- confidence = torch.log(probs) + temperature * noise
- logging.debug(f"confidence shape: {confidence.shape}")
-
- sorted_confidence, sorted_idx = confidence.sort(dim=-1)
- logging.debug(f"sorted confidence shape: {sorted_confidence.shape}")
- logging.debug(f"sorted idx shape: {sorted_idx.shape}")
-
- # get the cut off threshold, given the mask length
- cut_off = torch.take_along_dim(
- sorted_confidence, num_to_mask, axis=-1
- )
- logging.debug(f"cut off shape: {cut_off.shape}")
-
- # mask out the tokens
- mask = confidence < cut_off
- logging.debug(f"mask shape: {mask.shape}")
-
- return mask
-
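To see what `mask_by_random_topk` does, here is the same computation on a toy batch with temperature 0, so the Gumbel noise term vanishes and the choice is purely by confidence: the `num_to_mask` lowest-probability positions are the ones re-masked.

```python
import torch

probs = torch.tensor([[0.90, 0.20, 0.05, 0.80, 0.40]])  # per-token confidence, shape (batch, seq)
num_to_mask = torch.tensor([[2]])

confidence = torch.log(probs)  # temperature 0: no noise added
sorted_confidence, _ = confidence.sort(dim=-1)
cut_off = torch.take_along_dim(sorted_confidence, num_to_mask, dim=-1)
mask = confidence < cut_off
print(mask)  # tensor([[False,  True,  True, False, False]])
```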
-def typical_filter(
- logits,
- typical_mass: float = 0.95,
- typical_min_tokens: int = 1,):
- nb, nt, _ = logits.shape
- x_flat = rearrange(logits, "b t l -> (b t ) l")
- x_flat_norm = torch.nn.functional.log_softmax(x_flat, dim=-1)
- x_flat_norm_p = torch.exp(x_flat_norm)
- entropy = -(x_flat_norm * x_flat_norm_p).nansum(-1, keepdim=True)
-
- c_flat_shifted = torch.abs((-x_flat_norm) - entropy)
- c_flat_sorted, x_flat_indices = torch.sort(c_flat_shifted, descending=False)
- x_flat_cumsum = (
- x_flat.gather(-1, x_flat_indices).softmax(dim=-1).cumsum(dim=-1)
- )
-
- last_ind = (x_flat_cumsum < typical_mass).sum(dim=-1)
- sorted_indices_to_remove = c_flat_sorted > c_flat_sorted.gather(
- 1, last_ind.view(-1, 1)
- )
- if typical_min_tokens > 1:
- sorted_indices_to_remove[..., :typical_min_tokens] = 0
- indices_to_remove = sorted_indices_to_remove.scatter(
- 1, x_flat_indices, sorted_indices_to_remove
- )
- x_flat = x_flat.masked_fill(indices_to_remove, -float("Inf"))
- logits = rearrange(x_flat, "(b t) l -> b t l", t=nt)
- return logits
-
-
-if __name__ == "__main__":
- import argbind
- from .layers import num_params
-
- VampNet = argbind.bind(VampNet)
-
- @argbind.bind(without_prefix=True)
- def try_model(device: str = "cuda", batch_size: int = 2, seq_len_s: float = 10.0):
- seq_len = int(32000 / 512 * seq_len_s)
-
- model = VampNet().to(device)
-
- z = torch.randint(
- 0, model.vocab_size, size=(batch_size, model.n_codebooks, seq_len)
- ).to(device)
-
- r = torch.zeros(batch_size).to(device)
-
- z_mask_latent = torch.rand(
- batch_size, model.latent_dim * model.n_codebooks, seq_len
- ).to(device)
- z_hat = model(z_mask_latent)
-
- pred = z_hat.argmax(dim=1)
- pred = model.embedding.unflatten(pred, n_codebooks=model.n_predict_codebooks)
-
- print(f"model has {num_params(model)/1e6:<.3f}M parameters")
- print(f"prediction has shape {pred.shape}")
- breakpoint()
-
- args = argbind.parse_args()
- with argbind.scope(args):
- try_model()
-
-
diff --git "a/spaces/huggingface/Model_Cards_Writing_Tool/pages/8_\360\237\214\217_Environmental_Impact.py" "b/spaces/huggingface/Model_Cards_Writing_Tool/pages/8_\360\237\214\217_Environmental_Impact.py"
deleted file mode 100644
index 5c423a80215a3e0e070fd598ab70ff61641a7e43..0000000000000000000000000000000000000000
--- "a/spaces/huggingface/Model_Cards_Writing_Tool/pages/8_\360\237\214\217_Environmental_Impact.py"
+++ /dev/null
@@ -1,56 +0,0 @@
-import streamlit as st
-from persist import persist, load_widget_state
-from pathlib import Path
-
-
-
-global variable_output
-
-def main():
-
- cs_body()
-
-
-def cs_body():
-
- stateVariable = 'Model_carbon'
- help_text ='Provide an estimate for the carbon emissions: e.g. hardware used, hours spent training, cloud provider '
-
- st.markdown('# Environmental Impact')
- st.markdown('###### Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).')
- st.text_area("", help="Provide an estimate for the carbon emissions: e.g hardware used, horus spent training, cloud provider")
-
- left, right = st.columns([2,4])
- with left:
- st.write("\n")
- st.write("\n")
- st.markdown('### Hardware Type:')
- st.write("\n")
- st.write("\n")
- #st.write("\n")
- st.markdown('### Hours used:')
- st.write("\n")
- st.write("\n")
- st.markdown('### Cloud Provider:')
- st.write("\n")
- st.write("\n")
- st.markdown('### Compute Region:')
- st.write("\n")
- st.write("\n")
- st.markdown('### Carbon Emitted:')
- with right:
- #soutput_jinja = parse_into_jinja_markdown()
- st.text_input("",key=persist("Model_hardware"))
- #st.write("\n")
- st.text_input("",help="sw",key=persist("hours_used"))
- st.text_input("",key=persist("Model_cloud_provider"))
- st.text_input("",key=persist("Model_cloud_region"))
- st.text_input("",help= 'in grams of CO2eq', key=persist("Model_c02_emitted")) ##to-do: auto calculate
-
-
-
-
-
-if __name__ == '__main__':
- load_widget_state()
- main()
\ No newline at end of file
diff --git a/spaces/huggingface/Model_Cards_Writing_Tool/viewCardProgress(old).py b/spaces/huggingface/Model_Cards_Writing_Tool/viewCardProgress(old).py
deleted file mode 100644
index 6410796a60c3a3ec60bcded5914bfc2441096826..0000000000000000000000000000000000000000
--- a/spaces/huggingface/Model_Cards_Writing_Tool/viewCardProgress(old).py
+++ /dev/null
@@ -1,101 +0,0 @@
-import streamlit as st
-from persist import persist, load_widget_state
-from modelcards import CardData, ModelCard
-from huggingface_hub import create_repo
-
-
-def is_float(value):
- try:
- float(value)
- return True
- except:
- return False
-
-def get_card():
- languages=st.session_state.languages or None
- license=st.session_state.license or None
- library_name = st.session_state.library_name or None
- tags= [x.strip() for x in st.session_state.tags.split(',') if x.strip()]
- tags.append("autogenerated-modelcard")
- datasets= [x.strip() for x in st.session_state.datasets.split(',') if x.strip()] or None
- metrics=st.session_state.metrics or None
- model_name = st.session_state.model_name or None
- model_description = st.session_state.model_description or None
- #Model_details_text = st.session_state.Model_details_text or None
- #Model_how_to = st.session_state.Model_how_to or None
- authors = st.session_state.authors or None
- paper_url = st.session_state.paper_url or None
- github_url = st.session_state.github_url or None
- bibtex_citations = st.session_state.bibtex_citations or None
- emissions = float(st.session_state.emissions) if is_float(st.session_state.emissions) else None # BUG
-
- # Handle any warnings...
- do_warn = False
- warning_msg = "Warning: The following fields are required but have not been filled in: "
- if not languages:
- warning_msg += "\n- Languages"
- do_warn = True
- if not license:
- warning_msg += "\n- License"
- do_warn = True
- if do_warn:
- st.error(warning_msg)
- st.stop()
-
- # Generate and display card
- card_data = CardData(
- language=languages,
- license=license,
- library_name=library_name,
- tags=tags,
- datasets=datasets,
- metrics=metrics,
- )
- if emissions:
- card_data.co2_eq_emissions = {'emissions': emissions}
-
- card = ModelCard.from_template(
- card_data,
- template_path='template.md',
- model_id=model_name,
- # Template kwargs:
- model_description=model_description,
- license=license,
- authors=authors,
- paper_url=paper_url,
- github_url=github_url,
- bibtex_citations=bibtex_citations,
- emissions=emissions
- )
- return card
-
-
-def main():
-
- card = get_card()
- card.save('current_card.md')
- view_raw = st.sidebar.checkbox("View Raw")
- if view_raw:
- st.text(card)
- else:
- st.markdown(card.text, unsafe_allow_html=True)
-
- with st.sidebar:
- with st.form("Upload to 🤗 Hub"):
- st.markdown("Use a token with write access from [here](https://hf.co/settings/tokens)")
- token = st.text_input("Token", type='password')
- repo_id = st.text_input("Repo ID")
- submit = st.form_submit_button('Upload to 🤗 Hub')
-
- if submit:
- if len(repo_id.split('/')) == 2:
- repo_url = create_repo(repo_id, exist_ok=True, token=token)
- card.push_to_hub(repo_id, token=token)
- st.success(f"Pushed the card to the repo [here]({repo_url}!")
- else:
- st.error("Repo ID invalid. It should be username/repo-name. For example: nateraw/food")
-
-
-if __name__ == "__main__":
- load_widget_state()
- main()
\ No newline at end of file
diff --git a/spaces/huy-ha/semabs-relevancy/CLIP/clip/model.py b/spaces/huy-ha/semabs-relevancy/CLIP/clip/model.py
deleted file mode 100644
index 73d0000fe957d9826f5b93f25371e195edf870b7..0000000000000000000000000000000000000000
--- a/spaces/huy-ha/semabs-relevancy/CLIP/clip/model.py
+++ /dev/null
@@ -1,587 +0,0 @@
-from collections import OrderedDict
-from typing import Tuple, Union
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torch import nn
-from .auxiliary import interpolate_positional_emb
-
-
-class Bottleneck(nn.Module):
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1):
- super().__init__()
-
- # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
- self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
- self.bn1 = nn.BatchNorm2d(planes)
-
- self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
- self.bn2 = nn.BatchNorm2d(planes)
-
- self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
-
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
-
- self.relu = nn.ReLU(inplace=True)
- self.downsample = None
- self.stride = stride
-
- if stride > 1 or inplanes != planes * Bottleneck.expansion:
- # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
- self.downsample = nn.Sequential(
- OrderedDict(
- [
- ("-1", nn.AvgPool2d(stride)),
- (
- "0",
- nn.Conv2d(
- inplanes,
- planes * self.expansion,
- 1,
- stride=1,
- bias=False,
- ),
- ),
- ("1", nn.BatchNorm2d(planes * self.expansion)),
- ]
- )
- )
-
- def forward(self, x: torch.Tensor):
- identity = x
-
- out = self.relu(self.bn1(self.conv1(x)))
- out = self.relu(self.bn2(self.conv2(out)))
- out = self.avgpool(out)
- out = self.bn3(self.conv3(out))
-
- if self.downsample is not None:
- identity = self.downsample(x)
-
- out += identity
- out = self.relu(out)
- return out
-
-
-class AttentionPool2d(nn.Module):
- def __init__(
- self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
- ):
- super().__init__()
- self.positional_embedding = nn.Parameter(
- torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
- )
- self.k_proj = nn.Linear(embed_dim, embed_dim)
- self.q_proj = nn.Linear(embed_dim, embed_dim)
- self.v_proj = nn.Linear(embed_dim, embed_dim)
- self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
- self.num_heads = num_heads
-
- def forward(self, x):
- x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
- 2, 0, 1
- ) # NCHW -> (HW)NC
- x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
- assert len(x) >= 50
- if len(x) > 50:
- target_seq_len = len(x)
- pe = interpolate_positional_emb(self.positional_embedding, target_seq_len)
- x = x + pe[:, None, :] # (HW+1)NC
- else:
- x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
- x, _ = F.multi_head_attention_forward(
- query=x,
- key=x,
- value=x,
- embed_dim_to_check=x.shape[-1],
- num_heads=self.num_heads,
- q_proj_weight=self.q_proj.weight,
- k_proj_weight=self.k_proj.weight,
- v_proj_weight=self.v_proj.weight,
- in_proj_weight=None,
- in_proj_bias=torch.cat(
- [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
- ),
- bias_k=None,
- bias_v=None,
- add_zero_attn=False,
- dropout_p=0,
- out_proj_weight=self.c_proj.weight,
- out_proj_bias=self.c_proj.bias,
- use_separate_proj_weight=True,
- training=self.training,
- need_weights=False,
- )
-
- return x[0]
-
-
-class ModifiedResNet(nn.Module):
- """
- A ResNet class that is similar to torchvision's but contains the following changes:
- - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- - The final pooling layer is a QKV attention instead of an average pool
- """
-
- def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
- super().__init__()
- self.output_dim = output_dim
- self.input_resolution = input_resolution
-
- # the 3-layer stem
- self.conv1 = nn.Conv2d(
- 3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
- )
- self.bn1 = nn.BatchNorm2d(width // 2)
- self.conv2 = nn.Conv2d(
- width // 2, width // 2, kernel_size=3, padding=1, bias=False
- )
- self.bn2 = nn.BatchNorm2d(width // 2)
- self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
- self.bn3 = nn.BatchNorm2d(width)
- self.avgpool = nn.AvgPool2d(2)
- self.relu = nn.ReLU(inplace=True)
-
- # residual layers
- self._inplanes = width # this is a *mutable* variable used during construction
- self.layer1 = self._make_layer(width, layers[0])
- self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
- self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
- self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
-
- embed_dim = width * 32 # the ResNet feature dimension
- self.attnpool = AttentionPool2d(
- input_resolution // 32, embed_dim, heads, output_dim
- )
-
- def _make_layer(self, planes, blocks, stride=1):
- layers = [Bottleneck(self._inplanes, planes, stride)]
-
- self._inplanes = planes * Bottleneck.expansion
- for _ in range(1, blocks):
- layers.append(Bottleneck(self._inplanes, planes))
-
- return nn.Sequential(*layers)
-
- def forward(self, x):
- def stem(x):
- for conv, bn in [
- (self.conv1, self.bn1),
- (self.conv2, self.bn2),
- (self.conv3, self.bn3),
- ]:
- x = self.relu(bn(conv(x)))
- x = self.avgpool(x)
- return x
-
- x = x.type(self.conv1.weight.dtype)
- x = stem(x)
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
- x = self.attnpool(x)
-
- return x
-
-
-class LayerNorm(nn.LayerNorm):
- """Subclass torch's LayerNorm to handle fp16."""
-
- def forward(self, x: torch.Tensor):
- orig_type = x.dtype
- ret = super().forward(x.type(torch.float32))
- return ret.type(orig_type)
-
-
-class QuickGELU(nn.Module):
- def forward(self, x: torch.Tensor):
- return x * torch.sigmoid(1.702 * x)
-
-
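`QuickGELU` is the sigmoid approximation of GELU that the released CLIP weights were trained with; it is close to, but not identical to, the exact function. A quick numeric comparison:

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-4, 4, 9)
quick = x * torch.sigmoid(1.702 * x)
exact = F.gelu(x)
print((quick - exact).abs().max())  # on the order of 1e-2: cheap and close, but not exact
```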
-class ResidualAttentionBlock(nn.Module):
- def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
- super().__init__()
-
- self.attn = nn.MultiheadAttention(d_model, n_head)
- self.ln_1 = LayerNorm(d_model)
- self.mlp = nn.Sequential(
- OrderedDict(
- [
- ("c_fc", nn.Linear(d_model, d_model * 4)),
- ("gelu", QuickGELU()),
- ("c_proj", nn.Linear(d_model * 4, d_model)),
- ]
- )
- )
- self.ln_2 = LayerNorm(d_model)
- self.attn_mask = attn_mask
-
- def attention(self, x: torch.Tensor):
- self.attn_mask = (
- self.attn_mask.to(dtype=x.dtype, device=x.device)
- if self.attn_mask is not None
- else None
- )
- return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
-
- def forward(self, x: torch.Tensor):
- x = x + self.attention(self.ln_1(x))
- x = x + self.mlp(self.ln_2(x))
- return x
-
-
-class Transformer(nn.Module):
- def __init__(
- self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None
- ):
- super().__init__()
- self.width = width
- self.layers = layers
- self.resblocks = nn.Sequential(
- *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
- )
-
- def forward(self, x: torch.Tensor, tile_attn_mask: torch.Tensor = None):
- prev_attn_masks = []
- if tile_attn_mask is not None:
- for resblock in filter(
- lambda module: isinstance(module, ResidualAttentionBlock),
- self.resblocks.modules(),
- ):
- prev_attn_masks.append(
- resblock.attn_mask.clone()
- if resblock.attn_mask is not None
- else None
- )
- resblock.attn_mask = tile_attn_mask
- x = self.resblocks(x)
- if tile_attn_mask is not None:
- for resblock, prev_attn_mask in zip(
- filter(
- lambda module: isinstance(module, ResidualAttentionBlock),
- self.resblocks.modules(),
- ),
- prev_attn_masks,
- ):
- resblock.attn_mask = prev_attn_mask
- return x
-
-
-class VisionTransformer(nn.Module):
- def __init__(
- self,
- input_resolution: int,
- patch_size: int,
- width: int,
- layers: int,
- heads: int,
- output_dim: int,
- ):
- super().__init__()
- self.input_resolution = input_resolution
- self.output_dim = output_dim
- self.conv1 = nn.Conv2d(
- in_channels=3,
- out_channels=width,
- kernel_size=patch_size,
- stride=patch_size,
- bias=False,
- )
-
- scale = width**-0.5
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
- self.positional_embedding = nn.Parameter(
- scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)
- )
- self.ln_pre = LayerNorm(width)
-
- self.transformer = Transformer(width, layers, heads)
-
- self.ln_post = LayerNorm(width)
- self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
-
- def forward(self, x: torch.Tensor, **kwargs):
- x = self.conv1(x) # shape = [*, width, grid, grid]
- # shape = [*, width, grid ** 2]
- x = x.reshape(x.shape[0], x.shape[1], -1)
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
- x = torch.cat(
- [
- self.class_embedding.to(x.dtype)
- + torch.zeros(
- x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
- ),
- x,
- ],
- dim=1,
- ) # shape = [*, grid ** 2 + 1, width]
- x = x + self.positional_embedding.to(x.dtype)
- x = self.ln_pre(x)
-
- x = x.permute(1, 0, 2) # NLD -> LND
- x = self.transformer(x, **kwargs)
- x = x.permute(1, 0, 2) # LND -> NLD
-
- x = self.ln_post(x[:, 0, :])
-
- if self.proj is not None:
- x = x @ self.proj
-
- return x
-
-
-class CLIP(nn.Module):
- def __init__(
- self,
- embed_dim: int,
- # vision
- image_resolution: int,
- vision_layers: Union[Tuple[int, int, int, int], int],
- vision_width: int,
- vision_patch_size: int,
- # text
- context_length: int,
- vocab_size: int,
- transformer_width: int,
- transformer_heads: int,
- transformer_layers: int,
- ):
- super().__init__()
-
- self.context_length = context_length
-
- if isinstance(vision_layers, (tuple, list)):
- vision_heads = vision_width * 32 // 64
- self.visual = ModifiedResNet(
- layers=vision_layers,
- output_dim=embed_dim,
- heads=vision_heads,
- input_resolution=image_resolution,
- width=vision_width,
- )
- else:
- vision_heads = vision_width // 64
- self.visual = VisionTransformer(
- input_resolution=image_resolution,
- patch_size=vision_patch_size,
- width=vision_width,
- layers=vision_layers,
- heads=vision_heads,
- output_dim=embed_dim,
- )
-
- self.transformer = Transformer(
- width=transformer_width,
- layers=transformer_layers,
- heads=transformer_heads,
- attn_mask=self.build_attention_mask(),
- )
-
- self.vocab_size = vocab_size
- self.token_embedding = nn.Embedding(vocab_size, transformer_width)
- self.positional_embedding = nn.Parameter(
- torch.empty(self.context_length, transformer_width)
- )
- self.ln_final = LayerNorm(transformer_width)
-
- self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
- self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
-
- self.initialize_parameters()
-
- def initialize_parameters(self):
- nn.init.normal_(self.token_embedding.weight, std=0.02)
- nn.init.normal_(self.positional_embedding, std=0.01)
-
- if isinstance(self.visual, ModifiedResNet):
- if self.visual.attnpool is not None:
- std = self.visual.attnpool.c_proj.in_features**-0.5
- nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
- nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
- nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
- nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
-
- for resnet_block in [
- self.visual.layer1,
- self.visual.layer2,
- self.visual.layer3,
- self.visual.layer4,
- ]:
- for name, param in resnet_block.named_parameters():
- if name.endswith("bn3.weight"):
- nn.init.zeros_(param)
-
- proj_std = (self.transformer.width**-0.5) * (
- (2 * self.transformer.layers) ** -0.5
- )
- attn_std = self.transformer.width**-0.5
- fc_std = (2 * self.transformer.width) ** -0.5
- for block in self.transformer.resblocks:
- nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
- nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
- nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
- nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
-
- if self.text_projection is not None:
- nn.init.normal_(self.text_projection, std=self.transformer.width**-0.5)
-
- def build_attention_mask(self):
- # lazily create causal attention mask, with full attention between the vision tokens
- # pytorch uses additive attention mask; fill with -inf
- mask = torch.empty(self.context_length, self.context_length)
- mask.fill_(float("-inf"))
- mask.triu_(1) # zero out the lower diagonal
- return mask
-
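`build_attention_mask` returns an additive mask: zeros on and below the diagonal, `-inf` strictly above, so each text token attends only to itself and earlier positions. For a context length of 5 it looks like this:

```python
import torch

context_length = 5
mask = torch.empty(context_length, context_length)
mask.fill_(float("-inf"))
mask.triu_(1)  # keep -inf strictly above the diagonal, zero elsewhere
print(mask)
# tensor([[0., -inf, -inf, -inf, -inf],
#         [0.,   0., -inf, -inf, -inf],
#         [0.,   0.,   0., -inf, -inf],
#         [0.,   0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.,   0.]])
```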
- @property
- def dtype(self):
- return self.visual.conv1.weight.dtype
-
- def encode_image(self, image, **kwargs):
- return self.visual(image.type(self.dtype), **kwargs)
-
- def encode_text(self, text, return_transformer_outputs=False):
- x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
- x = x + self.positional_embedding.type(self.dtype)[: x.shape[1], :]
- x = x.permute(1, 0, 2) # NLD -> LND
- x = self.transformer(x)
- x = x.permute(1, 0, 2) # LND -> NLD
- transformer_output = self.ln_final(x).type(self.dtype)
-
- # x.shape = [batch_size, n_ctx, transformer.width]
- # take features from the eot embedding (eot_token is the highest number in each sequence)
- x = (
- transformer_output[
- torch.arange(transformer_output.shape[0]), text.argmax(dim=-1)
- ]
- @ self.text_projection
- )
- if return_transformer_outputs:
- return x, transformer_output
- return x
-
- def forward(self, image, text):
- image_features = self.encode_image(image)
- text_features = self.encode_text(text)
-
- # normalized features
- image_features = image_features / image_features.norm(dim=-1, keepdim=True)
- text_features = text_features / text_features.norm(dim=-1, keepdim=True)
-
- # cosine similarity as logits
- logit_scale = self.logit_scale.exp()
- logits_per_image = logit_scale * image_features @ text_features.t()
- logits_per_text = logits_per_image.t()
-
- # shape = [global_batch_size, global_batch_size]
- return logits_per_image, logits_per_text
-
-
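The tail of `CLIP.forward` is the standard contrastive head: L2-normalize both embeddings, then scale their cosine similarities by the learned temperature. A shape-only sketch with random features standing in for real encoder outputs:

```python
import torch

torch.manual_seed(0)
image_features = torch.randn(4, 512)  # stand-ins for encode_image / encode_text outputs
text_features = torch.randn(4, 512)
logit_scale = torch.tensor(1 / 0.07)  # exp of the learned log-scale at its CLIP init value

image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)

logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
print(logits_per_image.shape, logits_per_text.shape)  # torch.Size([4, 4]) twice
```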
-def convert_weights(model: nn.Module):
- """Convert applicable model parameters to fp16"""
-
- def _convert_weights_to_fp16(l):
- if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
- l.weight.data = l.weight.data.half()
- if l.bias is not None:
- l.bias.data = l.bias.data.half()
-
- if isinstance(l, nn.MultiheadAttention):
- for attr in [
- *[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
- "in_proj_bias",
- "bias_k",
- "bias_v",
- ]:
- tensor = getattr(l, attr)
- if tensor is not None:
- tensor.data = tensor.data.half()
-
- for name in ["text_projection", "proj"]:
- if hasattr(l, name):
- attr = getattr(l, name)
- if attr is not None:
- attr.data = attr.data.half()
-
- model.apply(_convert_weights_to_fp16)
-
-
-def build_model(state_dict: dict):
- vit = "visual.proj" in state_dict
-
- if vit:
- vision_width = state_dict["visual.conv1.weight"].shape[0]
- vision_layers = len(
- [
- k
- for k in state_dict.keys()
- if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
- ]
- )
- vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
- grid_size = round(
- (state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5
- )
- image_resolution = vision_patch_size * grid_size
- else:
- counts: list = [
- len(
- set(
- k.split(".")[2]
- for k in state_dict
- if k.startswith(f"visual.layer{b}")
- )
- )
- for b in [1, 2, 3, 4]
- ]
- vision_layers = tuple(counts)
- vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
- output_width = round(
- (state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5
- )
- vision_patch_size = None
- assert (
- output_width**2 + 1
- == state_dict["visual.attnpool.positional_embedding"].shape[0]
- )
- image_resolution = output_width * 32
-
- embed_dim = state_dict["text_projection"].shape[1]
- context_length = state_dict["positional_embedding"].shape[0]
- vocab_size = state_dict["token_embedding.weight"].shape[0]
- transformer_width = state_dict["ln_final.weight"].shape[0]
- transformer_heads = transformer_width // 64
- transformer_layers = len(
- set(
- k.split(".")[2]
- for k in state_dict
- if k.startswith(f"transformer.resblocks")
- )
- )
-
- model = CLIP(
- embed_dim,
- image_resolution,
- vision_layers,
- vision_width,
- vision_patch_size,
- context_length,
- vocab_size,
- transformer_width,
- transformer_heads,
- transformer_layers,
- )
-
- for key in ["input_resolution", "context_length", "vocab_size"]:
- if key in state_dict:
- del state_dict[key]
-
- convert_weights(model)
- model.load_state_dict(state_dict)
- return model.eval()
diff --git a/spaces/hwang1/anime-gan/Makefile b/spaces/hwang1/anime-gan/Makefile
deleted file mode 100644
index ff727d0ac0d87aa292e9ddbd99218cadb034f3a4..0000000000000000000000000000000000000000
--- a/spaces/hwang1/anime-gan/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-install:
- pip install --upgrade pip &&\
- pip install -r requirements.txt
-
-test:
- python -m pytest -vvv --cov=hello --cov=greeting \
- --cov=smath --cov=web tests
- python -m pytest --nbval notebook.ipynb #tests our jupyter notebook
- #python -m pytest -v tests/test_web.py #if you just want to test web
-
-debug:
- python -m pytest -vv --pdb #Debugger is invoked
-
-one-test:
- python -m pytest -vv tests/test_greeting.py::test_my_name4
-
-debugthree:
- #not working the way I expect
- python -m pytest -vv --pdb --maxfail=4 # drop to PDB for first three failures
-
-format:
- black *.py
-
-lint:
- pylint --disable=R,C *.py
-
-all: install lint test format
\ No newline at end of file
diff --git a/spaces/hylee/apdrawing/APDrawingGAN2/util/html.py b/spaces/hylee/apdrawing/APDrawingGAN2/util/html.py
deleted file mode 100644
index 5bb4c52d5991564bdb0b2bf0f7546d224a01a971..0000000000000000000000000000000000000000
--- a/spaces/hylee/apdrawing/APDrawingGAN2/util/html.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import dominate
-from dominate.tags import *
-import os
-
-
-class HTML:
- def __init__(self, web_dir, title, refresh=0, folder='images'):
- self.title = title
- self.web_dir = web_dir
- #self.img_dir = os.path.join(self.web_dir, 'images')
- self.img_dir = os.path.join(self.web_dir, folder)
- self.folder = folder
- if not os.path.exists(self.web_dir):
- os.makedirs(self.web_dir)
- if not os.path.exists(self.img_dir):
- os.makedirs(self.img_dir)
- # print(self.img_dir)
-
- self.doc = dominate.document(title=title)
- if refresh > 0:
- with self.doc.head:
- meta(http_equiv="refresh", content=str(refresh))
-
- def get_image_dir(self):
- return self.img_dir
-
- def add_header(self, str):
- with self.doc:
- h3(str)
-
- def add_table(self, border=1):
- self.t = table(border=border, style="table-layout: fixed;")
- self.doc.add(self.t)
-
- def add_images(self, ims, txts, links, width=400):
- self.add_table()
- with self.t:
- with tr():
- for im, txt, link in zip(ims, txts, links):
- with td(style="word-wrap: break-word;", halign="center", valign="top"):
- with p():
- with a(href=os.path.join('images', link)):
- #img(style="width:%dpx" % width, src=os.path.join('images', im))
- img(style="width:%dpx" % width, src=os.path.join(self.folder, im))
- br()
- p(txt)
-
- def save(self):
- #html_file = '%s/index.html' % self.web_dir
- html_file = '%s/index%s.html' % (self.web_dir, self.folder[6:])
- f = open(html_file, 'wt')
- f.write(self.doc.render())
- f.close()
-
-
-if __name__ == '__main__':
- html = HTML('web/', 'test_html')
- html.add_header('hello world')
-
- ims = []
- txts = []
- links = []
- for n in range(4):
- ims.append('image_%d.png' % n)
- txts.append('text_%d' % n)
- links.append('image_%d.png' % n)
- html.add_images(ims, txts, links)
- html.save()
diff --git a/spaces/hylee/finetuned_diffusion/style.css b/spaces/hylee/finetuned_diffusion/style.css
deleted file mode 100644
index 9bfa78cc983f84693cf7cbab1e3bfd0e0d36c944..0000000000000000000000000000000000000000
--- a/spaces/hylee/finetuned_diffusion/style.css
+++ /dev/null
@@ -1,24 +0,0 @@
-.finetuned-diffusion-div div{
- display:inline-flex;
- align-items:center;
- gap:.8rem;
- font-size:1.75rem
-}
-.finetuned-diffusion-div div h1{
- font-weight:900;
- margin-bottom:7px
-}
-.finetuned-diffusion-div p{
- margin-bottom:10px;
- font-size:94%
-}
-a{
- text-decoration:underline
-}
-.tabs{
- margin-top:0;
- margin-bottom:0
-}
-#gallery{
- min-height:20rem
-}
diff --git a/spaces/hysts/DeepDanbooru/style.css b/spaces/hysts/DeepDanbooru/style.css
deleted file mode 100644
index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000
--- a/spaces/hysts/DeepDanbooru/style.css
+++ /dev/null
@@ -1,3 +0,0 @@
-h1 {
- text-align: center;
-}
diff --git a/spaces/iamrobotbear/gradio-auth-new/app.py b/spaces/iamrobotbear/gradio-auth-new/app.py
deleted file mode 100644
index ed12dba16c8933324c67e778ab924fa7ac6e1d2f..0000000000000000000000000000000000000000
--- a/spaces/iamrobotbear/gradio-auth-new/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import gradio as gr
-import os
-
-demo = gr.Interface.load("spaces/RamAnanth1/ControlNet")
-#demo.launch(enable_queue=False, auth=("username", "password"), auth_message="Try this")
-demo.launch(enable_queue=False, auth=("username", os.environ.get("P")), auth_message="Try this")
diff --git a/spaces/idlsono/Idksono4/Dockerfile b/spaces/idlsono/Idksono4/Dockerfile
deleted file mode 100644
index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000
--- a/spaces/idlsono/Idksono4/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM node:18-bullseye-slim
-RUN apt-get update && \
- apt-get install -y git
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-WORKDIR /app
-RUN npm install
-COPY Dockerfile greeting.md* .env* ./
-RUN npm run build
-EXPOSE 7860
-ENV NODE_ENV=production
-CMD [ "npm", "start" ]
diff --git a/spaces/immortaker/as/README.md b/spaces/immortaker/as/README.md
deleted file mode 100644
index 3a8d0636fe4fd8ee97d79f931baf231e6c73f155..0000000000000000000000000000000000000000
--- a/spaces/immortaker/as/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Alist
-emoji: 🦀
-colorFrom: red
-
-colorTo: pink
-sdk: docker
-pinned: false
-license: agpl-3.0
-app_port: 5244
-
----
-
-
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/720p Alibaba Aur 41 Chor Download UPDATED.md b/spaces/inplisQlawa/anything-midjourney-v4-1/720p Alibaba Aur 41 Chor Download UPDATED.md
deleted file mode 100644
index f2c6568360f0c134f5eb11b0a8f876f797d04fdb..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/720p Alibaba Aur 41 Chor Download UPDATED.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-Alibaba Aur 41 Chor: A Fun and Family-Friendly Animated Movie
-If you are looking for a fun and family-friendly animated movie to watch, you might want to check out Alibaba Aur 41 Chor. This is a 2011 movie based on the classic Arabian Nights tale of Alibaba and the Forty Thieves. The movie features the voices of John Abraham, Priyanka Chopra, Ashutosh Rana, and Prem Chopra. The movie follows the adventures of Alibaba, a poor woodcutter who stumbles upon a secret cave filled with treasure belonging to a band of robbers. He decides to take some of the treasure for himself, but soon gets into trouble with the ruthless leader of the robbers, Abu Hassan.
-720p Alibaba Aur 41 Chor Download Download ✅ https://urlin.us/2uEvNN
-In this article, we will tell you more about Alibaba Aur 41 Chor and how you can watch it in HD quality. We will also give you some tips on how to download or stream the movie online safely and legally.
-What is Alibaba Aur 41 Chor About?
-Alibaba Aur 41 Chor is a movie that combines comedy, action, romance, and musical elements. The movie is set in the Middle East and showcases the culture and history of the region. The movie also has a lot of humor and references to modern pop culture.
-The movie starts with Alibaba, a poor woodcutter who lives with his brother Kasim and his wife Marjina. One day, he witnesses a group of robbers entering a secret cave using a magic password. He follows them and discovers that the cave is full of gold and jewels. He decides to take some of the treasure for himself and his family.
-However, his brother Kasim gets greedy and tries to take more treasure from the cave. He forgets the password and gets trapped inside. The robbers find out that someone has stolen their treasure and they vow to find and kill him. They also kidnap Marjina and threaten to kill her unless Alibaba surrenders.
-Alibaba has to use his wit and courage to save his brother, his wife, and himself from the robbers. He also gets help from Morgiana, a slave girl who works for Abu Hassan. She falls in love with Alibaba and helps him escape from many dangers.
-The movie ends with Alibaba defeating Abu Hassan and freeing Morgiana from slavery. He also shares his treasure with the poor people of his town and becomes a hero.
-
-How to Watch Alibaba Aur 41 Chor in HD Quality?
-Alibaba Aur 41 Chor is a movie that you can enjoy in HD quality on your TV, laptop, tablet, or smartphone. There are several ways to watch the movie in HD quality, such as:
-
-Download Alibaba Aur 41 Chor Full Movie 720p with Subtitles : You can download the full movie in 720p resolution with subtitles from various torrent sites, such as SolidTorrents, Torrentz2, or RARBG. You will need a torrent client, such as BitTorrent or uTorrent, to download the movie file. You can also find subtitles in different languages on sites like OpenSubtitles or Subscene.
-Stream Alibaba Aur 41 Chor Online in HD Quality for Free : You can stream the movie online in HD quality for free from various streaming sites, such as Putlocker, Fmovies, or 123Movies. You will need a good internet connection and a web browser to watch the movie online.
-Watch Alibaba Aur 41 Chor on SoundCloud : You can watch the movie on SoundCloud, a platform that allows you to listen to music and podcasts online. You will need an account and a subscription to access SoundCloud. You can find the movie on SoundCloud by searching for "720p Alibaba Aur 41 Chor Download".
-
-What are the Risks of Downloading or Streaming Movies Online?
-While downloading or streaming movies online may seem convenient and cheap, there are some risks involved in doing so. Some of these risks are:
-
-Legal Risks : Downloading or streaming movies from torrent or free streaming sites may not be legal in your country. You may be violating the copyright laws and face legal consequences such as fines or jail time.
-Security Risks : Downloading or streaming movies from torrent or free streaming sites may expose your device to viruses or malware that can harm your data or privacy. You may also encounter annoying ads or pop-ups that may disrupt your viewing experience.
-
-How to Watch Movies Online Safely and Legally?
-If you want to watch movies online safely and legally, you should use a VPN service or an ad-blocker to protect yourself from legal and security risks. A VPN service is a tool that encrypts your internet traffic and hides your IP address from prying eyes. This way, you can access any site without being tracked or blocked by your ISP or government. An ad-blocker is a tool that blocks unwanted ads or pop-ups that may interfere with your viewing experience.
-You should also use legal and reputable platforms to watch movies online, such as Netflix, Amazon Prime Video, Hulu, Disney+, or YouTube Premium. These platforms offer high-quality movies and shows that you can watch legally and safely on your device.
-Conclusion
-Alibaba Aur 41 Chor is a fun and family-friendly animated movie that you can watch in HD quality on your device. You can choose from different options to download or stream the movie online, but you should be aware of the legal and security risks involved in using torrent or free streaming sites. You should use a VPN service or an ad-blocker to protect yourself from these risks.
-We hope this article has helped you find the best way to watch Alibaba Aur 41 Chor in HD quality. If you have any questions or suggestions, please leave them in the comments below.
-Where to Find Alibaba Aur 41 Chor 720p Download Links
-If you prefer to download the movie and watch it offline, you may want to find some reliable and fast download links for Alibaba Aur 41 Chor 720p. You can find many sites that offer this option, such as MoviesCounter, FilmyZilla, or WorldFree4u. These sites provide direct download links for the movie in 720p resolution with subtitles. You can also choose from different formats, such as MKV, MP4, or AVI.
-However, finding download links for movies may not be easy or safe. You may have to deal with broken links, slow downloads, or fake files. You may also encounter ads or pop-ups that may redirect you to malicious sites or download unwanted software. Therefore, you should always use a VPN service or an ad-blocker to avoid these problems when downloading movies.
-Alibaba Aur 41 Chor: A Bollywood Adventure You Don't Want to Miss
-Alibaba Aur 41 Chor is a movie that will entertain you and your family with its comedy, action, romance, and musical elements. The movie is a modern adaptation of the classic Arabian Nights tale of Alibaba and the Forty Thieves. The movie showcases the culture and history of the Middle East and has a lot of humor and references to modern pop culture.
-The movie tells the story of Alibaba, a poor woodcutter who discovers a secret cave full of treasure belonging to a band of robbers. He decides to take some of the treasure for himself and his family, but soon gets into trouble with the leader of the robbers, Abu Hassan. He also gets help from Morgiana, a slave girl who works for Abu Hassan and falls in love with Alibaba.
-The movie has colorful animation, catchy songs, and a lot of humor and action. The movie also has a positive message about courage, honesty, and generosity. The movie is suitable for all ages and will make you laugh and cheer for Alibaba and his friends.
-Conclusion
-Alibaba Aur 41 Chor is a fun and family-friendly animated movie that you can watch in HD quality on your device. You can choose from different options to download or stream the movie online in 720p resolution with subtitles. However, you should be aware of the legal and security risks involved in using torrent or free streaming sites and use a VPN service or an ad-blocker to protect yourself from these risks.
-We hope this article has helped you find the best way to watch Alibaba Aur 41 Chor in HD quality. If you have any questions or suggestions, please leave them in the comments below.
-What are the Benefits of Watching Alibaba Aur 41 Chor in HD Quality?
-Watching Alibaba Aur 41 Chor in HD quality has many benefits that will enhance your viewing experience. Some of these benefits are:
-
-Better Picture Quality : Watching the movie in HD quality means that you will see more details and colors on the screen. The movie will look sharper and clearer, and you will not miss any important scenes or expressions. You will also enjoy the animation and the visual effects more in HD quality.
-Better Sound Quality : Watching the movie in HD quality also means that you will hear better sound quality on your speakers or headphones. The movie has a great soundtrack and sound effects that will make you feel like you are part of the action. You will also hear the voices of the characters more clearly and distinctly.
-Better Viewing Experience : Watching the movie in HD quality will make you feel more immersed and entertained by the movie. You will be able to appreciate the story, the characters, the humor, and the message of the movie more in HD quality. You will also have a more enjoyable time watching the movie with your family or friends.
-
-How to Optimize Your Device for Watching Alibaba Aur 41 Chor in HD Quality?
-If you want to watch Alibaba Aur 41 Chor in HD quality on your device, you should optimize your device for the best viewing experience. Here are some tips on how to do that:
-
-Choose the Right Device : You should choose a device that supports HD resolution and has a good screen size and quality. For example, you can watch the movie on your laptop, tablet, or smartphone, but you should avoid watching it on a small or low-quality screen.
-Adjust the Settings : You should adjust the settings of your device to ensure that you get the best picture and sound quality. For example, you should set the brightness, contrast, and color of your screen to suit your preferences and environment. You should also set the volume and sound mode of your speakers or headphones to match your needs.
-Use a Good Internet Connection : You should use a good internet connection to download or stream the movie online without any interruptions or delays. You should also avoid using a public or shared network that may be slow or insecure. You should use a private or secure network that offers high-speed and reliable service.
-
-Conclusion
-Alibaba Aur 41 Chor is a fun and family-friendly animated movie that you can watch in HD quality on your device. You can choose from different options to download or stream the movie online in 720p resolution with subtitles. However, you should be aware of the legal and security risks involved in using torrent or free streaming sites and use a VPN service or an ad-blocker to protect yourself from these risks.
-You should also optimize your device for watching the movie in HD quality by choosing the right device, adjusting the settings, and using a good internet connection. This way, you will enjoy the movie more and have a better viewing experience.
-We hope this article has helped you find the best way to watch Alibaba Aur 41 Chor in HD quality. If you have any questions or suggestions, please leave them in the comments below.
-Conclusion
-Alibaba Aur 41 Chor is a fun and family-friendly animated movie that you can watch in HD quality on your device. The movie is based on a classic Arabian Nights tale and has a lot of humor, action, romance, and musical elements. The movie also showcases the culture and history of the Middle East and has a positive message about courage, honesty, and generosity.
-You can choose from different options to download or stream the movie online in 720p resolution with subtitles. However, you should be aware of the legal and security risks involved in using torrent or free streaming sites and use a VPN service or an ad-blocker to protect yourself from these risks.
-You should also optimize your device for watching the movie in HD quality by choosing the right device, adjusting the settings, and using a good internet connection. This way, you will enjoy the movie more and have a better viewing experience.
-We hope this article has helped you find the best way to watch Alibaba Aur 41 Chor in HD quality. If you have any questions or suggestions, please leave them in the comments below.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Axasoft Cari Hesap Takip 2.7.9 Keygen Extra Quality.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Axasoft Cari Hesap Takip 2.7.9 Keygen Extra Quality.md
deleted file mode 100644
index dd5de4c979ddfac4f4de6875a0f2f756a91ab075..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Axasoft Cari Hesap Takip 2.7.9 Keygen Extra Quality.md
+++ /dev/null
@@ -1,11 +0,0 @@
-
-Sitemiz Ses Tarih - Eksi (2.4.8.11) B.S.A (2011) Windows 7 64bit. BilsoftMuhasebe lisans anahtar Axasoft on muhasebe crack!! better!! on racvesehyd., axasoft cari hesap takip lisans kodu, axa cari hesap.
-Sitemiz Tarih Eksi (2.4.8.11) B.S.A (2011) Windows 7 64bit. BilsoftMuhasebe lisans anahtar Axasoft on muhasebe crack!! better!! on racvesehyd., axasoft cari hesap takip lisans kodu, axa cari hesap.
-axasoft cari hesap takip 2.7.9 keygen Download ✏ ✏ ✏ https://urlin.us/2uEyrb
- axa cari hesap takip v 2.7.9 program keygen. Abacom FrontDesigner v3 0 En De Fr ISO mitwaa marathi movie full download free Windows.7.Loader.v2.0.9-DAZ (32Bit-64Bit) 12 Wolf Quest 2.7 Free Download Full Versioninstmanksgolkesl WPI 2018 2.0 Dixguel03 64 bit Novel Mona Gersang Download 5 dancing f nami extreme english download Two Mothers Movie Torrent Download Hard Disk Sentinel Pro 4.30 Registration Key.epubgolkes
-2019 Review of the Business Intelligence Dashboards for SQL. Inventor 08/24/2016, Team Centers,1.x, Install instructions: Axasoft Inventor Development Edition Axasoft is an innovative online award winning collaboration solution, that helps business users stay productive in the age of IT change.
-Global Release 2.3 Support for SQL Server 2012 and SQL Server 2014 Addition of new features for databases, BI dashboards, BI project.... Download axasoft cari hesap takip v 2.7.9 program keygen. Axasoft. Download axasoft cari hesap takip 2.7.9 program keygen? Axasoft cari hesap takip 2.7.9.
-Free download xforce keygen autocad 2014 64 bit.. Autodesk Inventor Pro 2014 X86-x64 Torrent 528 -> DOWNLOAD (Mirror #1).... between.. Vault Workgroup 2015 64 Bit Crack Torrent Download... 1 Update for Vault... Autodesk Inventor Pro 2014 X86-x64 Torrent 528.... 3ds max.... Autocad 2016 xforce keygen 64 bit Adobe Photoshop Elements... Autodesk Inventor Pro 2014 X86-x64 Torrent 528 - 12 Rounds 3.... Xforce Keygen 64-bit.. File Type PDF. Autodesk Inventor. 2014 Espanol. 2014. Select and download one of the language packs... Inventor. Professional. 2014 for all operating systems and languages. For installation on... Inventor Pro. 2014 X86-x64. Torrent 528 ->.
-
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Hd Yamla Pagla Deewana 2 Full Movie Free Download).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Hd Yamla Pagla Deewana 2 Full Movie Free Download).md
deleted file mode 100644
index d6bcde3000db63b9c3e01973f45127da06d3293b..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (Hd Yamla Pagla Deewana 2 Full Movie Free Download).md
+++ /dev/null
@@ -1,16 +0,0 @@
-HD Online Player (Hd Yamla Pagla Deewana 2 Full Movie Free Download) Download File ✯✯✯ https://urlin.us/2uEygn
-
-Login; Register. Yamla Pagla Deewana 2 Movie Streaming Online Watch on Jio Cinema, MX Player, Voot. Yamla Pagla Deevana 2 (2013). User reviews. Film cad rating. Yamalan.
-Watch in Full hd 720 quality, Yamalana 2: Yamalana 2 (2013) watch online in good quality for free completely in Russian.
-KinoPoisk rating.
-IMDb rating.
-Cast.
-Yamla Pagla Deewana 2 HD Torrent.
-Yamla Pagla Deewana 2 Online Full Movie.
-Yamla Pagla Deewana 2 Movie Download Torrent.
-Yamla Pagla Deewana 2 Movie Online Free Download.
-Yamla Pagla Deewana 2 Thumbelina (HD).
-Yamla Pagla Deewana 2 Thumbelina (HD). 8a78ff9644
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Itisnotfoundanyfilespecifiedforisarcextractsolution NEW!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Itisnotfoundanyfilespecifiedforisarcextractsolution NEW!.md
deleted file mode 100644
index d31a3a6a3df4bd2b676bcbe9a9e945aa04a4a9ec..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Itisnotfoundanyfilespecifiedforisarcextractsolution NEW!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Itisnotfoundanyfilespecifiedforisarcextractsolution Download File ===== https://urlin.us/2uEyHO
-
-Downloadmahadevsongsansarsaram .... downloadmahadevsongsansarsaram · Itisnotfoundanyfilespecifiedforisarcextractsolution · Portable Photoshop Cs3 ... 1fdad05405
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Av Bros Puzzle Pro 3.1 Crack.md b/spaces/inreVtussa/clothingai/Examples/Av Bros Puzzle Pro 3.1 Crack.md
deleted file mode 100644
index fc665a5073f10e7a1cf6ced8763f29bb84f9785f..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Av Bros Puzzle Pro 3.1 Crack.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-retro-futuristic puzzle games were a favorite of mine growing up. the usual layout is that one of you is the player and the other is the computer. the computer plays by establishing rules, usually a very simple set of rules, and then the player has to figure out how to play against them. how to play that game usually involves memorizing a pattern and repeating it many times. this is true for everything, but most of these games tend to be very simple.
-a puzzle game is a kind of game that is mostly played for fun, and like many other "games", requires a bit of strategy and skill to complete. puzzle games can be broadly divided into two categories: strategic and intellectual. in strategic games, the aim is to achieve specific goals within a fixed time limit, while in intellectual games, the aim is to complete a specific task or solve a problem. some puzzles require solving riddles and unriddling, while others require memorization or physical skill. other puzzles may require unusual logic or complex reasoning.
-av bros puzzle pro 3.1 crack Download Zip ✔ https://tiurll.com/2uCleU
-my favorite genre is completely puzzle games, where the whole point is to work through a story or solve a puzzle. my favorite puzzles are the ones that require you to solve by yourself, without any hints or walkthroughs. it was quite fun, playing through the story and solving the puzzles.
-the choices you make in the game directly determine your final score. the game ends when one player has solved the puzzles and collected all the money. solving each puzzle is a race against time, and the one who is the fastest to solve all of the puzzles wins the game.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Bartender Enterprise Automation 9.3 TOP Keygen.md b/spaces/inreVtussa/clothingai/Examples/Bartender Enterprise Automation 9.3 TOP Keygen.md
deleted file mode 100644
index 6dd73e6b766dcf7c14f6679400bc9125550df69b..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Bartender Enterprise Automation 9.3 TOP Keygen.md
+++ /dev/null
@@ -1,12 +0,0 @@
-Bartender Enterprise Automation 9.3 Keygen Download File --->>> https://tiurll.com/2uCjCS
-
-January 30, 2022 - Bartender 11.1.2 R7 Crack & Serial key (Torrent) Free download. Crack enterprise bartender automation. Bartender Crack is the best software ... download bartender 11 crack
-download bartender 11 crack.
-Bartender 11 Crack - This software allows you to customize your cocktails.
-You can create your own cocktails and you can make new cocktails...
-Bartender 11 Crack.
-Download Bartender 11.1.2 / Bartender 11 Crack.
-DOWNLOAD FREE . 8a78ff9644
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Bupena Kelas 5 Sd Pdf 140 !!LINK!!.md b/spaces/inreVtussa/clothingai/Examples/Bupena Kelas 5 Sd Pdf 140 !!LINK!!.md
deleted file mode 100644
index 38ebb222a1842b79a6e4df5c9adc3442c9b2d8d3..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Bupena Kelas 5 Sd Pdf 140 !!LINK!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-bupena kelas 5 sd pdf 140 DOWNLOAD »»» https://tiurll.com/2uCiri
-
-Shildeg kinonuudiig shuud uz4/5(137)kino shuud uzeh saituud help you make the Kino ..... yamarch bj bolno ... bupena kelas 5 sd pdf 140 1fdad05405
-
-
-
diff --git a/spaces/iqovocn/ChuanhuChatGPT/README.md b/spaces/iqovocn/ChuanhuChatGPT/README.md
deleted file mode 100644
index 79790f767ded0eb77b8129f8e960c65b8d166c14..0000000000000000000000000000000000000000
--- a/spaces/iqovocn/ChuanhuChatGPT/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChuanhuChatGPT
-emoji: 🐯
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.33.1
-app_file: ChuanhuChatbot.py
-pinned: false
-license: gpl-3.0
-duplicated_from: JohnSmith9982/ChuanhuChatGPT
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/jackli888/stable-diffusion-webui/modules/ui.py b/spaces/jackli888/stable-diffusion-webui/modules/ui.py
deleted file mode 100644
index badf4975128985ad55bb69d6ee028adc0a61f97c..0000000000000000000000000000000000000000
--- a/spaces/jackli888/stable-diffusion-webui/modules/ui.py
+++ /dev/null
@@ -1,1798 +0,0 @@
-import html
-import json
-import math
-import mimetypes
-import os
-import platform
-import random
-import sys
-import tempfile
-import time
-import traceback
-from functools import partial, reduce
-import warnings
-
-import gradio as gr
-import gradio.routes
-import gradio.utils
-import numpy as np
-from PIL import Image, PngImagePlugin
-from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing
-from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
-from modules.paths import script_path, data_path
-
-from modules.shared import opts, cmd_opts, restricted_opts
-
-import modules.codeformer_model
-import modules.generation_parameters_copypaste as parameters_copypaste
-import modules.gfpgan_model
-import modules.hypernetworks.ui
-import modules.scripts
-import modules.shared as shared
-import modules.styles
-import modules.textual_inversion.ui
-from modules import prompt_parser
-from modules.images import save_image
-from modules.sd_hijack import model_hijack
-from modules.sd_samplers import samplers, samplers_for_img2img
-from modules.textual_inversion import textual_inversion
-import modules.hypernetworks.ui
-from modules.generation_parameters_copypaste import image_from_url_text
-import modules.extras
-
-warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
-
-# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
-mimetypes.init()
-mimetypes.add_type('application/javascript', '.js')
-
-if not cmd_opts.share and not cmd_opts.listen:
- # fix gradio phoning home
- gradio.utils.version_check = lambda: None
- gradio.utils.get_local_ip_address = lambda: '127.0.0.1'
-
-if cmd_opts.ngrok is not None:
- import modules.ngrok as ngrok
- print('ngrok authtoken detected, trying to connect...')
- ngrok.connect(
- cmd_opts.ngrok,
- cmd_opts.port if cmd_opts.port is not None else 7860,
- cmd_opts.ngrok_region
- )
-
-
-def gr_show(visible=True):
- return {"visible": visible, "__type__": "update"}
-
-
-sample_img2img = "assets/stable-samples/img2img/sketch-mountains-input.jpg"
-sample_img2img = sample_img2img if os.path.exists(sample_img2img) else None
-
-css_hide_progressbar = """
-.wrap .m-12 svg { display:none!important; }
-.wrap .m-12::before { content:"Loading..." }
-.wrap .z-20 svg { display:none!important; }
-.wrap .z-20::before { content:"Loading..." }
-.wrap.cover-bg .z-20::before { content:"" }
-.progress-bar { display:none!important; }
-.meta-text { display:none!important; }
-.meta-text-center { display:none!important; }
-"""
-
-# Using constants for these since the variation selector isn't visible.
-# Important that they exactly match script.js for tooltip to work.
-random_symbol = '\U0001f3b2\ufe0f' # 🎲️
-reuse_symbol = '\u267b\ufe0f' # ♻️
-paste_symbol = '\u2199\ufe0f' # ↙
-refresh_symbol = '\U0001f504' # 🔄
-save_style_symbol = '\U0001f4be' # 💾
-apply_style_symbol = '\U0001f4cb' # 📋
-clear_prompt_symbol = '\U0001F5D1' # 🗑️
-extra_networks_symbol = '\U0001F3B4' # 🎴
-switch_values_symbol = '\U000021C5' # ⇅
-
-
-def plaintext_to_html(text):
- return ui_common.plaintext_to_html(text)
-
-
-def send_gradio_gallery_to_image(x):
- if len(x) == 0:
- return None
- return image_from_url_text(x[0])
-
-def visit(x, func, path=""):
- if hasattr(x, 'children'):
- for c in x.children:
- visit(c, func, path)
- elif x.label is not None:
- func(path + "/" + str(x.label), x)
-
-
-def add_style(name: str, prompt: str, negative_prompt: str):
- if name is None:
- return [gr_show() for x in range(4)]
-
- style = modules.styles.PromptStyle(name, prompt, negative_prompt)
- shared.prompt_styles.styles[style.name] = style
- # Save all loaded prompt styles: this allows us to update the storage format in the future more easily, because we
- # reserialize all styles every time we save them
- shared.prompt_styles.save_styles(shared.styles_filename)
-
- return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(2)]
-
-
-def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
- from modules import processing, devices
-
- if not enable:
- return ""
-
- p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
-
- with devices.autocast():
- p.init([""], [0], [0])
-
- return f"resize: from {p.width}x{p.height} to {p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y} "
-
-
-def apply_styles(prompt, prompt_neg, styles):
- prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
- prompt_neg = shared.prompt_styles.apply_negative_styles_to_prompt(prompt_neg, styles)
-
- return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=prompt_neg), gr.Dropdown.update(value=[])]
-
-
-def process_interrogate(interrogation_function, mode, ii_input_dir, ii_output_dir, *ii_singles):
- if mode in {0, 1, 3, 4}:
- return [interrogation_function(ii_singles[mode]), None]
- elif mode == 2:
- return [interrogation_function(ii_singles[mode]["image"]), None]
- elif mode == 5:
- assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
- images = shared.listfiles(ii_input_dir)
- print(f"Will process {len(images)} images.")
- if ii_output_dir != "":
- os.makedirs(ii_output_dir, exist_ok=True)
- else:
- ii_output_dir = ii_input_dir
-
- for image in images:
- img = Image.open(image)
- filename = os.path.basename(image)
- left, _ = os.path.splitext(filename)
- print(interrogation_function(img), file=open(os.path.join(ii_output_dir, left + ".txt"), 'a'))
-
- return [gr.update(), None]
-
-
-def interrogate(image):
- prompt = shared.interrogator.interrogate(image.convert("RGB"))
- return gr.update() if prompt is None else prompt
-
-
-def interrogate_deepbooru(image):
- prompt = deepbooru.model.tag(image)
- return gr.update() if prompt is None else prompt
-
-
-def create_seed_inputs(target_interface):
- with FormRow(elem_id=target_interface + '_seed_row'):
- seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed')
- seed.style(container=False)
- random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed')
- reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed')
-
- with gr.Group(elem_id=target_interface + '_subseed_show_box'):
- seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False)
-
- # Components to show/hide based on the 'Extra' checkbox
- seed_extras = []
-
- with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1:
- seed_extras.append(seed_extra_row_1)
- subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed')
- subseed.style(container=False)
- random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed')
- reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed')
- subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength')
-
- with FormRow(visible=False) as seed_extra_row_2:
- seed_extras.append(seed_extra_row_2)
- seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w')
- seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h')
-
- random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed])
- random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed])
-
- def change_visibility(show):
- return {comp: gr_show(show) for comp in seed_extras}
-
- seed_checkbox.change(change_visibility, show_progress=False, inputs=[seed_checkbox], outputs=seed_extras)
-
- return seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox
-
-
-
-def connect_clear_prompt(button):
- """Given clear button, prompt, and token_counter objects, setup clear prompt button click event"""
- button.click(
- _js="clear_prompt",
- fn=None,
- inputs=[],
- outputs=[],
- )
-
-
-def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: gr.Textbox, dummy_component, is_subseed):
- """ Connects a 'reuse (sub)seed' button's click event so that it copies last used
- (sub)seed value from generation info the to the seed field. If copying subseed and subseed strength
- was 0, i.e. no variation seed was used, it copies the normal seed value instead."""
- def copy_seed(gen_info_string: str, index):
- res = -1
-
- try:
- gen_info = json.loads(gen_info_string)
- index -= gen_info.get('index_of_first_image', 0)
-
- if is_subseed and gen_info.get('subseed_strength', 0) > 0:
- all_subseeds = gen_info.get('all_subseeds', [-1])
- res = all_subseeds[index if 0 <= index < len(all_subseeds) else 0]
- else:
- all_seeds = gen_info.get('all_seeds', [-1])
- res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
-
- except json.decoder.JSONDecodeError as e:
- if gen_info_string != '':
- print("Error parsing JSON generation info:", file=sys.stderr)
- print(gen_info_string, file=sys.stderr)
-
- return [res, gr_show(False)]
-
- reuse_seed.click(
- fn=copy_seed,
- _js="(x, y) => [x, selected_gallery_index()]",
- show_progress=False,
- inputs=[generation_info, dummy_component],
- outputs=[seed, dummy_component]
- )
-
-
-def update_token_counter(text, steps):
- try:
- text, _ = extra_networks.parse_prompt(text)
-
- _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
- prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
-
- except Exception:
- # a parsing error can happen here during typing, and we don't want to bother the user with
- # messages related to it in console
- prompt_schedules = [[[steps, text]]]
-
- flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
- prompts = [prompt_text for step, prompt_text in flat_prompts]
- token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0])
- return f"{token_count}/{max_length} "
-
-
-def create_toprow(is_img2img):
- id_part = "img2img" if is_img2img else "txt2img"
-
- with gr.Row(elem_id=f"{id_part}_toprow", variant="compact"):
- with gr.Column(elem_id=f"{id_part}_prompt_container", scale=6):
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- prompt = gr.Textbox(label="Prompt", elem_id=f"{id_part}_prompt", show_label=False, lines=3, placeholder="Prompt (press Ctrl+Enter or Alt+Enter to generate)")
-
- with gr.Row():
- with gr.Column(scale=80):
- with gr.Row():
- negative_prompt = gr.Textbox(label="Negative prompt", elem_id=f"{id_part}_neg_prompt", show_label=False, lines=2, placeholder="Negative prompt (press Ctrl+Enter or Alt+Enter to generate)")
-
- button_interrogate = None
- button_deepbooru = None
- if is_img2img:
- with gr.Column(scale=1, elem_id="interrogate_col"):
- button_interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
- button_deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
-
- with gr.Column(scale=1, elem_id=f"{id_part}_actions_column"):
- with gr.Row(elem_id=f"{id_part}_generate_box"):
- interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
- skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
- submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
-
- skip.click(
- fn=lambda: shared.state.skip(),
- inputs=[],
- outputs=[],
- )
-
- interrupt.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- with gr.Row(elem_id=f"{id_part}_tools"):
- paste = ToolButton(value=paste_symbol, elem_id="paste")
- clear_prompt_button = ToolButton(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
- extra_networks_button = ToolButton(value=extra_networks_symbol, elem_id=f"{id_part}_extra_networks")
- prompt_style_apply = ToolButton(value=apply_style_symbol, elem_id=f"{id_part}_style_apply")
- save_style = ToolButton(value=save_style_symbol, elem_id=f"{id_part}_style_create")
-
- token_counter = gr.HTML(value=" ", elem_id=f"{id_part}_token_counter")
- token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
- negative_token_counter = gr.HTML(value=" ", elem_id=f"{id_part}_negative_token_counter")
- negative_token_button = gr.Button(visible=False, elem_id=f"{id_part}_negative_token_button")
-
- clear_prompt_button.click(
- fn=lambda *x: x,
- _js="confirm_clear_prompt",
- inputs=[prompt, negative_prompt],
- outputs=[prompt, negative_prompt],
- )
-
- with gr.Row(elem_id=f"{id_part}_styles_row"):
- prompt_styles = gr.Dropdown(label="Styles", elem_id=f"{id_part}_styles", choices=[k for k, v in shared.prompt_styles.styles.items()], value=[], multiselect=True)
- create_refresh_button(prompt_styles, shared.prompt_styles.reload, lambda: {"choices": [k for k, v in shared.prompt_styles.styles.items()]}, f"refresh_{id_part}_styles")
-
- return prompt, prompt_styles, negative_prompt, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button
-
-
-def setup_progressbar(*args, **kwargs):
- pass
-
-
-def apply_setting(key, value):
- if value is None:
- return gr.update()
-
- if shared.cmd_opts.freeze_settings:
- return gr.update()
-
- # dont allow model to be swapped when model hash exists in prompt
- if key == "sd_model_checkpoint" and opts.disable_weights_auto_swap:
- return gr.update()
-
- if key == "sd_model_checkpoint":
- ckpt_info = sd_models.get_closet_checkpoint_match(value)
-
- if ckpt_info is not None:
- value = ckpt_info.title
- else:
- return gr.update()
-
- comp_args = opts.data_labels[key].component_args
- if comp_args and isinstance(comp_args, dict) and comp_args.get('visible') is False:
- return
-
- valtype = type(opts.data_labels[key].default)
- oldval = opts.data.get(key, None)
- opts.data[key] = valtype(value) if valtype != type(None) else value
- if oldval != value and opts.data_labels[key].onchange is not None:
- opts.data_labels[key].onchange()
-
- opts.save(shared.config_filename)
- return getattr(opts, key)
-
-
-def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
- def refresh():
- refresh_method()
- args = refreshed_args() if callable(refreshed_args) else refreshed_args
-
- for k, v in args.items():
- setattr(refresh_component, k, v)
-
- return gr.update(**(args or {}))
-
- refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
- refresh_button.click(
- fn=refresh,
- inputs=[],
- outputs=[refresh_component]
- )
- return refresh_button
-
-
-def create_output_panel(tabname, outdir):
- return ui_common.create_output_panel(tabname, outdir)
-
-
-def create_sampler_and_steps_selection(choices, tabname):
- if opts.samplers_in_dropdown:
- with FormRow(elem_id=f"sampler_selection_{tabname}"):
- sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
- steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
- else:
- with FormGroup(elem_id=f"sampler_selection_{tabname}"):
- steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20)
- sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index")
-
- return steps, sampler_index
-
-
-def ordered_ui_categories():
- user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}
-
- for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
- yield category
-
-
-def get_value_for_setting(key):
- value = getattr(opts, key)
-
- info = opts.data_labels[key]
- args = info.component_args() if callable(info.component_args) else info.component_args or {}
- args = {k: v for k, v in args.items() if k not in {'precision'}}
-
- return gr.update(value=value, **args)
-
-
-def create_override_settings_dropdown(tabname, row):
- dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)
-
- dropdown.change(
- fn=lambda x: gr.Dropdown.update(visible=len(x) > 0),
- inputs=[dropdown],
- outputs=[dropdown],
- )
-
- return dropdown
-
-
-def create_ui():
- import modules.img2img
- import modules.txt2img
-
- reload_javascript()
-
- parameters_copypaste.reset()
-
- modules.scripts.scripts_current = modules.scripts.scripts_txt2img
- modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
-
- with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, txt2img_prompt_styles, txt2img_negative_prompt, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=False)
-
- dummy_component = gr.Label(visible=False)
- txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="binary", visible=False)
-
- with FormRow(variant='compact', elem_id="txt2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'txt2img')
-
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='compact', elem_id="txt2img_settings"):
- for category in ordered_ui_categories():
- if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
-
- elif category == "dimensions":
- with FormRow():
- with gr.Column(elem_id="txt2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height")
-
- res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="txt2img_res_switch_btn")
- if opts.dimensions_and_batch_together:
- with gr.Column(elem_id="txt2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
-
- elif category == "cfg":
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale")
-
- elif category == "seed":
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img')
-
- elif category == "checkboxes":
- with FormRow(elem_id="txt2img_checkboxes", variant="compact"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
- hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
-
- elif category == "hires_fix":
- with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
- with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
- hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
- hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
-
- with FormRow(elem_id="txt2img_hires_fix_row2", variant="compact"):
- hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
- hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x")
- hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y")
-
- elif category == "batch":
- if not opts.dimensions_and_batch_together:
- with FormRow(elem_id="txt2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size")
-
- elif category == "override_settings":
- with FormRow(elem_id="txt2img_override_settings_row") as row:
- override_settings = create_override_settings_dropdown('txt2img', row)
-
- elif category == "scripts":
- with FormGroup(elem_id="txt2img_script_container"):
- custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
-
- hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
- for input in hr_resolution_preview_inputs:
- input.change(
- fn=calc_resolution_hires,
- inputs=hr_resolution_preview_inputs,
- outputs=[hr_final_resolution],
- show_progress=False,
- )
- input.change(
- None,
- _js="onCalcResolutionHires",
- inputs=hr_resolution_preview_inputs,
- outputs=[],
- show_progress=False,
- )
-
- txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples)
-
- connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
- connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
-
- txt2img_args = dict(
- fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']),
- _js="submit",
- inputs=[
- dummy_component,
- txt2img_prompt,
- txt2img_negative_prompt,
- txt2img_prompt_styles,
- steps,
- sampler_index,
- restore_faces,
- tiling,
- batch_count,
- batch_size,
- cfg_scale,
- seed,
- subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
- height,
- width,
- enable_hr,
- denoising_strength,
- hr_scale,
- hr_upscaler,
- hr_second_pass_steps,
- hr_resize_x,
- hr_resize_y,
- override_settings,
- ] + custom_inputs,
-
- outputs=[
- txt2img_gallery,
- generation_info,
- html_info,
- html_log,
- ],
- show_progress=False,
- )
-
- txt2img_prompt.submit(**txt2img_args)
- submit.click(**txt2img_args)
-
- res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
-
- txt_prompt_img.change(
- fn=modules.images.image_data,
- inputs=[
- txt_prompt_img
- ],
- outputs=[
- txt2img_prompt,
- txt_prompt_img
- ]
- )
-
- enable_hr.change(
- fn=lambda x: gr_show(x),
- inputs=[enable_hr],
- outputs=[hr_options],
- show_progress = False,
- )
-
- txt2img_paste_fields = [
- (txt2img_prompt, "Prompt"),
- (txt2img_negative_prompt, "Negative prompt"),
- (steps, "Steps"),
- (sampler_index, "Sampler"),
- (restore_faces, "Face restoration"),
- (cfg_scale, "CFG scale"),
- (seed, "Seed"),
- (width, "Size-1"),
- (height, "Size-2"),
- (batch_size, "Batch size"),
- (subseed, "Variation seed"),
- (subseed_strength, "Variation seed strength"),
- (seed_resize_from_w, "Seed resize from-1"),
- (seed_resize_from_h, "Seed resize from-2"),
- (denoising_strength, "Denoising strength"),
- (enable_hr, lambda d: "Denoising strength" in d),
- (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
- (hr_scale, "Hires upscale"),
- (hr_upscaler, "Hires upscaler"),
- (hr_second_pass_steps, "Hires steps"),
- (hr_resize_x, "Hires resize-1"),
- (hr_resize_y, "Hires resize-2"),
- *modules.scripts.scripts_txt2img.infotext_fields
- ]
- parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
- parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
- paste_button=txt2img_paste, tabname="txt2img", source_text_component=txt2img_prompt, source_image_component=None,
- ))
-
- txt2img_preview_params = [
- txt2img_prompt,
- txt2img_negative_prompt,
- steps,
- sampler_index,
- cfg_scale,
- seed,
- width,
- height,
- ]
-
- token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_prompt, steps], outputs=[token_counter])
- negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
-
- ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)
-
- modules.scripts.scripts_current = modules.scripts.scripts_img2img
- modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True)
-
- with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, img2img_prompt_styles, img2img_negative_prompt, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste, extra_networks_button, token_counter, token_button, negative_token_counter, negative_token_button = create_toprow(is_img2img=True)
-
- img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="binary", visible=False)
-
- with FormRow(variant='compact', elem_id="img2img_extra_networks", visible=False) as extra_networks:
- from modules import ui_extra_networks
- extra_networks_ui_img2img = ui_extra_networks.create_ui(extra_networks, extra_networks_button, 'img2img')
-
- with FormRow().style(equal_height=False):
- with gr.Column(variant='compact', elem_id="img2img_settings"):
- copy_image_buttons = []
- copy_image_destinations = {}
-
- def add_copy_image_controls(tab_name, elem):
- with gr.Row(variant="compact", elem_id=f"img2img_copy_to_{tab_name}"):
- gr.HTML("Copy image to: ", elem_id=f"img2img_label_copy_to_{tab_name}")
-
- for title, name in zip(['img2img', 'sketch', 'inpaint', 'inpaint sketch'], ['img2img', 'sketch', 'inpaint', 'inpaint_sketch']):
- if name == tab_name:
- gr.Button(title, interactive=False)
- copy_image_destinations[name] = elem
- continue
-
- button = gr.Button(title)
- copy_image_buttons.append((button, name, elem))
-
- with gr.Tabs(elem_id="mode_img2img"):
- with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab") as tab_img2img:
- init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool="editor", image_mode="RGBA").style(height=480)
- add_copy_image_controls('img2img', init_img)
-
- with gr.TabItem('Sketch', id='img2img_sketch', elem_id="img2img_img2img_sketch_tab") as tab_sketch:
- sketch = gr.Image(label="Image for img2img", elem_id="img2img_sketch", show_label=False, source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=480)
- add_copy_image_controls('sketch', sketch)
-
- with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab") as tab_inpaint:
- init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA").style(height=480)
- add_copy_image_controls('inpaint', init_img_with_mask)
-
- with gr.TabItem('Inpaint sketch', id='inpaint_sketch', elem_id="img2img_inpaint_sketch_tab") as tab_inpaint_color:
- inpaint_color_sketch = gr.Image(label="Color sketch inpainting", show_label=False, elem_id="inpaint_sketch", source="upload", interactive=True, type="pil", tool="color-sketch", image_mode="RGBA").style(height=480)
- inpaint_color_sketch_orig = gr.State(None)
- add_copy_image_controls('inpaint_sketch', inpaint_color_sketch)
-
- def update_orig(image, state):
- if image is not None:
- same_size = state is not None and state.size == image.size
- has_exact_match = np.any(np.all(np.array(image) == np.array(state), axis=-1))
- edited = same_size and has_exact_match
- return image if not edited or state is None else state
-
- inpaint_color_sketch.change(update_orig, [inpaint_color_sketch, inpaint_color_sketch_orig], inpaint_color_sketch_orig)
-
- with gr.TabItem('Inpaint upload', id='inpaint_upload', elem_id="img2img_inpaint_upload_tab") as tab_inpaint_upload:
- init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", elem_id="img_inpaint_base")
- init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", elem_id="img_inpaint_mask")
-
- with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
- hidden = ' Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
- gr.HTML(
- f"Process images in a directory on the same machine where the server is running." +
- f" Use an empty output directory to save pictures normally instead of writing to the output directory." +
- f" Add inpaint batch mask directory to enable inpaint batch processing."
-                            f"{hidden}"
- )
- img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
- img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
- img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
-
- def copy_image(img):
- if isinstance(img, dict) and 'image' in img:
- return img['image']
-
- return img
-
- for button, name, elem in copy_image_buttons:
- button.click(
- fn=copy_image,
- inputs=[elem],
- outputs=[copy_image_destinations[name]],
- )
- button.click(
- fn=lambda: None,
- _js="switch_to_"+name.replace(" ", "_"),
- inputs=[],
- outputs=[],
- )
-
- with FormRow():
- resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
-
- for category in ordered_ui_categories():
- if category == "sampler":
- steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
-
- elif category == "dimensions":
- with FormRow():
- with gr.Column(elem_id="img2img_column_size", scale=4):
- width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width")
- height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height")
-
- res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="img2img_res_switch_btn")
- if opts.dimensions_and_batch_together:
- with gr.Column(elem_id="img2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
-
- elif category == "cfg":
- with FormGroup():
- with FormRow():
- cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale")
- image_cfg_scale = gr.Slider(minimum=0, maximum=3.0, step=0.05, label='Image CFG Scale', value=1.5, elem_id="img2img_image_cfg_scale", visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
- denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength")
-
- elif category == "seed":
- seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img')
-
- elif category == "checkboxes":
- with FormRow(elem_id="img2img_checkboxes", variant="compact"):
- restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
- tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
-
- elif category == "batch":
- if not opts.dimensions_and_batch_together:
- with FormRow(elem_id="img2img_column_batch"):
- batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count")
- batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size")
-
- elif category == "override_settings":
- with FormRow(elem_id="img2img_override_settings_row") as row:
- override_settings = create_override_settings_dropdown('img2img', row)
-
- elif category == "scripts":
- with FormGroup(elem_id="img2img_script_container"):
- custom_inputs = modules.scripts.scripts_img2img.setup_ui()
-
- elif category == "inpaint":
- with FormGroup(elem_id="inpaint_controls", visible=False) as inpaint_controls:
- with FormRow():
- mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
- mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha")
-
- with FormRow():
- inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")
-
- with FormRow():
- inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill")
-
- with FormRow():
- with gr.Column():
- inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res")
-
- with gr.Column(scale=4):
- inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding")
-
- def select_img2img_tab(tab):
- return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
-
- for i, elem in enumerate([tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]):
- elem.select(
- fn=lambda tab=i: select_img2img_tab(tab),
- inputs=[],
- outputs=[inpaint_controls, mask_alpha],
- )
-
- img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
-
- connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
- connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
-
- img2img_prompt_img.change(
- fn=modules.images.image_data,
- inputs=[
- img2img_prompt_img
- ],
- outputs=[
- img2img_prompt,
- img2img_prompt_img
- ]
- )
-
- img2img_args = dict(
- fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']),
- _js="submit_img2img",
- inputs=[
- dummy_component,
- dummy_component,
- img2img_prompt,
- img2img_negative_prompt,
- img2img_prompt_styles,
- init_img,
- sketch,
- init_img_with_mask,
- inpaint_color_sketch,
- inpaint_color_sketch_orig,
- init_img_inpaint,
- init_mask_inpaint,
- steps,
- sampler_index,
- mask_blur,
- mask_alpha,
- inpainting_fill,
- restore_faces,
- tiling,
- batch_count,
- batch_size,
- cfg_scale,
- image_cfg_scale,
- denoising_strength,
- seed,
- subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox,
- height,
- width,
- resize_mode,
- inpaint_full_res,
- inpaint_full_res_padding,
- inpainting_mask_invert,
- img2img_batch_input_dir,
- img2img_batch_output_dir,
- img2img_batch_inpaint_mask_dir,
- override_settings,
- ] + custom_inputs,
- outputs=[
- img2img_gallery,
- generation_info,
- html_info,
- html_log,
- ],
- show_progress=False,
- )
-
- interrogate_args = dict(
- _js="get_img2img_tab_index",
- inputs=[
- dummy_component,
- img2img_batch_input_dir,
- img2img_batch_output_dir,
- init_img,
- sketch,
- init_img_with_mask,
- inpaint_color_sketch,
- init_img_inpaint,
- ],
- outputs=[img2img_prompt, dummy_component],
- )
-
- img2img_prompt.submit(**img2img_args)
- submit.click(**img2img_args)
- res_switch_btn.click(lambda w, h: (h, w), inputs=[width, height], outputs=[width, height])
-
- img2img_interrogate.click(
- fn=lambda *args: process_interrogate(interrogate, *args),
- **interrogate_args,
- )
-
- img2img_deepbooru.click(
- fn=lambda *args: process_interrogate(interrogate_deepbooru, *args),
- **interrogate_args,
- )
-
- prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
- style_dropdowns = [txt2img_prompt_styles, img2img_prompt_styles]
- style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
-
- for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
- button.click(
- fn=add_style,
- _js="ask_for_style_name",
- # Have to pass empty dummy component here, because the JavaScript and Python function have to accept
- # the same number of parameters, but we only know the style-name after the JavaScript prompt
- inputs=[dummy_component, prompt, negative_prompt],
- outputs=[txt2img_prompt_styles, img2img_prompt_styles],
- )
-
- for button, (prompt, negative_prompt), styles, js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
- button.click(
- fn=apply_styles,
- _js=js_func,
- inputs=[prompt, negative_prompt, styles],
- outputs=[prompt, negative_prompt, styles],
- )
-
- token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
- negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[txt2img_negative_prompt, steps], outputs=[negative_token_counter])
-
- ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)
-
- img2img_paste_fields = [
- (img2img_prompt, "Prompt"),
- (img2img_negative_prompt, "Negative prompt"),
- (steps, "Steps"),
- (sampler_index, "Sampler"),
- (restore_faces, "Face restoration"),
- (cfg_scale, "CFG scale"),
- (image_cfg_scale, "Image CFG scale"),
- (seed, "Seed"),
- (width, "Size-1"),
- (height, "Size-2"),
- (batch_size, "Batch size"),
- (subseed, "Variation seed"),
- (subseed_strength, "Variation seed strength"),
- (seed_resize_from_w, "Seed resize from-1"),
- (seed_resize_from_h, "Seed resize from-2"),
- (denoising_strength, "Denoising strength"),
- (mask_blur, "Mask blur"),
- *modules.scripts.scripts_img2img.infotext_fields
- ]
- parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
- parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
- parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
- paste_button=img2img_paste, tabname="img2img", source_text_component=img2img_prompt, source_image_component=None,
- ))
-
- modules.scripts.scripts_current = None
-
- with gr.Blocks(analytics_enabled=False) as extras_interface:
- ui_postprocessing.create_ui()
-
- with gr.Blocks(analytics_enabled=False) as pnginfo_interface:
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='panel'):
- image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil")
-
- with gr.Column(variant='panel'):
- html = gr.HTML()
- generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info")
- html2 = gr.HTML()
- with gr.Row():
- buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
-
- for tabname, button in buttons.items():
- parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(
- paste_button=button, tabname=tabname, source_text_component=generation_info, source_image_component=image,
- ))
-
- image.change(
- fn=wrap_gradio_call(modules.extras.run_pnginfo),
- inputs=[image],
- outputs=[html, generation_info, html2],
- )
-
- def update_interp_description(value):
-            interp_description_css = "{}"
- interp_descriptions = {
- "No interpolation": interp_description_css.format("No interpolation will be used. Requires one model; A. Allows for format conversion and VAE baking."),
- "Weighted sum": interp_description_css.format("A weighted sum will be used for interpolation. Requires two models; A and B. The result is calculated as A * (1 - M) + B * M"),
- "Add difference": interp_description_css.format("The difference between the last two models will be added to the first. Requires three models; A, B and C. The result is calculated as A + (B - C) * M")
- }
- return interp_descriptions[value]
-
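# A minimal sketch (not webui code; the real merge happens in modules.extras.run_modelmerger)
# of the two interpolation formulas described above, applied to plain tensors standing in for
# checkpoint weights. The names A, B, C and M are illustrative.
import torch

A, B, C = (torch.randn(4) for _ in range(3))   # stand-ins for one weight key of models A, B, C
M = 0.3                                        # the "Multiplier (M)" slider value

weighted_sum   = A * (1 - M) + B * M           # "Weighted sum": needs models A and B
add_difference = A + (B - C) * M               # "Add difference": needs models A, B and C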
- with gr.Blocks(analytics_enabled=False) as modelmerger_interface:
- with gr.Row().style(equal_height=False):
- with gr.Column(variant='compact'):
- interp_description = gr.HTML(value=update_interp_description("Weighted sum"), elem_id="modelmerger_interp_description")
-
- with FormRow(elem_id="modelmerger_models"):
- primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)")
- create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A")
-
- secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)")
- create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B")
-
- tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)")
- create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C")
-
- custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name")
- interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount")
- interp_method = gr.Radio(choices=["No interpolation", "Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method")
- interp_method.change(fn=update_interp_description, inputs=[interp_method], outputs=[interp_description])
-
- with FormRow():
- checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format")
- save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half")
-
- with FormRow():
- with gr.Column():
- config_source = gr.Radio(choices=["A, B or C", "B", "C", "Don't"], value="A, B or C", label="Copy config from", type="index", elem_id="modelmerger_config_method")
-
- with gr.Column():
- with FormRow():
- bake_in_vae = gr.Dropdown(choices=["None"] + list(sd_vae.vae_dict), value="None", label="Bake in VAE", elem_id="modelmerger_bake_in_vae")
- create_refresh_button(bake_in_vae, sd_vae.refresh_vae_list, lambda: {"choices": ["None"] + list(sd_vae.vae_dict)}, "modelmerger_refresh_bake_in_vae")
-
- with FormRow():
- discard_weights = gr.Textbox(value="", label="Discard weights with matching name", elem_id="modelmerger_discard_weights")
-
- with gr.Row():
- modelmerger_merge = gr.Button(elem_id="modelmerger_merge", value="Merge", variant='primary')
-
- with gr.Column(variant='compact', elem_id="modelmerger_results_container"):
- with gr.Group(elem_id="modelmerger_results_panel"):
- modelmerger_result = gr.HTML(elem_id="modelmerger_result", show_label=False)
-
- with gr.Blocks(analytics_enabled=False) as train_interface:
- with gr.Row().style(equal_height=False):
-            gr.HTML(value="See wiki for detailed explanation.")
-
- with gr.Row(variant="compact").style(equal_height=False):
- with gr.Tabs(elem_id="train_tabs"):
-
- with gr.Tab(label="Create embedding"):
- new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name")
- initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text")
- nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt")
- overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- create_embedding = gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding")
-
- with gr.Tab(label="Create hypernetwork"):
- new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name")
- new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes")
- new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure")
- new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func")
- new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option")
- new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm")
- new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout")
- new_hypernetwork_dropout_structure = gr.Textbox("0, 0, 0", label="Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15", placeholder="1st and last digit must be 0 and values should be between 0 and 1. ex:'0, 0.01, 0'")
- overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork")
-
- with gr.Tab(label="Preprocess images"):
- process_src = gr.Textbox(label='Source directory', elem_id="train_process_src")
- process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst")
- process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width")
- process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height")
- preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action")
-
- with gr.Row():
- process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
- process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
- process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
- process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
- process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
- process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
-
- with gr.Row(visible=False) as process_split_extra_row:
- process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold")
- process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio")
-
- with gr.Row(visible=False) as process_focal_crop_row:
- process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight")
- process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight")
- process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
- process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
-
- with gr.Column(visible=False) as process_multicrop_col:
- gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
- with gr.Row():
- process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
- process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
- with gr.Row():
- process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
- process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
- with gr.Row():
- process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
- process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
-
- with gr.Row():
- with gr.Column(scale=3):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing")
- run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess")
-
- process_split.change(
- fn=lambda show: gr_show(show),
- inputs=[process_split],
- outputs=[process_split_extra_row],
- )
-
- process_focal_crop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_focal_crop],
- outputs=[process_focal_crop_row],
- )
-
- process_multicrop.change(
- fn=lambda show: gr_show(show),
- inputs=[process_multicrop],
- outputs=[process_multicrop_col],
- )
-
- def get_textual_inversion_template_names():
- return sorted([x for x in textual_inversion.textual_inversion_templates])
-
- with gr.Tab(label="Train"):
-                    gr.HTML(value="Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]")
- with FormRow():
- train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
- create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")
-
- train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
- create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
-
- with FormRow():
- embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
- hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate")
-
- with FormRow():
- clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"])
- clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False)
-
- with FormRow():
- batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size")
- gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step")
-
- dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory")
- log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory")
-
- with FormRow():
- template_file = gr.Dropdown(label='Prompt template', value="style_filewords.txt", elem_id="train_template_file", choices=get_textual_inversion_template_names())
-                                    create_refresh_button(template_file, textual_inversion.list_textual_inversion_templates, lambda: {"choices": get_textual_inversion_template_names()}, "refresh_train_template_file")
-
- training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width")
- training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height")
- varsize = gr.Checkbox(label="Do not resize images", value=False, elem_id="train_varsize")
- steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps")
-
- with FormRow():
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every")
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every")
-
- use_weight = gr.Checkbox(label="Use PNG alpha channel as loss weight", value=False, elem_id="use_weight")
-
- save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding")
- preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img")
-
- shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags")
- tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out")
-
- latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method")
-
- with gr.Row():
- train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding")
- interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training")
- train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork")
-
- params = script_callbacks.UiTrainTabParams(txt2img_preview_params)
-
- script_callbacks.ui_train_tabs_callback(params)
-
- with gr.Column(elem_id='ti_gallery_container'):
- ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
- ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
- ti_progress = gr.HTML(elem_id="ti_progress", value="")
- ti_outcome = gr.HTML(elem_id="ti_error", value="")
-
- create_embedding.click(
- fn=modules.textual_inversion.ui.create_embedding,
- inputs=[
- new_embedding_name,
- initialization_text,
- nvpt,
- overwrite_old_embedding,
- ],
- outputs=[
- train_embedding_name,
- ti_output,
- ti_outcome,
- ]
- )
-
- create_hypernetwork.click(
- fn=modules.hypernetworks.ui.create_hypernetwork,
- inputs=[
- new_hypernetwork_name,
- new_hypernetwork_sizes,
- overwrite_old_hypernetwork,
- new_hypernetwork_layer_structure,
- new_hypernetwork_activation_func,
- new_hypernetwork_initialization_option,
- new_hypernetwork_add_layer_norm,
- new_hypernetwork_use_dropout,
- new_hypernetwork_dropout_structure
- ],
- outputs=[
- train_hypernetwork_name,
- ti_output,
- ti_outcome,
- ]
- )
-
- run_preprocess.click(
- fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- process_src,
- process_dst,
- process_width,
- process_height,
- preprocess_txt_action,
- process_flip,
- process_split,
- process_caption,
- process_caption_deepbooru,
- process_split_threshold,
- process_overlap_ratio,
- process_focal_crop,
- process_focal_crop_face_weight,
- process_focal_crop_entropy_weight,
- process_focal_crop_edges_weight,
- process_focal_crop_debug,
- process_multicrop,
- process_multicrop_mindim,
- process_multicrop_maxdim,
- process_multicrop_minarea,
- process_multicrop_maxarea,
- process_multicrop_objective,
- process_multicrop_threshold,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ],
- )
-
- train_embedding.click(
- fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- train_embedding_name,
- embedding_learn_rate,
- batch_size,
- gradient_step,
- dataset_directory,
- log_directory,
- training_width,
- training_height,
- varsize,
- steps,
- clip_grad_mode,
- clip_grad_value,
- shuffle_tags,
- tag_drop_out,
- latent_sampling_method,
- use_weight,
- create_image_every,
- save_embedding_every,
- template_file,
- save_image_with_stored_embedding,
- preview_from_txt2img,
- *txt2img_preview_params,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ]
- )
-
- train_hypernetwork.click(
- fn=wrap_gradio_gpu_call(modules.hypernetworks.ui.train_hypernetwork, extra_outputs=[gr.update()]),
- _js="start_training_textual_inversion",
- inputs=[
- dummy_component,
- train_hypernetwork_name,
- hypernetwork_learn_rate,
- batch_size,
- gradient_step,
- dataset_directory,
- log_directory,
- training_width,
- training_height,
- varsize,
- steps,
- clip_grad_mode,
- clip_grad_value,
- shuffle_tags,
- tag_drop_out,
- latent_sampling_method,
- use_weight,
- create_image_every,
- save_embedding_every,
- template_file,
- preview_from_txt2img,
- *txt2img_preview_params,
- ],
- outputs=[
- ti_output,
- ti_outcome,
- ]
- )
-
- interrupt_training.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- interrupt_preprocessing.click(
- fn=lambda: shared.state.interrupt(),
- inputs=[],
- outputs=[],
- )
-
- def create_setting_component(key, is_quicksettings=False):
- def fun():
- return opts.data[key] if key in opts.data else opts.data_labels[key].default
-
- info = opts.data_labels[key]
- t = type(info.default)
-
- args = info.component_args() if callable(info.component_args) else info.component_args
-
- if info.component is not None:
- comp = info.component
- elif t == str:
- comp = gr.Textbox
- elif t == int:
- comp = gr.Number
- elif t == bool:
- comp = gr.Checkbox
- else:
- raise Exception(f'bad options item type: {str(t)} for key {key}')
-
- elem_id = "setting_"+key
-
- if info.refresh is not None:
- if is_quicksettings:
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
- else:
- with FormRow():
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key)
- else:
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
-
- return res
-
- components = []
- component_dict = {}
- shared.settings_components = component_dict
-
- script_callbacks.ui_settings_callback()
- opts.reorder()
-
- def run_settings(*args):
- changed = []
-
- for key, value, comp in zip(opts.data_labels.keys(), args, components):
- assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
-
- for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if comp == dummy_component:
- continue
-
- if opts.set(key, value):
- changed.append(key)
-
- try:
- opts.save(shared.config_filename)
- except RuntimeError:
- return opts.dumpjson(), f'{len(changed)} settings changed without save: {", ".join(changed)}.'
- return opts.dumpjson(), f'{len(changed)} settings changed{": " if len(changed) > 0 else ""}{", ".join(changed)}.'
-
- def run_settings_single(value, key):
- if not opts.same_type(value, opts.data_labels[key].default):
- return gr.update(visible=True), opts.dumpjson()
-
- if not opts.set(key, value):
- return gr.update(value=getattr(opts, key)), opts.dumpjson()
-
- opts.save(shared.config_filename)
-
- return get_value_for_setting(key), opts.dumpjson()
-
- with gr.Blocks(analytics_enabled=False) as settings_interface:
- with gr.Row():
- with gr.Column(scale=6):
- settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
- with gr.Column():
- restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
-
- result = gr.HTML(elem_id="settings_result")
-
- quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")]
- quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'}
-
- quicksettings_list = []
-
- previous_section = None
- current_tab = None
- current_row = None
- with gr.Tabs(elem_id="settings"):
- for i, (k, item) in enumerate(opts.data_labels.items()):
- section_must_be_skipped = item.section[0] is None
-
- if previous_section != item.section and not section_must_be_skipped:
- elem_id, text = item.section
-
- if current_tab is not None:
- current_row.__exit__()
- current_tab.__exit__()
-
- gr.Group()
- current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text)
- current_tab.__enter__()
- current_row = gr.Column(variant='compact')
- current_row.__enter__()
-
- previous_section = item.section
-
- if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
- quicksettings_list.append((i, k, item))
- components.append(dummy_component)
- elif section_must_be_skipped:
- components.append(dummy_component)
- else:
- component = create_setting_component(k)
- component_dict[k] = component
- components.append(component)
-
- if current_tab is not None:
- current_row.__exit__()
- current_tab.__exit__()
-
- with gr.TabItem("Actions"):
- request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
- download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
-
- with gr.TabItem("Licenses"):
- gr.HTML(shared.html("licenses.html"), elem_id="licenses")
-
- gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
-
- request_notifications.click(
- fn=lambda: None,
- inputs=[],
- outputs=[],
- _js='function(){}'
- )
-
- download_localization.click(
- fn=lambda: None,
- inputs=[],
- outputs=[],
- _js='download_localization'
- )
-
- def reload_scripts():
- modules.scripts.reload_script_body_only()
- reload_javascript() # need to refresh the html page
-
- reload_script_bodies.click(
- fn=reload_scripts,
- inputs=[],
- outputs=[]
- )
-
- def request_restart():
- shared.state.interrupt()
- shared.state.need_restart = True
-
- restart_gradio.click(
- fn=request_restart,
- _js='restart_reload',
- inputs=[],
- outputs=[],
- )
-
- interfaces = [
- (txt2img_interface, "txt2img", "txt2img"),
- (img2img_interface, "img2img", "img2img"),
- (extras_interface, "Extras", "extras"),
- (pnginfo_interface, "PNG Info", "pnginfo"),
- (modelmerger_interface, "Checkpoint Merger", "modelmerger"),
- (train_interface, "Train", "ti"),
- ]
-
- css = ""
-
- for cssfile in modules.scripts.list_files_with_name("style.css"):
- if not os.path.isfile(cssfile):
- continue
-
- with open(cssfile, "r", encoding="utf8") as file:
- css += file.read() + "\n"
-
- if os.path.exists(os.path.join(data_path, "user.css")):
- with open(os.path.join(data_path, "user.css"), "r", encoding="utf8") as file:
- css += file.read() + "\n"
-
- if not cmd_opts.no_progressbar_hiding:
- css += css_hide_progressbar
-
- interfaces += script_callbacks.ui_tabs_callback()
- interfaces += [(settings_interface, "Settings", "settings")]
-
- extensions_interface = ui_extensions.create_ui()
- interfaces += [(extensions_interface, "Extensions", "extensions")]
-
- with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
- with gr.Row(elem_id="quicksettings", variant="compact"):
- for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
- component = create_setting_component(k, is_quicksettings=True)
- component_dict[k] = component
-
- parameters_copypaste.connect_paste_params_buttons()
-
- with gr.Tabs(elem_id="tabs") as tabs:
- for interface, label, ifid in interfaces:
- with gr.TabItem(label, id=ifid, elem_id='tab_' + ifid):
- interface.render()
-
- if os.path.exists(os.path.join(script_path, "notification.mp3")):
- audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
-
- footer = shared.html("footer.html")
- footer = footer.format(versions=versions_html())
- gr.HTML(footer, elem_id="footer")
-
- text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
- settings_submit.click(
- fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
- inputs=components,
- outputs=[text_settings, result],
- )
-
- for i, k, item in quicksettings_list:
- component = component_dict[k]
-
- component.change(
- fn=lambda value, k=k: run_settings_single(value, key=k),
- inputs=[component],
- outputs=[component, text_settings],
- )
-
- text_settings.change(
- fn=lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit"),
- inputs=[],
- outputs=[image_cfg_scale],
- )
-
- button_set_checkpoint = gr.Button('Change checkpoint', elem_id='change_checkpoint', visible=False)
- button_set_checkpoint.click(
- fn=lambda value, _: run_settings_single(value, key='sd_model_checkpoint'),
- _js="function(v){ var res = desiredCheckpointName; desiredCheckpointName = ''; return [res || v, null]; }",
- inputs=[component_dict['sd_model_checkpoint'], dummy_component],
- outputs=[component_dict['sd_model_checkpoint'], text_settings],
- )
-
- component_keys = [k for k in opts.data_labels.keys() if k in component_dict]
-
- def get_settings_values():
- return [get_value_for_setting(key) for key in component_keys]
-
- demo.load(
- fn=get_settings_values,
- inputs=[],
- outputs=[component_dict[k] for k in component_keys],
- )
-
- def modelmerger(*args):
- try:
- results = modules.extras.run_modelmerger(*args)
- except Exception as e:
- print("Error loading/saving model file:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- modules.sd_models.list_models() # to remove the potentially missing models from the list
- return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
- return results
-
- modelmerger_merge.click(fn=lambda: '', inputs=[], outputs=[modelmerger_result])
- modelmerger_merge.click(
- fn=wrap_gradio_gpu_call(modelmerger, extra_outputs=lambda: [gr.update() for _ in range(4)]),
- _js='modelmerger',
- inputs=[
- dummy_component,
- primary_model_name,
- secondary_model_name,
- tertiary_model_name,
- interp_method,
- interp_amount,
- save_as_half,
- custom_name,
- checkpoint_format,
- config_source,
- bake_in_vae,
- discard_weights,
- ],
- outputs=[
- primary_model_name,
- secondary_model_name,
- tertiary_model_name,
- component_dict['sd_model_checkpoint'],
- modelmerger_result,
- ]
- )
-
- ui_config_file = cmd_opts.ui_config_file
- ui_settings = {}
- settings_count = len(ui_settings)
- error_loading = False
-
- try:
- if os.path.exists(ui_config_file):
- with open(ui_config_file, "r", encoding="utf8") as file:
- ui_settings = json.load(file)
- except Exception:
- error_loading = True
- print("Error loading settings:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
- def loadsave(path, x):
- def apply_field(obj, field, condition=None, init_field=None):
- key = path + "/" + field
-
- if getattr(obj, 'custom_script_source', None) is not None:
- key = 'customscript/' + obj.custom_script_source + '/' + key
-
- if getattr(obj, 'do_not_save_to_config', False):
- return
-
- saved_value = ui_settings.get(key, None)
- if saved_value is None:
- ui_settings[key] = getattr(obj, field)
- elif condition and not condition(saved_value):
- pass
-
- # this warning is generally not useful;
- # print(f'Warning: Bad ui setting value: {key}: {saved_value}; Default value "{getattr(obj, field)}" will be used instead.')
- else:
- setattr(obj, field, saved_value)
- if init_field is not None:
- init_field(saved_value)
-
- if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown] and x.visible:
- apply_field(x, 'visible')
-
- if type(x) == gr.Slider:
- apply_field(x, 'value')
- apply_field(x, 'minimum')
- apply_field(x, 'maximum')
- apply_field(x, 'step')
-
- if type(x) == gr.Radio:
- apply_field(x, 'value', lambda val: val in x.choices)
-
- if type(x) == gr.Checkbox:
- apply_field(x, 'value')
-
- if type(x) == gr.Textbox:
- apply_field(x, 'value')
-
- if type(x) == gr.Number:
- apply_field(x, 'value')
-
- if type(x) == gr.Dropdown:
- def check_dropdown(val):
- if getattr(x, 'multiselect', False):
- return all([value in x.choices for value in val])
- else:
- return val in x.choices
-
- apply_field(x, 'value', check_dropdown, getattr(x, 'init_field', None))
-
- visit(txt2img_interface, loadsave, "txt2img")
- visit(img2img_interface, loadsave, "img2img")
- visit(extras_interface, loadsave, "extras")
- visit(modelmerger_interface, loadsave, "modelmerger")
- visit(train_interface, loadsave, "train")
-
- if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)):
- with open(ui_config_file, "w", encoding="utf8") as file:
- json.dump(ui_settings, file, indent=4)
-
- # Required as a workaround for change() event not triggering when loading values from ui-config.json
- interp_description.value = update_interp_description(interp_method.value)
-
- return demo
-
-
-def reload_javascript():
-    head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}?{os.path.getmtime("script.js")}"></script>\n'
-
- inline = f"{localization.localization_js(shared.opts.localization)};"
- if cmd_opts.theme is not None:
- inline += f"set_theme('{cmd_opts.theme}');"
-
- for script in modules.scripts.list_scripts("javascript", ".js"):
-        head += f'<script type="text/javascript" src="file={script.path}?{os.path.getmtime(script.path)}"></script>\n'
-
-    head += f'<script type="text/javascript">{inline}</script>\n'
-
- def template_response(*args, **kwargs):
- res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
-        res.body = res.body.replace(b'</head>', f'{head}</head>'.encode("utf8"))
- res.init_headers()
- return res
-
- gradio.routes.templates.TemplateResponse = template_response
-
-
-if not hasattr(shared, 'GradioTemplateResponseOriginal'):
- shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse
-
-
-def versions_html():
- import torch
- import launch
-
- python_version = ".".join([str(x) for x in sys.version_info[0:3]])
- commit = launch.commit_hash()
- short_commit = commit[0:8]
-
- if shared.xformers_available:
- import xformers
- xformers_version = xformers.__version__
- else:
- xformers_version = "N/A"
-
- return f"""
-python: {python_version}
- •
-torch: {getattr(torch, '__long_version__',torch.__version__)}
- •
-xformers: {xformers_version}
- •
-gradio: {gr.__version__}
- •
-commit: {short_commit}
- •
-checkpoint: N/A
-"""
diff --git a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/transformer_vanilla.py b/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/transformer_vanilla.py
deleted file mode 100644
index 10c0920c1a217af5bb3e1b13077568035ab3b7b5..0000000000000000000000000000000000000000
--- a/spaces/jackyccl/segment-anything/groundingdino/models/GroundingDINO/transformer_vanilla.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-DETR Transformer class.
-
-Copy-paste from torch.nn.Transformer with modifications:
- * positional encodings are passed in MHattention
- * extra LN at the end of encoder is removed
- * decoder returns a stack of activations from all decoding layers
-"""
-from typing import Optional
-
-import torch
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-from .utils import (
- MLP,
- _get_activation_fn,
- _get_clones,
- gen_encoder_output_proposals,
- gen_sineembed_for_position,
- sigmoid_focal_loss,
-)
-
-
-class TextTransformer(nn.Module):
- def __init__(self, num_layers, d_model=256, nheads=8, dim_feedforward=2048, dropout=0.1):
- super().__init__()
- self.num_layers = num_layers
- self.d_model = d_model
- self.nheads = nheads
- self.dim_feedforward = dim_feedforward
- self.norm = None
-
- single_encoder_layer = TransformerEncoderLayer(
- d_model=d_model, nhead=nheads, dim_feedforward=dim_feedforward, dropout=dropout
- )
- self.layers = _get_clones(single_encoder_layer, num_layers)
-
- def forward(self, memory_text: torch.Tensor, text_attention_mask: torch.Tensor):
- """
-
- Args:
- text_attention_mask: bs, num_token
- memory_text: bs, num_token, d_model
-
- Returns:
- output: bs, num_token, d_model
- """
-
- output = memory_text.transpose(0, 1)
-
- for layer in self.layers:
- output = layer(output, src_key_padding_mask=text_attention_mask)
-
- if self.norm is not None:
- output = self.norm(output)
-
- return output.transpose(0, 1)
-
-
-class TransformerEncoderLayer(nn.Module):
- def __init__(
- self,
- d_model,
- nhead,
- dim_feedforward=2048,
- dropout=0.1,
- activation="relu",
- normalize_before=False,
- ):
- super().__init__()
- self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
- # Implementation of Feedforward model
- self.linear1 = nn.Linear(d_model, dim_feedforward)
- self.dropout = nn.Dropout(dropout)
- self.linear2 = nn.Linear(dim_feedforward, d_model)
-
- self.norm1 = nn.LayerNorm(d_model)
- self.norm2 = nn.LayerNorm(d_model)
- self.dropout1 = nn.Dropout(dropout)
- self.dropout2 = nn.Dropout(dropout)
-
- self.activation = _get_activation_fn(activation)
- self.normalize_before = normalize_before
- self.nhead = nhead
-
- def with_pos_embed(self, tensor, pos: Optional[Tensor]):
- return tensor if pos is None else tensor + pos
-
- def forward(
- self,
- src,
- src_mask: Optional[Tensor] = None,
- src_key_padding_mask: Optional[Tensor] = None,
- pos: Optional[Tensor] = None,
- ):
-        # repeat attn mask across heads when it is given per sample as (bs, num_q, num_k)
-        if src_mask is not None and src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]:
- # bs, num_q, num_k
- src_mask = src_mask.repeat(self.nhead, 1, 1)
-
- q = k = self.with_pos_embed(src, pos)
-
- src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0]
-
- # src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
- src = src + self.dropout1(src2)
- src = self.norm1(src)
- src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
- src = src + self.dropout2(src2)
- src = self.norm2(src)
- return src
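As a quick shape check for TextTransformer.forward documented above, a minimal sketch. It assumes the module is importable as groundingdino.models.GroundingDINO.transformer_vanilla (matching the path in the diff header) with its dependencies installed, and it relies on the "src_mask is not None" guard in TransformerEncoderLayer.forward, since no attention mask is supplied here.

import torch
from groundingdino.models.GroundingDINO.transformer_vanilla import TextTransformer

model = TextTransformer(num_layers=2, d_model=256, nheads=8, dim_feedforward=1024)
model.eval()

memory_text = torch.randn(2, 10, 256)               # [bs, num_token, d_model]
text_mask = torch.zeros(2, 10, dtype=torch.bool)    # [bs, num_token], True marks padding

with torch.no_grad():
    out = model(memory_text, text_mask)
print(out.shape)                                    # torch.Size([2, 10, 256])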
diff --git a/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py b/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py
deleted file mode 100644
index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000
--- a/spaces/jarvisbot/ChatImprovement/crazy_functions/test_project/python/dqn/policies.py
+++ /dev/null
@@ -1,237 +0,0 @@
-from typing import Any, Dict, List, Optional, Type
-
-import gym
-import torch as th
-from torch import nn
-
-from stable_baselines3.common.policies import BasePolicy, register_policy
-from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
-from stable_baselines3.common.type_aliases import Schedule
-
-
-class QNetwork(BasePolicy):
- """
- Action-Value (Q-Value) network for DQN
-
- :param observation_space: Observation space
- :param action_space: Action space
- :param net_arch: The specification of the policy and value networks.
- :param activation_fn: Activation function
- :param normalize_images: Whether to normalize images or not,
- dividing by 255.0 (True by default)
- """
-
- def __init__(
- self,
- observation_space: gym.spaces.Space,
- action_space: gym.spaces.Space,
- features_extractor: nn.Module,
- features_dim: int,
- net_arch: Optional[List[int]] = None,
- activation_fn: Type[nn.Module] = nn.ReLU,
- normalize_images: bool = True,
- ):
- super(QNetwork, self).__init__(
- observation_space,
- action_space,
- features_extractor=features_extractor,
- normalize_images=normalize_images,
- )
-
- if net_arch is None:
- net_arch = [64, 64]
-
- self.net_arch = net_arch
- self.activation_fn = activation_fn
- self.features_extractor = features_extractor
- self.features_dim = features_dim
- self.normalize_images = normalize_images
- action_dim = self.action_space.n # number of actions
- q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
- self.q_net = nn.Sequential(*q_net)
-
- def forward(self, obs: th.Tensor) -> th.Tensor:
- """
- Predict the q-values.
-
- :param obs: Observation
- :return: The estimated Q-Value for each action.
- """
- return self.q_net(self.extract_features(obs))
-
- def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
- q_values = self.forward(observation)
- # Greedy action
- action = q_values.argmax(dim=1).reshape(-1)
- return action
-
- def _get_constructor_parameters(self) -> Dict[str, Any]:
- data = super()._get_constructor_parameters()
-
- data.update(
- dict(
- net_arch=self.net_arch,
- features_dim=self.features_dim,
- activation_fn=self.activation_fn,
- features_extractor=self.features_extractor,
- )
- )
- return data
-
-
-class DQNPolicy(BasePolicy):
- """
- Policy class with Q-Value Net and target net for DQN
-
- :param observation_space: Observation space
- :param action_space: Action space
- :param lr_schedule: Learning rate schedule (could be constant)
- :param net_arch: The specification of the policy and value networks.
- :param activation_fn: Activation function
- :param features_extractor_class: Features extractor to use.
- :param features_extractor_kwargs: Keyword arguments
- to pass to the features extractor.
- :param normalize_images: Whether to normalize images or not,
- dividing by 255.0 (True by default)
- :param optimizer_class: The optimizer to use,
- ``th.optim.Adam`` by default
- :param optimizer_kwargs: Additional keyword arguments,
- excluding the learning rate, to pass to the optimizer
- """
-
- def __init__(
- self,
- observation_space: gym.spaces.Space,
- action_space: gym.spaces.Space,
- lr_schedule: Schedule,
- net_arch: Optional[List[int]] = None,
- activation_fn: Type[nn.Module] = nn.ReLU,
- features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
- features_extractor_kwargs: Optional[Dict[str, Any]] = None,
- normalize_images: bool = True,
- optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
- optimizer_kwargs: Optional[Dict[str, Any]] = None,
- ):
- super(DQNPolicy, self).__init__(
- observation_space,
- action_space,
- features_extractor_class,
- features_extractor_kwargs,
- optimizer_class=optimizer_class,
- optimizer_kwargs=optimizer_kwargs,
- )
-
- if net_arch is None:
- if features_extractor_class == FlattenExtractor:
- net_arch = [64, 64]
- else:
- net_arch = []
-
- self.net_arch = net_arch
- self.activation_fn = activation_fn
- self.normalize_images = normalize_images
-
- self.net_args = {
- "observation_space": self.observation_space,
- "action_space": self.action_space,
- "net_arch": self.net_arch,
- "activation_fn": self.activation_fn,
- "normalize_images": normalize_images,
- }
-
- self.q_net, self.q_net_target = None, None
- self._build(lr_schedule)
-
- def _build(self, lr_schedule: Schedule) -> None:
- """
- Create the network and the optimizer.
-
- :param lr_schedule: Learning rate schedule
- lr_schedule(1) is the initial learning rate
- """
-
- self.q_net = self.make_q_net()
- self.q_net_target = self.make_q_net()
- self.q_net_target.load_state_dict(self.q_net.state_dict())
-
- # Setup optimizer with initial learning rate
- self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
-
- def make_q_net(self) -> QNetwork:
- # Make sure we always have separate networks for features extractors etc
- net_args = self._update_features_extractor(self.net_args, features_extractor=None)
- return QNetwork(**net_args).to(self.device)
-
- def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
- return self._predict(obs, deterministic=deterministic)
-
- def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
- return self.q_net._predict(obs, deterministic=deterministic)
-
- def _get_constructor_parameters(self) -> Dict[str, Any]:
- data = super()._get_constructor_parameters()
-
- data.update(
- dict(
- net_arch=self.net_args["net_arch"],
- activation_fn=self.net_args["activation_fn"],
- lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
- optimizer_class=self.optimizer_class,
- optimizer_kwargs=self.optimizer_kwargs,
- features_extractor_class=self.features_extractor_class,
- features_extractor_kwargs=self.features_extractor_kwargs,
- )
- )
- return data
-
-
-MlpPolicy = DQNPolicy
-
-
-class CnnPolicy(DQNPolicy):
- """
- Policy class for DQN when using images as input.
-
- :param observation_space: Observation space
- :param action_space: Action space
- :param lr_schedule: Learning rate schedule (could be constant)
- :param net_arch: The specification of the policy and value networks.
- :param activation_fn: Activation function
- :param features_extractor_class: Features extractor to use.
- :param normalize_images: Whether to normalize images or not,
- dividing by 255.0 (True by default)
- :param optimizer_class: The optimizer to use,
- ``th.optim.Adam`` by default
- :param optimizer_kwargs: Additional keyword arguments,
- excluding the learning rate, to pass to the optimizer
- """
-
- def __init__(
- self,
- observation_space: gym.spaces.Space,
- action_space: gym.spaces.Space,
- lr_schedule: Schedule,
- net_arch: Optional[List[int]] = None,
- activation_fn: Type[nn.Module] = nn.ReLU,
- features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
- features_extractor_kwargs: Optional[Dict[str, Any]] = None,
- normalize_images: bool = True,
- optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
- optimizer_kwargs: Optional[Dict[str, Any]] = None,
- ):
- super(CnnPolicy, self).__init__(
- observation_space,
- action_space,
- lr_schedule,
- net_arch,
- activation_fn,
- features_extractor_class,
- features_extractor_kwargs,
- normalize_images,
- optimizer_class,
- optimizer_kwargs,
- )
-
-
-register_policy("MlpPolicy", MlpPolicy)
-register_policy("CnnPolicy", CnnPolicy)
diff --git a/spaces/jbilcke-hf/MusicGen/audiocraft/modules/codebooks_patterns.py b/spaces/jbilcke-hf/MusicGen/audiocraft/modules/codebooks_patterns.py
deleted file mode 100644
index c5b35cbea8cff84aa56116dbdd860fc72a913a13..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/MusicGen/audiocraft/modules/codebooks_patterns.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from collections import namedtuple
-from dataclasses import dataclass
-from functools import lru_cache
-import logging
-import typing as tp
-
-from abc import ABC, abstractmethod
-import torch
-
-LayoutCoord = namedtuple('LayoutCoord', ['t', 'q']) # (timestep, codebook index)
-PatternLayout = tp.List[tp.List[LayoutCoord]] # Sequence of coordinates
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class Pattern:
- """Base implementation of a pattern over a sequence with multiple codebooks.
-
-    The codebook pattern consists of a layout, defining for each sequence step
- the list of coordinates of each codebook timestep in the resulting interleaved sequence.
- The first item of the pattern is always an empty list in order to properly insert a special token
- to start with. For convenience, we also keep track of ``n_q`` the number of codebooks used for the pattern
- and ``timesteps`` the number of timesteps corresponding to the original sequence.
-
- The pattern provides convenient methods to build and revert interleaved sequences from it:
-    ``build_pattern_sequence`` maps a given dense input tensor of a multi-codebook sequence of shape [B, K, T]
-    to the interleaved sequence of shape [B, K, S] applying the pattern, with B being the batch size,
-    K being the number of codebooks, T the number of original timesteps and S the number of sequence steps
- for the output sequence. The unfilled positions are replaced with a special token and the built sequence
- is returned along with a mask indicating valid tokens.
- ``revert_pattern_sequence`` maps back an interleaved sequence of shape [B, K, S] to the original alignment
- of codebooks across timesteps to an output tensor of shape [B, K, T], using again a special token and a mask
- to fill and specify invalid positions if needed.
- See the dedicated methods for more details.
- """
- # Pattern layout, for each sequence step, we have a list of coordinates
- # corresponding to the original codebook timestep and position.
- # The first list is always an empty list in order to properly insert
- # a special token to start with.
- layout: PatternLayout
- timesteps: int
- n_q: int
-
- def __post_init__(self):
- assert len(self.layout) > 0
- assert self.layout[0] == []
- self._validate_layout()
- self._build_reverted_sequence_scatter_indexes = lru_cache(100)(self._build_reverted_sequence_scatter_indexes)
- self._build_pattern_sequence_scatter_indexes = lru_cache(100)(self._build_pattern_sequence_scatter_indexes)
- logger.info("New pattern, time steps: %d, sequence steps: %d", self.timesteps, len(self.layout))
-
- def _validate_layout(self):
- """Runs checks on the layout to ensure a valid pattern is defined.
- A pattern is considered invalid if:
-        - Multiple timesteps for the same codebook are defined in the same sequence step
- - The timesteps for a given codebook are not in ascending order as we advance in the sequence
- (this would mean that we have future timesteps before past timesteps).
- """
- q_timesteps = {q: 0 for q in range(self.n_q)}
- for s, seq_coords in enumerate(self.layout):
- if len(seq_coords) > 0:
- qs = set()
- for coord in seq_coords:
- qs.add(coord.q)
- last_q_timestep = q_timesteps[coord.q]
- assert coord.t >= last_q_timestep, \
- f"Past timesteps are found in the sequence for codebook = {coord.q} at step {s}"
- q_timesteps[coord.q] = coord.t
- # each sequence step contains at max 1 coordinate per codebook
- assert len(qs) == len(seq_coords), \
-                f"Multiple entries for the same codebook are found at step {s}"
-
- @property
- def num_sequence_steps(self):
- return len(self.layout) - 1
-
- @property
- def max_delay(self):
- max_t_in_seq_coords = 0
- for seq_coords in self.layout[1:]:
- for coords in seq_coords:
- max_t_in_seq_coords = max(max_t_in_seq_coords, coords.t + 1)
- return max_t_in_seq_coords - self.timesteps
-
- @property
- def valid_layout(self):
- valid_step = len(self.layout) - self.max_delay
- return self.layout[:valid_step]
-
- def get_sequence_coords_with_timestep(self, t: int, q: tp.Optional[int] = None):
- """Get codebook coordinates in the layout that corresponds to the specified timestep t
- and optionally to the codebook q. Coordinates are returned as a tuple with the sequence step
- and the actual codebook coordinates.
- """
-        assert t <= self.timesteps, "provided timestep is greater than the pattern's number of timesteps"
- if q is not None:
- assert q <= self.n_q, "provided number of codebooks is greater than the pattern's number of codebooks"
- coords = []
- for s, seq_codes in enumerate(self.layout):
- for code in seq_codes:
- if code.t == t and (q is None or code.q == q):
- coords.append((s, code))
- return coords
-
- def get_steps_with_timestep(self, t: int, q: tp.Optional[int] = None) -> tp.List[int]:
- return [step for step, coords in self.get_sequence_coords_with_timestep(t, q)]
-
- def get_first_step_with_timesteps(self, t: int, q: tp.Optional[int] = None) -> tp.Optional[int]:
- steps_with_timesteps = self.get_steps_with_timestep(t, q)
- return steps_with_timesteps[0] if len(steps_with_timesteps) > 0 else None
-
- def _build_pattern_sequence_scatter_indexes(self, timesteps: int, n_q: int, keep_only_valid_steps: bool,
- device: tp.Union[torch.device, str] = 'cpu'):
- """Build scatter indexes corresponding to the pattern, up to the provided sequence_steps.
-
- Args:
- timesteps (int): Maximum number of timesteps steps to consider.
- keep_only_valid_steps (bool): Restrict the pattern layout to match only valid steps.
- device (Union[torch.device, str]): Device for created tensors.
- Returns:
- indexes (torch.Tensor): Indexes corresponding to the sequence, of shape [K, S].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes, of shape [K, S].
- """
- assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
- assert timesteps <= self.timesteps, "invalid number of timesteps used to build the sequence from the pattern"
- # use the proper layout based on whether we limit ourselves to valid steps only or not,
- # note that using the valid_layout will result in a truncated sequence up to the valid steps
- ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
- # single item indexing being super slow with pytorch vs. numpy, so we use numpy here
- indexes = torch.zeros(n_q, len(ref_layout), dtype=torch.long).numpy()
- mask = torch.zeros(n_q, len(ref_layout), dtype=torch.bool).numpy()
- # fill indexes with last sequence step value that will correspond to our special token
- # the last value is n_q * timesteps as we have flattened z and append special token as the last token
- # which will correspond to the index: n_q * timesteps
- indexes[:] = n_q * timesteps
- # iterate over the pattern and fill scattered indexes and mask
- for s, sequence_coords in enumerate(ref_layout):
- for coords in sequence_coords:
- if coords.t < timesteps:
- indexes[coords.q, s] = coords.t + coords.q * timesteps
- mask[coords.q, s] = 1
- indexes = torch.from_numpy(indexes).to(device)
- mask = torch.from_numpy(mask).to(device)
- return indexes, mask
-
- def build_pattern_sequence(self, z: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
- """Build sequence corresponding to the pattern from the input tensor z.
- The sequence is built using up to sequence_steps if specified, and non-pattern
- coordinates are filled with the special token.
-
- Args:
- z (torch.Tensor): Input tensor of multi-codebooks sequence, of shape [B, K, T].
- special_token (int): Special token used to fill non-pattern coordinates in the new sequence.
- keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
- Steps that are beyond valid steps will be replaced by the special_token in that case.
- Returns:
- values (torch.Tensor): Interleaved sequence matching the pattern, of shape [B, K, S] with S
- corresponding either to the sequence_steps if provided, otherwise to the length of the pattern.
- indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, S].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, S].
- """
- B, K, T = z.shape
- indexes, mask = self._build_pattern_sequence_scatter_indexes(
- T, K, keep_only_valid_steps=keep_only_valid_steps, device=str(z.device)
- )
- z = z.view(B, -1)
- # we append the special token as the last index of our flattened z tensor
- z = torch.cat([z, torch.zeros_like(z[:, :1]) + special_token], dim=1)
- values = z[:, indexes.view(-1)]
- values = values.view(B, K, indexes.shape[-1])
- return values, indexes, mask
-
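To make the gather step above concrete, here is a minimal standalone sketch (the toy layout, sizes and special-token value are made up for illustration): `z` is flattened to `[B, K*T]`, the special token is appended at index `K*T`, and precomputed scatter indexes pick either a real code or that special slot.

```python
import torch

# Toy setup: B=1 clip, K=2 codebooks, T=3 timesteps, and a made-up delayed-style
# layout where each sequence step lists the (t, q) coordinates it carries.
B, K, T = 1, 2, 3
special_token = -1
z = torch.arange(B * K * T).view(B, K, T)                      # codes 0..5
layout = [[], [(0, 0)], [(1, 0), (0, 1)], [(2, 0), (1, 1)]]

# Scatter indexes into the flattened [K * T] codes; the default value K * T
# points at the extra "special token" slot appended below.
indexes = torch.full((K, len(layout)), K * T, dtype=torch.long)
mask = torch.zeros(K, len(layout), dtype=torch.bool)
for s, step in enumerate(layout):
    for t, q in step:
        indexes[q, s] = t + q * T
        mask[q, s] = True

flat = torch.cat([z.view(B, -1), torch.full((B, 1), special_token)], dim=1)
values = flat[:, indexes.view(-1)].view(B, K, -1)
print(values)  # non-pattern coordinates are filled with the special token
```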
- def _build_reverted_sequence_scatter_indexes(self, sequence_steps: int, n_q: int,
- keep_only_valid_steps: bool = False,
- is_model_output: bool = False,
- device: tp.Union[torch.device, str] = 'cpu'):
- """Builds scatter indexes required to retrieve the original multi-codebook sequence
-        from the interleaved pattern.
-
- Args:
- sequence_steps (int): Sequence steps.
- n_q (int): Number of codebooks.
- keep_only_valid_steps (bool): Build a sequence from the pattern up to valid (= fully defined) steps.
- Steps that are beyond valid steps will be replaced by the special_token in that case.
- is_model_output (bool): Whether to keep the sequence item corresponding to initial special token or not.
- device (Union[torch.device, str]): Device for created tensors.
- Returns:
- torch.Tensor: Indexes for reconstructing the output, of shape [K, T].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
- """
- ref_layout = self.valid_layout if keep_only_valid_steps else self.layout
- # TODO(jade): Do we want to further truncate to only valid timesteps here as well?
- timesteps = self.timesteps
- assert n_q == self.n_q, f"invalid number of codebooks for the sequence and the pattern: {n_q} != {self.n_q}"
- assert sequence_steps <= len(ref_layout), \
- f"sequence to revert is longer than the defined pattern: {sequence_steps} > {len(ref_layout)}"
-
- # ensure we take the appropriate indexes to keep the model output from the first special token as well
- if is_model_output:
- ref_layout = ref_layout[1:]
-
- # single item indexing being super slow with pytorch vs. numpy, so we use numpy here
- indexes = torch.zeros(n_q, timesteps, dtype=torch.long).numpy()
- mask = torch.zeros(n_q, timesteps, dtype=torch.bool).numpy()
- # fill indexes with last sequence step value that will correspond to our special token
- indexes[:] = n_q * sequence_steps
- for s, sequence_codes in enumerate(ref_layout):
- if s < sequence_steps:
- for code in sequence_codes:
- if code.t < timesteps:
- indexes[code.q, code.t] = s + code.q * sequence_steps
- mask[code.q, code.t] = 1
- indexes = torch.from_numpy(indexes).to(device)
- mask = torch.from_numpy(mask).to(device)
- return indexes, mask
-
- def revert_pattern_sequence(self, s: torch.Tensor, special_token: int, keep_only_valid_steps: bool = False):
- """Revert a sequence built from the pattern back to the original multi-codebook sequence without interleaving.
- The sequence is reverted using up to timesteps if specified, and non-pattern coordinates
- are filled with the special token.
-
- Args:
- s (torch.Tensor): Interleaved sequence tensor obtained from the pattern, of shape [B, K, S].
- special_token (int or float): Special token used to fill non-pattern coordinates in the new sequence.
- Returns:
-            values (torch.Tensor): Reverted (de-interleaved) multi-codebook sequence, of shape [B, K, T],
-                with T corresponding to the number of timesteps of the pattern.
- indexes (torch.Tensor): Indexes corresponding to the interleaved sequence, of shape [K, T].
- mask (torch.Tensor): Mask corresponding to indexes that matches valid indexes of shape [K, T].
- """
- B, K, S = s.shape
- indexes, mask = self._build_reverted_sequence_scatter_indexes(
- S, K, keep_only_valid_steps, is_model_output=False, device=str(s.device)
- )
- s = s.view(B, -1)
- # we append the special token as the last index of our flattened z tensor
- s = torch.cat([s, torch.zeros_like(s[:, :1]) + special_token], dim=1)
- values = s[:, indexes.view(-1)]
- values = values.view(B, K, indexes.shape[-1])
- return values, indexes, mask
-
- def revert_pattern_logits(self, logits: torch.Tensor, special_token: float, keep_only_valid_steps: bool = False):
- """Revert model logits obtained on a sequence built from the pattern
- back to a tensor matching the original sequence.
-
- This method is similar to ``revert_pattern_sequence`` with the following specificities:
- 1. It is designed to work with the extra cardinality dimension
- 2. We return the logits for the first sequence item that matches the special_token and
-           whose matching target in the original sequence is the first item of the sequence,
-           while we skip the last logits as there is no matching target.
- """
- B, card, K, S = logits.shape
- indexes, mask = self._build_reverted_sequence_scatter_indexes(
- S, K, keep_only_valid_steps, is_model_output=True, device=logits.device
- )
- logits = logits.reshape(B, card, -1)
- # we append the special token as the last index of our flattened z tensor
- logits = torch.cat([logits, torch.zeros_like(logits[:, :, :1]) + special_token], dim=-1) # [B, card, K x S]
- values = logits[:, :, indexes.view(-1)]
- values = values.view(B, card, K, indexes.shape[-1])
- return values, indexes, mask
-
-
-class CodebooksPatternProvider(ABC):
- """Abstraction around providing pattern for interleaving codebooks.
-
-    The CodebooksPatternProvider abstraction allows implementing various strategies to
-    define the interleaving pattern of sequences composed of multiple codebooks. For a given
-    number of codebooks `n_q`, the pattern provider can generate a specified pattern
-    corresponding to a sequence of `T` timesteps with `n_q` parallel codebooks. This pattern
-    can be used to construct a new sequence from the original codes respecting the specified
-    pattern. The pattern is defined as a list of lists of code coordinates, a code coordinate
-    being a tuple of the original timestep and the codebook used to build the new sequence.
- Note that all patterns must start with an empty list that is then used to insert a first
- sequence step of special tokens in the newly generated sequence.
-
- Args:
- n_q (int): number of codebooks.
- cached (bool): if True, patterns for a given length are cached. In general
-            that should be true for efficiency reasons, to avoid synchronization points.
- """
- def __init__(self, n_q: int, cached: bool = True):
- assert n_q > 0
- self.n_q = n_q
- self.get_pattern = lru_cache(100)(self.get_pattern) # type: ignore
-
- @abstractmethod
- def get_pattern(self, timesteps: int) -> Pattern:
- """Builds pattern with specific interleaving between codebooks.
-
- Args:
-            timesteps (int): Total number of timesteps.
- """
- raise NotImplementedError()
-
-
-class DelayedPatternProvider(CodebooksPatternProvider):
- """Provider for delayed pattern across delayed codebooks.
- Codebooks are delayed in the sequence and sequence steps will contain codebooks
- from different timesteps.
-
- Example:
- Taking timesteps=4 and n_q=3, delays=None, the multi-codebook sequence:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- The resulting sequence obtained from the returned pattern is:
- [[S, 1, 2, 3, 4],
- [S, S, 1, 2, 3],
- [S, S, S, 1, 2]]
- (with S being a special token)
-
- Args:
- n_q (int): Number of codebooks.
- delays (Optional[List[int]]): Delay for each of the codebooks.
-            If delays is not defined, each codebook is delayed by 1 compared to the previous one.
- flatten_first (int): Flatten the first N timesteps.
- empty_initial (int): Prepend with N empty list of coordinates.
- """
- def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None,
- flatten_first: int = 0, empty_initial: int = 0):
- super().__init__(n_q)
- if delays is None:
- delays = list(range(n_q))
- self.delays = delays
- self.flatten_first = flatten_first
- self.empty_initial = empty_initial
- assert len(self.delays) == self.n_q
- assert sorted(self.delays) == self.delays
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- max_delay = max(self.delays)
- if self.empty_initial:
- out += [[] for _ in range(self.empty_initial)]
- if self.flatten_first:
- for t in range(min(timesteps, self.flatten_first)):
- for q in range(self.n_q):
- out.append([LayoutCoord(t, q)])
- for t in range(self.flatten_first, timesteps + max_delay):
- v = []
- for q, delay in enumerate(self.delays):
- t_for_q = t - delay
- if t_for_q >= self.flatten_first:
- v.append(LayoutCoord(t_for_q, q))
- out.append(v)
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
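For reference, a small standalone sketch (sizes chosen arbitrarily) reproducing the layout that the loop above builds with the default `delays = range(n_q)`:

```python
# Toy reproduction of the delayed layout for n_q=3, timesteps=4, delays=[0, 1, 2];
# coordinates are stored as plain (t, q) tuples instead of LayoutCoord.
n_q, timesteps = 3, 4
delays = list(range(n_q))

layout = [[]]  # the first step is reserved for the special tokens
for t in range(timesteps + max(delays)):
    layout.append([(t - d, q) for q, d in enumerate(delays) if t - d >= 0])

for s, step in enumerate(layout):
    print(s, step)
# step 1 -> [(0, 0)], step 2 -> [(1, 0), (0, 1)], step 3 -> [(2, 0), (1, 1), (0, 2)], ...
```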
-
-class ParallelPatternProvider(DelayedPatternProvider):
- """Provider for parallel pattern across codebooks.
- This pattern provider is a special case of the delayed pattern with actually no delay,
- hence delays=repeat(0, n_q).
-
- Args:
- n_q (int): Number of codebooks.
- """
- def __init__(self, n_q: int):
- super().__init__(n_q, [0] * n_q)
-
-
-class UnrolledPatternProvider(CodebooksPatternProvider):
- """Provider for unrolling codebooks pattern.
-    This pattern provider makes it possible to represent the codebooks either fully flattened or only
-    flattened to some extent, while also specifying a delay between the flattened codebook
-    representations, allowing the codebooks to be unrolled in the sequence.
-
- Example:
- 1. Flattening of the codebooks.
-           By default, the pattern provider will fully flatten the codebooks such that flattening=range(n_q),
- taking n_q = 3 and timesteps = 4:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, S, 1, S, S, 2, S, S, 3, S, S, 4],
- [S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [1, S, S, 2, S, S, 3, S, S, 4, S, S]]
-        2. Partial flattening of the codebooks. The ``flattening`` parameter specifies the inner step
-           for each codebook, defining which codebooks to flatten (or keep in parallel), for example
- taking n_q = 3, timesteps = 4 and flattening = [0, 1, 1]:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [S, 1, S, S, 2, S, S, 3, S, S, 4, S],
- [1, S, S, 2, S, S, 3, S, S, 4, S, S]]
-        3. Flattening with delay. The ``delays`` parameter further unrolls the sequence of codebooks
-           by specifying a delay per codebook. Note that the delays of codebooks flattened to the
-           same inner timestep should be consistent. For example, taking n_q = 3, timesteps = 4, flattening = [0, 1, 1]
- and delays = [0, 3, 3]:
- [[1, 2, 3, 4],
- [1, 2, 3, 4],
- [1, 2, 3, 4]]
- will result into:
- [[S, S, S, 1, S, 2, S, 3, S, 4],
- [S, S, S, 1, S, 2, S, 3, S, 4],
- [1, 2, 3, S, 4, S, 5, S, 6, S]]
-
- Args:
- n_q (int): Number of codebooks.
- flattening (Optional[List[int]]): Flattening schema over the codebooks. If not defined,
- the codebooks will be flattened to 1 codebook per step, meaning that the sequence will
- have n_q extra steps for each timestep.
- delays (Optional[List[int]]): Delay for each of the codebooks. If not defined,
- no delay is added and therefore will default to [0] * ``n_q``.
- Note that two codebooks that will be flattened to the same inner step
- should have the same delay, otherwise the pattern is considered as invalid.
- """
- FlattenedCodebook = namedtuple('FlattenedCodebook', ['codebooks', 'delay'])
-
- def __init__(self, n_q: int, flattening: tp.Optional[tp.List[int]] = None,
- delays: tp.Optional[tp.List[int]] = None):
- super().__init__(n_q)
- if flattening is None:
- flattening = list(range(n_q))
- if delays is None:
- delays = [0] * n_q
- assert len(flattening) == n_q
- assert len(delays) == n_q
- assert sorted(flattening) == flattening
- assert sorted(delays) == delays
- self._flattened_codebooks = self._build_flattened_codebooks(delays, flattening)
- self.max_delay = max(delays)
-
- def _build_flattened_codebooks(self, delays: tp.List[int], flattening: tp.List[int]):
- """Build a flattened codebooks representation as a dictionary of inner step
- and the actual codebook indices corresponding to the flattened codebook. For convenience, we
- also store the delay associated to the flattened codebook to avoid maintaining an extra mapping.
- """
- flattened_codebooks: dict = {}
- for q, (inner_step, delay) in enumerate(zip(flattening, delays)):
- if inner_step not in flattened_codebooks:
- flat_codebook = UnrolledPatternProvider.FlattenedCodebook(codebooks=[q], delay=delay)
- else:
- flat_codebook = flattened_codebooks[inner_step]
- assert flat_codebook.delay == delay, (
- "Delay and flattening between codebooks is inconsistent: ",
- "two codebooks flattened to the same position should have the same delay."
- )
- flat_codebook.codebooks.append(q)
- flattened_codebooks[inner_step] = flat_codebook
- return flattened_codebooks
-
- @property
- def _num_inner_steps(self):
- """Number of inner steps to unroll between timesteps in order to flatten the codebooks.
- """
- return max([inner_step for inner_step in self._flattened_codebooks.keys()]) + 1
-
- def num_virtual_steps(self, timesteps: int) -> int:
- return timesteps * self._num_inner_steps + 1
-
- def get_pattern(self, timesteps: int) -> Pattern:
-        """Builds the unrolled pattern, with optional delays, across codebooks.
-
- Args:
-            timesteps (int): Total number of timesteps.
- """
- # the PatternLayout is built as a tuple of sequence position and list of coordinates
- # so that it can be reordered properly given the required delay between codebooks of given timesteps
- indexed_out: list = [(-1, [])]
- max_timesteps = timesteps + self.max_delay
- for t in range(max_timesteps):
- # for each timestep, we unroll the flattened codebooks,
- # emitting the sequence step with the corresponding delay
- for step in range(self._num_inner_steps):
- if step in self._flattened_codebooks:
- # we have codebooks at this virtual step to emit
- step_codebooks = self._flattened_codebooks[step]
- t_for_q = t + step_codebooks.delay
- coords = [LayoutCoord(t, q) for q in step_codebooks.codebooks]
- if t_for_q < max_timesteps and t < max_timesteps:
- indexed_out.append((t_for_q, coords))
- else:
- # there is no codebook in this virtual step so we emit an empty list
- indexed_out.append((t, []))
- out = [coords for _, coords in sorted(indexed_out)]
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
-
-class VALLEPattern(CodebooksPatternProvider):
-    """Almost VALL-E style pattern. We further allow some delays for the
- codebooks other than the first one.
-
- Args:
- n_q (int): Number of codebooks.
- delays (Optional[List[int]]): Delay for each of the codebooks.
-            If delays is not defined, no extra delay is applied to the codebooks.
- """
- def __init__(self, n_q: int, delays: tp.Optional[tp.List[int]] = None):
- super().__init__(n_q)
- if delays is None:
- delays = [0] * (n_q - 1)
- self.delays = delays
- assert len(self.delays) == self.n_q - 1
- assert sorted(self.delays) == self.delays
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- for t in range(timesteps):
- out.append([LayoutCoord(t, 0)])
- max_delay = max(self.delays)
- for t in range(timesteps + max_delay):
- v = []
- for q, delay in enumerate(self.delays):
- t_for_q = t - delay
- if t_for_q >= 0:
- v.append(LayoutCoord(t_for_q, q + 1))
- out.append(v)
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
-
-
-class MusicLMPattern(CodebooksPatternProvider):
- """Almost MusicLM style pattern. This is equivalent to full flattening
- but in a different order.
-
- Args:
- n_q (int): Number of codebooks.
- group_by (int): Number of codebooks to group together.
- """
- def __init__(self, n_q: int, group_by: int = 2):
- super().__init__(n_q)
- self.group_by = group_by
-
- def get_pattern(self, timesteps: int) -> Pattern:
- out: PatternLayout = [[]]
- for offset in range(0, self.n_q, self.group_by):
- for t in range(timesteps):
- for q in range(offset, offset + self.group_by):
- out.append([LayoutCoord(t, q)])
- return Pattern(out, n_q=self.n_q, timesteps=timesteps)
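As a quick illustration of the ordering this provider produces, a standalone sketch with arbitrary sizes (coordinates are plain `(t, q)` tuples):

```python
# Toy illustration of the MusicLM-style grouped flattening for n_q=4,
# group_by=2 and timesteps=3 (all values made up for the example).
n_q, group_by, timesteps = 4, 2, 3

layout = [[]]
for offset in range(0, n_q, group_by):
    for t in range(timesteps):
        for q in range(offset, offset + group_by):
            layout.append([(t, q)])

print(len(layout))   # 1 + n_q * timesteps single-coordinate steps
print(layout[1:5])   # [[(0, 0)], [(0, 1)], [(1, 0)], [(1, 1)]]
```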
diff --git a/spaces/jbilcke-hf/media-server/test.sh b/spaces/jbilcke-hf/media-server/test.sh
deleted file mode 100644
index a4653ad33613542efdd798195d5f001be282378f..0000000000000000000000000000000000000000
--- a/spaces/jbilcke-hf/media-server/test.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-echo "$FOO$BAR"
-
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CFB.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CFB.py
deleted file mode 100644
index cb0c35295ce51cf3cc8be4d85b66b52ac85353f4..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Cipher/test_CFB.py
+++ /dev/null
@@ -1,411 +0,0 @@
-# ===================================================================
-#
-# Copyright (c) 2014, Legrandin
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# ===================================================================
-
-import unittest
-from binascii import unhexlify
-
-from Crypto.SelfTest.loader import load_test_vectors
-from Crypto.SelfTest.st_common import list_test_cases
-from Crypto.Util.py3compat import tobytes, is_string
-from Crypto.Cipher import AES, DES3, DES
-from Crypto.Hash import SHAKE128
-
-from Crypto.SelfTest.Cipher.test_CBC import BlockChainingTests
-
-
-def get_tag_random(tag, length):
- return SHAKE128.new(data=tobytes(tag)).read(length)
-
-
-class CfbTests(BlockChainingTests):
-
- aes_mode = AES.MODE_CFB
- des3_mode = DES3.MODE_CFB
-
- # Redefine test_unaligned_data_128/64
-
- def test_unaligned_data_128(self):
- plaintexts = [ b"7777777" ] * 100
-
- cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
- ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
- cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
- self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts)))
-
- cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
- ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
- cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
- self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts)))
-
- def test_unaligned_data_64(self):
- plaintexts = [ b"7777777" ] * 100
- cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
- ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
- cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
- self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts)))
-
- cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
- ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
- cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
- self.assertEqual(b"".join(ciphertexts), cipher.encrypt(b"".join(plaintexts)))
-
- # Extra
-
- def test_segment_size_128(self):
- for bits in range(8, 129, 8):
- cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128,
- segment_size=bits)
-
- for bits in 0, 7, 9, 127, 129:
- self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CFB,
- self.iv_128,
- segment_size=bits)
-
- def test_segment_size_64(self):
- for bits in range(8, 65, 8):
- cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64,
- segment_size=bits)
-
- for bits in 0, 7, 9, 63, 65:
- self.assertRaises(ValueError, DES3.new, self.key_192, AES.MODE_CFB,
- self.iv_64,
- segment_size=bits)
-
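A minimal round-trip sketch of the `segment_size` parameter exercised by the tests above (the key, IV and data are made up; `segment_size` is given in bits and must be a multiple of 8, as the segment-size tests above check):

```python
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
iv = get_random_bytes(16)
data = b"7777777" * 3   # deliberately not block-aligned

# CFB-8 (the pycryptodome default) processes one byte per feedback step, so
# unaligned data is fine; the unaligned-data tests above show CFB-128 accepts
# it as well.
ct = AES.new(key, AES.MODE_CFB, iv, segment_size=8).encrypt(data)
pt = AES.new(key, AES.MODE_CFB, iv, segment_size=8).decrypt(ct)
assert pt == data
```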
-
-class NistCfbVectors(unittest.TestCase):
-
- def _do_kat_aes_test(self, file_name, segment_size):
-
- test_vectors = load_test_vectors(("Cipher", "AES"),
- file_name,
- "AES CFB%d KAT" % segment_size,
- { "count" : lambda x: int(x) } )
- if test_vectors is None:
- return
-
- direction = None
- for tv in test_vectors:
-
- # The test vector file contains some directive lines
- if is_string(tv):
- direction = tv
- continue
-
- self.description = tv.desc
- cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
- segment_size=segment_size)
- if direction == "[ENCRYPT]":
- self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
- elif direction == "[DECRYPT]":
- self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
- else:
- assert False
-
- # See Section 6.4.5 in AESAVS
- def _do_mct_aes_test(self, file_name, segment_size):
-
- test_vectors = load_test_vectors(("Cipher", "AES"),
- file_name,
- "AES CFB%d Montecarlo" % segment_size,
- { "count" : lambda x: int(x) } )
- if test_vectors is None:
- return
-
- assert(segment_size in (8, 128))
-
- direction = None
- for tv in test_vectors:
-
- # The test vector file contains some directive lines
- if is_string(tv):
- direction = tv
- continue
-
- self.description = tv.desc
- cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
- segment_size=segment_size)
-
- def get_input(input_text, output_seq, j):
- # CFB128
- if segment_size == 128:
- if j >= 2:
- return output_seq[-2]
- return [input_text, tv.iv][j]
- # CFB8
- if j == 0:
- return input_text
- elif j <= 16:
- return tv.iv[j - 1:j]
- return output_seq[j - 17]
-
- if direction == '[ENCRYPT]':
- cts = []
- for j in range(1000):
- plaintext = get_input(tv.plaintext, cts, j)
- cts.append(cipher.encrypt(plaintext))
- self.assertEqual(cts[-1], tv.ciphertext)
- elif direction == '[DECRYPT]':
- pts = []
- for j in range(1000):
- ciphertext = get_input(tv.ciphertext, pts, j)
- pts.append(cipher.decrypt(ciphertext))
- self.assertEqual(pts[-1], tv.plaintext)
- else:
- assert False
-
- def _do_tdes_test(self, file_name, segment_size):
-
- test_vectors = load_test_vectors(("Cipher", "TDES"),
- file_name,
- "TDES CFB%d KAT" % segment_size,
- { "count" : lambda x: int(x) } )
- if test_vectors is None:
- return
-
- direction = None
- for tv in test_vectors:
-
- # The test vector file contains some directive lines
- if is_string(tv):
- direction = tv
- continue
-
- self.description = tv.desc
- if hasattr(tv, "keys"):
- cipher = DES.new(tv.keys, DES.MODE_CFB, tv.iv,
- segment_size=segment_size)
- else:
- if tv.key1 != tv.key3:
- key = tv.key1 + tv.key2 + tv.key3 # Option 3
- else:
- key = tv.key1 + tv.key2 # Option 2
- cipher = DES3.new(key, DES3.MODE_CFB, tv.iv,
- segment_size=segment_size)
- if direction == "[ENCRYPT]":
- self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
- elif direction == "[DECRYPT]":
- self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
- else:
- assert False
-
-
-# Create one test method per file
-nist_aes_kat_mmt_files = (
- # KAT
- "CFB?GFSbox128.rsp",
- "CFB?GFSbox192.rsp",
- "CFB?GFSbox256.rsp",
- "CFB?KeySbox128.rsp",
- "CFB?KeySbox192.rsp",
- "CFB?KeySbox256.rsp",
- "CFB?VarKey128.rsp",
- "CFB?VarKey192.rsp",
- "CFB?VarKey256.rsp",
- "CFB?VarTxt128.rsp",
- "CFB?VarTxt192.rsp",
- "CFB?VarTxt256.rsp",
- # MMT
- "CFB?MMT128.rsp",
- "CFB?MMT192.rsp",
- "CFB?MMT256.rsp",
- )
-nist_aes_mct_files = (
- "CFB?MCT128.rsp",
- "CFB?MCT192.rsp",
- "CFB?MCT256.rsp",
- )
-
-for file_gen_name in nist_aes_kat_mmt_files:
- for bits in "8", "128":
- file_name = file_gen_name.replace("?", bits)
- def new_func(self, file_name=file_name, bits=bits):
- self._do_kat_aes_test(file_name, int(bits))
- setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
-
-for file_gen_name in nist_aes_mct_files:
- for bits in "8", "128":
- file_name = file_gen_name.replace("?", bits)
- def new_func(self, file_name=file_name, bits=bits):
- self._do_mct_aes_test(file_name, int(bits))
- setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
-del file_name, new_func
-
-nist_tdes_files = (
- "TCFB?MMT2.rsp", # 2TDES
- "TCFB?MMT3.rsp", # 3TDES
- "TCFB?invperm.rsp", # Single DES
- "TCFB?permop.rsp",
- "TCFB?subtab.rsp",
- "TCFB?varkey.rsp",
- "TCFB?vartext.rsp",
- )
-
-for file_gen_name in nist_tdes_files:
- for bits in "8", "64":
- file_name = file_gen_name.replace("?", bits)
- def new_func(self, file_name=file_name, bits=bits):
- self._do_tdes_test(file_name, int(bits))
- setattr(NistCfbVectors, "test_TDES_" + file_name, new_func)
-
-# END OF NIST CFB TEST VECTORS
-
-
-class SP800TestVectors(unittest.TestCase):
- """Class exercising the CFB test vectors found in Section F.3
-    of NIST SP 800-38A"""
-
- def test_aes_128_cfb8(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
- ciphertext = '3b79424c9c0dd436bace9e0ed4586a4f32b9'
- key = '2b7e151628aed2a6abf7158809cf4f3c'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
- def test_aes_192_cfb8(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
- ciphertext = 'cda2521ef0a905ca44cd057cbf0d47a0678a'
- key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
- def test_aes_256_cfb8(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
- ciphertext = 'dc1f1a8520a64db55fcc8ac554844e889700'
- key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
- def test_aes_128_cfb128(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
- 'ae2d8a571e03ac9c9eb76fac45af8e51' +\
- '30c81c46a35ce411e5fbc1191a0a52ef' +\
- 'f69f2445df4f9b17ad2b417be66c3710'
- ciphertext = '3b3fd92eb72dad20333449f8e83cfb4a' +\
- 'c8a64537a0b3a93fcde3cdad9f1ce58b' +\
- '26751f67a3cbb140b1808cf187a4f4df' +\
- 'c04b05357c5d1c0eeac4c66f9ff7f2e6'
- key = '2b7e151628aed2a6abf7158809cf4f3c'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
- def test_aes_192_cfb128(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
- 'ae2d8a571e03ac9c9eb76fac45af8e51' +\
- '30c81c46a35ce411e5fbc1191a0a52ef' +\
- 'f69f2445df4f9b17ad2b417be66c3710'
- ciphertext = 'cdc80d6fddf18cab34c25909c99a4174' +\
- '67ce7f7f81173621961a2b70171d3d7a' +\
- '2e1e8a1dd59b88b1c8e60fed1efac4c9' +\
- 'c05f9f9ca9834fa042ae8fba584b09ff'
- key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
- def test_aes_256_cfb128(self):
- plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
- 'ae2d8a571e03ac9c9eb76fac45af8e51' +\
- '30c81c46a35ce411e5fbc1191a0a52ef' +\
- 'f69f2445df4f9b17ad2b417be66c3710'
-
- ciphertext = 'dc7e84bfda79164b7ecd8486985d3860' +\
- '39ffed143b28b1c832113c6331e5407b' +\
- 'df10132415e54b92a13ed0a8267ae2f9' +\
- '75a385741ab9cef82031623d55b1e471'
- key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
- iv = '000102030405060708090a0b0c0d0e0f'
-
- key = unhexlify(key)
- iv = unhexlify(iv)
- plaintext = unhexlify(plaintext)
- ciphertext = unhexlify(ciphertext)
-
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.encrypt(plaintext), ciphertext)
- cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
- self.assertEqual(cipher.decrypt(ciphertext), plaintext)
-
-
-def get_tests(config={}):
- tests = []
- tests += list_test_cases(CfbTests)
- if config.get('slow_tests'):
- tests += list_test_cases(NistCfbVectors)
- tests += list_test_cases(SP800TestVectors)
- return tests
-
-
-if __name__ == '__main__':
- suite = lambda: unittest.TestSuite(get_tests())
- unittest.main(defaultTest='suite')
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_server.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_server.py
deleted file mode 100644
index fa46e905caa307f30a242951610193ee2a98692e..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/aiohttp/web_server.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""Low level HTTP server."""
-import asyncio
-from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa
-
-from .abc import AbstractStreamWriter
-from .helpers import get_running_loop
-from .http_parser import RawRequestMessage
-from .streams import StreamReader
-from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
-from .web_request import BaseRequest
-
-__all__ = ("Server",)
-
-
-class Server:
- def __init__(
- self,
- handler: _RequestHandler,
- *,
- request_factory: Optional[_RequestFactory] = None,
- loop: Optional[asyncio.AbstractEventLoop] = None,
- **kwargs: Any
- ) -> None:
- self._loop = get_running_loop(loop)
- self._connections: Dict[RequestHandler, asyncio.Transport] = {}
- self._kwargs = kwargs
- self.requests_count = 0
- self.request_handler = handler
- self.request_factory = request_factory or self._make_request
-
- @property
- def connections(self) -> List[RequestHandler]:
- return list(self._connections.keys())
-
- def connection_made(
- self, handler: RequestHandler, transport: asyncio.Transport
- ) -> None:
- self._connections[handler] = transport
-
- def connection_lost(
- self, handler: RequestHandler, exc: Optional[BaseException] = None
- ) -> None:
- if handler in self._connections:
- del self._connections[handler]
-
- def _make_request(
- self,
- message: RawRequestMessage,
- payload: StreamReader,
- protocol: RequestHandler,
- writer: AbstractStreamWriter,
- task: "asyncio.Task[None]",
- ) -> BaseRequest:
- return BaseRequest(message, payload, protocol, writer, task, self._loop)
-
- async def shutdown(self, timeout: Optional[float] = None) -> None:
- coros = [conn.shutdown(timeout) for conn in self._connections]
- await asyncio.gather(*coros)
- self._connections.clear()
-
- def __call__(self) -> RequestHandler:
- return RequestHandler(self, loop=self._loop, **self._kwargs)
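For context, a hedged sketch of how this low-level `Server` is typically driven through aiohttp's public wrappers (`web.Server`, `web.ServerRunner`, `web.TCPSite`); the handler body, host and port are made up:

```python
import asyncio
from aiohttp import web


async def handler(request: web.BaseRequest) -> web.Response:
    # Low-level handlers receive a BaseRequest and return a Response directly.
    return web.Response(text="OK")


async def main() -> None:
    server = web.Server(handler)          # the Server class shown above
    runner = web.ServerRunner(server)
    await runner.setup()
    site = web.TCPSite(runner, "localhost", 8080)
    await site.start()
    print("Serving on http://localhost:8080")
    await asyncio.Event().wait()          # keep serving until cancelled


if __name__ == "__main__":
    asyncio.run(main())
```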
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/knowledge_graph/base.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/knowledge_graph/base.py
deleted file mode 100644
index d6d758488aa4f117d92c3a487ff31b70388a0842..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/knowledge_graph/base.py
+++ /dev/null
@@ -1,196 +0,0 @@
-"""Knowledge graph based index.
-
-LlamaIndex first extracts (subject, predicate, object) triplets
-from the source text and stores them in a knowledge graph
-structure. At query time, keywords extracted from the query are
-matched against the entities in the graph, and the matching
-triplets (and optionally their embeddings) are used as context.
-
-"""
-
-import logging
-from typing import Any, Dict, List, Optional, Sequence, Tuple, Type
-
-from gpt_index.data_structs.data_structs import KG
-from gpt_index.indices.base import DOCUMENTS_INPUT, BaseGPTIndex
-from gpt_index.indices.query.base import BaseGPTIndexQuery
-from gpt_index.indices.query.knowledge_graph.query import GPTKGTableQuery, KGQueryMode
-from gpt_index.indices.query.schema import QueryMode
-from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
-from gpt_index.langchain_helpers.text_splitter import TextSplitter
-from gpt_index.prompts.default_prompts import (
- DEFAULT_KG_TRIPLET_EXTRACT_PROMPT,
- DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE,
-)
-from gpt_index.prompts.prompts import KnowledgeGraphPrompt
-from gpt_index.schema import BaseDocument
-from gpt_index.utils import get_new_id
-
-DQKET = DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE
-
-
-class GPTKnowledgeGraphIndex(BaseGPTIndex[KG]):
- """GPT Knowledge Graph Index.
-
- Build a KG by extracting triplets, and leveraging the KG during query-time.
-
- Args:
- kg_triple_extract_template (KnowledgeGraphPrompt): The prompt to use for
- extracting triplets.
- max_triplets_per_chunk (int): The maximum number of triplets to extract.
-
- """
-
- index_struct_cls = KG
-
- def __init__(
- self,
- documents: Optional[Sequence[DOCUMENTS_INPUT]] = None,
- index_struct: Optional[KG] = None,
- kg_triple_extract_template: Optional[KnowledgeGraphPrompt] = None,
- max_triplets_per_chunk: int = 10,
- llm_predictor: Optional[LLMPredictor] = None,
- text_splitter: Optional[TextSplitter] = None,
- include_embeddings: bool = False,
- **kwargs: Any,
- ) -> None:
- """Initialize params."""
- # need to set parameters before building index in base class.
- self.include_embeddings = include_embeddings
- self.max_triplets_per_chunk = max_triplets_per_chunk
- self.kg_triple_extract_template = (
- kg_triple_extract_template or DEFAULT_KG_TRIPLET_EXTRACT_PROMPT
- )
- # NOTE: Partially format keyword extract template here.
- self.kg_triple_extract_template = (
- self.kg_triple_extract_template.partial_format(
- max_knowledge_triplets=self.max_triplets_per_chunk
- )
- )
- super().__init__(
- documents=documents,
- index_struct=index_struct,
- llm_predictor=llm_predictor,
- text_splitter=text_splitter,
- **kwargs,
- )
-
- @classmethod
- def get_query_map(self) -> Dict[str, Type[BaseGPTIndexQuery]]:
- """Get query map."""
- return {
- QueryMode.DEFAULT: GPTKGTableQuery,
- }
-
- def _extract_triplets(self, text: str) -> List[Tuple[str, str, str]]:
- """Extract keywords from text."""
- response, _ = self._llm_predictor.predict(
- self.kg_triple_extract_template,
- text=text,
- )
- return self._parse_triplet_response(response)
-
- @staticmethod
- def _parse_triplet_response(response: str) -> List[Tuple[str, str, str]]:
- knowledge_strs = response.strip().split("\n")
- results = []
- for text in knowledge_strs:
- tokens = text[1:-1].split(",")
- if len(tokens) != 3:
- continue
- subj, pred, obj = tokens
- results.append((subj.strip(), pred.strip(), obj.strip()))
- return results
-
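To illustrate the parsing step above, a standalone sketch (the response text is made up): each line of the form `(subject, predicate, object)` is split on commas after dropping the outer characters, and malformed lines are skipped.

```python
# Standalone illustration of the triplet parsing performed above.
raw_response = "(Paris, is capital of, France)\n(Einstein, developed, relativity)\nnot a triplet"

triplets = []
for line in raw_response.strip().split("\n"):
    tokens = line[1:-1].split(",")   # drop the surrounding parentheses
    if len(tokens) != 3:
        continue                     # skip lines that are not (subj, pred, obj)
    subj, pred, obj = (tok.strip() for tok in tokens)
    triplets.append((subj, pred, obj))

print(triplets)
# [('Paris', 'is capital of', 'France'), ('Einstein', 'developed', 'relativity')]
```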
- def _build_fallback_text_splitter(self) -> TextSplitter:
- # if not specified, use "smart" text splitter to ensure chunks fit in prompt
- return self._prompt_helper.get_text_splitter_given_prompt(
- self.kg_triple_extract_template, 1
- )
-
- def _build_index_from_documents(self, documents: Sequence[BaseDocument]) -> KG:
- """Build the index from documents."""
- # do simple concatenation
- index_struct = KG(table={})
- for d in documents:
- nodes = self._get_nodes_from_document(d)
- for n in nodes:
- # set doc id
- node_id = get_new_id(set())
- n.doc_id = node_id
-
- triplets = self._extract_triplets(n.get_text())
- logging.debug(f"> Extracted triplets: {triplets}")
- for triplet in triplets:
- index_struct.upsert_triplet(triplet, n)
-
- if self.include_embeddings:
- for i, triplet in enumerate(triplets):
- self._embed_model.queue_text_for_embeddding(
- str(triplet), str(triplet)
- )
-
- embed_outputs = self._embed_model.get_queued_text_embeddings()
- for (rel_text, rel_embed) in zip(*embed_outputs):
- index_struct.add_to_embedding_dict(rel_text, rel_embed)
-
- return index_struct
-
- def _insert(self, document: BaseDocument, **insert_kwargs: Any) -> None:
- """Insert a document."""
- nodes = self._get_nodes_from_document(document)
- for n in nodes:
- # set doc id
- node_id = get_new_id(set())
- n.doc_id = node_id
-
- triplets = self._extract_triplets(n.get_text())
- logging.debug(f"Extracted triplets: {triplets}")
- for triplet in triplets:
- triplet_str = str(triplet)
- self._index_struct.upsert_triplet(triplet, n)
- if (
- self.include_embeddings
- and triplet_str not in self._index_struct.embedding_dict
- ):
- rel_embedding = self._embed_model.get_text_embedding(triplet_str)
- self.index_struct.add_to_embedding_dict(triplet_str, rel_embedding)
-
- def _delete(self, doc_id: str, **delete_kwargs: Any) -> None:
- """Delete a document."""
- raise NotImplementedError("Delete is not supported for KG index yet.")
-
- def _preprocess_query(self, mode: QueryMode, query_kwargs: Dict) -> None:
- """Set the default embedding mode during query based on current index."""
- if (
- len(self.index_struct.embedding_dict) > 0
- and "embedding_mode" not in query_kwargs
- ):
- query_kwargs["embedding_mode"] = KGQueryMode.HYBRID
-
- def get_networkx_graph(self) -> Any:
- """Get networkx representation of the graph structure.
-
- NOTE: This function requires networkx to be installed.
- NOTE: This is a beta feature.
-
- """
- try:
- import networkx as nx
- except ImportError:
- raise ImportError(
- "Please install networkx to visualize the graph: `pip install networkx`"
- )
-
- g = nx.Graph()
- # add nodes
- for node_name in self.index_struct.table.keys():
- g.add_node(node_name)
-
- # add edges
- rel_map = self.index_struct.rel_map
- for keyword in rel_map.keys():
- for obj, rel in rel_map[keyword]:
- g.add_edge(keyword, obj, title=rel)
-
- return g
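A small sketch of the kind of graph `get_networkx_graph` builds, constructed here directly from hand-written triplets rather than from a real index (the triplets are made up):

```python
import networkx as nx

# Hypothetical triplets, mirroring the (subject, predicate, object) tuples
# that the index stores and exposes through its rel_map.
triplets = [
    ("Paris", "is capital of", "France"),
    ("France", "is in", "Europe"),
]

g = nx.Graph()
for subj, rel, obj in triplets:
    g.add_node(subj)
    g.add_node(obj)
    g.add_edge(subj, obj, title=rel)   # same edge attribute name as above

print(g.number_of_nodes(), g.number_of_edges())  # 3 2
```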
diff --git a/spaces/justest/gpt4free/g4f/.v1/testing/hpgptai_test.py b/spaces/justest/gpt4free/g4f/.v1/testing/hpgptai_test.py
deleted file mode 100644
index cdd146dd381346d689266ce05b6fa9e12f574b1b..0000000000000000000000000000000000000000
--- a/spaces/justest/gpt4free/g4f/.v1/testing/hpgptai_test.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import hpgptai
-
-#single completion
-res = hpgptai.Completion.create("Who are you?", "127.0.0.1:7890")
-print(res["reply"])
-
-
-#chat completion
-messages = [
- {
-        "content": "Who are you?",
-        "html": "Who are you?",
- "id": hpgptai.ChatCompletion.randomStr(),
- "role": "user",
- "who": "User: ",
- },
- {
-        "content": "I am an AI assistant dedicated to providing you with all kinds of services and support. I can answer your questions, help you solve problems, provide relevant information, and carry out some tasks. Please feel free to tell me what you need.",
-        "html": "I am an AI assistant dedicated to providing you with all kinds of services and support. I can answer your questions, help you solve problems, provide relevant information, and carry out some tasks. Please feel free to tell me what you need.",
- "id": hpgptai.ChatCompletion.randomStr(),
- "role": "assistant",
- "who": "AI: ",
- },
- {
-        "content": "What did I ask in my previous message?",
-        "html": "What did I ask in my previous message?",
- "id": hpgptai.ChatCompletion.randomStr(),
- "role": "user",
- "who": "User: ",
- },
-]
-res = hpgptai.ChatCompletion.create(messages,proxy="127.0.0.1:7890")
-print(res["reply"])
-
-
-
-
-
-
-
-
diff --git a/spaces/kaicheng/ChatGPT_ad/locale/extract_locale.py b/spaces/kaicheng/ChatGPT_ad/locale/extract_locale.py
deleted file mode 100644
index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000
--- a/spaces/kaicheng/ChatGPT_ad/locale/extract_locale.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import json
-import re
-
-# Define regular expression patterns
-pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
-
-# Load the .py file
-with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f:
- contents = f.read()
-
-# Load the .py files in the modules folder
-for filename in os.listdir("modules"):
- if filename.endswith(".py"):
- with open(os.path.join("modules", filename), "r", encoding="utf-8") as f:
- contents += f.read()
-
-# Matching with regular expressions
-matches = re.findall(pattern, contents, re.DOTALL)
-
-# Convert to key/value pairs
-data = {match.strip('()"'): '' for match in matches}
-
-# Save as a JSON file
-with open('labels.json', 'w', encoding='utf-8') as f:
- json.dump(data, f, ensure_ascii=False, indent=4)
\ No newline at end of file
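A quick standalone check of the regular expression used above (the sample source string is made up): it captures both single- and triple-quoted `i18n(...)` arguments, and the `strip('()"')` call reduces each match to the bare key.

```python
import re

pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
sample = 'gr.Button(i18n("Submit"))\nstatus = i18n("""multi\nline""")'

matches = re.findall(pattern, sample, re.DOTALL)
print(matches)                                 # ['"Submit"', '"""multi\nline"""']
print({m.strip('()"'): "" for m in matches})   # {'Submit': '', 'multi\nline': ''}
```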
diff --git a/spaces/kbora/minerva-generate-docker/README.md b/spaces/kbora/minerva-generate-docker/README.md
deleted file mode 100644
index eaf41d5d34dc4a1b08457af08e107fb5a3b8c3eb..0000000000000000000000000000000000000000
--- a/spaces/kbora/minerva-generate-docker/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: Minerva Docker
-sdk: docker
-emoji: 📊
-colorFrom: indigo
-colorTo: green
-pinned: false
----
\ No newline at end of file
diff --git a/spaces/kenhugs/dsed/Encoder.py b/spaces/kenhugs/dsed/Encoder.py
deleted file mode 100644
index eda71aace262cf2b7dbc608b0e4e8e17e90c80a7..0000000000000000000000000000000000000000
--- a/spaces/kenhugs/dsed/Encoder.py
+++ /dev/null
@@ -1,836 +0,0 @@
-"""
-
-"""
-from collections.abc import Iterable
-
-import numpy as np
-
-
-class Encoder:
-    """ Allows the localization in time of sound events using temporal predictions.
-
-    In a sound event detection task, the output of the prediction model is
-    often a temporal prediction curve per class. Different segmentation algorithms
-    exist to translate these curves into a list of segments. This encoder gives
-    you access to most of them in a fairly easy way.
-
- """
-
- def __init__(self, classes: list, temporal_precision: int, clip_length: int,
- minimal_segment_step: int, t_collar: float = 0.200,
- percentage_of_length: float = 0.2,
- time_resolution: float = 1.00,
- method: str = "event_based_metrics"):
- """ Initialization of the encoder.
-
-        To initialize the encoder, you must provide the list of classes that
-        the curves represent (in the same order), along with information
-        about the curves and the precision of the segmentation.
-
-
- Args:
- classes (list):
-                The list of classes that the curves represent.
-                It is required by the parse function.
-            temporal_precision (int):
-                The temporal precision of the prediction for each class.
- clip_length (int):
- The length of the audio file (in seconds).
- minimal_segment_step (int):
- The minimum space in between two segments.
-
- .. todo:: Implement alternative way to provide the data (dict)
- """
- self.classes = classes
- self.temporal_precision = temporal_precision
- self.clip_length = clip_length
- self.minimal_segment_step = minimal_segment_step
- self.t_collar = t_collar
- self.percentage_of_length = percentage_of_length
- self.time_resolution = time_resolution
- self.method = method
-
-        # Attributes that are not initialized by the constructor
- self.frame_length = None
- self.nb_frame = None
- self.class_correspondence = dict(zip(classes, range(len(classes))))
- self.class_correspondence_reverse = dict(zip(range(len(classes)), classes))
-
- def encode(self, temporal_prediction: np.array, method: str = "threshold",
- smooth: str = None, **kwargs) -> list:
-        """Perform the localization of the sound events present in the file.
-
-        Using the temporal prediction provided by the last step of the system,
-        it will "localize" the sound events inside the file in the form of
-        strongly annotated lines (see the DCASE2018 task 4 strong label example).
-        Several segmentation methods are implemented here, ranging from simple
-        threshold-based segmentation to methods based on the statistics of the
-        prediction over time (mean, median, variance).
-
- Args:
-            temporal_prediction (np.array): The complete set of
-                probabilities that needs to be segmented. Must be a three-dimensional
-                numpy array of shape (nb clips, nb frames, nb classes).
-            method (str):
-                The segmentation method to use
-                [threshold | hysteresis | derivative | mean_threshold |
-                median_threshold | dynamic_threshold | global_mean_threshold |
-                global_median_threshold].
- smooth (str):
- The smoothing method to use [smoothMovingAvg].
- kwargs:
- See the segmentation method parameters.
-
- Returns:
-            A list of positive and negative segments with their size. A
-            segment is a tuple where the first value represents the segment
-            value, (1) for positive and (0) for negative, and the second value
-            is the width of the segment (number of frames).
-
-        :Example:
-
- ```YOTsn73eqbfc_10.000_20.000.wav 0.163 0.665 Alarm_bell_ringing```
- """
- # parameters verification
- _methods = ["threshold", "hysteresis", "derivative", "mean_threshold",
- "median_threshold", "dynamic_threshold",
- "global_mean_threshold", "global_median_threshold"]
-
- if method not in _methods:
- raise ValueError("Method %s doesn't exist. Only %s are available" %
- (method, _methods))
-
- # Depending on the method selected, the proper function will be selected
- encoder = None
-
- if method == _methods[0]:
- encoder = self.__encode_using_threshold
- elif method == _methods[2]:
- encoder = self._encode_using_derivative
- elif method == _methods[1]:
- encoder = self.__encode_using_hysteresis
- elif method == _methods[3]:
- encoder = self.__encode_using_mean_threshold
- elif method == _methods[6]:
- encoder = self.__encode_using_gmean_threshold
- elif method == _methods[4]:
- encoder = self.__encode_using_median_treshold
- elif method == _methods[5]:
- encoder = self.__encode_using_dynamic_threshold
- elif method == _methods[7]:
- encoder = self.__encode_using_gmedian_threshold
-
- # Apply smoothing if requested
- if smooth is not None:
- temporal_prediction = self.__smooth(temporal_prediction,
- method=smooth, **kwargs)
-
- # Now that we have the strong prediction, we can assign the value to the
- # two attributes nb_frame and frame_length
- self.nb_frame = temporal_prediction.shape[1]
- self.frame_length = self.clip_length / self.nb_frame
-
- # Execute the selected segmentation algorithm and recover its results
- return encoder(temporal_prediction, **kwargs)
-
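A hedged usage sketch of the `Encoder` defined in this file (the class list, sizes and threshold are made up, and the predictions are random, so the printed segments are arbitrary):

```python
import numpy as np
# assuming `Encoder` from this module is importable, e.g. `from Encoder import Encoder`

# Hypothetical setup: 3 clips of 10 s, 100 prediction frames per clip, 2 classes.
classes = ["Speech", "Dog"]
encoder = Encoder(classes, temporal_precision=200, clip_length=10,
                  minimal_segment_step=200)

temporal_prediction = np.random.random((3, 100, len(classes)))
segments = encoder.encode(temporal_prediction, method="threshold", threshold=0.5)
print(encoder.parse(segments, test_files_name=["a.wav", "b.wav", "c.wav"]))
```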
- def parse(self, all_segments: list, test_files_name: list) -> str:
-        """Transform a list of segments into a string ready for evaluation with
-        sed_eval.
-
-        Args:
-            all_segments (list): A list of dicts (one entry per class); the list
-                length is equal to the number of files.
-            test_files_name (list): The list of the file names, in the same
-                order as ``all_segments``.
- """
- output = ""
-
- for clipIndex in range(len(all_segments)):
- clip = all_segments[clipIndex]
-
- empty_cls = 0
- for cls in clip:
- if len(clip[cls]) == 1 and clip[cls][0][0] == 0:
- empty_cls += 1
-
- if empty_cls == 10:
- output += "%s\n" % test_files_name[clipIndex]
- else:
-
- for cls in clip:
- start = 0
- for segment in clip[cls]:
- if segment[0] == 1.0:
- output += "%s\t%f\t%f\t%s\n" % (
- test_files_name[clipIndex],
- start * self.frame_length,
- (start + segment[1]) * self.frame_length,
- self.class_correspondence_reverse[cls]
- )
- start += segment[1]
-
- return output
-
- # ==================================================================================================================
- #
- # ENCODING METHODS
- #
- # ==================================================================================================================
- def _encode_using_derivative(self, temporal_prediction: np.array,
- rising: float = 0.5, decreasing: float = -0.5,
- window_size: int = 5, high: float = 0.8,
- padding: str = "same", **kwargs) -> list:
-        """The derivative method creates segments based on the intensity of the
-        variation of the temporal prediction curve. If the slope of the prediction
-        rises above the `rising` threshold, a valid segment starts. If it decreases
-        faster than the `decreasing` threshold, the valid segment ends. If the
-        prediction starts with a high value, or rises slowly but stays high, an
-        absolute (and global) threshold `high` is used instead (it works like a
-        normal threshold).
-
- Args:
-            temporal_prediction (np.array): The complete set of
-                probabilities that needs to be segmented. Must be a three-dimensional
-                numpy array of shape (nb clips, nb frames, nb classes).
-            rising (float): Must be between 0 and 1; rising threshold on the
-                slope above which a valid segment starts.
-            decreasing (float): Decreasing threshold on the slope below which a
-                valid segment ends.
-            window_size (int): Size of the processing window.
-            high (float): Minimum prediction value that triggers a valid segment.
-            padding (str): The padding method to use on the curves.
-
- Returns:
- The result of the system under the form of a strong annotation text
-            where each line represents one timed event.
- """
-
- output = []
-
-        # for class-dependent parameters
- window_sizes = window_size
- if not isinstance(window_size, Iterable):
- window_sizes = [window_size] * len(self.classes)
-
- risings = rising
- if not isinstance(rising, Iterable):
- risings = [rising] * len(self.classes)
-
- decreasings = decreasing
- if not isinstance(decreasing, Iterable):
- decreasings = [decreasing] * len(self.classes)
-
- highs = high
- if not isinstance(high, Iterable):
- highs = [high] * len(self.classes)
-
-
- for clip in temporal_prediction:
- cls = 0
- labeled = dict()
-
- for cls_ind, prediction_per_class in enumerate(clip.T):
- # get class-dependant parameters
- _window_size = int(window_sizes[cls_ind])
- _rising = risings[cls_ind]
- _decreasing = decreasings[cls_ind]
- _high = highs[cls_ind]
-
- padded_prediction_per_class = self.__pad(prediction_per_class,
- _window_size,
- method=padding)
-
- nb_segment = 1
- segments = []
- segment = [0.0, 0]
- for i in range(len(padded_prediction_per_class) - _window_size):
- window = padded_prediction_per_class[i:i + _window_size]
- slope = (window[-1] - window[0]) / _window_size
-
- # first element
- if i == 0:
- if window[0] > _high:
- segment = [1.0, 1]
- else:
- segment = [0.0, 1]
-
- # if on "high" segment
- if segment[0] == 1:
-
-                    # if above high threshold
- if window[0] > _high:
- segment[1] += 1
-
- else:
-                        # if decreasing threshold is reached
- if slope < _decreasing:
- segments.append(segment)
- nb_segment += 1
- segment = [0.0, 1]
- else:
- segment[1] += 1
-
- # if on "low" segment
- else:
-
- # if above high threshold
- if window[0] > _high:
- segments.append(segment)
- nb_segment += 1
- segment = [1.0, 1]
-
- else:
- if slope > _rising:
- segments.append(segment)
- nb_segment += 1
- segment = [1.0, 1]
- else:
- segment[1] += 1
-
- segments.append(segment.copy())
-
- labeled[cls] = segments
- cls += 1
-
- output.append(labeled)
- return output
-
- def __encode_using_hysteresis(self, temporal_prediction: np.array,
- low: float = 0.4, high: float = 0.6,
- **kwargs) -> list:
-        """The hysteresis based segmentation algorithm requires two thresholds: a
-        high value to decide when the segment should start and a low value to
-        decide when to finish the segment. It performs better when the temporal
-        prediction is noisy.
-
- Args:
-            temporal_prediction (np.array): The complete set of
-                probabilities that needs to be segmented. Must be a three-dimensional
-                numpy array of shape (nb clips, nb frames, nb classes).
-            low (float): low threshold (can be a list for class-dependent
-                thresholding)
-            high (float): high threshold (can be a list for class-dependent
-                thresholding)
- kwargs: Extra arguments
-
- Returns:
- the result of the system under the form of a strong annotation text
-            where each line represents one timed event.
- """
-
-        # In case of class-dependent thresholding
- lows = low
- if not isinstance(low, Iterable):
- lows = [low] * len(self.classes)
-
- highs = high
- if not isinstance(high, Iterable):
- highs = [high] * len(self.classes)
-
- prediction = temporal_prediction
-
- output = []
-
- for clip in prediction:
- labeled = dict()
-
- cls = 0
- for cls_ind, prediction_per_class in enumerate(clip.T):
- converted = list()
- segment = [0, 0]
- nb_segment = 1
- for i in range(len(prediction_per_class)):
- element = prediction_per_class[i]
-
- # first element
- if i == 0:
- if element > highs[cls_ind]:
- segment = [1.0, 1]
- else:
- segment = [0.0, 1]
-
- # then
- if element > highs[cls_ind]:
- if segment[0] == 1:
- segment[1] += 1
- else:
- converted.append(segment)
- nb_segment += 1
- segment = [1.0, 1]
-
- elif lows[cls_ind] <= element:
- segment[1] += 1
-
- else:
- if segment[0] == 0:
- segment[1] += 1
- else:
- converted.append(segment)
- nb_segment += 1
- segment = [0.0, 0]
-
- converted.append(segment)
-
- labeled[cls] = converted.copy()
- cls += 1
-
- output.append(labeled)
-
- return output
-
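A compact standalone sketch of the hysteresis idea on a single 1-D curve (curve and thresholds are made up): the state switches on above `high`, switches off below `low`, and is kept in between. The method above applies the same idea per class and additionally tracks run-length segments.

```python
import numpy as np

curve = np.array([0.1, 0.5, 0.7, 0.5, 0.45, 0.3, 0.8, 0.2])
low, high = 0.4, 0.6

active = False
binarized = []
for p in curve:
    if p > high:
        active = True
    elif p < low:
        active = False
    # between low and high the previous state is kept
    binarized.append(1 if active else 0)

print(binarized)  # [0, 0, 1, 1, 1, 0, 1, 0]
```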
- def __encode_using_threshold(self, temporal_prediction: np.array,
- threshold: float or list, **kwargs) -> list:
- """A basic threshold segmentation algorithm.
-
-        Each frame where the probability is above the given threshold will be
-        part of a valid segment, and of an invalid one otherwise. The
-        threshold can be set globally (one unique threshold for all the
-        classes) or independently (one threshold for each class).
-
- Args:
-            temporal_prediction (np.array): The complete set of
-                probabilities that needs to be segmented. Must be a three-dimensional
-                numpy array of shape (nb clips, nb frames, nb classes).
-            threshold (float or list): One unique threshold or a list of
-                thresholds. If using a list, it must define one threshold for
-                each class.
- **kwargs:
- """
- output = []
-
- thresholds = threshold
- if not isinstance(threshold, Iterable):
- thresholds = [threshold] * len(self.classes)
-
- bin_prediction = temporal_prediction.copy()
- bin_prediction[bin_prediction > thresholds] = 1
- bin_prediction[bin_prediction <= thresholds] = 0
-
-        # Merge "holes" that are smaller than the minimal segment step
- step_length = self.clip_length / temporal_prediction.shape[1] * 1000
- max_hole_size = int(self.minimal_segment_step / step_length)
-
- for clip in bin_prediction:
- labeled = dict()
-
- cls = 0
- for bin_prediction_per_class in clip.T:
-                # convert the binarized list into a list of tuples representing
-                # the element and its number of occurrences. The order is
-                # preserved and the total duration is equal to the clip length
-
- # first pass --> Fill the holes
- for i in range(len(bin_prediction_per_class) - max_hole_size):
- window = bin_prediction_per_class[i: i + max_hole_size]
-
- if window[0] == window[-1] == 1:
- window[:] = [window[0]] * max_hole_size
-
- # second pass --> split into segments
- converted = []
- cpt = 0
- nb_segment = 0
- previous_elt = None
- for element in bin_prediction_per_class:
- if previous_elt is None:
- previous_elt = element
- cpt += 1
- nb_segment = 1
- continue
-
- if element == previous_elt:
- cpt += 1
-
- else:
- converted.append((previous_elt, cpt))
- previous_elt = element
- nb_segment += 1
- cpt = 1
-
-                # case where the class is detected during the whole clip
- # if nbSegment == 1:
- converted.append((previous_elt, cpt))
-
- labeled[cls] = converted.copy()
- cls += 1
-
- output.append(labeled)
-
- return output
-
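The binarize-then-run-length-encode idea used above, as a standalone sketch on one curve (curve and threshold values are made up): consecutive identical values are collapsed into `(value, nb_frames)` pairs like the segments built by the method.

```python
import numpy as np

curve = np.array([0.2, 0.7, 0.8, 0.75, 0.3, 0.1, 0.9, 0.9])
binarized = (curve > 0.5).astype(int).tolist()   # [0, 1, 1, 1, 0, 0, 1, 1]

segments = []            # list of (value, nb_frames) pairs
prev, count = binarized[0], 1
for value in binarized[1:]:
    if value == prev:
        count += 1
    else:
        segments.append((prev, count))
        prev, count = value, 1
segments.append((prev, count))

print(segments)  # [(0, 1), (1, 3), (0, 2), (1, 2)]
```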
- def __encode_using_gmean_threshold(self, temporal_prediction: np.array,
- independent: bool = False, **kwargs
- ) -> list:
-        """Using all the temporal predictions, the mean of each curve is
-        computed for each class and chosen as the threshold. The
-        `__encode_using_threshold` function is then called to apply it.
-
- Args:
- temporal_prediction (np.array): The complete set for
- probabilities that need to segmented. must be a three dimensional
- numpy array (,