import gradio as gr
import re
from collections import Counter
import emoji
import logging
from typing import List, Optional, Tuple
import statistics


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def clean_text(text: str) -> str:
    """Collapse extra whitespace and line breaks into single spaces."""
    return ' '.join(text.split())


def count_emojis(text: str) -> int:
    """Count the emoji characters in the text."""
    return len([c for c in text if c in emoji.EMOJI_DATA])


def extract_mentions(text: str) -> List[str]:
    """Extract @username mentions from the text."""
    return re.findall(r'@[\w\.]+', text)


def get_comment_words(text: str) -> List[str]:
    """Split a comment into lowercase words longer than two characters."""
    words = re.findall(r'\w+', text.lower())
    return [w for w in words if len(w) > 2]
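
# A quick illustration of the helpers above on made-up strings (hypothetical
# samples, not taken from real data):
#   clean_text("Круто!\n\n  смотри")            -> "Круто! смотри"
#   count_emojis("Огонь 🔥🔥")                   -> 2
#   extract_mentions("@user.one и @user_two")   -> ['@user.one', '@user_two']
#   get_comment_words("Это просто топ")         -> ['это', 'просто', 'топ']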


def analyze_sentiment(text: str) -> str:
    """Rough sentiment detection based on emoji and Russian keyword indicators."""
    positive_indicators = ['🔥', '❤️', '👍', '😊', '💪', '👏', '🎉', '♥️', '😍', '🙏',
                           'круто', 'супер', 'класс', 'огонь', 'пушка', 'отлично', 'здорово',
                           'прекрасно', 'молодец', 'красота', 'спасибо', 'топ']
    negative_indicators = ['👎', '😢', '😞', '😠', '😡', '💔', '😕', '😑',
                           'плохо', 'ужас', 'отстой', 'фу', 'жесть', 'ужасно',
                           'разочарован', 'печаль', 'грустно']

    text_lower = text.lower()
    positive_count = sum(1 for ind in positive_indicators if ind in text_lower)
    negative_count = sum(1 for ind in negative_indicators if ind in text_lower)

    # Exclamation marks reinforce whichever sentiment is already dominant.
    exclamation_count = text.count('!')
    if positive_count > negative_count:
        positive_count += exclamation_count * 0.5
    elif negative_count > positive_count:
        negative_count += exclamation_count * 0.5

    if positive_count > negative_count:
        return 'positive'
    if negative_count > positive_count:
        return 'negative'
    return 'neutral'
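
# Rough sanity checks for analyze_sentiment (hypothetical inputs):
#   analyze_sentiment("Огонь, спасибо! 🔥")       -> 'positive'
#   analyze_sentiment("Ужасно, разочарован 👎")   -> 'negative'
#   analyze_sentiment("Просто оставлю это здесь") -> 'neutral'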


def extract_comment_data(comment_text: str) -> Tuple[Optional[str], Optional[str], int, int]:
    """
    Extract the data from a single comment block.

    Returns (username, comment_text, likes_count, week_number).
    """
    try:
        # The username follows the "Фото профиля" (profile photo) label.
        username_match = re.search(r"Фото профиля ([^\n]+)", comment_text)
        if not username_match:
            return None, None, 0, 0

        username = username_match.group(1).strip()

        # The comment body sits between the repeated username and the "N нед." age
        # marker; re.escape guards against usernames containing regex metacharacters.
        comment_pattern = fr"{re.escape(username)}\n(.*?)(?:\d+ нед\.)"
        comment_match = re.search(comment_pattern, comment_text, re.DOTALL)
        if comment_match:
            comment = clean_text(comment_match.group(1))
            comment = re.sub(fr'^{re.escape(username)}\s*', '', comment)
            comment = re.sub(r'^@[\w\.]+ ', '', comment)
        else:
            comment = ""

        # Comment age in weeks ("N нед.").
        week_match = re.search(r'(\d+) нед\.', comment_text)
        weeks = int(week_match.group(1)) if week_match else 0

        # Like count; covers the case endings "отметка/отметки/отметок" as well as
        # the "Нравится: N" wording.
        likes = 0
        likes_patterns = [
            r"(\d+) отметк(?:а|и|ок) \"Нравится\"",
            r"Нравится: (\d+)",
        ]

        for pattern in likes_patterns:
            likes_match = re.search(pattern, comment_text)
            if likes_match:
                likes = int(likes_match.group(1))
                break

        return username, comment.strip(), likes, weeks
    except Exception as e:
        logger.error(f"Error extracting comment data: {e}")
        return None, None, 0, 0
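
# The parser above expects each block to look roughly like text copied from the
# Russian Instagram web UI (hypothetical example, not real data):
#
#   Фото профиля some_user
#   some_user
#   Отличный пост, спасибо! 🔥
#   12 нед.
#   3 отметки "Нравится"
#
# For such a block extract_comment_data() would return
# ('some_user', 'Отличный пост, спасибо! 🔥', 3, 12).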


def analyze_post(content_type, link_to_post, post_likes, post_date, description, comment_count, all_comments):
    """
    Build the analytics report from the pasted comments and post metadata.

    post_likes, post_date, description and comment_count are collected by the
    form but are not used in the computed metrics.
    """
    try:
        # Split the pasted text into per-comment blocks; each block starts with
        # the "Фото профиля" profile-photo label.
        comments_blocks = re.split(r'(?=Фото профиля)', all_comments)
        comments_blocks = [block for block in comments_blocks if block.strip()]

        # Per-comment data
        usernames = []
        comments = []
        likes = []
        weeks = []

        # Aggregate analytics
        total_emojis = 0
        mentions = []
        sentiments = []
        comment_lengths = []
        words_per_comment = []
        all_words = []
        user_engagement = {}

        for block in comments_blocks:
            username, comment, like_count, week_number = extract_comment_data(block)
            if username and comment:
                usernames.append(username)
                comments.append(comment)
                likes.append(str(like_count))
                weeks.append(week_number)

                # Content metrics for this comment
                total_emojis += count_emojis(comment)
                mentions.extend(extract_mentions(comment))
                sentiment = analyze_sentiment(comment)
                sentiments.append(sentiment)
                comment_lengths.append(len(comment))

                words = get_comment_words(comment)
                words_per_comment.append(len(words))
                all_words.extend(words)

                # Per-user engagement stats
                if username not in user_engagement:
                    user_engagement[username] = {
                        'comments': 0,
                        'total_likes': 0,
                        'emoji_usage': 0,
                        'avg_length': 0,
                        'sentiments': []
                    }
                user_stats = user_engagement[username]
                user_stats['comments'] += 1
                user_stats['total_likes'] += like_count
                user_stats['emoji_usage'] += count_emojis(comment)
                user_stats['avg_length'] += len(comment)
                user_stats['sentiments'].append(sentiment)

        total_comments = len(comments)
        if total_comments == 0:
            raise ValueError("No valid comments found")

        # Finalize per-user averages
        for username in user_engagement:
            stats = user_engagement[username]
            stats['avg_length'] /= stats['comments']
            stats['engagement_rate'] = stats['total_likes'] / stats['comments']
            stats['sentiment_ratio'] = sum(1 for s in stats['sentiments'] if s == 'positive') / len(stats['sentiments'])

        # Aggregate statistics
        avg_comment_length = sum(comment_lengths) / total_comments
        sentiment_distribution = Counter(sentiments)
        most_active_users = Counter(usernames).most_common(5)
        most_mentioned = Counter(mentions).most_common(5)
        avg_likes = sum(map(int, likes)) / len(likes) if likes else 0
        # Week values count "weeks ago", so the oldest comment has the largest value.
        earliest_week = max(weeks) if weeks else 0
        latest_week = min(weeks) if weeks else 0

        median_comment_length = statistics.median(comment_lengths)
        avg_words_per_comment = sum(words_per_comment) / total_comments
        common_words = Counter(all_words).most_common(10)

        # Engagement metrics across all comments
        engagement_metrics = {
            'comments_with_likes': sum(1 for l in likes if int(l) > 0),
            'comments_with_emoji': sum(1 for c in comments if count_emojis(c) > 0),
            'comments_with_mentions': sum(1 for c in comments if extract_mentions(c)),
            'avg_engagement_rate': statistics.mean([
                stats['engagement_rate'] for stats in user_engagement.values()
            ])
        }

        # Activity over time
        week_distribution = Counter(weeks)
        most_active_weeks = sorted(week_distribution.items(), key=lambda x: x[1], reverse=True)[:3]

        # Plain-text outputs for the secondary fields
        usernames_output = "\n".join(usernames)
        comments_output = "\n".join(comments)
        likes_chronology_output = "\n".join(likes)
        total_likes_sum = sum(map(int, likes))

        analytics_summary = (
            f"Content Type: {content_type}\n"
            f"Link to Post: {link_to_post}\n\n"
            f"ОСНОВНАЯ СТАТИСТИКА:\n"
            f"- Всего комментариев: {total_comments}\n"
            f"- Всего лайков на комментариях: {total_likes_sum}\n"
            f"- Среднее количество лайков: {avg_likes:.1f}\n"
            f"- Период активности: {earliest_week}-{latest_week} недель\n\n"
            f"АНАЛИЗ КОНТЕНТА:\n"
            f"- Средняя длина комментария: {avg_comment_length:.1f} символов\n"
            f"- Медианная длина комментария: {median_comment_length} символов\n"
            f"- Среднее количество слов: {avg_words_per_comment:.1f}\n"
            f"- Всего эмодзи использовано: {total_emojis}\n"
            f"- Тональность комментариев:\n"
            f"  * Позитивных: {sentiment_distribution['positive']}\n"
            f"  * Нейтральных: {sentiment_distribution['neutral']}\n"
            f"  * Негативных: {sentiment_distribution['negative']}\n\n"
            f"ПОПУЛЯРНЫЕ СЛОВА:\n"
            + "\n".join([f"- {word}: {count} раз" for word, count in common_words]) + "\n\n"
            f"АКТИВНОСТЬ ПОЛЬЗОВАТЕЛЕЙ:\n"
            f"Самые активные комментаторы:\n"
            + "\n".join([f"- {user}: {count} комментариев" for user, count in most_active_users]) + "\n\n"
            f"Самые упоминаемые пользователи:\n"
            + "\n".join([f"- {user}: {count} упоминаний" for user, count in most_mentioned if user]) + "\n\n"
            f"ВОВЛЕЧЕННОСТЬ:\n"
            f"- Процент комментариев с лайками: {(engagement_metrics['comments_with_likes'] / total_comments * 100):.1f}%\n"
            f"- Процент комментариев с эмодзи: {(engagement_metrics['comments_with_emoji'] / total_comments * 100):.1f}%\n"
            f"- Процент комментариев с упоминаниями: {(engagement_metrics['comments_with_mentions'] / total_comments * 100):.1f}%\n"
            f"- Средний рейтинг вовлеченности: {engagement_metrics['avg_engagement_rate']:.2f}\n\n"
            f"ВРЕМЕННАЯ АКТИВНОСТЬ:\n"
            f"Самые активные недели:\n"
            + "\n".join([f"- {week} неделя: {count} комментариев" for week, count in most_active_weeks])
        )

        return analytics_summary, usernames_output, comments_output, likes_chronology_output, str(total_likes_sum)

    except Exception as e:
        logger.error(f"Error in analyze_post: {e}", exc_info=True)
        error_message = f"Произошла ошибка при обработке: {str(e)}\n{str(type(e))}"
        return error_message, error_message, error_message, error_message, "0"
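
# analyze_post() can also be exercised without the UI, e.g. in a quick smoke
# test (hypothetical values; raw_comments_text would hold pasted comment blocks
# in the format shown above extract_comment_data):
#
#   summary, users, comments, likes, total = analyze_post(
#       "Photo", "https://instagram.com/p/...", 120, "2024-01-01",
#       "Описание поста", 2, raw_comments_text)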


iface = gr.Interface(
    fn=analyze_post,
    inputs=[
        gr.Radio(
            choices=["Photo", "Video"],
            label="Content Type",
            value="Photo"
        ),
        gr.Textbox(
            label="Link to Post",
            placeholder="Введите ссылку на пост"
        ),
        gr.Number(
            label="Likes",
            value=0
        ),
        gr.Textbox(
            label="Post Date",
            placeholder="Введите дату публикации"
        ),
        gr.Textbox(
            label="Description",
            placeholder="Введите описание поста",
            lines=3
        ),
        gr.Number(
            label="Total Comment Count",
            value=0
        ),
        gr.Textbox(
            label="All Comments",
            placeholder="Вставьте комментарии",
            lines=10
        )
    ],
    outputs=[
        gr.Textbox(label="Analytics Summary", lines=20),
        gr.Textbox(label="Usernames (Output 1)", lines=5),
        gr.Textbox(label="Comments (Output 2)", lines=5),
        gr.Textbox(label="Likes Chronology (Output 3)", lines=5),
        gr.Textbox(label="Total Likes on Comments (Output 4)")
    ],
    title="Instagram Comment Analyzer Pro",
    description="Расширенный анализатор комментариев Instagram с детальной аналитикой"
)


if __name__ == "__main__":
    iface.launch()
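    # A temporary public link can be requested instead via the standard
    # Gradio share flag: iface.launch(share=True)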