Commit c5b1bab
Parent(s): 34a740d

progress more 48

app.py CHANGED
@@ -35,8 +35,23 @@ def init_langchain_llm():
     return llm
 
 def init_langchain_llm():
-
-
+    model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+    pipeline = transformers.pipeline(
+        "text-generation",
+        model=model_id,
+        model_kwargs={"torch_dtype": torch.bfloat16},
+        device_map="auto",
+    )
+
+    def llama_wrapper(prompt):
+        messages = [
+            {"role": "system", "content": "You are a helpful AI assistant that analyzes news and estimates their impact."},
+            {"role": "user", "content": prompt},
+        ]
+        result = pipeline(messages, max_new_tokens=256)
+        return result[0]["generated_text"]
+
+    llm = HuggingFacePipeline(pipeline=llama_wrapper)
     return llm
 
 def estimate_impact(llm, news_text):
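A note on this hunk: LangChain's `HuggingFacePipeline` wraps a `transformers.Pipeline` object, so handing it the plain `llama_wrapper` function will most likely fail at generation time when LangChain inspects `pipeline.task`. Separately, when the text-generation pipeline is called with chat-style messages, `result[0]["generated_text"]` holds the whole conversation (a list of messages), not a string. A standalone sketch of just the chat call, assuming a transformers version with chat-template support in the pipeline; the sample news string is made up:

```python
# Sketch of the chat call used above; assumes a transformers version whose
# text-generation pipeline accepts chat-style message lists (~4.43+).
import torch
import transformers

model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
pipe = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are a helpful AI assistant that analyzes news and estimates their impact."},
    {"role": "user", "content": "Acme Corp missed quarterly earnings estimates by 20%."},  # made-up example
]
result = pipe(messages, max_new_tokens=256)
# With chat-style input, generated_text is the full conversation;
# the assistant's reply is the content of the last message.
reply = result[0]["generated_text"][-1]["content"]
print(reply)
```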
@@ -47,16 +62,22 @@ def estimate_impact(llm, news_text):
 
     News: {news}
 
-
-
+    Your response should be in the following format:
+    Estimated Impact: [Your estimate or category]
+    Reasoning: [Your reasoning]
     """
     prompt = PromptTemplate(template=template, input_variables=["news"])
     chain = LLMChain(llm=llm, prompt=prompt)
     response = chain.run(news=news_text)
 
-
-    impact = 
-    reasoning = 
+    # Parse the response
+    impact = "Неопределенный"
+    reasoning = "Не удалось получить обоснование"
+
+    if "Estimated Impact:" in response and "Reasoning:" in response:
+        impact_part, reasoning_part = response.split("Reasoning:")
+        impact = impact_part.split("Estimated Impact:")[1].strip()
+        reasoning = reasoning_part.strip()
 
     return impact, reasoning
 
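The split-based parsing above assumes both labels appear, and in that exact order; the Russian defaults mean "Undetermined" and "Could not obtain reasoning". A regex variant of the same step would tolerate extra whitespace and a missing label. This is a sketch, and `parse_impact_response` is a hypothetical helper, not part of this commit:

```python
# Regex-based variant of the parsing step in estimate_impact (hypothetical
# helper, not in the commit); each label is matched independently.
import re

def parse_impact_response(response: str) -> tuple[str, str]:
    impact = "Неопределенный"                      # "Undetermined"
    reasoning = "Не удалось получить обоснование"  # "Could not obtain reasoning"
    impact_match = re.search(r"Estimated Impact:\s*(.+?)(?:\n|Reasoning:|$)", response, re.S)
    reasoning_match = re.search(r"Reasoning:\s*(.+)", response, re.S)
    if impact_match:
        impact = impact_match.group(1).strip()
    if reasoning_match:
        reasoning = reasoning_match.group(1).strip()
    return impact, reasoning
```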
@@ -77,7 +98,13 @@ def process_file_with_llm(df, llm):
             impact, reasoning = estimate_impact(llm, row['Translated'])  # Use translated text
             df.at[index, 'LLM_Impact'] = impact
             df.at[index, 'LLM_Reasoning'] = reasoning
-
+    # Display each LLM response
+            st.write(f"Новость: {row['Заголовок']}")
+            st.write(f"Эффект: {impact}")
+            st.write(f"Обоснование: {reasoning}")
+            st.write("---")  # Add a separator between responses
+
+
         # Update progress
         progress = (index + 1) / total_rows
         progress_bar.progress(progress)
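These `st.write` calls print one block per processed row (the labels mean "News: {headline}", "Impact", "Reasoning"), so on a large file the page grows without bound. A sketch of the same loop body using `st.expander` to keep each row collapsible; `df`, `llm`, and `estimate_impact` are assumed from the surrounding function:

```python
# Sketch of the display step inside process_file_with_llm's row loop;
# st.expander collapses each row's output instead of stacking four writes.
import streamlit as st

for index, row in df.iterrows():
    impact, reasoning = estimate_impact(llm, row['Translated'])
    df.at[index, 'LLM_Impact'] = impact
    df.at[index, 'LLM_Reasoning'] = reasoning
    with st.expander(f"Новость: {row['Заголовок']}"):  # "News: <headline>"
        st.write(f"Эффект: {impact}")          # "Impact"
        st.write(f"Обоснование: {reasoning}")  # "Reasoning"
```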
@@ -390,7 +417,7 @@ def create_output_file(df, uploaded_file, analysis_df):
     return output
 
 def main():
-    st.title("... приступим к анализу... версия 
+    st.title("... приступим к анализу... версия 48")
 
     # Initialize session state
     if 'processed_df' not in st.session_state: