Update app.py
Browse files
app.py
CHANGED
@@ -4,7 +4,90 @@
|
|
4 |
|
5 |
from langchain_community.llms import HuggingFaceHub
|
6 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
###### other models:
|
9 |
# "Trelis/Llama-2-7b-chat-hf-sharded-bf16"
|
10 |
# "bn22/Mistral-7B-Instruct-v0.1-sharded"
|
@@ -288,12 +371,17 @@ fe_app = gr.ChatInterface(
|
|
288 |
# load the model asynchronously on startup and save it into memory
|
289 |
@app.on_event("startup")
|
290 |
async def startup():
|
291 |
-
|
292 |
-
|
293 |
-
|
|
|
|
|
|
|
|
|
|
|
294 |
|
295 |
#########
|
296 |
# Assuming visited_urls is a list of URLs
|
297 |
-
for url in links:
|
298 |
-
|
299 |
#load_model()
|
|
|
4 |
|
5 |
from langchain_community.llms import HuggingFaceHub
|
6 |
|
7 |
+
#from langchain_community.llms import HuggingFaceHub
|
8 |
+
|
9 |
+
llm_zephyr_7b_beta = HuggingFaceHub(
|
10 |
+
repo_id="HuggingFaceH4/zephyr-7b-beta",
|
11 |
+
task="text-generation",
|
12 |
+
model_kwargs={
|
13 |
+
"max_new_tokens": 512,
|
14 |
+
"top_k": 30,
|
15 |
+
"temperature": 0.1,
|
16 |
+
"repetition_penalty": 1.03,
|
17 |
+
},
|
18 |
+
)
|
19 |
+
|
20 |
+
import os
|
21 |
+
from crewai import Agent, Task, Crew, Process
|
22 |
+
from crewai_tools import SerperDevTool
|
23 |
+
|
24 |
+
search_tool = SerperDevTool()
|
25 |
+
|
26 |
+
# Define your agents with roles and goals
|
27 |
+
researcher = Agent(
|
28 |
+
role='Senior Research Analyst',
|
29 |
+
goal='Uncover cutting-edge developments in AI and data science',
|
30 |
+
backstory="""You work at a leading tech think tank.
|
31 |
+
Your expertise lies in identifying emerging trends.
|
32 |
+
You have a knack for dissecting complex data and presenting actionable insights.""",
|
33 |
+
verbose=True,
|
34 |
+
allow_delegation=False,
|
35 |
+
tools=[search_tool],
|
36 |
+
llm=llm_zephyr_7b_beta
|
37 |
+
# You can pass an optional llm attribute specifying what mode you wanna use.
|
38 |
+
# It can be a local model through Ollama / LM Studio or a remote
|
39 |
+
# model like OpenAI, Mistral, Antrophic or others (https://docs.crewai.com/how-to/LLM-Connections/)
|
40 |
+
#
|
41 |
+
# import os
|
42 |
+
# os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
|
43 |
+
#
|
44 |
+
# OR
|
45 |
+
#
|
46 |
+
# from langchain_openai import ChatOpenAI
|
47 |
+
# llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
|
48 |
+
)
|
49 |
+
|
50 |
+
writer = Agent(
|
51 |
+
role='Tech Content Strategist',
|
52 |
+
goal='Craft compelling content on tech advancements',
|
53 |
+
backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
|
54 |
+
You transform complex concepts into compelling narratives.""",
|
55 |
+
verbose=True,
|
56 |
+
allow_delegation=True,
|
57 |
+
llm=llm_zephyr_7b_beta
|
58 |
+
)
|
59 |
+
|
60 |
+
# Create tasks for your agents
|
61 |
+
task1 = Task(
|
62 |
+
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
|
63 |
+
Identify key trends, breakthrough technologies, and potential industry impacts.""",
|
64 |
+
expected_output="Full analysis report in bullet points",
|
65 |
+
agent=researcher
|
66 |
+
)
|
67 |
|
68 |
+
task2 = Task(
|
69 |
+
description="""Using the insights provided, develop an engaging blog
|
70 |
+
post that highlights the most significant AI advancements.
|
71 |
+
Your post should be informative yet accessible, catering to a tech-savvy audience.
|
72 |
+
Make it sound cool, avoid complex words so it doesn't sound like AI.""",
|
73 |
+
expected_output="Full blog post of at least 4 paragraphs",
|
74 |
+
agent=writer
|
75 |
+
)
|
76 |
+
|
77 |
+
# Instantiate your crew with a sequential process
|
78 |
+
crew = Crew(
|
79 |
+
agents=[researcher, writer],
|
80 |
+
tasks=[task1, task2],
|
81 |
+
verbose=2, # You can set it to 1 or 2 for different logging levels
|
82 |
+
)
|
83 |
+
|
84 |
+
# Get your crew to work!
|
85 |
+
#result = crew.kickoff()
|
86 |
+
|
87 |
+
#print("######################")
|
88 |
+
#print(result)
|
89 |
+
|
90 |
+
##################
|
91 |
###### other models:
|
92 |
# "Trelis/Llama-2-7b-chat-hf-sharded-bf16"
|
93 |
# "bn22/Mistral-7B-Instruct-v0.1-sharded"
|
|
|
371 |
# load the model asynchronously on startup and save it into memory
|
372 |
@app.on_event("startup")
|
373 |
async def startup():
|
374 |
+
# Get your crew to work!
|
375 |
+
result = crew.kickoff()
|
376 |
+
|
377 |
+
print("######################")
|
378 |
+
print(result)
|
379 |
+
#domain_url = 'https://globl.contact/'
|
380 |
+
#links = get_all_links_from_domain(domain_url)
|
381 |
+
#print("Links from the domain:", links)
|
382 |
|
383 |
#########
|
384 |
# Assuming visited_urls is a list of URLs
|
385 |
+
#for url in links:
|
386 |
+
# vs = get_vectorstore_from_url(url)
|
387 |
#load_model()
|