muzakkirhussain011 committed on
Commit
1c7fd1d
·
1 Parent(s): c762d03

Add application files

Browse files
Files changed (3) hide show
  1. app.py +68 -16
  2. mcp/agents/autonomous_agent_hf.py +19 -5
  3. requirements.txt +3 -2
app.py CHANGED
@@ -52,16 +52,19 @@ print("πŸš€ CX AI AGENT - ENTERPRISE B2B SALES INTELLIGENCE")
52
  print("="*80)
53
 
54
  # AI Mode - HuggingFace Inference API (FREE)
55
- # Uses huggingface_hub InferenceClient
56
- HF_MODEL = os.getenv("HF_MODEL", "mistralai/Mistral-7B-Instruct-v0.3")
57
  HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HF_API_TOKEN")
58
 
 
 
 
59
  print(f"πŸ€– AI Mode: HuggingFace Inference API (FREE)")
60
  print(f" Model: {HF_MODEL}")
61
  if HF_TOKEN:
62
- print(f"βœ… HF_TOKEN loaded")
63
  else:
64
- print("⚠️ HF_TOKEN not set! Get one at: https://huggingface.co/settings/tokens")
65
 
66
  serper_key = os.getenv('SERPER_API_KEY')
67
  if serper_key:
@@ -89,7 +92,8 @@ def warmup_hf_model():
89
  Send a dummy prompt to warm up the HuggingFace Inference API.
90
  This ensures the model is loaded and ready for the first real request.
91
  """
92
- if not HF_TOKEN:
 
93
  print("⏭️ Skipping model warm-up (HF_TOKEN not set)")
94
  return
95
 
@@ -97,7 +101,7 @@ def warmup_hf_model():
97
  from huggingface_hub import InferenceClient
98
  print(f"πŸ”₯ Warming up HuggingFace model ({HF_MODEL})...")
99
 
100
- client = InferenceClient(token=HF_TOKEN)
101
  # Send a simple dummy prompt to warm up
102
  response = client.chat_completion(
103
  model=HF_MODEL,
@@ -116,6 +120,16 @@ def warmup_hf_model():
116
  print(f"⚠️ Model warm-up skipped: {e}")
117
 
118
 
 
 
 
 
 
 
 
 
 
 
119
  # Run warm-up in background to not block startup
120
  import threading
121
  warmup_thread = threading.Thread(target=warmup_hf_model, daemon=True)
@@ -569,13 +583,19 @@ def reset_all_data():
569
  # ============================================================================
570
  # CLIENT SETUP - Research the user's company
571
  # ============================================================================
572
- async def setup_client_company(company_name: str, progress=gr.Progress()):
573
  global knowledge_base
574
 
575
  if not company_name or not company_name.strip():
576
  yield "⚠️ Please enter your company name."
577
  return
578
 
 
 
 
 
 
 
579
  company_name = company_name.strip()
580
 
581
  output = f"## 🏒 Setting Up: {company_name}\n\nBuilding knowledge base...\n\n---\n\n### ⏳ Progress\n\n"
@@ -586,7 +606,8 @@ async def setup_client_company(company_name: str, progress=gr.Progress()):
586
  # Initialize HuggingFace agent (FREE Inference API)
587
  agent = AutonomousMCPAgentHF(
588
  mcp_registry=mcp_registry,
589
- model=os.getenv('HF_MODEL', 'mistralai/Mistral-7B-Instruct-v0.3')
 
590
  )
591
  output += f"βœ… AI Agent initialized (HuggingFace - {agent.model})\n\n"
592
  yield output
@@ -754,13 +775,19 @@ This is OUR company - we need this information to find matching prospects."""
754
  # ============================================================================
755
  # AI PROSPECT DISCOVERY - Automatically find prospects
756
  # ============================================================================
757
- async def discover_prospects(num_prospects: int = 5, progress=gr.Progress()):
758
  global knowledge_base
759
 
760
  if not knowledge_base["client"]["name"]:
761
  yield "⚠️ **Setup Required**: Please go to Setup tab and enter your company name first."
762
  return
763
 
 
 
 
 
 
 
764
  client_name = knowledge_base["client"]["name"]
765
  client_info = knowledge_base["client"].get("raw_research", "")
766
 
@@ -772,7 +799,8 @@ async def discover_prospects(num_prospects: int = 5, progress=gr.Progress()):
772
  # Initialize HuggingFace agent (FREE Inference API)
773
  agent = AutonomousMCPAgentHF(
774
  mcp_registry=mcp_registry,
775
- model=os.getenv('HF_MODEL', 'mistralai/Mistral-7B-Instruct-v0.3')
 
776
  )
777
  output += f"βœ… AI Agent initialized (HuggingFace - {agent.model})\n\n"
778
  yield output
@@ -1193,6 +1221,21 @@ def create_app():
1193
  with gr.Column(scale=1):
1194
  gr.HTML("""
1195
  <div class="action-card">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1196
  <h3>🏒 Your Company</h3>
1197
  <p>Enter your company name. AI will research your company and then automatically find matching prospects.</p>
1198
  </div>
@@ -1204,9 +1247,9 @@ def create_app():
1204
  setup_btn = gr.Button("πŸš€ Setup My Company", variant="primary", size="lg")
1205
  reset_btn = gr.Button("πŸ—‘οΈ Reset All", variant="stop", size="sm")
1206
 
1207
- gr.HTML("""
1208
  <div class="action-card" style="margin-top: 16px;">
1209
- <h3>πŸ€– Automated Workflow</h3>
1210
  <ol style="margin: 0; padding-left: 20px;">
1211
  <li><strong>Setup</strong> β€” AI researches your company</li>
1212
  <li><strong>Find Prospects</strong> β€” AI discovers matching companies</li>
@@ -1217,7 +1260,7 @@ def create_app():
1217
  """)
1218
 
1219
  with gr.Column(scale=2):
1220
- setup_output = gr.Markdown("*Enter your company name to begin.*")
1221
 
1222
  # ===== DASHBOARD TAB =====
1223
  with gr.Tab("πŸ“Š Dashboard", id=1):
@@ -1231,7 +1274,7 @@ def create_app():
1231
  prospects_stat = gr.HTML(get_stat_html("0", "Prospects Found", "var(--primary-blue)"))
1232
  contacts_stat = gr.HTML(get_stat_html("0", "Decision Makers", "var(--success-green)"))
1233
  emails_stat = gr.HTML(get_stat_html("0", "Emails Drafted", "var(--warning-orange)"))
1234
- gr.HTML(get_stat_html("Granite", "AI Model", "var(--purple)"))
1235
 
1236
  # ===== PROSPECTS TAB =====
1237
  with gr.Tab("🎯 Prospects", id=2):
@@ -1248,6 +1291,15 @@ def create_app():
1248
  </div>
1249
  """)
1250
 
 
 
 
 
 
 
 
 
 
1251
  num_prospects = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Number of prospects to find")
1252
  discover_btn = gr.Button("πŸ” Find Prospects & Contacts", variant="primary", size="lg")
1253
 
@@ -1316,7 +1368,7 @@ def create_app():
1316
  # Setup button - run setup and then update status indicators
1317
  setup_btn.click(
1318
  fn=setup_client_company,
1319
- inputs=[client_name_input],
1320
  outputs=[setup_output]
1321
  ).then(
1322
  fn=lambda: (get_client_status_html(), get_client_status_html()),
@@ -1338,7 +1390,7 @@ def create_app():
1338
  # Discover prospects and then update all lists
1339
  discover_btn.click(
1340
  fn=discover_prospects,
1341
- inputs=[num_prospects],
1342
  outputs=[discovery_output]
1343
  ).then(
1344
  fn=lambda: (get_prospects_html(), get_contacts_html(), get_emails_html()),
 
52
  print("="*80)
53
 
54
  # AI Mode - HuggingFace Inference API (FREE)
55
+ # Uses huggingface_hub InferenceClient with Qwen/Qwen3-4B
56
+ HF_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen3-4B")
57
  HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HF_API_TOKEN")
58
 
59
+ # Session token storage (can be set via UI)
60
+ session_hf_token = {"token": HF_TOKEN}
61
+
62
  print(f"πŸ€– AI Mode: HuggingFace Inference API (FREE)")
63
  print(f" Model: {HF_MODEL}")
64
  if HF_TOKEN:
65
+ print(f"βœ… HF_TOKEN loaded from environment")
66
  else:
67
+ print("ℹ️ HF_TOKEN not set in environment - can be entered in the Setup tab")
68
 
69
  serper_key = os.getenv('SERPER_API_KEY')
70
  if serper_key:
 
92
  Send a dummy prompt to warm up the HuggingFace Inference API.
93
  This ensures the model is loaded and ready for the first real request.
94
  """
95
+ token = session_hf_token.get("token")
96
+ if not token:
97
  print("⏭️ Skipping model warm-up (HF_TOKEN not set)")
98
  return
99
 
 
101
  from huggingface_hub import InferenceClient
102
  print(f"πŸ”₯ Warming up HuggingFace model ({HF_MODEL})...")
103
 
104
+ client = InferenceClient(token=token)
105
  # Send a simple dummy prompt to warm up
106
  response = client.chat_completion(
107
  model=HF_MODEL,
 
120
  print(f"⚠️ Model warm-up skipped: {e}")
121
 
122
 
123
+ # Helper function to get current HF token (from UI or environment)
124
+ def get_hf_token(ui_token: str = None) -> str:
125
+ """Get HF token from UI input, session storage, or environment"""
126
+ if ui_token and ui_token.strip():
127
+ # Update session storage with UI token
128
+ session_hf_token["token"] = ui_token.strip()
129
+ return ui_token.strip()
130
+ return session_hf_token.get("token") or ""
131
+
132
+
133
  # Run warm-up in background to not block startup
134
  import threading
135
  warmup_thread = threading.Thread(target=warmup_hf_model, daemon=True)
 
583
  # ============================================================================
584
  # CLIENT SETUP - Research the user's company
585
  # ============================================================================
586
+ async def setup_client_company(company_name: str, hf_token_input: str, progress=gr.Progress()):
587
  global knowledge_base
588
 
589
  if not company_name or not company_name.strip():
590
  yield "⚠️ Please enter your company name."
591
  return
592
 
593
+ # Get HF token from UI input or environment
594
+ token = get_hf_token(hf_token_input)
595
+ if not token:
596
+ yield "⚠️ **HF_TOKEN Required**: Please enter your HuggingFace token in the Setup tab.\n\nGet a free token at: https://huggingface.co/settings/tokens"
597
+ return
598
+
599
  company_name = company_name.strip()
600
 
601
  output = f"## 🏒 Setting Up: {company_name}\n\nBuilding knowledge base...\n\n---\n\n### ⏳ Progress\n\n"
 
606
  # Initialize HuggingFace agent (FREE Inference API)
607
  agent = AutonomousMCPAgentHF(
608
  mcp_registry=mcp_registry,
609
+ hf_token=token,
610
+ model=HF_MODEL
611
  )
612
  output += f"βœ… AI Agent initialized (HuggingFace - {agent.model})\n\n"
613
  yield output
 
775
  # ============================================================================
776
  # AI PROSPECT DISCOVERY - Automatically find prospects
777
  # ============================================================================
778
+ async def discover_prospects(num_prospects: int, hf_token_input: str, progress=gr.Progress()):
779
  global knowledge_base
780
 
781
  if not knowledge_base["client"]["name"]:
782
  yield "⚠️ **Setup Required**: Please go to Setup tab and enter your company name first."
783
  return
784
 
785
+ # Get HF token from UI input or session
786
+ token = get_hf_token(hf_token_input)
787
+ if not token:
788
+ yield "⚠️ **HF_TOKEN Required**: Please enter your HuggingFace token in the Setup tab.\n\nGet a free token at: https://huggingface.co/settings/tokens"
789
+ return
790
+
791
  client_name = knowledge_base["client"]["name"]
792
  client_info = knowledge_base["client"].get("raw_research", "")
793
 
 
799
  # Initialize HuggingFace agent (FREE Inference API)
800
  agent = AutonomousMCPAgentHF(
801
  mcp_registry=mcp_registry,
802
+ hf_token=token,
803
+ model=HF_MODEL
804
  )
805
  output += f"βœ… AI Agent initialized (HuggingFace - {agent.model})\n\n"
806
  yield output
 
1221
  with gr.Column(scale=1):
1222
  gr.HTML("""
1223
  <div class="action-card">
1224
+ <h3>πŸ”‘ HuggingFace Token</h3>
1225
+ <p>Enter your HuggingFace token to use the AI. Get a free token at <a href="https://huggingface.co/settings/tokens" target="_blank">huggingface.co/settings/tokens</a></p>
1226
+ </div>
1227
+ """)
1228
+
1229
+ hf_token_input = gr.Textbox(
1230
+ label="HuggingFace Token",
1231
+ placeholder="hf_xxxxxxxxxxxxxxxxxxxxxxxxx",
1232
+ type="password",
1233
+ lines=1,
1234
+ value=HF_TOKEN or ""
1235
+ )
1236
+
1237
+ gr.HTML("""
1238
+ <div class="action-card" style="margin-top: 16px;">
1239
  <h3>🏒 Your Company</h3>
1240
  <p>Enter your company name. AI will research your company and then automatically find matching prospects.</p>
1241
  </div>
 
1247
  setup_btn = gr.Button("πŸš€ Setup My Company", variant="primary", size="lg")
1248
  reset_btn = gr.Button("πŸ—‘οΈ Reset All", variant="stop", size="sm")
1249
 
1250
+ gr.HTML(f"""
1251
  <div class="action-card" style="margin-top: 16px;">
1252
+ <h3>πŸ€– AI Model: {HF_MODEL}</h3>
1253
  <ol style="margin: 0; padding-left: 20px;">
1254
  <li><strong>Setup</strong> β€” AI researches your company</li>
1255
  <li><strong>Find Prospects</strong> β€” AI discovers matching companies</li>
 
1260
  """)
1261
 
1262
  with gr.Column(scale=2):
1263
+ setup_output = gr.Markdown("*Enter your HuggingFace token and company name to begin.*")
1264
 
1265
  # ===== DASHBOARD TAB =====
1266
  with gr.Tab("πŸ“Š Dashboard", id=1):
 
1274
  prospects_stat = gr.HTML(get_stat_html("0", "Prospects Found", "var(--primary-blue)"))
1275
  contacts_stat = gr.HTML(get_stat_html("0", "Decision Makers", "var(--success-green)"))
1276
  emails_stat = gr.HTML(get_stat_html("0", "Emails Drafted", "var(--warning-orange)"))
1277
+ gr.HTML(get_stat_html("Qwen3-4B", "AI Model", "var(--purple)"))
1278
 
1279
  # ===== PROSPECTS TAB =====
1280
  with gr.Tab("🎯 Prospects", id=2):
 
1291
  </div>
1292
  """)
1293
 
1294
+ # HF Token input for prospects tab (linked to setup tab token)
1295
+ hf_token_prospects = gr.Textbox(
1296
+ label="HuggingFace Token (from Setup tab)",
1297
+ placeholder="hf_xxxxxxxxxxxxxxxxxxxxxxxxx",
1298
+ type="password",
1299
+ lines=1,
1300
+ value=HF_TOKEN or ""
1301
+ )
1302
+
1303
  num_prospects = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Number of prospects to find")
1304
  discover_btn = gr.Button("πŸ” Find Prospects & Contacts", variant="primary", size="lg")
1305
 
 
1368
  # Setup button - run setup and then update status indicators
1369
  setup_btn.click(
1370
  fn=setup_client_company,
1371
+ inputs=[client_name_input, hf_token_input],
1372
  outputs=[setup_output]
1373
  ).then(
1374
  fn=lambda: (get_client_status_html(), get_client_status_html()),
 
1390
  # Discover prospects and then update all lists
1391
  discover_btn.click(
1392
  fn=discover_prospects,
1393
+ inputs=[num_prospects, hf_token_prospects],
1394
  outputs=[discovery_output]
1395
  ).then(
1396
  fn=lambda: (get_prospects_html(), get_contacts_html(), get_emails_html()),
mcp/agents/autonomous_agent_hf.py CHANGED
@@ -81,9 +81,19 @@ class AutonomousMCPAgentHF:
81
  """
82
  self.mcp_registry = mcp_registry
83
  self.hf_token = hf_token or os.getenv("HF_TOKEN") or os.getenv("HF_API_TOKEN")
84
- self.provider = provider or os.getenv("HF_PROVIDER") or DEFAULT_PROVIDER
85
  self.model = model or os.getenv("HF_MODEL") or DEFAULT_MODEL
86
 
 
 
 
 
 
 
 
 
 
 
 
87
  if not self.hf_token:
88
  raise ValueError(
89
  "HF_TOKEN is required!\n"
@@ -94,10 +104,14 @@ class AutonomousMCPAgentHF:
94
  # Initialize HuggingFace InferenceClient
95
  try:
96
  from huggingface_hub import InferenceClient
97
- self.client = InferenceClient(
98
- provider=self.provider,
99
- token=self.hf_token
100
- )
 
 
 
 
101
  logger.info(f"HuggingFace InferenceClient initialized")
102
  logger.info(f" Provider: {self.provider}")
103
  logger.info(f" Model: {self.model}")
 
81
  """
82
  self.mcp_registry = mcp_registry
83
  self.hf_token = hf_token or os.getenv("HF_TOKEN") or os.getenv("HF_API_TOKEN")
 
84
  self.model = model or os.getenv("HF_MODEL") or DEFAULT_MODEL
85
 
86
+ # Auto-detect provider based on model
87
+ if provider:
88
+ self.provider = provider
89
+ elif os.getenv("HF_PROVIDER"):
90
+ self.provider = os.getenv("HF_PROVIDER")
91
+ elif self.model in QWEN3_MODELS or self.model.startswith("Qwen/Qwen3"):
92
+ # Qwen3 models need a provider (use nebius by default)
93
+ self.provider = "nebius"
94
+ else:
95
+ self.provider = DEFAULT_PROVIDER
96
+
97
  if not self.hf_token:
98
  raise ValueError(
99
  "HF_TOKEN is required!\n"
 
104
  # Initialize HuggingFace InferenceClient
105
  try:
106
  from huggingface_hub import InferenceClient
107
+ # For serverless API (hf-inference), don't pass provider
108
+ if self.provider == "hf-inference":
109
+ self.client = InferenceClient(token=self.hf_token)
110
+ else:
111
+ self.client = InferenceClient(
112
+ provider=self.provider,
113
+ token=self.hf_token
114
+ )
115
  logger.info(f"HuggingFace InferenceClient initialized")
116
  logger.info(f" Provider: {self.provider}")
117
  logger.info(f" Model: {self.model}")
requirements.txt CHANGED
@@ -1,9 +1,10 @@
1
  # CX AI Agent - Requirements for HuggingFace Spaces
2
  # ================================================
3
 
4
- # HuggingFace Hub (for FREE Inference API)
5
  # Provides InferenceClient for chat completions
6
- huggingface_hub>=0.22.0
 
7
 
8
  # Gradio Interface (REQUIRED)
9
  gradio>=4.0.0
 
1
  # CX AI Agent - Requirements for HuggingFace Spaces
2
  # ================================================
3
 
4
+ # HuggingFace Hub (for Inference API with Qwen/Qwen3-4B model)
5
  # Provides InferenceClient for chat completions
6
+ # Requires HF_TOKEN for authentication
7
+ huggingface_hub>=0.24.0
8
 
9
  # Gradio Interface (REQUIRED)
10
  gradio>=4.0.0