| import gradio as gr |
| from gradio_leaderboard import Leaderboard, ColumnFilter |
| import json |
| import os |
| import time |
| import requests |
| from huggingface_hub import HfApi, hf_hub_download |
| from huggingface_hub.errors import HfHubHTTPError |
| import backoff |
| from dotenv import load_dotenv |
| import pandas as pd |
| import random |
| import plotly.graph_objects as go |
| from plotly.subplots import make_subplots |
| from apscheduler.schedulers.background import BackgroundScheduler |
| from apscheduler.triggers.cron import CronTrigger |
| from datetime import datetime, timezone |
|
|
| |
# Load environment variables from a local .env file. override=True means values
# from .env take precedence over variables already set in the process env.
load_dotenv(override=True)


# HuggingFace dataset repo holding one JSON metadata file per assistant.
AGENTS_REPO = "SWE-Arena/bot_data"
# Leaderboard snapshot filename is derived from the compose project name.
# NOTE(review): if COMPOSE_PROJECT_NAME is unset this becomes "None.json" — confirm intended.
LEADERBOARD_FILENAME = f"{os.getenv('COMPOSE_PROJECT_NAME')}.json"
# HuggingFace dataset repo holding the aggregated leaderboard snapshot.
LEADERBOARD_REPO = "SWE-Arena/leaderboard_data"
# Minimum age (in days) for an open issue to count as "long-standing".
LONGSTANDING_GAP_DAYS = 30
# Max attempts for rate-limited HuggingFace API calls (see backoff wrappers below).
MAX_RETRIES = 5


# (column name, datatype) pairs describing the leaderboard table schema.
# Order here defines the column order in the rendered table.
LEADERBOARD_COLUMNS = [
    ("Assistant", "string"),
    ("Website", "string"),
    ("Total Issues", "number"),
    ("Total Discussions", "number"),
    ("Total Wanted Issues", "number"),
    ("Issue Resolved Rate (%)", "number"),
    ("Discussion Resolved Rate (%)", "number"),
    ("Wanted Issue Resolved Rate (%)", "number"),
    ("Resolved Wanted Issues", "number"),
    ("Resolved Issues", "number"),
    ("Resolved Discussions", "number"),
]
|
|
| |
| |
| |
|
|
def is_rate_limit_error(e):
    """Check if exception is a HuggingFace rate limit error (429)."""
    # Only HfHubHTTPError carries a .response; any other exception type
    # cannot be a Hub rate-limit error.
    if not isinstance(e, HfHubHTTPError):
        return False
    return e.response.status_code == 429
|
|
|
|
@backoff.on_exception(
    backoff.expo,
    HfHubHTTPError,
    max_tries=MAX_RETRIES,
    base=300,
    max_value=3600,
    # Give up immediately on any HfHubHTTPError that is not a 429 rate limit.
    giveup=lambda e: not is_rate_limit_error(e),
    # BUG FIX: the attempt count previously hardcoded "/5" and would drift
    # if MAX_RETRIES changed; interpolate the constant instead.
    on_backoff=lambda details: print(
        f"Rate limited. Retrying in {details['wait']/60:.1f} minutes ({details['wait']:.0f}s) - attempt {details['tries']}/{MAX_RETRIES}..."
    )
)
def list_repo_files_with_backoff(api, **kwargs):
    """Wrapper for api.list_repo_files() with exponential backoff for rate limits.

    Retries only on HTTP 429 responses; any other HfHubHTTPError is re-raised
    immediately via the giveup predicate.
    """
    return api.list_repo_files(**kwargs)
|
|
|
|
@backoff.on_exception(
    backoff.expo,
    HfHubHTTPError,
    max_tries=MAX_RETRIES,
    base=300,
    max_value=3600,
    # Give up immediately on any HfHubHTTPError that is not a 429 rate limit.
    giveup=lambda e: not is_rate_limit_error(e),
    # BUG FIX: the attempt count previously hardcoded "/5" and would drift
    # if MAX_RETRIES changed; interpolate the constant instead.
    on_backoff=lambda details: print(
        f"Rate limited. Retrying in {details['wait']/60:.1f} minutes ({details['wait']:.0f}s) - attempt {details['tries']}/{MAX_RETRIES}..."
    )
)
def hf_hub_download_with_backoff(**kwargs):
    """Wrapper for hf_hub_download() with exponential backoff for rate limits.

    Retries only on HTTP 429 responses; any other HfHubHTTPError is re-raised
    immediately via the giveup predicate.
    """
    return hf_hub_download(**kwargs)
|
|
|
|
| |
| |
| |
|
|
def validate_github_username(identifier):
    """Verify that a GitHub identifier exists.

    Returns a (is_valid, message) tuple; never raises.
    """
    try:
        response = requests.get(f'https://api.github.com/users/{identifier}', timeout=10)
        if response.status_code == 200:
            return True, "Username is valid"
        if response.status_code == 404:
            return False, "GitHub identifier not found"
        # Any other status (rate limit, server error, ...) is reported as-is.
        return False, f"Validation error: HTTP {response.status_code}"
    except Exception as e:
        # Network failures, timeouts, DNS errors, etc.
        return False, f"Validation error: {str(e)}"
|
|
|
|
| |
| |
| |
|
|
def load_agents_from_hf():
    """Load all assistant metadata JSON files from HuggingFace dataset.

    Returns a list of metadata dicts for assistants whose status is 'active',
    or None when the repository cannot be listed at all.
    """
    try:
        api = HfApi()

        # Enumerate the repo once, then keep only the JSON metadata files.
        repo_files = list_repo_files_with_backoff(api=api, repo_id=AGENTS_REPO, repo_type="dataset")
        metadata_files = [name for name in repo_files if name.endswith('.json')]

        active_agents = []
        for json_file in metadata_files:
            try:
                local_path = hf_hub_download_with_backoff(
                    repo_id=AGENTS_REPO,
                    filename=json_file,
                    repo_type="dataset"
                )
                with open(local_path, 'r') as fh:
                    agent_data = json.load(fh)

                # Skip anything not explicitly marked active.
                if agent_data.get('status') != 'active':
                    continue

                # The filename (minus extension) is the canonical GitHub
                # identifier; it overrides whatever the file contains.
                agent_data['github_identifier'] = json_file.replace('.json', '')
                active_agents.append(agent_data)
            except Exception as e:
                # One bad file must not abort the whole load.
                print(f"Warning: Could not load {json_file}: {str(e)}")
                continue

        print(f"Loaded {len(active_agents)} assistants from HuggingFace")
        return active_agents

    except Exception as e:
        print(f"Could not load assistants from HuggingFace: {str(e)}")
        return None
|
|
|
|
def get_hf_token():
    """Return the HuggingFace token from the HF_TOKEN env var (None if unset)."""
    token = os.getenv('HF_TOKEN')
    if token:
        return token
    # Still return the falsy value so callers can decide how to react.
    print("Warning: HF_TOKEN not found in environment variables")
    return token
|
|
|
|
def upload_with_retry(api, path_or_fileobj, path_in_repo, repo_id, repo_type, token, max_retries=5):
    """
    Upload file to HuggingFace, retrying with exponential backoff plus jitter.

    Args:
        api: HfApi instance
        path_or_fileobj: Local file path to upload
        path_in_repo: Target path in the repository
        repo_id: Repository ID
        repo_type: Type of repository (e.g., "dataset")
        token: HuggingFace token
        max_retries: Maximum number of attempts before giving up

    Returns:
        True once the upload succeeds; re-raises the last exception when every
        attempt has failed.
    """
    backoff_seconds = 2.0

    for attempt in range(max_retries):
        try:
            api.upload_file(
                path_or_fileobj=path_or_fileobj,
                path_in_repo=path_in_repo,
                repo_id=repo_id,
                repo_type=repo_type,
                token=token
            )
        except Exception as e:
            if attempt >= max_retries - 1:
                # Out of attempts: surface the failure to the caller.
                print(f" Upload failed after {max_retries} attempts: {str(e)}")
                raise
            # Sleep for the current backoff plus up to 1s of jitter, then
            # double the backoff (capped at 60s) for the next round.
            pause = backoff_seconds + random.uniform(0, 1.0)
            print(f" Upload failed (attempt {attempt + 1}/{max_retries}): {str(e)}")
            print(f" Retrying in {pause:.1f} seconds...")
            time.sleep(pause)
            backoff_seconds = min(backoff_seconds * 2, 60.0)
        else:
            if attempt > 0:
                print(f" Upload succeeded on attempt {attempt + 1}/{max_retries}")
            return True
|
|
|
|
def save_agent_to_hf(data):
    """Save a new assistant to HuggingFace dataset as {identifier}.json in root.

    Args:
        data: Assistant metadata dict; must contain 'github_identifier'.

    Returns:
        True on success, False on any failure (errors are printed, not raised).
    """
    try:
        api = HfApi()
        token = get_hf_token()

        if not token:
            raise Exception("No HuggingFace token found. Please set HF_TOKEN in your Space settings.")

        identifier = data['github_identifier']
        filename = f"{identifier}.json"

        # Serialize to a local scratch file that mirrors the in-repo path.
        with open(filename, 'w') as f:
            json.dump(data, f, indent=2)

        try:
            upload_with_retry(
                api=api,
                path_or_fileobj=filename,
                path_in_repo=filename,
                repo_id=AGENTS_REPO,
                repo_type="dataset",
                token=token
            )
            # BUG FIX: this log line previously printed a literal "(unknown)"
            # placeholder instead of the actual file that was saved.
            print(f"Saved assistant to HuggingFace: {filename}")
            return True
        finally:
            # Always remove the local scratch file, even if the upload failed.
            if os.path.exists(filename):
                os.remove(filename)

    except Exception as e:
        print(f"Error saving assistant: {str(e)}")
        return False
|
|
|
|
def load_leaderboard_data_from_hf():
    """
    Load leaderboard data and monthly metrics from HuggingFace dataset.

    Returns:
        dict: Dictionary with 'leaderboard', 'monthly_metrics', and 'metadata'
        keys, or None if the file doesn't exist or an error occurs.
    """
    try:
        # Download (or reuse the cached copy of) the snapshot file.
        local_path = hf_hub_download_with_backoff(
            repo_id=LEADERBOARD_REPO,
            filename=LEADERBOARD_FILENAME,
            repo_type="dataset",
            token=get_hf_token()
        )

        with open(local_path, 'r') as fh:
            payload = json.load(fh)

        stamp = payload.get('metadata', {}).get('last_updated', 'Unknown')
        print(f"Loaded leaderboard data from HuggingFace (last updated: {stamp})")
        return payload

    except Exception as e:
        print(f"Could not load leaderboard data from HuggingFace: {str(e)}")
        return None
|
|
|
|
| |
| |
| |
|
|
def create_monthly_metrics_plot(type="issue", top_n=5):
    """
    Create a Plotly figure with dual y-axes showing monthly metrics:
    - Left y-axis: Resolved Rate (%) as line curves
    - Right y-axis: Total count (Issues/Discussions/Wanted Issues) as bars

    Each assistant gets a unique color for both their line and bars.

    Args:
        type: Type of metrics to display - "issue", "discussion" or "wanted"
            (the previous docstring omitted "wanted" even though it was supported)
        top_n: Number of top assistants to show (default: 5)

    Returns:
        A plotly Figure (a placeholder annotation figure when no data exists).

    Raises:
        ValueError: if *type* is not one of the supported values.
    """
    # Map the requested metric type onto the keys/labels used in the saved data.
    if type == "issue":
        metrics_key = 'issue_monthly_metrics'
        total_field = 'total_issues'
        no_data_msg = "No issue data available for visualization"
        total_label = "Total Issues"
        print_msg = "issue"
    elif type == "discussion":
        metrics_key = 'discussion_monthly_metrics'
        total_field = 'total_discussions'
        no_data_msg = "No discussion data available for visualization"
        total_label = "Total Discussions"
        print_msg = "discussion"
    elif type == "wanted":
        metrics_key = 'wanted_issue_monthly_metrics'
        total_field = 'total_wanted'
        no_data_msg = "No wanted issue data available for visualization"
        total_label = "Total Wanted Issues"
        print_msg = "wanted issue"
    else:
        # BUG FIX: an unrecognized type previously fell through this chain and
        # crashed later with a NameError on the unset locals; fail fast instead.
        raise ValueError(f"Unknown metrics type: {type!r} (expected 'issue', 'discussion' or 'wanted')")

    saved_data = load_leaderboard_data_from_hf()

    if not saved_data or metrics_key not in saved_data:
        # No saved metrics at all: render a friendly placeholder figure.
        fig = go.Figure()
        fig.add_annotation(
            text=no_data_msg,
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False,
            font=dict(size=16)
        )
        fig.update_layout(
            title=None,
            xaxis_title=None,
            height=500
        )
        return fig

    metrics = saved_data[metrics_key]
    print(f"Loaded {print_msg} monthly metrics from saved dataset")

    # Restrict to the top_n assistants ranked by total activity across months.
    if top_n is not None and top_n > 0 and metrics.get('assistants'):
        agent_totals = []
        for agent_name in metrics['assistants']:
            agent_data = metrics['data'].get(agent_name, {})
            total_count = sum(agent_data.get(total_field, []))
            agent_totals.append((agent_name, total_count))

        agent_totals.sort(key=lambda x: x[1], reverse=True)
        top_agents = [agent_name for agent_name, _ in agent_totals[:top_n]]

        metrics = {
            'assistants': top_agents,
            'months': metrics['months'],
            'data': {assistant: metrics['data'][assistant] for assistant in top_agents if assistant in metrics['data']}
        }

    if not metrics['assistants'] or not metrics['months']:
        # Metrics key exists but holds nothing: same placeholder treatment.
        fig = go.Figure()
        fig.add_annotation(
            text=no_data_msg,
            xref="paper", yref="paper",
            x=0.5, y=0.5, showarrow=False,
            font=dict(size=16)
        )
        fig.update_layout(
            title=None,
            xaxis_title=None,
            height=500
        )
        return fig

    # One subplot with a secondary y-axis for the bar series.
    fig = make_subplots(specs=[[{"secondary_y": True}]])

    def generate_color(index, total):
        """Generate distinct colors using HSL color space for better distribution"""
        hue = (index * 360 / total) % 360
        saturation = 70 + (index % 3) * 10
        lightness = 45 + (index % 2) * 10
        return f'hsl({hue}, {saturation}%, {lightness}%)'

    assistants = metrics['assistants']
    months = metrics['months']
    data = metrics['data']

    # Stable color per assistant, shared by its line and bar traces.
    agent_colors = {assistant: generate_color(idx, len(assistants)) for idx, assistant in enumerate(assistants)}

    for idx, agent_name in enumerate(assistants):
        color = agent_colors[agent_name]
        agent_data = data[agent_name]

        # Line trace: monthly resolved rate on the left (primary) axis.
        resolved_rates = agent_data['resolved_rates']
        x_resolved = months
        y_resolved = resolved_rates

        if x_resolved and y_resolved:
            fig.add_trace(
                go.Scatter(
                    x=x_resolved,
                    y=y_resolved,
                    name=agent_name,
                    mode='lines+markers',
                    line=dict(color=color, width=2),
                    marker=dict(size=8),
                    legendgroup=agent_name,
                    showlegend=(top_n is not None and top_n <= 10),
                    hovertemplate='<b>Assistant: %{fullData.name}</b><br>' +
                                  'Month: %{x}<br>' +
                                  'Resolved Rate: %{y:.2f}%<br>' +
                                  '<extra></extra>'
                ),
                secondary_y=False
            )

        # Bar trace: monthly totals on the right (secondary) axis.
        # Skip zero-count months so empty bars don't clutter the chart.
        x_bars = []
        y_bars = []
        for month, count in zip(months, agent_data[total_field]):
            if count > 0:
                x_bars.append(month)
                y_bars.append(count)

        if x_bars and y_bars:
            fig.add_trace(
                go.Bar(
                    x=x_bars,
                    y=y_bars,
                    name=agent_name,
                    marker=dict(color=color, opacity=0.6),
                    legendgroup=agent_name,
                    showlegend=False,
                    hovertemplate=f'<b>Assistant: %{{fullData.name}}</b><br>' +
                                  f'Month: %{{x}}<br>' +
                                  f'{total_label}: %{{y}}<br>' +
                                  '<extra></extra>',
                    offsetgroup=agent_name
                ),
                secondary_y=True
            )

    # Axis titles; the rate axis is pinned to 0-100% with 10% ticks.
    fig.update_xaxes(title_text=None)
    fig.update_yaxes(
        title_text="<b>Resolved Rate (%)</b>",
        range=[0, 100],
        secondary_y=False,
        showticklabels=True,
        tickmode='linear',
        dtick=10,
        showgrid=True
    )
    fig.update_yaxes(title_text=f"<b>{total_label}</b>", secondary_y=True)

    # Legends get unreadable past ~10 entries, so hide them for large top_n.
    show_legend = (top_n is not None and top_n <= 10)
    fig.update_layout(
        title=None,
        hovermode='closest',
        barmode='group',
        height=600,
        showlegend=show_legend,
        margin=dict(l=50, r=150 if show_legend else 50, t=50, b=50)
    )

    return fig
|
|
|
|
def get_leaderboard_dataframe():
    """
    Load leaderboard from saved dataset and convert to pandas DataFrame for display.

    Returns a DataFrame sorted by "Issue Resolved Rate (%)" descending, with
    zero-activity assistants filtered out. (The previous docstring claimed the
    sort key was total issues, which did not match the code.)
    """
    column_names = [col[0] for col in LEADERBOARD_COLUMNS]

    def _empty_frame():
        # Shared shape for every "nothing to show" exit path.
        return pd.DataFrame(columns=column_names)

    saved_data = load_leaderboard_data_from_hf()

    if not saved_data or 'leaderboard' not in saved_data:
        print("No leaderboard data available")
        return _empty_frame()

    cache_dict = saved_data['leaderboard']
    last_updated = saved_data.get('metadata', {}).get('last_updated', 'Unknown')
    print(f"Loaded leaderboard from saved dataset (last updated: {last_updated})")
    print(f"Cache dict size: {len(cache_dict)}")

    if not cache_dict:
        print("WARNING: cache_dict is empty!")
        return _empty_frame()

    rows = []
    filtered_count = 0
    for identifier, data in cache_dict.items():
        total_issues = data.get('total_issues', 0)
        total_discussions = data.get('total_discussions', 0)
        total_wanted_issues = data.get('total_wanted_issues', 0)

        # Hide assistants with no tracked activity at all.
        if total_issues == 0 and total_discussions == 0 and total_wanted_issues == 0:
            filtered_count += 1
            continue

        # Row order must match LEADERBOARD_COLUMNS.
        rows.append([
            data.get('name', 'Unknown'),
            data.get('website', 'N/A'),
            total_issues,
            total_discussions,
            total_wanted_issues,
            data.get('resolved_rate', 0.0),
            data.get('discussion_resolved_rate', 0.0),
            data.get('wanted_issue_resolved_rate', 0.0),
            data.get('resolved_wanted_issues', 0),
            data.get('resolved_issues', 0),
            data.get('resolved_discussions', 0),
        ])

    print(f"Filtered out {filtered_count} assistants with no activity")
    print(f"Leaderboard will show {len(rows)} assistants")

    df = pd.DataFrame(rows, columns=column_names)

    # Coerce numeric columns so sorting/filtering behaves even with bad data.
    numeric_cols = [
        "Total Issues", "Total Discussions", "Total Wanted Issues",
        "Issue Resolved Rate (%)", "Discussion Resolved Rate (%)",
        "Wanted Issue Resolved Rate (%)", "Resolved Wanted Issues",
        "Resolved Issues", "Resolved Discussions"
    ]
    for col in numeric_cols:
        if col in df.columns:
            df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)

    if "Issue Resolved Rate (%)" in df.columns and not df.empty:
        df = df.sort_values(by="Issue Resolved Rate (%)", ascending=False).reset_index(drop=True)

    # Workaround: the Leaderboard component renders a single-row table poorly,
    # so pad with an inert placeholder row.
    if len(df) == 1:
        placeholder_row = pd.DataFrame([[
            "Submit yours to join!", "—", 0, 0, 0, 0.0, 0.0, 0.0, 0, 0, 0
        ]], columns=df.columns)
        df = pd.concat([df, placeholder_row], ignore_index=True)
        print("Added placeholder row for single-record workaround")

    print(f"Final DataFrame shape: {df.shape}")
    print("="*60 + "\n")

    return df
|
|
|
|
def get_wanted_issues_dataframe():
    """Load wanted issues and convert to pandas DataFrame, oldest first.

    Returns a DataFrame with columns Title, URL (HTML link), Age (days), Labels;
    empty when no data is available.
    """
    saved_data = load_leaderboard_data_from_hf()

    if not saved_data or 'wanted_issues' not in saved_data:
        print("No wanted issues data available")
        return pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"])

    wanted_issues = saved_data['wanted_issues']
    print(f"Loaded {len(wanted_issues)} wanted issues")

    if not wanted_issues:
        return pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"])

    rows = []
    for issue in wanted_issues:
        # Compute age in days from the ISO timestamp; unparseable or missing
        # timestamps fall back to 0.
        created_at = issue.get('created_at')
        age_days = 0
        if created_at and created_at != 'N/A':
            try:
                created = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
                age_days = (datetime.now(timezone.utc) - created).days
            except (ValueError, TypeError, AttributeError):
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only catch parse failures.
                pass

        # Render as a clickable "repo#number" link in the HTML column.
        url = issue.get('url', '')
        repo = issue.get('repo', '')
        issue_number = issue.get('number', '')
        url_link = f'<a href="{url}" target="_blank">{repo}#{issue_number}</a>'

        rows.append([
            issue.get('title', ''),
            url_link,
            age_days,
            ', '.join(issue.get('labels', []))
        ])

    df = pd.DataFrame(rows, columns=["Title", "URL", "Age (days)", "Labels"])

    # Oldest (largest age) issues first.
    if "Age (days)" in df.columns and not df.empty:
        df = df.sort_values(by="Age (days)", ascending=False).reset_index(drop=True)

    return df
|
|
|
|
def submit_agent(identifier, agent_name, organization, website):
    """
    Submit a new assistant to the leaderboard.
    Validates input and saves submission.

    Returns a (status_message, gradio_update) tuple.
    """
    # Every field is mandatory and must be non-blank.
    required = [
        (identifier, "GitHub identifier"),
        (agent_name, "Assistant name"),
        (organization, "Organization name"),
        (website, "Website URL"),
    ]
    for value, label in required:
        if not value or not value.strip():
            return f"ERROR: {label} is required", gr.update()

    # Normalize whitespace before any further checks.
    identifier = identifier.strip()
    agent_name = agent_name.strip()
    organization = organization.strip()
    website = website.strip()

    # The identifier must correspond to a real GitHub account.
    is_valid, message = validate_github_username(identifier)
    if not is_valid:
        return f"ERROR: {message}", gr.update()

    # Reject duplicate submissions by GitHub identifier.
    assistants = load_agents_from_hf()
    if assistants:
        existing_names = {assistant['github_identifier'] for assistant in assistants}
        if identifier in existing_names:
            return f"WARNING: Assistant with identifier '{identifier}' already exists", gr.update()

    submission = {
        'name': agent_name,
        'organization': organization,
        'github_identifier': identifier,
        'website': website,
        'status': 'active'
    }

    if not save_agent_to_hf(submission):
        return "ERROR: Failed to save submission", gr.update()

    return f"SUCCESS: Successfully submitted {agent_name}! Issue data will be automatically populated by the backend system via the maintainers.", gr.update()
|
|
|
|
| |
| |
| |
|
|
def reload_leaderboard_data():
    """
    Reload leaderboard data from HuggingFace.
    This function is called by the scheduler on a daily basis.
    """
    banner = "=" * 80
    print(f"\n{banner}")
    print(f"Reloading leaderboard data from HuggingFace...")
    print(f"{banner}\n")

    try:
        data = load_leaderboard_data_from_hf()
        # Truthiness check on purpose: None and empty payloads both count as
        # "no data".
        if not data:
            print(f"No data available")
        else:
            print(f"Successfully reloaded leaderboard data")
            print(f" Last updated: {data.get('metadata', {}).get('last_updated', 'Unknown')}")
            print(f" Assistants: {len(data.get('leaderboard', {}))}")
    except Exception as e:
        print(f"Error reloading leaderboard data: {str(e)}")

    print(f"{banner}\n")
|
|
|
|
| |
| |
| |
|
|
# Startup banner: announce data source and refresh cadence.
print(f"\nStarting SWE Assistant Issue Leaderboard")
print(f" Data source: {LEADERBOARD_REPO}")
print(f" Reload frequency: Daily at 12:00 AM UTC\n")


# Schedule a daily refresh of the cached leaderboard data at midnight UTC.
# BackgroundScheduler runs in a daemon thread alongside the Gradio server.
scheduler = BackgroundScheduler(timezone="UTC")
scheduler.add_job(
    reload_leaderboard_data,
    trigger=CronTrigger(hour=0, minute=0),
    id='daily_data_reload',
    name='Daily Data Reload',
    replace_existing=True  # idempotent if the module is imported twice
)
scheduler.start()
print(f"\n{'='*80}")
print(f"Scheduler initialized successfully")
print(f"Reload schedule: Daily at 12:00 AM UTC")
print(f"On startup: Loads cached data from HuggingFace on demand")
print(f"{'='*80}\n")
|
|
| |
# --- Gradio UI definition ---------------------------------------------------
# Three tabs: the leaderboard with monthly plots, the long-standing
# "Issues Wanted" table, and the assistant submission form. Data is loaded
# lazily via app.load callbacks so startup does not block on HuggingFace.
with gr.Blocks(title="SWE Assistant Issue & Discussion Leaderboard", theme=gr.themes.Soft()) as app:
    gr.Markdown("# SWE Assistant Issue & Discussion Leaderboard")
    gr.Markdown(f"Track and compare GitHub issue and discussion resolution statistics for SWE assistants")

    with gr.Tabs():

        # Tab 1: main leaderboard table plus monthly metric plots.
        with gr.Tab("Leaderboard"):
            gr.Markdown("*Statistics are based on assistant issue resolution activity tracked by the system*")
            # Starts empty; populated by the app.load callback below.
            leaderboard_table = Leaderboard(
                value=pd.DataFrame(columns=[col[0] for col in LEADERBOARD_COLUMNS]),
                datatype=LEADERBOARD_COLUMNS,
                search_columns=["Assistant", "Website"],
                filter_columns=[
                    ColumnFilter(
                        "Issue Resolved Rate (%)",
                        min=0,
                        max=100,
                        default=[0, 100],
                        type="slider",
                        label="Issue Resolved Rate (%)"
                    ),
                    ColumnFilter(
                        "Discussion Resolved Rate (%)",
                        min=0,
                        max=100,
                        default=[0, 100],
                        type="slider",
                        label="Discussion Resolved Rate (%)"
                    ),
                    ColumnFilter(
                        "Wanted Issue Resolved Rate (%)",
                        min=0,
                        max=100,
                        default=[0, 100],
                        type="slider",
                        label="Wanted Issue Resolved Rate (%)"
                    )
                ]
            )

            # Fill the table on page load.
            app.load(
                fn=get_leaderboard_dataframe,
                inputs=[],
                outputs=[leaderboard_table]
            )

            gr.Markdown("---")
            gr.Markdown("## Monthly Performance Metrics - Top 5 Assistants")

            # Three side-by-side plots: issues, discussions, wanted issues.
            with gr.Row():
                with gr.Column():
                    gr.Markdown("*Issue volume and resolved rate over time*")
                    monthly_metrics_plot = gr.Plot()

                with gr.Column():
                    gr.Markdown("*Discussion volume and resolved rate over time*")
                    discussion_metrics_plot = gr.Plot()

                with gr.Column():
                    gr.Markdown("*Wanted issue volume and resolved rate over time*")
                    wanted_metrics_plot = gr.Plot()

            # Each plot is rendered on page load for its own metric type.
            app.load(
                fn=lambda: create_monthly_metrics_plot(type="issue"),
                inputs=[],
                outputs=[monthly_metrics_plot]
            )

            app.load(
                fn=lambda: create_monthly_metrics_plot(type="discussion"),
                inputs=[],
                outputs=[discussion_metrics_plot]
            )

            app.load(
                fn=lambda: create_monthly_metrics_plot(type="wanted"),
                inputs=[],
                outputs=[wanted_metrics_plot]
            )

        # Tab 2: long-standing patch-wanted issues.
        with gr.Tab("Issues Wanted"):
            gr.Markdown("### Long-Standing Patch-Wanted Issues")
            gr.Markdown(f"*Issues open for {LONGSTANDING_GAP_DAYS}+ days with patch-wanted labels from tracked organizations*")

            # "html" datatype lets the URL column render as a clickable link.
            wanted_table = gr.Dataframe(
                value=pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"]),
                datatype=["str", "html", "number", "str"],
                interactive=False,
                wrap=True
            )

            app.load(
                fn=get_wanted_issues_dataframe,
                inputs=[],
                outputs=[wanted_table]
            )

        # Tab 3: submission form for new assistants.
        with gr.Tab("Submit Your Assistant"):

            gr.Markdown("Fill in the details below to add your assistant to the leaderboard.")

            with gr.Row():
                with gr.Column():
                    github_input = gr.Textbox(
                        label="GitHub Identifier*",
                        placeholder="Your assistant username (e.g., my-assistant[bot])"
                    )
                    name_input = gr.Textbox(
                        label="Assistant Name*",
                        placeholder="Your assistant's display name"
                    )

                with gr.Column():
                    organization_input = gr.Textbox(
                        label="Organization*",
                        placeholder="Your organization or team name"
                    )
                    website_input = gr.Textbox(
                        label="Website*",
                        placeholder="https://your-assistant-website.com"
                    )

            submit_button = gr.Button(
                "Submit Assistant",
                variant="primary"
            )
            submission_status = gr.Textbox(
                label="Submission Status",
                interactive=False
            )

            # submit_agent returns (status message, leaderboard update).
            submit_button.click(
                fn=submit_agent,
                inputs=[github_input, name_input, organization_input, website_input],
                outputs=[submission_status, leaderboard_table]
            )
|
|
|
|
| |
# Launch the Gradio server when run directly; the scheduler started above
# keeps running in its background thread for the life of the process.
if __name__ == "__main__":
    app.launch()
|
|