main.py
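# Builds the "top-ranking issues" report for zed-industries/zed: searches GitHub for
# open issues sorted by :+1: reactions, groups them into labeled sections, and either
# prints the resulting Markdown or writes it into an existing tracking issue.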

import os
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Optional

import typer
from github import Github
from github.Issue import Issue
from github.Repository import Repository
from pytz import timezone
from typer import Typer

app: Typer = typer.Typer()

DATETIME_FORMAT: str = "%m/%d/%Y %I:%M %p"
ISSUES_PER_LABEL: int = 50


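# Plain-data snapshot of a GitHub issue: title, URL, :+1: count, formatted creation
# time, and label names, plus a reference to the underlying PyGithub Issue object.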
class IssueData:
    def __init__(self, issue: Issue) -> None:
        self.title: str = issue.title
        self.url: str = issue.html_url
        self.like_count: int = issue._rawData["reactions"]["+1"]  # type: ignore [attr-defined]
        self.creation_datetime: str = issue.created_at.strftime(DATETIME_FORMAT)
        # TODO: Change script to support storing labels here, rather than directly in the script
        self.labels: set[str] = {label["name"] for label in issue._rawData["labels"]}  # type: ignore [attr-defined]
        self._issue = issue


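# CLI entry point. All options are optional:
#   github_token: falls back to the GITHUB_ACCESS_TOKEN environment variable
#   issue_reference_number: the issue whose body is overwritten with the report
#   query_day_interval: only consider issues created within the last N days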
@app.command()
def main(
    github_token: Optional[str] = None,
    issue_reference_number: Optional[int] = None,
    query_day_interval: Optional[int] = None,
) -> None:
    start_time: datetime = datetime.now()

    start_date: datetime | None = None

    if query_day_interval:
        tz = timezone("America/New_York")
        current_time = datetime.now(tz).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        start_date = current_time - timedelta(days=query_day_interval)

    # GitHub Workflow will pass in the token as an environment variable,
    # but we can place it in our env when running the script locally, for convenience
    github_token = github_token or os.getenv("GITHUB_ACCESS_TOKEN")
    github = Github(github_token)

    remaining_requests_before: int = github.rate_limiting[0]
    print(f"Remaining requests before: {remaining_requests_before}")

    repo_name: str = "zed-industries/zed"
    repository: Repository = github.get_repo(repo_name)

    label_to_issue_data: dict[str, list[IssueData]] = get_issue_maps(
        github, repository, start_date
    )

    issue_text: str = get_issue_text(label_to_issue_data)

    if issue_reference_number:
        top_ranking_issues_issue: Issue = repository.get_issue(issue_reference_number)
        top_ranking_issues_issue.edit(body=issue_text)
    else:
        print(issue_text)

    remaining_requests_after: int = github.rate_limiting[0]
    print(f"Remaining requests after: {remaining_requests_after}")
    print(f"Requests used: {remaining_requests_before - remaining_requests_after}")

    run_duration: timedelta = datetime.now() - start_time
    print(run_duration)


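# Returns a mapping of section label -> sorted issue data, with the sections themselves
# ordered by the total :+1: count of their issues.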
def get_issue_maps(
    github: Github,
    repository: Repository,
    start_date: datetime | None = None,
) -> dict[str, list[IssueData]]:
    label_to_issues: defaultdict[str, list[Issue]] = get_label_to_issues(
        github,
        repository,
        start_date,
    )
    label_to_issue_data: dict[str, list[IssueData]] = get_label_to_issue_data(
        label_to_issues
    )

    # Create a new dictionary with labels ordered by the sum of the likes on the associated issues
    labels = list(label_to_issue_data.keys())

    labels.sort(
        key=lambda label: sum(
            issue_data.like_count for issue_data in label_to_issue_data[label]
        ),
        reverse=True,
    )

    label_to_issue_data = {label: label_to_issue_data[label] for label in labels}

    return label_to_issue_data


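# Runs one GitHub search per section query, keeping at most ISSUES_PER_LABEL issues per
# section and always excluding issues labeled "ignore top-ranking issues".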
def get_label_to_issues(
    github: Github,
    repository: Repository,
    start_date: datetime | None = None,
) -> defaultdict[str, list[Issue]]:
    common_filters = [
        f"repo:{repository.full_name}",
        "is:open",
        "is:issue",
        '-label:"ignore top-ranking issues"',
        "sort:reactions-+1-desc",
    ]

    date_query: str | None = (
        f"created:>={start_date.strftime('%Y-%m-%d')}" if start_date else None
    )

    if date_query:
        common_filters.append(date_query)

    common_filter_string = " ".join(common_filters)

    section_queries = {
        "bug": "label:bug,type:Bug",
        "crash": "label:crash,type:Crash",
        "feature": "label:feature",
        "meta": "type:Meta",
        "unlabeled": "no:label no:type",
    }

    label_to_issues: defaultdict[str, list[Issue]] = defaultdict(list)

    for section, section_query in section_queries.items():
        label_query: str = f"{common_filter_string} {section_query}"

        issues = github.search_issues(label_query)

        if issues.totalCount > 0:
            for issue in issues[0:ISSUES_PER_LABEL]:
                label_to_issues[section].append(issue)

    return label_to_issues


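# Converts each section's issues into IssueData, sorted by like count (descending) and
# then by formatted creation date. Sections with no issues are dropped.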
def get_label_to_issue_data(
    label_to_issues: defaultdict[str, list[Issue]],
) -> dict[str, list[IssueData]]:
    label_to_issue_data: dict[str, list[IssueData]] = {}

    for label in label_to_issues:
        issues: list[Issue] = label_to_issues[label]
        issue_data: list[IssueData] = [IssueData(issue) for issue in issues]
        issue_data.sort(
            key=lambda data: (
                -data.like_count,
                data.creation_datetime,
            )
        )

        if issue_data:
            label_to_issue_data[label] = issue_data

    return label_to_issue_data


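# Assembles the full Markdown body: a timestamp header, the per-section issue lists,
# and a footer linking back to this script.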
def get_issue_text(
    label_to_issue_data: dict[str, list[IssueData]],
) -> str:
    tz = timezone("America/New_York")
    current_datetime: str = datetime.now(tz).strftime(f"{DATETIME_FORMAT} (%Z)")

    highest_ranking_issues_lines: list[str] = get_highest_ranking_issues_lines(
        label_to_issue_data
    )

    issue_text_lines: list[str] = [
        f"*Updated on {current_datetime}*",
        *highest_ranking_issues_lines,
        "\n---\n",
        "*For details on how this issue is generated, [see the script](https://github.com/zed-industries/zed/blob/main/script/update_top_ranking_issues/main.py)*",
    ]

    return "\n".join(issue_text_lines)


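# Renders each section as a "## <label>" heading followed by a numbered list of issue
# URLs with their :+1: counts.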
def get_highest_ranking_issues_lines(
    label_to_issue_data: dict[str, list[IssueData]],
) -> list[str]:
    highest_ranking_issues_lines: list[str] = []

    if label_to_issue_data:
        for label, issue_data_list in label_to_issue_data.items():
            highest_ranking_issues_lines.append(f"\n## {label}\n")

            for i, issue_data in enumerate(issue_data_list):
                markdown_bullet_point: str = (
                    f"{issue_data.url} ({issue_data.like_count} :thumbsup:)"
                )

                markdown_bullet_point = f"{i + 1}. {markdown_bullet_point}"
                highest_ranking_issues_lines.append(markdown_bullet_point)

    return highest_ranking_issues_lines


if __name__ == "__main__":
    app()

# TODO: Sort label output into core and non-core sections