GitHub Access Token became invalid

The GitHub access token used to retrieve details about this repository has become invalid. This may prevent certain types of inspections from running (in particular, everything related to pull requests).
Please ask an admin of your repository to renew the access token on this website.
Completed
Push to master (864634...3e5cd4) by Fedir, created at 01:47

ghstat.go (1 issue)

// Copyright 2018 Fedir RYKHTIK. All rights reserved.
// Use of this source code is governed by the GNU GPL 3.0
// license that can be found in the LICENSE file.
package main

import (
	"encoding/csv"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/fedir/ghstat/httpcache"
	"github.com/fedir/ghstat/timing"
)

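// main parses the command line flags, collects the statistics for every
// repository concurrently, rates the collected data and writes it to a CSV file.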
func main() {
	var (
		clearHTTPCache         = flag.Bool("cc", false, "Clear HTTP cache")
		clearHTTPCacheDryRun   = flag.Bool("ccdr", false, "Clear HTTP cache (dry run)")
		debug                  = flag.Bool("d", false, "Debug mode")
		resultFileSavePath     = flag.String("f", "", "File path where result CSV file will be saved")
		rateLimitCheck         = flag.Bool("l", false, "Rate limit check")
		repositoriesKeysManual = flag.String("r", "", "Repositories keys")
		tmpFolder              = flag.String("t", "test_data", "Temporary folder used to cache HTTP responses")
		repositoriesKeys       = []string{}
	)
	flag.Parse()
	if *clearHTTPCache || *clearHTTPCacheDryRun {
		clearHTTPCacheFolder(*tmpFolder, *clearHTTPCacheDryRun)
		os.Exit(0)
	}
	if *rateLimitCheck {
		checkAndPrintRateLimit()
		os.Exit(0)
	}
	if *repositoriesKeysManual != "" {
		repositoriesKeys = strings.Split(*repositoriesKeysManual, ",")
	} else {
		repositoriesKeys = []string{
			"astaxie/beego",
			"gohugoio/hugo",
			"gin-gonic/gin",
			"labstack/echo",
			"revel/revel",
			"gobuffalo/buffalo",
			"go-chi/chi",
			"kataras/iris",
		}
	}
	csvFilePath := ""
	if *resultFileSavePath != "" {
		csvFilePath = *resultFileSavePath
	} else {
		csvFilePath = "result.csv"
	}
	var ghData = [][]string{}
	headers := []string{
		"Name",
		"URL",
		"Author",
		"Author's followers",
		"Top 10 contributors followers",
		"Created at",
		"Age in days",
		"Total commits",
		"Total additions",
		"Total deletions",
		"Total code changes",
		"Medium commit size",
		"Stargazers",
		"Forks",
		"Contributors",
		"Active forkers, %",
		"Open issues",
		"Total issues",
		"Issue/day",
		"Closed issues, %",
		"Place",
	}
	ghDataColumnIndexes := map[string]int{
		"nameColumn":                       0,
		"authorsFollowersColumn":           3,
		"top10ContributorsFollowersColumn": 4,
		"totalAdditionsColumn":             8,
		"ageColumn":                        6,
		"totalCommitsColumn":               7,
		"totalDeletionsColumn":             9,
		"totalCodeChangesColumn":           10,
		"mediCommitSizeColumn":             11,
		"stargazersColumn":                 12,
		"activeForkersColumn":              15,
		"issuesByDayColumn":                18,
		"closedIssuesPercentageColumn":     19,
		"totalPointsColumnIndex":           20,
	}
	wg := &sync.WaitGroup{}
	dataChan := make(chan []string, len(repositoriesKeys))
	for _, rKey := range repositoriesKeys {
		wg.Add(1)
		go fillRepositoryStatistics(rKey, *tmpFolder, *debug, wg, dataChan)
	}
	// The inspection flagged "should omit values from range; this loop is
	// equivalent to for range ...", so the blank identifier is dropped here.
	for range repositoriesKeys {
		ghData = append(ghData, <-dataChan)
	}
	wg.Wait()
	rateGhData(ghData, ghDataColumnIndexes)
	writeCsv(csvFilePath, headers, ghData)
}

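// fillRepositoryStatistics gathers the statistics of a single repository and
// sends the resulting CSV row to dataChan.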
func fillRepositoryStatistics(rKey string, tmpFolder string, debug bool, wg *sync.WaitGroup, dataChan chan []string) {
	defer wg.Done()
	repositoryData := getRepositoryStatistics(rKey, tmpFolder, debug)
	repositoryAge := int(time.Since(repositoryData.CreatedAt).Seconds() / 86400)
	authorLogin := getRepositoryCommits(rKey, tmpFolder, debug)
	authorFollowers := 0
	if authorLogin != "" {
		authorFollowers = getUserFollowers(authorLogin, tmpFolder, debug)
	}
	closedIssues := getRepositoryClosedIssues(rKey, tmpFolder, debug)
	topContributorsFollowers, totalContributors := getRepositoryContributors(rKey, tmpFolder, debug)
	activeForkersPercentage := getActiveForkersPercentage(totalContributors, repositoryData.Forks)
	issueByDay := getIssueByDay(closedIssues+repositoryData.OpenIssues, repositoryAge)
	closedIssuesPercentage := getClosedIssuesPercentage(repositoryData.OpenIssues, int(closedIssues))
	contributionStatistics := getContributionStatistics(rKey, tmpFolder, debug)
	ghProjectData := []string{
		repositoryData.Name,
		fmt.Sprintf("https://github.com/%s", repositoryData.FullName),
		func(a string) string {
			if a == "" {
				a = "[Account removed]"
			}
			return a
		}(authorLogin),
		fmt.Sprintf("%d", authorFollowers),
		fmt.Sprintf("%d", topContributorsFollowers),
		fmt.Sprintf("%d/%02d", repositoryData.CreatedAt.Year(), repositoryData.CreatedAt.Month()),
		fmt.Sprintf("%d", repositoryAge),
		fmt.Sprintf("%d", contributionStatistics.TotalCommits),
		fmt.Sprintf("%d", contributionStatistics.TotalAdditions),
		fmt.Sprintf("%d", contributionStatistics.TotalDeletions),
		fmt.Sprintf("%d", contributionStatistics.TotalCodeChanges),
		fmt.Sprintf("%d", contributionStatistics.MediumCommitSize),
		fmt.Sprintf("%d", repositoryData.Watchers),
		fmt.Sprintf("%d", repositoryData.Forks),
		fmt.Sprintf("%d", totalContributors),
		fmt.Sprintf("%.2f", activeForkersPercentage),
		fmt.Sprintf("%d", repositoryData.OpenIssues),
		fmt.Sprintf("%d", closedIssues+repositoryData.OpenIssues),
		fmt.Sprintf("%.4f", issueByDay),
		fmt.Sprintf("%.2f", closedIssuesPercentage),
		"0",
	}
	dataChan <- ghProjectData
}

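// clearHTTPCacheFolder removes every entry of the HTTP cache folder; in dry run
// mode it only prints what would be deleted.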
func clearHTTPCacheFolder(tmpFolderPath string, dryRun bool) error {
	d, err := os.Open(tmpFolderPath)
	if err != nil {
		log.Fatalf("Could not open %s", tmpFolderPath)
	}
	defer d.Close()
	names, err := d.Readdirnames(-1)
	if err != nil {
		log.Fatalf("Could not read from %s", tmpFolderPath)
	}
	for _, name := range names {
		fp := filepath.Join(tmpFolderPath, name)
		if dryRun {
			fmt.Printf("Would delete %s\n", fp)
		} else {
			err = os.RemoveAll(fp)
			if err != nil {
				log.Fatalf("Could not remove %s", fp)
			}
			fmt.Printf("Deleted %s\n", fp)
		}
	}
	return nil
}

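// checkAndPrintRateLimit queries the GitHub rate_limit endpoint and prints the
// remaining core, search, GraphQL and overall quotas.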
func checkAndPrintRateLimit() {
	type RateLimits struct {
		Resources struct {
			Core struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"core"`
			Search struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"search"`
			GraphQL struct {
				Limit     int `json:"limit"`
				Remaining int `json:"remaining"`
				Reset     int `json:"reset"`
			} `json:"graphql"`
		} `json:"resources"`
		Rate struct {
			Limit     int `json:"limit"`
			Remaining int `json:"remaining"`
			Reset     int `json:"reset"`
		} `json:"rate"`
	}
	url := "https://api.github.com/rate_limit"
	resp, statusCode, err := httpcache.MakeHTTPRequest(url)
	if err != nil {
		log.Fatalf("Error while checking rate limit: %d %v", statusCode, err)
	}
	jsonResponse, _, _ := httpcache.ReadResp(resp)
	rateLimits := RateLimits{}
	if err := json.Unmarshal(jsonResponse, &rateLimits); err != nil {
		log.Fatalf("Could not parse the rate limit response: %v", err)
	}
	fmt.Printf("Core: %d/%d (reset in %d minutes)\n", rateLimits.Resources.Core.Remaining, rateLimits.Resources.Core.Limit, timing.GetRelativeTime(rateLimits.Resources.Core.Reset))
	fmt.Printf("Search: %d/%d (reset in %d minutes)\n", rateLimits.Resources.Search.Remaining, rateLimits.Resources.Search.Limit, timing.GetRelativeTime(rateLimits.Resources.Search.Reset))
	fmt.Printf("GraphQL: %d/%d (reset in %d minutes)\n", rateLimits.Resources.GraphQL.Remaining, rateLimits.Resources.GraphQL.Limit, timing.GetRelativeTime(rateLimits.Resources.GraphQL.Reset))
	fmt.Printf("Rate: %d/%d (reset in %d minutes)\n", rateLimits.Rate.Remaining, rateLimits.Rate.Limit, timing.GetRelativeTime(rateLimits.Rate.Reset))
}

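// writeCsv writes the header row and the collected repository rows to the CSV
// file at csvFilePath.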
func writeCsv(csvFilePath string, headers []string, ghData [][]string) {
	file, err := os.Create(csvFilePath)
	if err != nil {
		log.Fatal("Cannot create file: ", err)
	}
	defer file.Close()
	writer := csv.NewWriter(file)
	defer writer.Flush()
	err = writer.Write(headers)
	if err != nil {
		log.Fatal("Cannot write to file: ", err)
	}
	for _, value := range ghData {
		err := writer.Write(value)
		if err != nil {
			log.Fatal("Cannot write to file: ", err)
		}
	}
}
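
For reference, based on the flags registered in main, the tool would typically be invoked along these lines (a sketch; the binary name ghstat and the build step are assumptions, they are not shown in this listing):

    go build -o ghstat .
    ./ghstat -r "gin-gonic/gin,labstack/echo" -f result.csv   # statistics for selected repositories
    ./ghstat -l                                               # print the current GitHub API rate limits
    ./ghstat -cc -t test_data                                 # clear the HTTP cache folder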